/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, but concurrent insertions into the
 * ring buffer, such as trace_printk(), could occur at the same time,
 * giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump the buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered
 * the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

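/*
 * Illustrative layout sketch (added for clarity, not code from this
 * file): for three maps saved by one module, the array referenced by
 * trace_enum_maps would look like:
 *
 *	[0] head: { .mod = <module>, .length = 3 }
 *	[1] map:  { "ENUM_A" -> 0 }
 *	[2] map:  { "ENUM_B" -> 1 }
 *	[3] map:  { "ENUM_C" -> 2 }
 *	[4] tail: { .next = <next saved array, or NULL>, .end = NULL }
 *
 * A walker can recognize the tail because "end" overlays the map's
 * "enum_string" in the union and is always NULL, which a valid
 * enum_string never is.
 */
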
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

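/*
 * Illustrative example (added for clarity, not code from this file):
 * the boot parameters registered above are parsed from the kernel
 * command line, e.g. a line such as
 *
 *	ftrace=function ftrace_dump_on_oops=orig_cpu traceoff_on_warning
 *	alloc_snapshot trace_options=sym-offset trace_clock=global tp_printk
 *
 * selects the boot tracer, arms the oops dump for the triggering CPU,
 * and so on. The accepted option and clock names are defined elsewhere
 * (the trace options and the trace_clocks table below).
 */
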
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

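/*
 * Note (added for clarity): the "+ 500" above rounds to the nearest
 * microsecond rather than truncating, e.g. ns2usecs(1499) == 1 while
 * ns2usecs(1500) == 2.
 */
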
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the
 * iteration of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

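/*
 * Illustrative sketch (added for clarity, not code from this file):
 * the three helpers above are designed to slot into a seq_operations
 * table. A user supplies its own start/next wrappers that look up the
 * right pid_list; all names below are hypothetical:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,	(caller-provided)
 *		.show	= trace_pid_show,
 *	};
 */
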
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * The write is an all-or-nothing operation: always create a
	 * new array when the user adds new pids. If the operation
	 * fails, the current list is left unmodified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *	(not a reader page) in the ring buffer, and this page will be
 *	rewritten by the event producer.
 *   B) The page of the consumed events may become a page for
 *	splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Second, block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

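/*
 * Illustrative usage sketch (added for clarity, not code from this
 * file): a consumer of one cpu buffer, or of all buffers via
 * RING_BUFFER_ALL_CPUS, brackets its accesses with the primitives
 * above:
 *
 *	trace_access_lock(cpu);
 *	... ring_buffer_peek()/ring_buffer_consume() on @cpu ...
 *	trace_access_unlock(cpu);
 *
 * Per-cpu readers take the rwsem for read plus a per-cpu mutex, so
 * readers of different cpus proceed in parallel; an all-cpu reader
 * takes the rwsem for write and excludes everyone else.
 */
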
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

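/*
 * Note (added for clarity, an illustration rather than a definition
 * from this file): these two functions are the backends of the
 * trace_puts() macro. For a compile-time-constant string, trace_puts()
 * dispatches to __trace_bputs(), which records only the string's
 * address; otherwise it falls back to __trace_puts(), which copies the
 * string's bytes into the ring buffer. For example:
 *
 *	trace_puts("hit the slow path\n");
 *
 * is a cheaper alternative to trace_printk() when no formatting is
 * needed.
 */
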
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

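/*
 * Illustrative usage sketch (added for clarity, not code from this
 * file): kernel code that wants to freeze the trace around a rare
 * condition would allocate the spare buffer once from sleepable
 * context and then snapshot from the hot path:
 *
 *	... in module init (may sleep):
 *	tracing_alloc_snapshot();
 *
 *	... later, in the code being debugged:
 *	if (unlikely(saw_the_bug))
 *		tracing_snapshot();
 *
 * ("saw_the_bug" is a hypothetical condition for the example.)
 */
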
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show the real state of the ring buffer
 * @tr: the trace array to check
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

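/*
 * Illustrative expansion (added for clarity, not code from this file):
 * if trace.h defined, say,
 *
 *	#define TRACE_FLAGS				\
 *		C(PRINT_PARENT, "print-parent"),	\
 *		C(SYM_OFFSET, "sym-offset"),
 *
 * then with "#define C(a, b) b" the array above would expand to
 *
 *	static const char *trace_options[] = {
 *		"print-parent", "sym-offset",
 *		NULL
 *	};
 *
 * while a different C() definition in trace.h generates the matching
 * TRACE_ITER_* enum bits, keeping names and bit positions in sync.
 */
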
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

41d9c0be SRRH |
1456 | static void add_tracer_options(struct trace_array *tr, struct tracer *t); |
1457 | ||
a4d1e688 JW |
1458 | static void __init apply_trace_boot_options(void); |
1459 | ||
4fcdae83 SR |
1460 | /** |
1461 | * register_tracer - register a tracer with the ftrace system. | |
1462 | * @type - the plugin for the tracer | |
1463 | * | |
1464 | * Register a new plugin tracer. | |
1465 | */ | |
a4d1e688 | 1466 | int __init register_tracer(struct tracer *type) |
bc0c38d1 SR |
1467 | { |
1468 | struct tracer *t; | |
bc0c38d1 SR |
1469 | int ret = 0; |
1470 | ||
1471 | if (!type->name) { | |
1472 | pr_info("Tracer must have a name\n"); | |
1473 | return -1; | |
1474 | } | |
1475 | ||
24a461d5 | 1476 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
ee6c2c1b LZ |
1477 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
1478 | return -1; | |
1479 | } | |
1480 | ||
bc0c38d1 | 1481 | mutex_lock(&trace_types_lock); |
86fa2f60 | 1482 | |
8e1b82e0 FW |
1483 | tracing_selftest_running = true; |
1484 | ||
bc0c38d1 SR |
1485 | for (t = trace_types; t; t = t->next) { |
1486 | if (strcmp(type->name, t->name) == 0) { | |
1487 | /* already found */ | |
ee6c2c1b | 1488 | pr_info("Tracer %s already registered\n", |
bc0c38d1 SR |
1489 | type->name); |
1490 | ret = -1; | |
1491 | goto out; | |
1492 | } | |
1493 | } | |
1494 | ||
adf9f195 FW |
1495 | if (!type->set_flag) |
1496 | type->set_flag = &dummy_set_flag; | |
d39cdd20 CH |
1497 | if (!type->flags) { |
1498 | /*allocate a dummy tracer_flags*/ | |
1499 | type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); | |
c8ca003b CH |
1500 | if (!type->flags) { |
1501 | ret = -ENOMEM; | |
1502 | goto out; | |
1503 | } | |
d39cdd20 CH |
1504 | type->flags->val = 0; |
1505 | type->flags->opts = dummy_tracer_opt; | |
1506 | } else | |
adf9f195 FW |
1507 | if (!type->flags->opts) |
1508 | type->flags->opts = dummy_tracer_opt; | |
6eaaa5d5 | 1509 | |
d39cdd20 CH |
1510 | /* store the tracer for __set_tracer_option */ |
1511 | type->flags->trace = type; | |
1512 | ||
f4e781c0 SRRH |
1513 | ret = run_tracer_selftest(type); |
1514 | if (ret < 0) | |
1515 | goto out; | |
60a11774 | 1516 | |
bc0c38d1 SR |
1517 | type->next = trace_types; |
1518 | trace_types = type; | |
41d9c0be | 1519 | add_tracer_options(&global_trace, type); |
60a11774 | 1520 | |
bc0c38d1 | 1521 | out: |
8e1b82e0 | 1522 | tracing_selftest_running = false; |
bc0c38d1 SR |
1523 | mutex_unlock(&trace_types_lock); |
1524 | ||
dac74940 SR |
1525 | if (ret || !default_bootup_tracer) |
1526 | goto out_unlock; | |
1527 | ||
ee6c2c1b | 1528 | if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) |
dac74940 SR |
1529 | goto out_unlock; |
1530 | ||
1531 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | |
1532 | /* Do we want this tracer to start on bootup? */ | |
607e2ea1 | 1533 | tracing_set_tracer(&global_trace, type->name); |
dac74940 | 1534 | default_bootup_tracer = NULL; |
a4d1e688 JW |
1535 | |
1536 | apply_trace_boot_options(); | |
1537 | ||
dac74940 | 1538 | /* disable other selftests, since running this tracer will break them. */ | |
55034cd6 | 1539 | tracing_selftest_disabled = true; |
b2821ae6 | 1540 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
dac74940 SR |
1541 | printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", |
1542 | type->name); | |
b2821ae6 | 1543 | #endif |
b2821ae6 | 1544 | |
dac74940 | 1545 | out_unlock: |
bc0c38d1 SR |
1546 | return ret; |
1547 | } | |
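/*
 * Example (illustrative sketch; "my_tracer" and its callbacks are
 * hypothetical, not defined in this file):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static int __init init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 *
 * Note that register_tracer() is __init in this tree, so it can only be
 * called from built-in boot code, not from a loadable module.
 */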
1548 | ||
12883efb | 1549 | void tracing_reset(struct trace_buffer *buf, int cpu) |
f633903a | 1550 | { |
12883efb | 1551 | struct ring_buffer *buffer = buf->buffer; |
f633903a | 1552 | |
a5416411 HT |
1553 | if (!buffer) |
1554 | return; | |
1555 | ||
f633903a SR |
1556 | ring_buffer_record_disable(buffer); |
1557 | ||
1558 | /* Make sure all commits have finished */ | |
1559 | synchronize_sched(); | |
68179686 | 1560 | ring_buffer_reset_cpu(buffer, cpu); |
f633903a SR |
1561 | |
1562 | ring_buffer_record_enable(buffer); | |
1563 | } | |
1564 | ||
12883efb | 1565 | void tracing_reset_online_cpus(struct trace_buffer *buf) |
213cc060 | 1566 | { |
12883efb | 1567 | struct ring_buffer *buffer = buf->buffer; |
213cc060 PE |
1568 | int cpu; |
1569 | ||
a5416411 HT |
1570 | if (!buffer) |
1571 | return; | |
1572 | ||
621968cd SR |
1573 | ring_buffer_record_disable(buffer); |
1574 | ||
1575 | /* Make sure all commits have finished */ | |
1576 | synchronize_sched(); | |
1577 | ||
9457158b | 1578 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
213cc060 PE |
1579 | |
1580 | for_each_online_cpu(cpu) | |
68179686 | 1581 | ring_buffer_reset_cpu(buffer, cpu); |
621968cd SR |
1582 | |
1583 | ring_buffer_record_enable(buffer); | |
213cc060 PE |
1584 | } |
1585 | ||
09d8091c | 1586 | /* Must have trace_types_lock held */ |
873c642f | 1587 | void tracing_reset_all_online_cpus(void) |
9456f0fa | 1588 | { |
873c642f SRRH |
1589 | struct trace_array *tr; |
1590 | ||
873c642f | 1591 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
12883efb SRRH |
1592 | tracing_reset_online_cpus(&tr->trace_buffer); |
1593 | #ifdef CONFIG_TRACER_MAX_TRACE | |
1594 | tracing_reset_online_cpus(&tr->max_buffer); | |
1595 | #endif | |
873c642f | 1596 | } |
9456f0fa SR |
1597 | } |
1598 | ||
939c7a4f | 1599 | #define SAVED_CMDLINES_DEFAULT 128 |
2c7eea4c | 1600 | #define NO_CMDLINE_MAP UINT_MAX |
edc35bd7 | 1601 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
939c7a4f YY |
1602 | struct saved_cmdlines_buffer { |
1603 | unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | |
1604 | unsigned *map_cmdline_to_pid; | |
1605 | unsigned cmdline_num; | |
1606 | int cmdline_idx; | |
1607 | char *saved_cmdlines; | |
1608 | }; | |
1609 | static struct saved_cmdlines_buffer *savedcmd; | |
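/*
 * A worked example of the mapping (sketch): with cmdline_num == 128,
 * recording pid 1042 with comm "bash" into slot 7 leaves:
 *
 *	savedcmd->map_pid_to_cmdline[1042] == 7
 *	savedcmd->map_cmdline_to_pid[7]    == 1042
 *	get_saved_cmdlines(7)              == "bash"
 *
 * Slots are handed out round-robin (see trace_save_cmdline() below), so
 * before a slot is reused the old pid's map_pid_to_cmdline entry is set
 * back to NO_CMDLINE_MAP, to avoid reporting the new comm for the old pid.
 */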
25b0b44a | 1610 | |
25b0b44a | 1611 | /* temporarily disable recording */ | |
4fd27358 | 1612 | static atomic_t trace_record_cmdline_disabled __read_mostly; |
bc0c38d1 | 1613 | |
939c7a4f YY |
1614 | static inline char *get_saved_cmdlines(int idx) |
1615 | { | |
1616 | return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; | |
1617 | } | |
1618 | ||
1619 | static inline void set_cmdline(int idx, const char *cmdline) | |
bc0c38d1 | 1620 | { |
939c7a4f YY |
1621 | memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); |
1622 | } | |
1623 | ||
1624 | static int allocate_cmdlines_buffer(unsigned int val, | |
1625 | struct saved_cmdlines_buffer *s) | |
1626 | { | |
1627 | s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid), | |
1628 | GFP_KERNEL); | |
1629 | if (!s->map_cmdline_to_pid) | |
1630 | return -ENOMEM; | |
1631 | ||
1632 | s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL); | |
1633 | if (!s->saved_cmdlines) { | |
1634 | kfree(s->map_cmdline_to_pid); | |
1635 | return -ENOMEM; | |
1636 | } | |
1637 | ||
1638 | s->cmdline_idx = 0; | |
1639 | s->cmdline_num = val; | |
1640 | memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, | |
1641 | sizeof(s->map_pid_to_cmdline)); | |
1642 | memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, | |
1643 | val * sizeof(*s->map_cmdline_to_pid)); | |
1644 | ||
1645 | return 0; | |
1646 | } | |
1647 | ||
1648 | static int trace_create_savedcmd(void) | |
1649 | { | |
1650 | int ret; | |
1651 | ||
a6af8fbf | 1652 | savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); |
939c7a4f YY |
1653 | if (!savedcmd) |
1654 | return -ENOMEM; | |
1655 | ||
1656 | ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); | |
1657 | if (ret < 0) { | |
1658 | kfree(savedcmd); | |
1659 | savedcmd = NULL; | |
1660 | return -ENOMEM; | |
1661 | } | |
1662 | ||
1663 | return 0; | |
bc0c38d1 SR |
1664 | } |
1665 | ||
b5130b1e CE |
1666 | int is_tracing_stopped(void) |
1667 | { | |
2b6080f2 | 1668 | return global_trace.stop_count; |
b5130b1e CE |
1669 | } |
1670 | ||
0f048701 SR |
1671 | /** |
1672 | * tracing_start - quick start of the tracer | |
1673 | * | |
1674 | * If tracing is enabled but was stopped by tracing_stop, | |
1675 | * this will start the tracer back up. | |
1676 | */ | |
1677 | void tracing_start(void) | |
1678 | { | |
1679 | struct ring_buffer *buffer; | |
1680 | unsigned long flags; | |
1681 | ||
1682 | if (tracing_disabled) | |
1683 | return; | |
1684 | ||
2b6080f2 SR |
1685 | raw_spin_lock_irqsave(&global_trace.start_lock, flags); |
1686 | if (--global_trace.stop_count) { | |
1687 | if (global_trace.stop_count < 0) { | |
b06a8301 SR |
1688 | /* Someone screwed up their debugging */ |
1689 | WARN_ON_ONCE(1); | |
2b6080f2 | 1690 | global_trace.stop_count = 0; |
b06a8301 | 1691 | } |
0f048701 SR |
1692 | goto out; |
1693 | } | |
1694 | ||
a2f80714 | 1695 | /* Prevent the buffers from switching */ |
0b9b12c1 | 1696 | arch_spin_lock(&global_trace.max_lock); |
0f048701 | 1697 | |
12883efb | 1698 | buffer = global_trace.trace_buffer.buffer; |
0f048701 SR |
1699 | if (buffer) |
1700 | ring_buffer_record_enable(buffer); | |
1701 | ||
12883efb SRRH |
1702 | #ifdef CONFIG_TRACER_MAX_TRACE |
1703 | buffer = global_trace.max_buffer.buffer; | |
0f048701 SR |
1704 | if (buffer) |
1705 | ring_buffer_record_enable(buffer); | |
12883efb | 1706 | #endif |
0f048701 | 1707 | |
0b9b12c1 | 1708 | arch_spin_unlock(&global_trace.max_lock); |
a2f80714 | 1709 | |
0f048701 | 1710 | out: |
2b6080f2 SR |
1711 | raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); |
1712 | } | |
1713 | ||
1714 | static void tracing_start_tr(struct trace_array *tr) | |
1715 | { | |
1716 | struct ring_buffer *buffer; | |
1717 | unsigned long flags; | |
1718 | ||
1719 | if (tracing_disabled) | |
1720 | return; | |
1721 | ||
1722 | /* If global, we need to also start the max tracer */ | |
1723 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) | |
1724 | return tracing_start(); | |
1725 | ||
1726 | raw_spin_lock_irqsave(&tr->start_lock, flags); | |
1727 | ||
1728 | if (--tr->stop_count) { | |
1729 | if (tr->stop_count < 0) { | |
1730 | /* Someone screwed up their debugging */ | |
1731 | WARN_ON_ONCE(1); | |
1732 | tr->stop_count = 0; | |
1733 | } | |
1734 | goto out; | |
1735 | } | |
1736 | ||
12883efb | 1737 | buffer = tr->trace_buffer.buffer; |
2b6080f2 SR |
1738 | if (buffer) |
1739 | ring_buffer_record_enable(buffer); | |
1740 | ||
1741 | out: | |
1742 | raw_spin_unlock_irqrestore(&tr->start_lock, flags); | |
0f048701 SR |
1743 | } |
1744 | ||
1745 | /** | |
1746 | * tracing_stop - quick stop of the tracer | |
1747 | * | |
1748 | * Lightweight way to stop tracing. Use in conjunction with | |
1749 | * tracing_start. | |
1750 | */ | |
1751 | void tracing_stop(void) | |
1752 | { | |
1753 | struct ring_buffer *buffer; | |
1754 | unsigned long flags; | |
1755 | ||
2b6080f2 SR |
1756 | raw_spin_lock_irqsave(&global_trace.start_lock, flags); |
1757 | if (global_trace.stop_count++) | |
0f048701 SR |
1758 | goto out; |
1759 | ||
a2f80714 | 1760 | /* Prevent the buffers from switching */ |
0b9b12c1 | 1761 | arch_spin_lock(&global_trace.max_lock); |
a2f80714 | 1762 | |
12883efb | 1763 | buffer = global_trace.trace_buffer.buffer; |
0f048701 SR |
1764 | if (buffer) |
1765 | ring_buffer_record_disable(buffer); | |
1766 | ||
12883efb SRRH |
1767 | #ifdef CONFIG_TRACER_MAX_TRACE |
1768 | buffer = global_trace.max_buffer.buffer; | |
0f048701 SR |
1769 | if (buffer) |
1770 | ring_buffer_record_disable(buffer); | |
12883efb | 1771 | #endif |
0f048701 | 1772 | |
0b9b12c1 | 1773 | arch_spin_unlock(&global_trace.max_lock); |
a2f80714 | 1774 | |
0f048701 | 1775 | out: |
2b6080f2 SR |
1776 | raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); |
1777 | } | |
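/*
 * Usage sketch: tracing_stop()/tracing_start() nest via stop_count, so a
 * caller can freeze the global buffers around an inspection window:
 *
 *	tracing_stop();
 *	... read or dump the ring buffer without new events racing in ...
 *	tracing_start();
 */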
1778 | ||
1779 | static void tracing_stop_tr(struct trace_array *tr) | |
1780 | { | |
1781 | struct ring_buffer *buffer; | |
1782 | unsigned long flags; | |
1783 | ||
1784 | /* If global, we need to also stop the max tracer */ | |
1785 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) | |
1786 | return tracing_stop(); | |
1787 | ||
1788 | raw_spin_lock_irqsave(&tr->start_lock, flags); | |
1789 | if (tr->stop_count++) | |
1790 | goto out; | |
1791 | ||
12883efb | 1792 | buffer = tr->trace_buffer.buffer; |
2b6080f2 SR |
1793 | if (buffer) |
1794 | ring_buffer_record_disable(buffer); | |
1795 | ||
1796 | out: | |
1797 | raw_spin_unlock_irqrestore(&tr->start_lock, flags); | |
0f048701 SR |
1798 | } |
1799 | ||
e309b41d | 1800 | void trace_stop_cmdline_recording(void); |
bc0c38d1 | 1801 | |
379cfdac | 1802 | static int trace_save_cmdline(struct task_struct *tsk) |
bc0c38d1 | 1803 | { |
a635cf04 | 1804 | unsigned pid, idx; |
bc0c38d1 SR |
1805 | |
1806 | if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) | |
379cfdac | 1807 | return 0; |
bc0c38d1 SR |
1808 | |
1809 | /* | |
1810 | * It's not the end of the world if we don't get | |
1811 | * the lock, but we also don't want to spin | |
1812 | * nor do we want to disable interrupts, | |
1813 | * so if we miss here, then better luck next time. | |
1814 | */ | |
0199c4e6 | 1815 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
379cfdac | 1816 | return 0; |
bc0c38d1 | 1817 | |
939c7a4f | 1818 | idx = savedcmd->map_pid_to_cmdline[tsk->pid]; |
2c7eea4c | 1819 | if (idx == NO_CMDLINE_MAP) { |
939c7a4f | 1820 | idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; |
bc0c38d1 | 1821 | |
a635cf04 CE |
1822 | /* |
1823 | * Check whether the cmdline buffer at idx has a pid | |
1824 | * mapped. We are going to overwrite that entry so we | |
1825 | * need to clear the map_pid_to_cmdline. Otherwise we | |
1826 | * would read the new comm for the old pid. | |
1827 | */ | |
939c7a4f | 1828 | pid = savedcmd->map_cmdline_to_pid[idx]; |
a635cf04 | 1829 | if (pid != NO_CMDLINE_MAP) |
939c7a4f | 1830 | savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; |
bc0c38d1 | 1831 | |
939c7a4f YY |
1832 | savedcmd->map_cmdline_to_pid[idx] = tsk->pid; |
1833 | savedcmd->map_pid_to_cmdline[tsk->pid] = idx; | |
bc0c38d1 | 1834 | |
939c7a4f | 1835 | savedcmd->cmdline_idx = idx; |
bc0c38d1 SR |
1836 | } |
1837 | ||
939c7a4f | 1838 | set_cmdline(idx, tsk->comm); |
bc0c38d1 | 1839 | |
0199c4e6 | 1840 | arch_spin_unlock(&trace_cmdline_lock); |
379cfdac SRRH |
1841 | |
1842 | return 1; | |
bc0c38d1 SR |
1843 | } |
1844 | ||
4c27e756 | 1845 | static void __trace_find_cmdline(int pid, char comm[]) |
bc0c38d1 | 1846 | { |
bc0c38d1 SR |
1847 | unsigned map; |
1848 | ||
4ca53085 SR |
1849 | if (!pid) { |
1850 | strcpy(comm, "<idle>"); | |
1851 | return; | |
1852 | } | |
bc0c38d1 | 1853 | |
74bf4076 SR |
1854 | if (WARN_ON_ONCE(pid < 0)) { |
1855 | strcpy(comm, "<XXX>"); | |
1856 | return; | |
1857 | } | |
1858 | ||
4ca53085 SR |
1859 | if (pid > PID_MAX_DEFAULT) { |
1860 | strcpy(comm, "<...>"); | |
1861 | return; | |
1862 | } | |
bc0c38d1 | 1863 | |
939c7a4f | 1864 | map = savedcmd->map_pid_to_cmdline[pid]; |
50d88758 | 1865 | if (map != NO_CMDLINE_MAP) |
939c7a4f | 1866 | strcpy(comm, get_saved_cmdlines(map)); |
50d88758 TG |
1867 | else |
1868 | strcpy(comm, "<...>"); | |
4c27e756 SRRH |
1869 | } |
1870 | ||
1871 | void trace_find_cmdline(int pid, char comm[]) | |
1872 | { | |
1873 | preempt_disable(); | |
1874 | arch_spin_lock(&trace_cmdline_lock); | |
1875 | ||
1876 | __trace_find_cmdline(pid, comm); | |
bc0c38d1 | 1877 | |
0199c4e6 | 1878 | arch_spin_unlock(&trace_cmdline_lock); |
5b6045a9 | 1879 | preempt_enable(); |
bc0c38d1 SR |
1880 | } |
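/*
 * Example use (sketch; "s" and "ent" are illustrative locals): this is
 * roughly how the output code resolves and prints a task name:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(ent->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, ent->pid);
 *
 * "<...>" comes back when the pid has aged out of the cmdline cache.
 */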
1881 | ||
e309b41d | 1882 | void tracing_record_cmdline(struct task_struct *tsk) |
bc0c38d1 | 1883 | { |
0fb9656d | 1884 | if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on()) |
bc0c38d1 SR |
1885 | return; |
1886 | ||
7ffbd48d SR |
1887 | if (!__this_cpu_read(trace_cmdline_save)) |
1888 | return; | |
1889 | ||
379cfdac SRRH |
1890 | if (trace_save_cmdline(tsk)) |
1891 | __this_cpu_write(trace_cmdline_save, false); | |
bc0c38d1 SR |
1892 | } |
1893 | ||
45dcd8b8 | 1894 | void |
38697053 SR |
1895 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
1896 | int pc) | |
bc0c38d1 SR |
1897 | { |
1898 | struct task_struct *tsk = current; | |
bc0c38d1 | 1899 | |
777e208d SR |
1900 | entry->preempt_count = pc & 0xff; |
1901 | entry->pid = (tsk) ? tsk->pid : 0; | |
1902 | entry->flags = | |
9244489a | 1903 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
2e2ca155 | 1904 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
9244489a SR |
1905 | #else |
1906 | TRACE_FLAG_IRQS_NOSUPPORT | | |
1907 | #endif | |
7e6867bf | 1908 | ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) | |
bc0c38d1 SR |
1909 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
1910 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | |
e5137b50 PZ |
1911 | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | |
1912 | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); | |
bc0c38d1 | 1913 | } |
f413cdb8 | 1914 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); |
bc0c38d1 | 1915 | |
0fc1b09f SRRH |
1916 | static __always_inline void |
1917 | trace_event_setup(struct ring_buffer_event *event, | |
1918 | int type, unsigned long flags, int pc) | |
1919 | { | |
1920 | struct trace_entry *ent = ring_buffer_event_data(event); | |
1921 | ||
1922 | tracing_generic_entry_update(ent, flags, pc); | |
1923 | ent->type = type; | |
1924 | } | |
1925 | ||
e77405ad SR |
1926 | struct ring_buffer_event * |
1927 | trace_buffer_lock_reserve(struct ring_buffer *buffer, | |
1928 | int type, | |
1929 | unsigned long len, | |
1930 | unsigned long flags, int pc) | |
51a763dd ACM |
1931 | { |
1932 | struct ring_buffer_event *event; | |
1933 | ||
e77405ad | 1934 | event = ring_buffer_lock_reserve(buffer, len); |
0fc1b09f SRRH |
1935 | if (event != NULL) |
1936 | trace_event_setup(event, type, flags, pc); | |
1937 | ||
1938 | return event; | |
1939 | } | |
1940 | ||
1941 | DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); | |
1942 | DEFINE_PER_CPU(int, trace_buffered_event_cnt); | |
1943 | static int trace_buffered_event_ref; | |
1944 | ||
1945 | /** | |
1946 | * trace_buffered_event_enable - enable buffering events | |
1947 | * | |
1948 | * When events are being filtered, it is quicker to use a temporary | |
1949 | * buffer to write the event data into if there's a likely chance | |
1950 | * that it will not be committed. The discard of the ring buffer | |
1951 | * is not as fast as committing, and is much slower than copying | |
1952 | * a commit. | |
1953 | * | |
1954 | * When an event is to be filtered, allocate per cpu buffers to | |
1955 | * write the event data into, and if the event is filtered and discarded | |
1956 | * it is simply dropped; otherwise, the entire data is committed | |
1957 | * in one shot. | |
1958 | */ | |
1959 | void trace_buffered_event_enable(void) | |
1960 | { | |
1961 | struct ring_buffer_event *event; | |
1962 | struct page *page; | |
1963 | int cpu; | |
51a763dd | 1964 | |
0fc1b09f SRRH |
1965 | WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); |
1966 | ||
1967 | if (trace_buffered_event_ref++) | |
1968 | return; | |
1969 | ||
1970 | for_each_tracing_cpu(cpu) { | |
1971 | page = alloc_pages_node(cpu_to_node(cpu), | |
1972 | GFP_KERNEL | __GFP_NORETRY, 0); | |
1973 | if (!page) | |
1974 | goto failed; | |
1975 | ||
1976 | event = page_address(page); | |
1977 | memset(event, 0, sizeof(*event)); | |
1978 | ||
1979 | per_cpu(trace_buffered_event, cpu) = event; | |
1980 | ||
1981 | preempt_disable(); | |
1982 | if (cpu == smp_processor_id() && | |
1983 | this_cpu_read(trace_buffered_event) != | |
1984 | per_cpu(trace_buffered_event, cpu)) | |
1985 | WARN_ON_ONCE(1); | |
1986 | preempt_enable(); | |
51a763dd ACM |
1987 | } |
1988 | ||
0fc1b09f SRRH |
1989 | return; |
1990 | failed: | |
1991 | trace_buffered_event_disable(); | |
1992 | } | |
1993 | ||
1994 | static void enable_trace_buffered_event(void *data) | |
1995 | { | |
1996 | /* Probably not needed, but do it anyway */ | |
1997 | smp_rmb(); | |
1998 | this_cpu_dec(trace_buffered_event_cnt); | |
1999 | } | |
2000 | ||
2001 | static void disable_trace_buffered_event(void *data) | |
2002 | { | |
2003 | this_cpu_inc(trace_buffered_event_cnt); | |
2004 | } | |
2005 | ||
2006 | /** | |
2007 | * trace_buffered_event_disable - disable buffering events | |
2008 | * | |
2009 | * When a filter is removed, it is faster to not use the buffered | |
2010 | * events, and to commit directly into the ring buffer. Free up | |
2011 | * the temp buffers when there are no more users. This requires | |
2012 | * special synchronization with current events. | |
2013 | */ | |
2014 | void trace_buffered_event_disable(void) | |
2015 | { | |
2016 | int cpu; | |
2017 | ||
2018 | WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); | |
2019 | ||
2020 | if (WARN_ON_ONCE(!trace_buffered_event_ref)) | |
2021 | return; | |
2022 | ||
2023 | if (--trace_buffered_event_ref) | |
2024 | return; | |
2025 | ||
2026 | preempt_disable(); | |
2027 | /* For each CPU, set the buffer as used. */ | |
2028 | smp_call_function_many(tracing_buffer_mask, | |
2029 | disable_trace_buffered_event, NULL, 1); | |
2030 | preempt_enable(); | |
2031 | ||
2032 | /* Wait for all current users to finish */ | |
2033 | synchronize_sched(); | |
2034 | ||
2035 | for_each_tracing_cpu(cpu) { | |
2036 | free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); | |
2037 | per_cpu(trace_buffered_event, cpu) = NULL; | |
2038 | } | |
2039 | /* | |
2040 | * Make sure trace_buffered_event is NULL before clearing | |
2041 | * trace_buffered_event_cnt. | |
2042 | */ | |
2043 | smp_wmb(); | |
2044 | ||
2045 | preempt_disable(); | |
2046 | /* Do the work on each cpu */ | |
2047 | smp_call_function_many(tracing_buffer_mask, | |
2048 | enable_trace_buffered_event, NULL, 1); | |
2049 | preempt_enable(); | |
51a763dd | 2050 | } |
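/*
 * Usage sketch (hypothetical call site): both functions must be called
 * under event_mutex, and enable/disable calls must be balanced:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... attach the event filter ...
 *	mutex_unlock(&event_mutex);
 *
 * with the matching trace_buffered_event_disable() when the filter is
 * removed.
 */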
51a763dd | 2051 | |
7ffbd48d SR |
2052 | void |
2053 | __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) | |
2054 | { | |
2055 | __this_cpu_write(trace_cmdline_save, true); | |
0fc1b09f SRRH |
2056 | |
2057 | /* If this is the temp buffer, we need to commit fully */ | |
2058 | if (this_cpu_read(trace_buffered_event) == event) { | |
2059 | /* Length is in event->array[0] */ | |
2060 | ring_buffer_write(buffer, event->array[0], &event->array[1]); | |
2061 | /* Release the temp buffer */ | |
2062 | this_cpu_dec(trace_buffered_event_cnt); | |
2063 | } else | |
2064 | ring_buffer_unlock_commit(buffer, event); | |
7ffbd48d SR |
2065 | } |
2066 | ||
2c4a33ab SRRH |
2067 | static struct ring_buffer *temp_buffer; |
2068 | ||
ccb469a1 SR |
2069 | struct ring_buffer_event * |
2070 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | |
7f1d2f82 | 2071 | struct trace_event_file *trace_file, |
ccb469a1 SR |
2072 | int type, unsigned long len, |
2073 | unsigned long flags, int pc) | |
2074 | { | |
2c4a33ab | 2075 | struct ring_buffer_event *entry; |
0fc1b09f | 2076 | int val; |
2c4a33ab | 2077 | |
7f1d2f82 | 2078 | *current_rb = trace_file->tr->trace_buffer.buffer; |
0fc1b09f SRRH |
2079 | |
2080 | if ((trace_file->flags & | |
2081 | (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && | |
2082 | (entry = this_cpu_read(trace_buffered_event))) { | |
2083 | /* Try to use the per cpu buffer first */ | |
2084 | val = this_cpu_inc_return(trace_buffered_event_cnt); | |
2085 | if (val == 1) { | |
2086 | trace_event_setup(entry, type, flags, pc); | |
2087 | entry->array[0] = len; | |
2088 | return entry; | |
2089 | } | |
2090 | this_cpu_dec(trace_buffered_event_cnt); | |
2091 | } | |
2092 | ||
2c4a33ab | 2093 | entry = trace_buffer_lock_reserve(*current_rb, |
ccb469a1 | 2094 | type, len, flags, pc); |
2c4a33ab SRRH |
2095 | /* |
2096 | * If tracing is off, but we have triggers enabled | |
2097 | * we still need to look at the event data. Use the temp_buffer | |
2098 | * to store the trace event for the trigger to use. It's recursion | |
2099 | * safe and will not be recorded anywhere. | |
2100 | */ | |
5d6ad960 | 2101 | if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { |
2c4a33ab SRRH |
2102 | *current_rb = temp_buffer; |
2103 | entry = trace_buffer_lock_reserve(*current_rb, | |
2104 | type, len, flags, pc); | |
2105 | } | |
2106 | return entry; | |
ccb469a1 SR |
2107 | } |
2108 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | |
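/*
 * Typical reserve/commit pattern (sketch, close to what the generated
 * trace event code does; the local names are illustrative):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						event_type, sizeof(*entry),
 *						irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in *entry ...
 *	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 *				    irq_flags, pc);
 */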
2109 | ||
b7f0c959 SRRH |
2110 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
2111 | struct ring_buffer *buffer, | |
0d5c6e1c SR |
2112 | struct ring_buffer_event *event, |
2113 | unsigned long flags, int pc, | |
2114 | struct pt_regs *regs) | |
1fd8df2c | 2115 | { |
7ffbd48d | 2116 | __buffer_unlock_commit(buffer, event); |
1fd8df2c | 2117 | |
be54f69c SRRH |
2118 | /* |
2119 | * If regs is not set, then skip the following callers: | |
2120 | * trace_buffer_unlock_commit_regs | |
2121 | * event_trigger_unlock_commit | |
2122 | * trace_event_buffer_commit | |
2123 | * trace_event_raw_event_sched_switch | |
2124 | * Note, we can still get here via blktrace, wakeup tracer | |
2125 | * and mmiotrace, but that's ok if they lose a function or | |
2126 | * two. They are not that meaningful. | |
2127 | */ | |
2128 | ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs); | |
1fd8df2c MH |
2129 | ftrace_trace_userstack(buffer, flags, pc); |
2130 | } | |
1fd8df2c | 2131 | |
478409dd CZ |
2132 | static void |
2133 | trace_process_export(struct trace_export *export, | |
2134 | struct ring_buffer_event *event) | |
2135 | { | |
2136 | struct trace_entry *entry; | |
2137 | unsigned int size = 0; | |
2138 | ||
2139 | entry = ring_buffer_event_data(event); | |
2140 | size = ring_buffer_event_length(event); | |
2141 | export->write(entry, size); | |
2142 | } | |
2143 | ||
2144 | static DEFINE_MUTEX(ftrace_export_lock); | |
2145 | ||
2146 | static struct trace_export __rcu *ftrace_exports_list __read_mostly; | |
2147 | ||
2148 | static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); | |
2149 | ||
2150 | static inline void ftrace_exports_enable(void) | |
2151 | { | |
2152 | static_branch_enable(&ftrace_exports_enabled); | |
2153 | } | |
2154 | ||
2155 | static inline void ftrace_exports_disable(void) | |
2156 | { | |
2157 | static_branch_disable(&ftrace_exports_enabled); | |
2158 | } | |
2159 | ||
2160 | void ftrace_exports(struct ring_buffer_event *event) | |
2161 | { | |
2162 | struct trace_export *export; | |
2163 | ||
2164 | preempt_disable_notrace(); | |
2165 | ||
2166 | export = rcu_dereference_raw_notrace(ftrace_exports_list); | |
2167 | while (export) { | |
2168 | trace_process_export(export, event); | |
2169 | export = rcu_dereference_raw_notrace(export->next); | |
2170 | } | |
2171 | ||
2172 | preempt_enable_notrace(); | |
2173 | } | |
2174 | ||
2175 | static inline void | |
2176 | add_trace_export(struct trace_export **list, struct trace_export *export) | |
2177 | { | |
2178 | rcu_assign_pointer(export->next, *list); | |
2179 | /* | |
2180 | * We are inserting export into the list but another | |
2181 | * CPU might be walking that list. We need to make sure | |
2182 | * the export->next pointer is valid before another CPU sees | |
2183 | * the export pointer included in the list. | |
2184 | */ | |
2185 | rcu_assign_pointer(*list, export); | |
2186 | } | |
2187 | ||
2188 | static inline int | |
2189 | rm_trace_export(struct trace_export **list, struct trace_export *export) | |
2190 | { | |
2191 | struct trace_export **p; | |
2192 | ||
2193 | for (p = list; *p != NULL; p = &(*p)->next) | |
2194 | if (*p == export) | |
2195 | break; | |
2196 | ||
2197 | if (*p != export) | |
2198 | return -1; | |
2199 | ||
2200 | rcu_assign_pointer(*p, (*p)->next); | |
2201 | ||
2202 | return 0; | |
2203 | } | |
2204 | ||
2205 | static inline void | |
2206 | add_ftrace_export(struct trace_export **list, struct trace_export *export) | |
2207 | { | |
2208 | if (*list == NULL) | |
2209 | ftrace_exports_enable(); | |
2210 | ||
2211 | add_trace_export(list, export); | |
2212 | } | |
2213 | ||
2214 | static inline int | |
2215 | rm_ftrace_export(struct trace_export **list, struct trace_export *export) | |
2216 | { | |
2217 | int ret; | |
2218 | ||
2219 | ret = rm_trace_export(list, export); | |
2220 | if (*list == NULL) | |
2221 | ftrace_exports_disable(); | |
2222 | ||
2223 | return ret; | |
2224 | } | |
2225 | ||
2226 | int register_ftrace_export(struct trace_export *export) | |
2227 | { | |
2228 | if (WARN_ON_ONCE(!export->write)) | |
2229 | return -1; | |
2230 | ||
2231 | mutex_lock(&ftrace_export_lock); | |
2232 | ||
2233 | add_ftrace_export(&ftrace_exports_list, export); | |
2234 | ||
2235 | mutex_unlock(&ftrace_export_lock); | |
2236 | ||
2237 | return 0; | |
2238 | } | |
2239 | EXPORT_SYMBOL_GPL(register_ftrace_export); | |
2240 | ||
2241 | int unregister_ftrace_export(struct trace_export *export) | |
2242 | { | |
2243 | int ret; | |
2244 | ||
2245 | mutex_lock(&ftrace_export_lock); | |
2246 | ||
2247 | ret = rm_ftrace_export(&ftrace_exports_list, export); | |
2248 | ||
2249 | mutex_unlock(&ftrace_export_lock); | |
2250 | ||
2251 | return ret; | |
2252 | } | |
2253 | EXPORT_SYMBOL_GPL(unregister_ftrace_export); | |
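/*
 * Example (sketch; "my_export" and my_write() are hypothetical -- see
 * <linux/trace.h> for the exact callback prototype):
 *
 *	static void my_write(const void *buf, unsigned int len)
 *	{
 *		... forward the raw trace entry somewhere else ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 *
 * The write() callback is invoked with preemption disabled for every
 * exported function trace event, so it must not sleep.
 */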
2254 | ||
e309b41d | 2255 | void |
7be42151 | 2256 | trace_function(struct trace_array *tr, |
38697053 SR |
2257 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
2258 | int pc) | |
bc0c38d1 | 2259 | { |
2425bcb9 | 2260 | struct trace_event_call *call = &event_function; |
12883efb | 2261 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
3928a8a2 | 2262 | struct ring_buffer_event *event; |
777e208d | 2263 | struct ftrace_entry *entry; |
bc0c38d1 | 2264 | |
e77405ad | 2265 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
51a763dd | 2266 | flags, pc); |
3928a8a2 SR |
2267 | if (!event) |
2268 | return; | |
2269 | entry = ring_buffer_event_data(event); | |
777e208d SR |
2270 | entry->ip = ip; |
2271 | entry->parent_ip = parent_ip; | |
e1112b4d | 2272 | |
478409dd CZ |
2273 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
2274 | if (static_branch_unlikely(&ftrace_exports_enabled)) | |
2275 | ftrace_exports(event); | |
7ffbd48d | 2276 | __buffer_unlock_commit(buffer, event); |
478409dd | 2277 | } |
bc0c38d1 SR |
2278 | } |
2279 | ||
c0a0d0d3 | 2280 | #ifdef CONFIG_STACKTRACE |
4a9bd3f1 SR |
2281 | |
2282 | #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) | |
2283 | struct ftrace_stack { | |
2284 | unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; | |
2285 | }; | |
2286 | ||
2287 | static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); | |
2288 | static DEFINE_PER_CPU(int, ftrace_stack_reserve); | |
2289 | ||
e77405ad | 2290 | static void __ftrace_trace_stack(struct ring_buffer *buffer, |
53614991 | 2291 | unsigned long flags, |
1fd8df2c | 2292 | int skip, int pc, struct pt_regs *regs) |
86387f7e | 2293 | { |
2425bcb9 | 2294 | struct trace_event_call *call = &event_kernel_stack; |
3928a8a2 | 2295 | struct ring_buffer_event *event; |
777e208d | 2296 | struct stack_entry *entry; |
86387f7e | 2297 | struct stack_trace trace; |
4a9bd3f1 SR |
2298 | int use_stack; |
2299 | int size = FTRACE_STACK_ENTRIES; | |
2300 | ||
2301 | trace.nr_entries = 0; | |
2302 | trace.skip = skip; | |
2303 | ||
be54f69c SRRH |
2304 | /* |
2305 | * Add two, for this function and the call to save_stack_trace() | |
2306 | * If regs is set, then these functions will not be in the way. | |
2307 | */ | |
2308 | if (!regs) | |
2309 | trace.skip += 2; | |
2310 | ||
4a9bd3f1 SR |
2311 | /* |
2312 | * Since events can happen in NMIs there's no safe way to | |
2313 | * use the per cpu ftrace_stacks. We reserve it and if an interrupt | |
2314 | * or NMI comes in, it will just have to use the default | |
2315 | * FTRACE_STACK_ENTRIES. | |
2316 | */ | |
2317 | preempt_disable_notrace(); | |
2318 | ||
82146529 | 2319 | use_stack = __this_cpu_inc_return(ftrace_stack_reserve); |
4a9bd3f1 SR |
2320 | /* |
2321 | * We don't need any atomic variables, just a barrier. | |
2322 | * If an interrupt comes in, we don't care, because it would | |
2323 | * have exited and put the counter back to what we want. | |
2324 | * We just need a barrier to keep gcc from moving things | |
2325 | * around. | |
2326 | */ | |
2327 | barrier(); | |
2328 | if (use_stack == 1) { | |
bdffd893 | 2329 | trace.entries = this_cpu_ptr(ftrace_stack.calls); |
4a9bd3f1 SR |
2330 | trace.max_entries = FTRACE_STACK_MAX_ENTRIES; |
2331 | ||
2332 | if (regs) | |
2333 | save_stack_trace_regs(regs, &trace); | |
2334 | else | |
2335 | save_stack_trace(&trace); | |
2336 | ||
2337 | if (trace.nr_entries > size) | |
2338 | size = trace.nr_entries; | |
2339 | } else | |
2340 | /* From now on, use_stack is a boolean */ | |
2341 | use_stack = 0; | |
2342 | ||
2343 | size *= sizeof(unsigned long); | |
86387f7e | 2344 | |
e77405ad | 2345 | event = trace_buffer_lock_reserve(buffer, TRACE_STACK, |
4a9bd3f1 | 2346 | sizeof(*entry) + size, flags, pc); |
3928a8a2 | 2347 | if (!event) |
4a9bd3f1 SR |
2348 | goto out; |
2349 | entry = ring_buffer_event_data(event); | |
86387f7e | 2350 | |
4a9bd3f1 SR |
2351 | memset(&entry->caller, 0, size); |
2352 | ||
2353 | if (use_stack) | |
2354 | memcpy(&entry->caller, trace.entries, | |
2355 | trace.nr_entries * sizeof(unsigned long)); | |
2356 | else { | |
2357 | trace.max_entries = FTRACE_STACK_ENTRIES; | |
2358 | trace.entries = entry->caller; | |
2359 | if (regs) | |
2360 | save_stack_trace_regs(regs, &trace); | |
2361 | else | |
2362 | save_stack_trace(&trace); | |
2363 | } | |
2364 | ||
2365 | entry->size = trace.nr_entries; | |
86387f7e | 2366 | |
f306cc82 | 2367 | if (!call_filter_check_discard(call, entry, buffer, event)) |
7ffbd48d | 2368 | __buffer_unlock_commit(buffer, event); |
4a9bd3f1 SR |
2369 | |
2370 | out: | |
2371 | /* Again, don't let gcc optimize things here */ | |
2372 | barrier(); | |
82146529 | 2373 | __this_cpu_dec(ftrace_stack_reserve); |
4a9bd3f1 SR |
2374 | preempt_enable_notrace(); |
2375 | ||
f0a920d5 IM |
2376 | } |
2377 | ||
2d34f489 SRRH |
2378 | static inline void ftrace_trace_stack(struct trace_array *tr, |
2379 | struct ring_buffer *buffer, | |
73dddbb5 SRRH |
2380 | unsigned long flags, |
2381 | int skip, int pc, struct pt_regs *regs) | |
53614991 | 2382 | { |
2d34f489 | 2383 | if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) |
53614991 SR |
2384 | return; |
2385 | ||
73dddbb5 | 2386 | __ftrace_trace_stack(buffer, flags, skip, pc, regs); |
53614991 SR |
2387 | } |
2388 | ||
c0a0d0d3 FW |
2389 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
2390 | int pc) | |
38697053 | 2391 | { |
12883efb | 2392 | __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL); |
38697053 SR |
2393 | } |
2394 | ||
03889384 SR |
2395 | /** |
2396 | * trace_dump_stack - record a stack back trace in the trace buffer | |
c142be8e | 2397 | * @skip: Number of functions to skip (helper handlers) |
03889384 | 2398 | */ |
c142be8e | 2399 | void trace_dump_stack(int skip) |
03889384 SR |
2400 | { |
2401 | unsigned long flags; | |
2402 | ||
2403 | if (tracing_disabled || tracing_selftest_running) | |
e36c5458 | 2404 | return; |
03889384 SR |
2405 | |
2406 | local_save_flags(flags); | |
2407 | ||
c142be8e SRRH |
2408 | /* |
2409 | * Skip 3 more, seems to get us at the caller of | |
2410 | * this function. | |
2411 | */ | |
2412 | skip += 3; | |
2413 | __ftrace_trace_stack(global_trace.trace_buffer.buffer, | |
2414 | flags, skip, preempt_count(), NULL); | |
03889384 SR |
2415 | } |
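/*
 * Example (sketch): a hypothetical helper that wraps this call passes
 * skip = 1 so the wrapper itself does not appear in the recorded trace:
 *
 *	static void my_debug_here(void)
 *	{
 *		trace_dump_stack(1);
 *	}
 */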
2416 | ||
91e86e56 SR |
2417 | static DEFINE_PER_CPU(int, user_stack_count); |
2418 | ||
e77405ad SR |
2419 | void |
2420 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |
02b67518 | 2421 | { |
2425bcb9 | 2422 | struct trace_event_call *call = &event_user_stack; |
8d7c6a96 | 2423 | struct ring_buffer_event *event; |
02b67518 TE |
2424 | struct userstack_entry *entry; |
2425 | struct stack_trace trace; | |
02b67518 | 2426 | |
983f938a | 2427 | if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) |
02b67518 TE |
2428 | return; |
2429 | ||
b6345879 SR |
2430 | /* |
2431 | * NMIs can not handle page faults, even with fix ups. | |
2432 | * The save user stack can (and often does) fault. | |
2433 | */ | |
2434 | if (unlikely(in_nmi())) | |
2435 | return; | |
02b67518 | 2436 | |
91e86e56 SR |
2437 | /* |
2438 | * prevent recursion, since the user stack tracing may | |
2439 | * trigger other kernel events. | |
2440 | */ | |
2441 | preempt_disable(); | |
2442 | if (__this_cpu_read(user_stack_count)) | |
2443 | goto out; | |
2444 | ||
2445 | __this_cpu_inc(user_stack_count); | |
2446 | ||
e77405ad | 2447 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
51a763dd | 2448 | sizeof(*entry), flags, pc); |
02b67518 | 2449 | if (!event) |
1dbd1951 | 2450 | goto out_drop_count; |
02b67518 | 2451 | entry = ring_buffer_event_data(event); |
02b67518 | 2452 | |
48659d31 | 2453 | entry->tgid = current->tgid; |
02b67518 TE |
2454 | memset(&entry->caller, 0, sizeof(entry->caller)); |
2455 | ||
2456 | trace.nr_entries = 0; | |
2457 | trace.max_entries = FTRACE_STACK_ENTRIES; | |
2458 | trace.skip = 0; | |
2459 | trace.entries = entry->caller; | |
2460 | ||
2461 | save_stack_trace_user(&trace); | |
f306cc82 | 2462 | if (!call_filter_check_discard(call, entry, buffer, event)) |
7ffbd48d | 2463 | __buffer_unlock_commit(buffer, event); |
91e86e56 | 2464 | |
1dbd1951 | 2465 | out_drop_count: |
91e86e56 | 2466 | __this_cpu_dec(user_stack_count); |
91e86e56 SR |
2467 | out: |
2468 | preempt_enable(); | |
02b67518 TE |
2469 | } |
2470 | ||
4fd27358 HE |
2471 | #ifdef UNUSED |
2472 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) | |
02b67518 | 2473 | { |
7be42151 | 2474 | ftrace_trace_userstack(tr, flags, preempt_count()); |
02b67518 | 2475 | } |
4fd27358 | 2476 | #endif /* UNUSED */ |
02b67518 | 2477 | |
c0a0d0d3 FW |
2478 | #endif /* CONFIG_STACKTRACE */ |
2479 | ||
07d777fe SR |
2480 | /* created for use with alloc_percpu */ |
2481 | struct trace_buffer_struct { | |
e2ace001 AL |
2482 | int nesting; |
2483 | char buffer[4][TRACE_BUF_SIZE]; | |
07d777fe SR |
2484 | }; |
2485 | ||
2486 | static struct trace_buffer_struct *trace_percpu_buffer; | |
07d777fe SR |
2487 | |
2488 | /* | |
e2ace001 AL |
2489 | * This allows for lockless recording. If we're nested too deeply, then | |
2490 | * this returns NULL. | |
07d777fe SR |
2491 | */ |
2492 | static char *get_trace_buf(void) | |
2493 | { | |
e2ace001 | 2494 | struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); |
07d777fe | 2495 | |
e2ace001 | 2496 | if (!buffer || buffer->nesting >= 4) |
07d777fe SR |
2497 | return NULL; |
2498 | ||
e2ace001 AL |
2499 | return &buffer->buffer[buffer->nesting++][0]; |
2500 | } | |
2501 | ||
2502 | static void put_trace_buf(void) | |
2503 | { | |
2504 | this_cpu_dec(trace_percpu_buffer->nesting); | |
07d777fe SR |
2505 | } |
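/*
 * Usage pattern (sketch), as in trace_vbprintk()/__trace_array_vprintk()
 * below; preemption must stay disabled between get and put so the
 * nesting count is dropped on the same CPU that took it:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	... format into tbuffer ...
 *	put_trace_buf();
 * out_nobuffer:
 *	preempt_enable_notrace();
 */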
2506 | ||
2507 | static int alloc_percpu_trace_buffer(void) | |
2508 | { | |
2509 | struct trace_buffer_struct *buffers; | |
07d777fe SR |
2510 | |
2511 | buffers = alloc_percpu(struct trace_buffer_struct); | |
e2ace001 AL |
2512 | if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) |
2513 | return -ENOMEM; | |
07d777fe SR |
2514 | |
2515 | trace_percpu_buffer = buffers; | |
07d777fe | 2516 | return 0; |
07d777fe SR |
2517 | } |
2518 | ||
81698831 SR |
2519 | static int buffers_allocated; |
2520 | ||
07d777fe SR |
2521 | void trace_printk_init_buffers(void) |
2522 | { | |
07d777fe SR |
2523 | if (buffers_allocated) |
2524 | return; | |
2525 | ||
2526 | if (alloc_percpu_trace_buffer()) | |
2527 | return; | |
2528 | ||
2184db46 SR |
2529 | /* trace_printk() is for debug use only. Don't use it in production. */ |
2530 | ||
a395d6a7 JP |
2531 | pr_warn("\n"); |
2532 | pr_warn("**********************************************************\n"); | |
2533 | pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); | |
2534 | pr_warn("** **\n"); | |
2535 | pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); | |
2536 | pr_warn("** **\n"); | |
2537 | pr_warn("** This means that this is a DEBUG kernel and it is **\n"); | |
2538 | pr_warn("** unsafe for production use. **\n"); | |
2539 | pr_warn("** **\n"); | |
2540 | pr_warn("** If you see this message and you are not debugging **\n"); | |
2541 | pr_warn("** the kernel, report this immediately to your vendor! **\n"); | |
2542 | pr_warn("** **\n"); | |
2543 | pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); | |
2544 | pr_warn("**********************************************************\n"); | |
07d777fe | 2545 | |
b382ede6 SR |
2546 | /* Expand the buffers to set size */ |
2547 | tracing_update_buffers(); | |
2548 | ||
07d777fe | 2549 | buffers_allocated = 1; |
81698831 SR |
2550 | |
2551 | /* | |
2552 | * trace_printk_init_buffers() can be called by modules. | |
2553 | * If that happens, then we need to start cmdline recording | |
2554 | * directly here. If the global_trace.buffer is already | |
2555 | * allocated here, then this was called by module code. | |
2556 | */ | |
12883efb | 2557 | if (global_trace.trace_buffer.buffer) |
81698831 SR |
2558 | tracing_start_cmdline_record(); |
2559 | } | |
2560 | ||
2561 | void trace_printk_start_comm(void) | |
2562 | { | |
2563 | /* Start tracing comms if trace printk is set */ | |
2564 | if (!buffers_allocated) | |
2565 | return; | |
2566 | tracing_start_cmdline_record(); | |
2567 | } | |
2568 | ||
2569 | static void trace_printk_start_stop_comm(int enabled) | |
2570 | { | |
2571 | if (!buffers_allocated) | |
2572 | return; | |
2573 | ||
2574 | if (enabled) | |
2575 | tracing_start_cmdline_record(); | |
2576 | else | |
2577 | tracing_stop_cmdline_record(); | |
07d777fe SR |
2578 | } |
2579 | ||
769b0441 | 2580 | /** |
48ead020 | 2581 | * trace_vbprintk - write a binary message to the tracing buffer |
769b0441 FW |
2582 | * |
2583 | */ | |
40ce74f1 | 2584 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
769b0441 | 2585 | { |
2425bcb9 | 2586 | struct trace_event_call *call = &event_bprint; |
769b0441 | 2587 | struct ring_buffer_event *event; |
e77405ad | 2588 | struct ring_buffer *buffer; |
769b0441 | 2589 | struct trace_array *tr = &global_trace; |
48ead020 | 2590 | struct bprint_entry *entry; |
769b0441 | 2591 | unsigned long flags; |
07d777fe SR |
2592 | char *tbuffer; |
2593 | int len = 0, size, pc; | |
769b0441 FW |
2594 | |
2595 | if (unlikely(tracing_selftest_running || tracing_disabled)) | |
2596 | return 0; | |
2597 | ||
2598 | /* Don't pollute graph traces with trace_vprintk internals */ | |
2599 | pause_graph_tracing(); | |
2600 | ||
2601 | pc = preempt_count(); | |
5168ae50 | 2602 | preempt_disable_notrace(); |
769b0441 | 2603 | |
07d777fe SR |
2604 | tbuffer = get_trace_buf(); |
2605 | if (!tbuffer) { | |
2606 | len = 0; | |
e2ace001 | 2607 | goto out_nobuffer; |
07d777fe | 2608 | } |
769b0441 | 2609 | |
07d777fe | 2610 | len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); |
769b0441 | 2611 | |
07d777fe SR |
2612 | if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) |
2613 | goto out; | |
769b0441 | 2614 | |
07d777fe | 2615 | local_save_flags(flags); |
769b0441 | 2616 | size = sizeof(*entry) + sizeof(u32) * len; |
12883efb | 2617 | buffer = tr->trace_buffer.buffer; |
e77405ad SR |
2618 | event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, |
2619 | flags, pc); | |
769b0441 | 2620 | if (!event) |
07d777fe | 2621 | goto out; |
769b0441 FW |
2622 | entry = ring_buffer_event_data(event); |
2623 | entry->ip = ip; | |
769b0441 FW |
2624 | entry->fmt = fmt; |
2625 | ||
07d777fe | 2626 | memcpy(entry->buf, tbuffer, sizeof(u32) * len); |
f306cc82 | 2627 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
7ffbd48d | 2628 | __buffer_unlock_commit(buffer, event); |
2d34f489 | 2629 | ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); |
d931369b | 2630 | } |
769b0441 | 2631 | |
769b0441 | 2632 | out: |
e2ace001 AL |
2633 | put_trace_buf(); |
2634 | ||
2635 | out_nobuffer: | |
5168ae50 | 2636 | preempt_enable_notrace(); |
769b0441 FW |
2637 | unpause_graph_tracing(); |
2638 | ||
2639 | return len; | |
2640 | } | |
48ead020 FW |
2641 | EXPORT_SYMBOL_GPL(trace_vbprintk); |
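/*
 * Example (sketch): trace_vbprintk() is the backend for trace_printk()
 * when the format has arguments, e.g. a debug call site like:
 *
 *	trace_printk("irq %d took %llu ns\n", irq, delta);
 *
 * Only the fmt pointer and the binary argument words are recorded here;
 * the string is rendered later, when the buffer is read.
 */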
2642 | ||
12883efb SRRH |
2643 | static int |
2644 | __trace_array_vprintk(struct ring_buffer *buffer, | |
2645 | unsigned long ip, const char *fmt, va_list args) | |
48ead020 | 2646 | { |
2425bcb9 | 2647 | struct trace_event_call *call = &event_print; |
48ead020 | 2648 | struct ring_buffer_event *event; |
07d777fe | 2649 | int len = 0, size, pc; |
48ead020 | 2650 | struct print_entry *entry; |
07d777fe SR |
2651 | unsigned long flags; |
2652 | char *tbuffer; | |
48ead020 FW |
2653 | |
2654 | if (tracing_disabled || tracing_selftest_running) | |
2655 | return 0; | |
2656 | ||
07d777fe SR |
2657 | /* Don't pollute graph traces with trace_vprintk internals */ |
2658 | pause_graph_tracing(); | |
2659 | ||
48ead020 FW |
2660 | pc = preempt_count(); |
2661 | preempt_disable_notrace(); | |
48ead020 | 2662 | |
07d777fe SR |
2663 | |
2664 | tbuffer = get_trace_buf(); | |
2665 | if (!tbuffer) { | |
2666 | len = 0; | |
e2ace001 | 2667 | goto out_nobuffer; |
07d777fe | 2668 | } |
48ead020 | 2669 | |
3558a5ac | 2670 | len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); |
48ead020 | 2671 | |
07d777fe | 2672 | local_save_flags(flags); |
48ead020 | 2673 | size = sizeof(*entry) + len + 1; |
e77405ad | 2674 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
07d777fe | 2675 | flags, pc); |
48ead020 | 2676 | if (!event) |
07d777fe | 2677 | goto out; |
48ead020 | 2678 | entry = ring_buffer_event_data(event); |
c13d2f7c | 2679 | entry->ip = ip; |
48ead020 | 2680 | |
3558a5ac | 2681 | memcpy(&entry->buf, tbuffer, len + 1); |
f306cc82 | 2682 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
7ffbd48d | 2683 | __buffer_unlock_commit(buffer, event); |
2d34f489 | 2684 | ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); |
d931369b | 2685 | } |
e2ace001 AL |
2686 | |
2687 | out: | |
2688 | put_trace_buf(); | |
2689 | ||
2690 | out_nobuffer: | |
48ead020 | 2691 | preempt_enable_notrace(); |
07d777fe | 2692 | unpause_graph_tracing(); |
48ead020 FW |
2693 | |
2694 | return len; | |
2695 | } | |
659372d3 | 2696 | |
12883efb SRRH |
2697 | int trace_array_vprintk(struct trace_array *tr, |
2698 | unsigned long ip, const char *fmt, va_list args) | |
2699 | { | |
2700 | return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); | |
2701 | } | |
2702 | ||
2703 | int trace_array_printk(struct trace_array *tr, | |
2704 | unsigned long ip, const char *fmt, ...) | |
2705 | { | |
2706 | int ret; | |
2707 | va_list ap; | |
2708 | ||
983f938a | 2709 | if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
12883efb SRRH |
2710 | return 0; |
2711 | ||
2712 | va_start(ap, fmt); | |
2713 | ret = trace_array_vprintk(tr, ip, fmt, ap); | |
2714 | va_end(ap); | |
2715 | return ret; | |
2716 | } | |
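/*
 * Example (sketch; "my_tr" stands for a trace_array obtained for an
 * instance, which this file manages internally):
 *
 *	trace_array_printk(my_tr, _THIS_IP_, "state=%d\n", state);
 *
 * _THIS_IP_ records the caller's address as the event's ip.
 */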
2717 | ||
2718 | int trace_array_printk_buf(struct ring_buffer *buffer, | |
2719 | unsigned long ip, const char *fmt, ...) | |
2720 | { | |
2721 | int ret; | |
2722 | va_list ap; | |
2723 | ||
983f938a | 2724 | if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
12883efb SRRH |
2725 | return 0; |
2726 | ||
2727 | va_start(ap, fmt); | |
2728 | ret = __trace_array_vprintk(buffer, ip, fmt, ap); | |
2729 | va_end(ap); | |
2730 | return ret; | |
2731 | } | |
2732 | ||
659372d3 SR |
2733 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
2734 | { | |
a813a159 | 2735 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
659372d3 | 2736 | } |
769b0441 FW |
2737 | EXPORT_SYMBOL_GPL(trace_vprintk); |
2738 | ||
e2ac8ef5 | 2739 | static void trace_iterator_increment(struct trace_iterator *iter) |
5a90f577 | 2740 | { |
6d158a81 SR |
2741 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); |
2742 | ||
5a90f577 | 2743 | iter->idx++; |
6d158a81 SR |
2744 | if (buf_iter) |
2745 | ring_buffer_read(buf_iter, NULL); | |
5a90f577 SR |
2746 | } |
2747 | ||
e309b41d | 2748 | static struct trace_entry * |
bc21b478 SR |
2749 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, |
2750 | unsigned long *lost_events) | |
dd0e545f | 2751 | { |
3928a8a2 | 2752 | struct ring_buffer_event *event; |
6d158a81 | 2753 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); |
dd0e545f | 2754 | |
d769041f SR |
2755 | if (buf_iter) |
2756 | event = ring_buffer_iter_peek(buf_iter, ts); | |
2757 | else | |
12883efb | 2758 | event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, |
bc21b478 | 2759 | lost_events); |
d769041f | 2760 | |
4a9bd3f1 SR |
2761 | if (event) { |
2762 | iter->ent_size = ring_buffer_event_length(event); | |
2763 | return ring_buffer_event_data(event); | |
2764 | } | |
2765 | iter->ent_size = 0; | |
2766 | return NULL; | |
dd0e545f | 2767 | } |
d769041f | 2768 | |
dd0e545f | 2769 | static struct trace_entry * |
bc21b478 SR |
2770 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, |
2771 | unsigned long *missing_events, u64 *ent_ts) | |
bc0c38d1 | 2772 | { |
12883efb | 2773 | struct ring_buffer *buffer = iter->trace_buffer->buffer; |
bc0c38d1 | 2774 | struct trace_entry *ent, *next = NULL; |
aa27497c | 2775 | unsigned long lost_events = 0, next_lost = 0; |
b04cc6b1 | 2776 | int cpu_file = iter->cpu_file; |
3928a8a2 | 2777 | u64 next_ts = 0, ts; |
bc0c38d1 | 2778 | int next_cpu = -1; |
12b5da34 | 2779 | int next_size = 0; |
bc0c38d1 SR |
2780 | int cpu; |
2781 | ||
b04cc6b1 FW |
2782 | /* |
2783 | * If we are in a per_cpu trace file, don't bother by iterating over | |
2784 | * all cpu and peek directly. | |
2785 | */ | |
ae3b5093 | 2786 | if (cpu_file > RING_BUFFER_ALL_CPUS) { |
b04cc6b1 FW |
2787 | if (ring_buffer_empty_cpu(buffer, cpu_file)) |
2788 | return NULL; | |
bc21b478 | 2789 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); |
b04cc6b1 FW |
2790 | if (ent_cpu) |
2791 | *ent_cpu = cpu_file; | |
2792 | ||
2793 | return ent; | |
2794 | } | |
2795 | ||
ab46428c | 2796 | for_each_tracing_cpu(cpu) { |
dd0e545f | 2797 | |
3928a8a2 SR |
2798 | if (ring_buffer_empty_cpu(buffer, cpu)) |
2799 | continue; | |
dd0e545f | 2800 | |
bc21b478 | 2801 | ent = peek_next_entry(iter, cpu, &ts, &lost_events); |
dd0e545f | 2802 | |
cdd31cd2 IM |
2803 | /* |
2804 | * Pick the entry with the smallest timestamp: | |
2805 | */ | |
3928a8a2 | 2806 | if (ent && (!next || ts < next_ts)) { |
bc0c38d1 SR |
2807 | next = ent; |
2808 | next_cpu = cpu; | |
3928a8a2 | 2809 | next_ts = ts; |
bc21b478 | 2810 | next_lost = lost_events; |
12b5da34 | 2811 | next_size = iter->ent_size; |
bc0c38d1 SR |
2812 | } |
2813 | } | |
2814 | ||
12b5da34 SR |
2815 | iter->ent_size = next_size; |
2816 | ||
bc0c38d1 SR |
2817 | if (ent_cpu) |
2818 | *ent_cpu = next_cpu; | |
2819 | ||
3928a8a2 SR |
2820 | if (ent_ts) |
2821 | *ent_ts = next_ts; | |
2822 | ||
bc21b478 SR |
2823 | if (missing_events) |
2824 | *missing_events = next_lost; | |
2825 | ||
bc0c38d1 SR |
2826 | return next; |
2827 | } | |
2828 | ||
dd0e545f | 2829 | /* Find the next real entry, without updating the iterator itself */ |
c4a8e8be FW |
2830 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
2831 | int *ent_cpu, u64 *ent_ts) | |
bc0c38d1 | 2832 | { |
bc21b478 | 2833 | return __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
dd0e545f SR |
2834 | } |
2835 | ||
2836 | /* Find the next real entry, and increment the iterator to the next entry */ | |
955b61e5 | 2837 | void *trace_find_next_entry_inc(struct trace_iterator *iter) |
dd0e545f | 2838 | { |
bc21b478 SR |
2839 | iter->ent = __find_next_entry(iter, &iter->cpu, |
2840 | &iter->lost_events, &iter->ts); | |
dd0e545f | 2841 | |
3928a8a2 | 2842 | if (iter->ent) |
e2ac8ef5 | 2843 | trace_iterator_increment(iter); |
dd0e545f | 2844 | |
3928a8a2 | 2845 | return iter->ent ? iter : NULL; |
b3806b43 | 2846 | } |
bc0c38d1 | 2847 | |
e309b41d | 2848 | static void trace_consume(struct trace_iterator *iter) |
b3806b43 | 2849 | { |
12883efb | 2850 | ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, |
bc21b478 | 2851 | &iter->lost_events); |
bc0c38d1 SR |
2852 | } |
2853 | ||
e309b41d | 2854 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) |
bc0c38d1 SR |
2855 | { |
2856 | struct trace_iterator *iter = m->private; | |
bc0c38d1 | 2857 | int i = (int)*pos; |
4e3c3333 | 2858 | void *ent; |
bc0c38d1 | 2859 | |
a63ce5b3 SR |
2860 | WARN_ON_ONCE(iter->leftover); |
2861 | ||
bc0c38d1 SR |
2862 | (*pos)++; |
2863 | ||
2864 | /* can't go backwards */ | |
2865 | if (iter->idx > i) | |
2866 | return NULL; | |
2867 | ||
2868 | if (iter->idx < 0) | |
955b61e5 | 2869 | ent = trace_find_next_entry_inc(iter); |
bc0c38d1 SR |
2870 | else |
2871 | ent = iter; | |
2872 | ||
2873 | while (ent && iter->idx < i) | |
955b61e5 | 2874 | ent = trace_find_next_entry_inc(iter); |
bc0c38d1 SR |
2875 | |
2876 | iter->pos = *pos; | |
2877 | ||
bc0c38d1 SR |
2878 | return ent; |
2879 | } | |
2880 | ||
955b61e5 | 2881 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
2f26ebd5 | 2882 | { |
2f26ebd5 SR |
2883 | struct ring_buffer_event *event; |
2884 | struct ring_buffer_iter *buf_iter; | |
2885 | unsigned long entries = 0; | |
2886 | u64 ts; | |
2887 | ||
12883efb | 2888 | per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; |
2f26ebd5 | 2889 | |
6d158a81 SR |
2890 | buf_iter = trace_buffer_iter(iter, cpu); |
2891 | if (!buf_iter) | |
2f26ebd5 SR |
2892 | return; |
2893 | ||
2f26ebd5 SR |
2894 | ring_buffer_iter_reset(buf_iter); |
2895 | ||
2896 | /* | |
2897 | * We could have the case with the max latency tracers | |
2898 | * that a reset never took place on a cpu. This is evidenced | |
2899 | * by the timestamp being before the start of the buffer. | |
2900 | */ | |
2901 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | |
12883efb | 2902 | if (ts >= iter->trace_buffer->time_start) |
2f26ebd5 SR |
2903 | break; |
2904 | entries++; | |
2905 | ring_buffer_read(buf_iter, NULL); | |
2906 | } | |
2907 | ||
12883efb | 2908 | per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; |
2f26ebd5 SR |
2909 | } |
2910 | ||
d7350c3f | 2911 | /* |
d7350c3f FW |
2912 | * The current tracer is copied to avoid a global locking |
2913 | * all around. | |
2914 | */ | |
bc0c38d1 SR |
2915 | static void *s_start(struct seq_file *m, loff_t *pos) |
2916 | { | |
2917 | struct trace_iterator *iter = m->private; | |
2b6080f2 | 2918 | struct trace_array *tr = iter->tr; |
b04cc6b1 | 2919 | int cpu_file = iter->cpu_file; |
bc0c38d1 SR |
2920 | void *p = NULL; |
2921 | loff_t l = 0; | |
3928a8a2 | 2922 | int cpu; |
bc0c38d1 | 2923 | |
2fd196ec HT |
2924 | /* |
2925 | * copy the tracer to avoid using a global lock all around. | |
2926 | * iter->trace is a copy of current_trace, the pointer to the | |
2927 | * name may be used instead of a strcmp(), as iter->trace->name | |
2928 | * will point to the same string as current_trace->name. | |
2929 | */ | |
bc0c38d1 | 2930 | mutex_lock(&trace_types_lock); |
2b6080f2 SR |
2931 | if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) |
2932 | *iter->trace = *tr->current_trace; | |
d7350c3f | 2933 | mutex_unlock(&trace_types_lock); |
bc0c38d1 | 2934 | |
12883efb | 2935 | #ifdef CONFIG_TRACER_MAX_TRACE |
debdd57f HT |
2936 | if (iter->snapshot && iter->trace->use_max_tr) |
2937 | return ERR_PTR(-EBUSY); | |
12883efb | 2938 | #endif |
debdd57f HT |
2939 | |
2940 | if (!iter->snapshot) | |
2941 | atomic_inc(&trace_record_cmdline_disabled); | |
bc0c38d1 | 2942 | |
bc0c38d1 SR |
2943 | if (*pos != iter->pos) { |
2944 | iter->ent = NULL; | |
2945 | iter->cpu = 0; | |
2946 | iter->idx = -1; | |
2947 | ||
ae3b5093 | 2948 | if (cpu_file == RING_BUFFER_ALL_CPUS) { |
b04cc6b1 | 2949 | for_each_tracing_cpu(cpu) |
2f26ebd5 | 2950 | tracing_iter_reset(iter, cpu); |
b04cc6b1 | 2951 | } else |
2f26ebd5 | 2952 | tracing_iter_reset(iter, cpu_file); |
bc0c38d1 | 2953 | |
ac91d854 | 2954 | iter->leftover = 0; |
bc0c38d1 SR |
2955 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
2956 | ; | |
2957 | ||
2958 | } else { | |
a63ce5b3 SR |
2959 | /* |
2960 | * If we overflowed the seq_file before, then we want | |
2961 | * to just reuse the trace_seq buffer again. | |
2962 | */ | |
2963 | if (iter->leftover) | |
2964 | p = iter; | |
2965 | else { | |
2966 | l = *pos - 1; | |
2967 | p = s_next(m, p, &l); | |
2968 | } | |
bc0c38d1 SR |
2969 | } |
2970 | ||
4f535968 | 2971 | trace_event_read_lock(); |
7e53bd42 | 2972 | trace_access_lock(cpu_file); |
bc0c38d1 SR |
2973 | return p; |
2974 | } | |
2975 | ||
2976 | static void s_stop(struct seq_file *m, void *p) | |
2977 | { | |
7e53bd42 LJ |
2978 | struct trace_iterator *iter = m->private; |
2979 | ||
12883efb | 2980 | #ifdef CONFIG_TRACER_MAX_TRACE |
debdd57f HT |
2981 | if (iter->snapshot && iter->trace->use_max_tr) |
2982 | return; | |
12883efb | 2983 | #endif |
debdd57f HT |
2984 | |
2985 | if (!iter->snapshot) | |
2986 | atomic_dec(&trace_record_cmdline_disabled); | |
12883efb | 2987 | |
7e53bd42 | 2988 | trace_access_unlock(iter->cpu_file); |
4f535968 | 2989 | trace_event_read_unlock(); |
bc0c38d1 SR |
2990 | } |
2991 | ||
39eaf7ef | 2992 | static void |
12883efb SRRH |
2993 | get_total_entries(struct trace_buffer *buf, |
2994 | unsigned long *total, unsigned long *entries) | |
39eaf7ef SR |
2995 | { |
2996 | unsigned long count; | |
2997 | int cpu; | |
2998 | ||
2999 | *total = 0; | |
3000 | *entries = 0; | |
3001 | ||
3002 | for_each_tracing_cpu(cpu) { | |
12883efb | 3003 | count = ring_buffer_entries_cpu(buf->buffer, cpu); |
39eaf7ef SR |
3004 | /* |
3005 | * If this buffer has skipped entries, then we hold all | |
3006 | * entries for the trace and we need to ignore the | |
3007 | * ones before the time stamp. | |
3008 | */ | |
12883efb SRRH |
3009 | if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { |
3010 | count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; | |
39eaf7ef SR |
3011 | /* total is the same as the entries */ |
3012 | *total += count; | |
3013 | } else | |
3014 | *total += count + | |
12883efb | 3015 | ring_buffer_overrun_cpu(buf->buffer, cpu); |
39eaf7ef SR |
3016 | *entries += count; |
3017 | } | |
3018 | } | |
3019 | ||
e309b41d | 3020 | static void print_lat_help_header(struct seq_file *m) |
bc0c38d1 | 3021 | { |
d79ac28f RV |
3022 | seq_puts(m, "# _------=> CPU# \n" |
3023 | "# / _-----=> irqs-off \n" | |
3024 | "# | / _----=> need-resched \n" | |
3025 | "# || / _---=> hardirq/softirq \n" | |
3026 | "# ||| / _--=> preempt-depth \n" | |
3027 | "# |||| / delay \n" | |
3028 | "# cmd pid ||||| time | caller \n" | |
3029 | "# \\ / ||||| \\ | / \n"); | |
bc0c38d1 SR |
3030 | } |
3031 | ||
12883efb | 3032 | static void print_event_info(struct trace_buffer *buf, struct seq_file *m) |
bc0c38d1 | 3033 | { |
39eaf7ef SR |
3034 | unsigned long total; |
3035 | unsigned long entries; | |
3036 | ||
12883efb | 3037 | get_total_entries(buf, &total, &entries); |
39eaf7ef SR |
3038 | seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", |
3039 | entries, total, num_online_cpus()); | |
3040 | seq_puts(m, "#\n"); | |
3041 | } | |
3042 | ||
12883efb | 3043 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) |
39eaf7ef | 3044 | { |
12883efb | 3045 | print_event_info(buf, m); |
d79ac28f RV |
3046 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n" |
3047 | "# | | | | |\n"); | |
bc0c38d1 SR |
3048 | } |
3049 | ||
12883efb | 3050 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) |
77271ce4 | 3051 | { |
12883efb | 3052 | print_event_info(buf, m); |
d79ac28f RV |
3053 | seq_puts(m, "# _-----=> irqs-off\n" |
3054 | "# / _----=> need-resched\n" | |
3055 | "# | / _---=> hardirq/softirq\n" | |
3056 | "# || / _--=> preempt-depth\n" | |
3057 | "# ||| / delay\n" | |
3058 | "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" | |
3059 | "# | | | |||| | |\n"); | |
77271ce4 | 3060 | } |
bc0c38d1 | 3061 | |
62b915f1 | 3062 | void |
bc0c38d1 SR |
3063 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) |
3064 | { | |
983f938a | 3065 | unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); |
12883efb SRRH |
3066 | struct trace_buffer *buf = iter->trace_buffer; |
3067 | struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); | |
2b6080f2 | 3068 | struct tracer *type = iter->trace; |
39eaf7ef SR |
3069 | unsigned long entries; |
3070 | unsigned long total; | |
bc0c38d1 SR |
3071 | const char *name = "preemption"; |
3072 | ||
d840f718 | 3073 | name = type->name; |
bc0c38d1 | 3074 | |
12883efb | 3075 | get_total_entries(buf, &total, &entries); |
bc0c38d1 | 3076 | |
888b55dc | 3077 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", |
bc0c38d1 | 3078 | name, UTS_RELEASE); |
888b55dc | 3079 | seq_puts(m, "# -----------------------------------" |
bc0c38d1 | 3080 | "---------------------------------\n"); |
888b55dc | 3081 | seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" |
bc0c38d1 | 3082 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", |
57f50be1 | 3083 | nsecs_to_usecs(data->saved_latency), |
bc0c38d1 | 3084 | entries, |
4c11d7ae | 3085 | total, |
12883efb | 3086 | buf->cpu, |
bc0c38d1 SR |
3087 | #if defined(CONFIG_PREEMPT_NONE) |
3088 | "server", | |
3089 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) | |
3090 | "desktop", | |
b5c21b45 | 3091 | #elif defined(CONFIG_PREEMPT) |
bc0c38d1 SR |
3092 | "preempt", |
3093 | #else | |
3094 | "unknown", | |
3095 | #endif | |
3096 | /* These are reserved for later use */ | |
3097 | 0, 0, 0, 0); | |
3098 | #ifdef CONFIG_SMP | |
3099 | seq_printf(m, " #P:%d)\n", num_online_cpus()); | |
3100 | #else | |
3101 | seq_puts(m, ")\n"); | |
3102 | #endif | |
888b55dc KM |
3103 | seq_puts(m, "# -----------------\n"); |
3104 | seq_printf(m, "# | task: %.16s-%d " | |
bc0c38d1 | 3105 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", |
d20b92ab EB |
3106 | data->comm, data->pid, |
3107 | from_kuid_munged(seq_user_ns(m), data->uid), data->nice, | |
bc0c38d1 | 3108 | data->policy, data->rt_priority); |
888b55dc | 3109 | seq_puts(m, "# -----------------\n"); |
bc0c38d1 SR |
3110 | |
3111 | if (data->critical_start) { | |
888b55dc | 3112 | seq_puts(m, "# => started at: "); |
214023c3 SR |
3113 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); |
3114 | trace_print_seq(m, &iter->seq); | |
888b55dc | 3115 | seq_puts(m, "\n# => ended at: "); |
214023c3 SR |
3116 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); |
3117 | trace_print_seq(m, &iter->seq); | |
8248ac05 | 3118 | seq_puts(m, "\n#\n"); |
bc0c38d1 SR |
3119 | } |
3120 | ||
888b55dc | 3121 | seq_puts(m, "#\n"); |
bc0c38d1 SR |
3122 | } |
3123 | ||
a309720c SR |
3124 | static void test_cpu_buff_start(struct trace_iterator *iter) |
3125 | { | |
3126 | struct trace_seq *s = &iter->seq; | |
983f938a | 3127 | struct trace_array *tr = iter->tr; |
a309720c | 3128 | |
983f938a | 3129 | if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) |
12ef7d44 SR |
3130 | return; |
3131 | ||
3132 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | |
3133 | return; | |
3134 | ||
919cd979 | 3135 | if (iter->started && cpumask_test_cpu(iter->cpu, iter->started)) |
a309720c SR |
3136 | return; |
3137 | ||
12883efb | 3138 | if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) |
2f26ebd5 SR |
3139 | return; |
3140 | ||
919cd979 SL |
3141 | if (iter->started) |
3142 | cpumask_set_cpu(iter->cpu, iter->started); | |
b0dfa978 FW |
3143 | |
3144 | /* Don't print started cpu buffer for the first entry of the trace */ | |
3145 | if (iter->idx > 1) | |
3146 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", | |
3147 | iter->cpu); | |
a309720c SR |
3148 | } |
3149 | ||
2c4f035f | 3150 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
bc0c38d1 | 3151 | { |
983f938a | 3152 | struct trace_array *tr = iter->tr; |
214023c3 | 3153 | struct trace_seq *s = &iter->seq; |
983f938a | 3154 | unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); |
4e3c3333 | 3155 | struct trace_entry *entry; |
f633cef0 | 3156 | struct trace_event *event; |
bc0c38d1 | 3157 | |
4e3c3333 | 3158 | entry = iter->ent; |
dd0e545f | 3159 | |
a309720c SR |
3160 | test_cpu_buff_start(iter); |
3161 | ||
c4a8e8be | 3162 | event = ftrace_find_event(entry->type); |
bc0c38d1 | 3163 | |
983f938a | 3164 | if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { |
19a7fe20 SRRH |
3165 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) |
3166 | trace_print_lat_context(iter); | |
3167 | else | |
3168 | trace_print_context(iter); | |
c4a8e8be | 3169 | } |
bc0c38d1 | 3170 | |
19a7fe20 SRRH |
3171 | if (trace_seq_has_overflowed(s)) |
3172 | return TRACE_TYPE_PARTIAL_LINE; | |
3173 | ||
268ccda0 | 3174 | if (event) |
a9a57763 | 3175 | return event->funcs->trace(iter, sym_flags, event); |
d9793bd8 | 3176 | |
19a7fe20 | 3177 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
02b67518 | 3178 | |
19a7fe20 | 3179 | return trace_handle_return(s); |
bc0c38d1 SR |
3180 | } |
3181 | ||
2c4f035f | 3182 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
f9896bf3 | 3183 | { |
983f938a | 3184 | struct trace_array *tr = iter->tr; |
f9896bf3 IM |
3185 | struct trace_seq *s = &iter->seq; |
3186 | struct trace_entry *entry; | |
f633cef0 | 3187 | struct trace_event *event; |
f9896bf3 IM |
3188 | |
3189 | entry = iter->ent; | |
dd0e545f | 3190 | |
983f938a | 3191 | if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) |
19a7fe20 SRRH |
3192 | trace_seq_printf(s, "%d %d %llu ", |
3193 | entry->pid, iter->cpu, iter->ts); | |
3194 | ||
3195 | if (trace_seq_has_overflowed(s)) | |
3196 | return TRACE_TYPE_PARTIAL_LINE; | |
f9896bf3 | 3197 | |
f633cef0 | 3198 | event = ftrace_find_event(entry->type); |
268ccda0 | 3199 | if (event) |
a9a57763 | 3200 | return event->funcs->raw(iter, 0, event); |
d9793bd8 | 3201 | |
19a7fe20 | 3202 | trace_seq_printf(s, "%d ?\n", entry->type); |
777e208d | 3203 | |
19a7fe20 | 3204 | return trace_handle_return(s); |
f9896bf3 IM |
3205 | } |
3206 | ||
2c4f035f | 3207 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
5e3ca0ec | 3208 | { |
983f938a | 3209 | struct trace_array *tr = iter->tr; |
5e3ca0ec IM |
3210 | struct trace_seq *s = &iter->seq; |
3211 | unsigned char newline = '\n'; | |
3212 | struct trace_entry *entry; | |
f633cef0 | 3213 | struct trace_event *event; |
5e3ca0ec IM |
3214 | |
3215 | entry = iter->ent; | |
dd0e545f | 3216 | |
983f938a | 3217 | if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { |
19a7fe20 SRRH |
3218 | SEQ_PUT_HEX_FIELD(s, entry->pid); |
3219 | SEQ_PUT_HEX_FIELD(s, iter->cpu); | |
3220 | SEQ_PUT_HEX_FIELD(s, iter->ts); | |
3221 | if (trace_seq_has_overflowed(s)) | |
3222 | return TRACE_TYPE_PARTIAL_LINE; | |
c4a8e8be | 3223 | } |
5e3ca0ec | 3224 | |
f633cef0 | 3225 | event = ftrace_find_event(entry->type); |
268ccda0 | 3226 | if (event) { |
a9a57763 | 3227 | enum print_line_t ret = event->funcs->hex(iter, 0, event); |
d9793bd8 ACM |
3228 | if (ret != TRACE_TYPE_HANDLED) |
3229 | return ret; | |
3230 | } | |
7104f300 | 3231 | |
19a7fe20 | 3232 | SEQ_PUT_FIELD(s, newline); |
5e3ca0ec | 3233 | |
19a7fe20 | 3234 | return trace_handle_return(s); |
5e3ca0ec IM |
3235 | } |
3236 | ||
2c4f035f | 3237 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
cb0f12aa | 3238 | { |
983f938a | 3239 | struct trace_array *tr = iter->tr; |
cb0f12aa IM |
3240 | struct trace_seq *s = &iter->seq; |
3241 | struct trace_entry *entry; | |
f633cef0 | 3242 | struct trace_event *event; |
cb0f12aa IM |
3243 | |
3244 | entry = iter->ent; | |
dd0e545f | 3245 | |
983f938a | 3246 | if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { |
19a7fe20 SRRH |
3247 | SEQ_PUT_FIELD(s, entry->pid); |
3248 | SEQ_PUT_FIELD(s, iter->cpu); | |
3249 | SEQ_PUT_FIELD(s, iter->ts); | |
3250 | if (trace_seq_has_overflowed(s)) | |
3251 | return TRACE_TYPE_PARTIAL_LINE; | |
c4a8e8be | 3252 | } |
cb0f12aa | 3253 | |
f633cef0 | 3254 | event = ftrace_find_event(entry->type); |
a9a57763 SR |
3255 | return event ? event->funcs->binary(iter, 0, event) : |
3256 | TRACE_TYPE_HANDLED; | |
cb0f12aa IM |
3257 | } |
3258 | ||
62b915f1 | 3259 | int trace_empty(struct trace_iterator *iter) |
bc0c38d1 | 3260 | { |
6d158a81 | 3261 | struct ring_buffer_iter *buf_iter; |
bc0c38d1 SR |
3262 | int cpu; |
3263 | ||
9aba60fe | 3264 | /* If we are looking at one CPU buffer, only check that one */ |
ae3b5093 | 3265 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { |
9aba60fe | 3266 | cpu = iter->cpu_file; |
6d158a81 SR |
3267 | buf_iter = trace_buffer_iter(iter, cpu); |
3268 | if (buf_iter) { | |
3269 | if (!ring_buffer_iter_empty(buf_iter)) | |
9aba60fe SR |
3270 | return 0; |
3271 | } else { | |
12883efb | 3272 | if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
9aba60fe SR |
3273 | return 0; |
3274 | } | |
3275 | return 1; | |
3276 | } | |
3277 | ||
ab46428c | 3278 | for_each_tracing_cpu(cpu) { |
6d158a81 SR |
3279 | buf_iter = trace_buffer_iter(iter, cpu); |
3280 | if (buf_iter) { | |
3281 | if (!ring_buffer_iter_empty(buf_iter)) | |
d769041f SR |
3282 | return 0; |
3283 | } else { | |
12883efb | 3284 | if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
d769041f SR |
3285 | return 0; |
3286 | } | |
bc0c38d1 | 3287 | } |
d769041f | 3288 | |
797d3712 | 3289 | return 1; |
bc0c38d1 SR |
3290 | } |
3291 | ||
4f535968 | 3292 | /* Called with trace_event_read_lock() held. */ |
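/*
 * Dispatch order below: a tracer's own ->print_line() gets first
 * refusal; then the printk msg-only cases (TRACE_BPUTS, TRACE_BPRINT,
 * TRACE_PRINT); then the bin, hex and raw output options; and the
 * default text format comes last.
 */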
955b61e5 | 3293 | enum print_line_t print_trace_line(struct trace_iterator *iter) |
f9896bf3 | 3294 | { |
983f938a SRRH |
3295 | struct trace_array *tr = iter->tr; |
3296 | unsigned long trace_flags = tr->trace_flags; | |
2c4f035f FW |
3297 | enum print_line_t ret; |
3298 | ||
19a7fe20 SRRH |
3299 | if (iter->lost_events) { |
3300 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | |
3301 | iter->cpu, iter->lost_events); | |
3302 | if (trace_seq_has_overflowed(&iter->seq)) | |
3303 | return TRACE_TYPE_PARTIAL_LINE; | |
3304 | } | |
bc21b478 | 3305 | |
2c4f035f FW |
3306 | if (iter->trace && iter->trace->print_line) { |
3307 | ret = iter->trace->print_line(iter); | |
3308 | if (ret != TRACE_TYPE_UNHANDLED) | |
3309 | return ret; | |
3310 | } | |
72829bc3 | 3311 | |
09ae7234 SRRH |
3312 | if (iter->ent->type == TRACE_BPUTS && |
3313 | trace_flags & TRACE_ITER_PRINTK && | |
3314 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | |
3315 | return trace_print_bputs_msg_only(iter); | |
3316 | ||
48ead020 FW |
3317 | if (iter->ent->type == TRACE_BPRINT && |
3318 | trace_flags & TRACE_ITER_PRINTK && | |
3319 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | |
5ef841f6 | 3320 | return trace_print_bprintk_msg_only(iter); |
48ead020 | 3321 | |
66896a85 FW |
3322 | if (iter->ent->type == TRACE_PRINT && |
3323 | trace_flags & TRACE_ITER_PRINTK && | |
3324 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | |
5ef841f6 | 3325 | return trace_print_printk_msg_only(iter); |
66896a85 | 3326 | |
cb0f12aa IM |
3327 | if (trace_flags & TRACE_ITER_BIN) |
3328 | return print_bin_fmt(iter); | |
3329 | ||
5e3ca0ec IM |
3330 | if (trace_flags & TRACE_ITER_HEX) |
3331 | return print_hex_fmt(iter); | |
3332 | ||
f9896bf3 IM |
3333 | if (trace_flags & TRACE_ITER_RAW) |
3334 | return print_raw_fmt(iter); | |
3335 | ||
f9896bf3 IM |
3336 | return print_trace_fmt(iter); |
3337 | } | |
3338 | ||
7e9a49ef JO |
3339 | void trace_latency_header(struct seq_file *m) |
3340 | { | |
3341 | struct trace_iterator *iter = m->private; | |
983f938a | 3342 | struct trace_array *tr = iter->tr; |
7e9a49ef JO |
3343 | |
3344 | /* print nothing if the buffers are empty */ | |
3345 | if (trace_empty(iter)) | |
3346 | return; | |
3347 | ||
3348 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | |
3349 | print_trace_header(m, iter); | |
3350 | ||
983f938a | 3351 | if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) |
7e9a49ef JO |
3352 | print_lat_help_header(m); |
3353 | } | |
3354 | ||
62b915f1 JO |
3355 | void trace_default_header(struct seq_file *m) |
3356 | { | |
3357 | struct trace_iterator *iter = m->private; | |
983f938a SRRH |
3358 | struct trace_array *tr = iter->tr; |
3359 | unsigned long trace_flags = tr->trace_flags; | |
62b915f1 | 3360 | |
f56e7f8e JO |
3361 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
3362 | return; | |
3363 | ||
62b915f1 JO |
3364 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { |
3365 | /* print nothing if the buffers are empty */ | |
3366 | if (trace_empty(iter)) | |
3367 | return; | |
3368 | print_trace_header(m, iter); | |
3369 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | |
3370 | print_lat_help_header(m); | |
3371 | } else { | |
77271ce4 SR |
3372 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { |
3373 | if (trace_flags & TRACE_ITER_IRQ_INFO) | |
12883efb | 3374 | print_func_help_header_irq(iter->trace_buffer, m); |
77271ce4 | 3375 | else |
12883efb | 3376 | print_func_help_header(iter->trace_buffer, m); |
77271ce4 | 3377 | } |
62b915f1 JO |
3378 | } |
3379 | } | |
3380 | ||
e0a413f6 SR |
3381 | static void test_ftrace_alive(struct seq_file *m) |
3382 | { | |
3383 | if (!ftrace_is_dead()) | |
3384 | return; | |
d79ac28f RV |
3385 | seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" |
3386 | "# MAY BE MISSING FUNCTION EVENTS\n"); | |
e0a413f6 SR |
3387 | } |
3388 | ||
d8741e2e | 3389 | #ifdef CONFIG_TRACER_MAX_TRACE |
f1affcaa | 3390 | static void show_snapshot_main_help(struct seq_file *m) |
d8741e2e | 3391 | { |
d79ac28f RV |
3392 | seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" |
3393 | "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" | |
3394 | "# Takes a snapshot of the main buffer.\n" | |
3395 | "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" | |
3396 | "# (Doesn't have to be '2'; works with any number\n" | 
3397 | "# that is not a '0' or '1')\n"); | 
d8741e2e | 3398 | } |
f1affcaa SRRH |
3399 | |
3400 | static void show_snapshot_percpu_help(struct seq_file *m) | |
3401 | { | |
fa6f0cc7 | 3402 | seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); |
f1affcaa | 3403 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP |
d79ac28f RV |
3404 | seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" |
3405 | "# Takes a snapshot of the main buffer for this cpu.\n"); | |
f1affcaa | 3406 | #else |
d79ac28f RV |
3407 | seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" |
3408 | "# Must use main snapshot file to allocate.\n"); | |
f1affcaa | 3409 | #endif |
d79ac28f RV |
3410 | seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" |
3411 | "# (Doesn't have to be '2'; works with any number\n" | 
3412 | "# that is not a '0' or '1')\n"); | 
f1affcaa SRRH |
3413 | } |
3414 | ||
d8741e2e SRRH |
3415 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) |
3416 | { | |
45ad21ca | 3417 | if (iter->tr->allocated_snapshot) |
fa6f0cc7 | 3418 | seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); |
d8741e2e | 3419 | else |
fa6f0cc7 | 3420 | seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); |
d8741e2e | 3421 | |
fa6f0cc7 | 3422 | seq_puts(m, "# Snapshot commands:\n"); |
f1affcaa SRRH |
3423 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
3424 | show_snapshot_main_help(m); | |
3425 | else | |
3426 | show_snapshot_percpu_help(m); | |
d8741e2e SRRH |
3427 | } |
3428 | #else | |
3429 | /* Should never be called */ | |
3430 | static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } | |
3431 | #endif | |
3432 | ||
bc0c38d1 SR |
3433 | static int s_show(struct seq_file *m, void *v) |
3434 | { | |
3435 | struct trace_iterator *iter = v; | |
a63ce5b3 | 3436 | int ret; |
bc0c38d1 SR |
3437 | |
3438 | if (iter->ent == NULL) { | |
3439 | if (iter->tr) { | |
3440 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | |
3441 | seq_puts(m, "#\n"); | |
e0a413f6 | 3442 | test_ftrace_alive(m); |
bc0c38d1 | 3443 | } |
d8741e2e SRRH |
3444 | if (iter->snapshot && trace_empty(iter)) |
3445 | print_snapshot_help(m, iter); | |
3446 | else if (iter->trace && iter->trace->print_header) | |
8bba1bf5 | 3447 | iter->trace->print_header(m); |
62b915f1 JO |
3448 | else |
3449 | trace_default_header(m); | |
3450 | ||
a63ce5b3 SR |
3451 | } else if (iter->leftover) { |
3452 | /* | |
3453 | * If we filled the seq_file buffer earlier, we | |
3454 | * want to just show it now. | |
3455 | */ | |
3456 | ret = trace_print_seq(m, &iter->seq); | |
3457 | ||
3458 | /* ret should this time be zero, but you never know */ | |
3459 | iter->leftover = ret; | |
3460 | ||
bc0c38d1 | 3461 | } else { |
f9896bf3 | 3462 | print_trace_line(iter); |
a63ce5b3 SR |
3463 | ret = trace_print_seq(m, &iter->seq); |
3464 | /* | |
3465 | * If we overflow the seq_file buffer, then it will | |
3466 | * ask us for this data again at start up. | |
3467 | * Use that instead. | |
3468 | * ret is 0 if seq_file write succeeded. | |
3469 | * -1 otherwise. | |
3470 | */ | |
3471 | iter->leftover = ret; | |
bc0c38d1 SR |
3472 | } |
3473 | ||
3474 | return 0; | |
3475 | } | |
3476 | ||
649e9c70 ON |
3477 | /* |
3478 | * Should be used after trace_array_get(), trace_types_lock | |
3479 | * ensures that i_cdev was already initialized. | |
3480 | */ | |
3481 | static inline int tracing_get_cpu(struct inode *inode) | |
3482 | { | |
3483 | if (inode->i_cdev) /* See trace_create_cpu_file() */ | |
3484 | return (long)inode->i_cdev - 1; | |
3485 | return RING_BUFFER_ALL_CPUS; | |
3486 | } | |
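/*
 * Sketch of the encoding relied on above (hypothetical, inferred from
 * the "- 1" and the trace_create_cpu_file() reference): per-cpu file
 * creation presumably stores cpu + 1 in i_cdev, so a NULL i_cdev can
 * stand for RING_BUFFER_ALL_CPUS:
 *
 *	inode->i_cdev = (void *)((long)cpu + 1);
 */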
3487 | ||
88e9d34c | 3488 | static const struct seq_operations tracer_seq_ops = { |
4bf39a94 IM |
3489 | .start = s_start, |
3490 | .next = s_next, | |
3491 | .stop = s_stop, | |
3492 | .show = s_show, | |
bc0c38d1 SR |
3493 | }; |
3494 | ||
e309b41d | 3495 | static struct trace_iterator * |
6484c71c | 3496 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) |
bc0c38d1 | 3497 | { |
6484c71c | 3498 | struct trace_array *tr = inode->i_private; |
bc0c38d1 | 3499 | struct trace_iterator *iter; |
50e18b94 | 3500 | int cpu; |
bc0c38d1 | 3501 | |
85a2f9b4 SR |
3502 | if (tracing_disabled) |
3503 | return ERR_PTR(-ENODEV); | |
60a11774 | 3504 | |
50e18b94 | 3505 | iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); |
85a2f9b4 SR |
3506 | if (!iter) |
3507 | return ERR_PTR(-ENOMEM); | |
bc0c38d1 | 3508 | |
72917235 | 3509 | iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), |
6d158a81 | 3510 | GFP_KERNEL); |
93574fcc DC |
3511 | if (!iter->buffer_iter) |
3512 | goto release; | |
3513 | ||
d7350c3f FW |
3514 | /* |
3515 | * We make a copy of the current tracer to avoid concurrent | |
3516 | * changes on it while we are reading. | |
3517 | */ | |
bc0c38d1 | 3518 | mutex_lock(&trace_types_lock); |
d7350c3f | 3519 | iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); |
85a2f9b4 | 3520 | if (!iter->trace) |
d7350c3f | 3521 | goto fail; |
85a2f9b4 | 3522 | |
2b6080f2 | 3523 | *iter->trace = *tr->current_trace; |
d7350c3f | 3524 | |
79f55997 | 3525 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
b0dfa978 FW |
3526 | goto fail; |
3527 | ||
12883efb SRRH |
3528 | iter->tr = tr; |
3529 | ||
3530 | #ifdef CONFIG_TRACER_MAX_TRACE | |
2b6080f2 SR |
3531 | /* Currently only the top directory has a snapshot */ |
3532 | if (tr->current_trace->print_max || snapshot) | |
12883efb | 3533 | iter->trace_buffer = &tr->max_buffer; |
bc0c38d1 | 3534 | else |
12883efb SRRH |
3535 | #endif |
3536 | iter->trace_buffer = &tr->trace_buffer; | |
debdd57f | 3537 | iter->snapshot = snapshot; |
bc0c38d1 | 3538 | iter->pos = -1; |
6484c71c | 3539 | iter->cpu_file = tracing_get_cpu(inode); |
d7350c3f | 3540 | mutex_init(&iter->mutex); |
bc0c38d1 | 3541 | |
8bba1bf5 MM |
3542 | /* Notify the tracer early; before we stop tracing. */ |
3543 | if (iter->trace && iter->trace->open) | |
a93751ca | 3544 | iter->trace->open(iter); |
8bba1bf5 | 3545 | |
12ef7d44 | 3546 | /* Annotate start of buffers if we had overruns */ |
12883efb | 3547 | if (ring_buffer_overruns(iter->trace_buffer->buffer)) |
12ef7d44 SR |
3548 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
3549 | ||
8be0709f | 3550 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
58e8eedf | 3551 | if (trace_clocks[tr->clock_id].in_ns) |
8be0709f DS |
3552 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
3553 | ||
debdd57f HT |
3554 | /* stop the trace while dumping if we are not opening "snapshot" */ |
3555 | if (!iter->snapshot) | |
2b6080f2 | 3556 | tracing_stop_tr(tr); |
2f26ebd5 | 3557 | |
ae3b5093 | 3558 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { |
b04cc6b1 | 3559 | for_each_tracing_cpu(cpu) { |
b04cc6b1 | 3560 | iter->buffer_iter[cpu] = |
12883efb | 3561 | ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); |
72c9ddfd DM |
3562 | } |
3563 | ring_buffer_read_prepare_sync(); | |
3564 | for_each_tracing_cpu(cpu) { | |
3565 | ring_buffer_read_start(iter->buffer_iter[cpu]); | |
2f26ebd5 | 3566 | tracing_iter_reset(iter, cpu); |
b04cc6b1 FW |
3567 | } |
3568 | } else { | |
3569 | cpu = iter->cpu_file; | |
3928a8a2 | 3570 | iter->buffer_iter[cpu] = |
12883efb | 3571 | ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); |
72c9ddfd DM |
3572 | ring_buffer_read_prepare_sync(); |
3573 | ring_buffer_read_start(iter->buffer_iter[cpu]); | |
2f26ebd5 | 3574 | tracing_iter_reset(iter, cpu); |
3928a8a2 SR |
3575 | } |
3576 | ||
bc0c38d1 SR |
3577 | mutex_unlock(&trace_types_lock); |
3578 | ||
bc0c38d1 | 3579 | return iter; |
3928a8a2 | 3580 | |
d7350c3f | 3581 | fail: |
3928a8a2 | 3582 | mutex_unlock(&trace_types_lock); |
d7350c3f | 3583 | kfree(iter->trace); |
6d158a81 | 3584 | kfree(iter->buffer_iter); |
93574fcc | 3585 | release: |
50e18b94 JO |
3586 | seq_release_private(inode, file); |
3587 | return ERR_PTR(-ENOMEM); | |
bc0c38d1 SR |
3588 | } |
3589 | ||
3590 | int tracing_open_generic(struct inode *inode, struct file *filp) | |
3591 | { | |
60a11774 SR |
3592 | if (tracing_disabled) |
3593 | return -ENODEV; | |
3594 | ||
bc0c38d1 SR |
3595 | filp->private_data = inode->i_private; |
3596 | return 0; | |
3597 | } | |
3598 | ||
2e86421d GB |
3599 | bool tracing_is_disabled(void) |
3600 | { | |
3601 | return tracing_disabled; | 
3602 | } | |
3603 | ||
7b85af63 SRRH |
3604 | /* |
3605 | * Open and update trace_array ref count. | |
3606 | * Must have the current trace_array passed to it. | |
3607 | */ | |
dcc30223 | 3608 | static int tracing_open_generic_tr(struct inode *inode, struct file *filp) |
7b85af63 SRRH |
3609 | { |
3610 | struct trace_array *tr = inode->i_private; | |
3611 | ||
3612 | if (tracing_disabled) | |
3613 | return -ENODEV; | |
3614 | ||
3615 | if (trace_array_get(tr) < 0) | |
3616 | return -ENODEV; | |
3617 | ||
3618 | filp->private_data = inode->i_private; | |
3619 | ||
3620 | return 0; | |
7b85af63 SRRH |
3621 | } |
3622 | ||
4fd27358 | 3623 | static int tracing_release(struct inode *inode, struct file *file) |
bc0c38d1 | 3624 | { |
6484c71c | 3625 | struct trace_array *tr = inode->i_private; |
907f2784 | 3626 | struct seq_file *m = file->private_data; |
4acd4d00 | 3627 | struct trace_iterator *iter; |
3928a8a2 | 3628 | int cpu; |
bc0c38d1 | 3629 | |
ff451961 | 3630 | if (!(file->f_mode & FMODE_READ)) { |
6484c71c | 3631 | trace_array_put(tr); |
4acd4d00 | 3632 | return 0; |
ff451961 | 3633 | } |
4acd4d00 | 3634 | |
6484c71c | 3635 | /* Writes do not use seq_file */ |
4acd4d00 | 3636 | iter = m->private; |
bc0c38d1 | 3637 | mutex_lock(&trace_types_lock); |
a695cb58 | 3638 | |
3928a8a2 SR |
3639 | for_each_tracing_cpu(cpu) { |
3640 | if (iter->buffer_iter[cpu]) | |
3641 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | |
3642 | } | |
3643 | ||
bc0c38d1 SR |
3644 | if (iter->trace && iter->trace->close) |
3645 | iter->trace->close(iter); | |
3646 | ||
debdd57f HT |
3647 | if (!iter->snapshot) |
3648 | /* reenable tracing if it was previously enabled */ | |
2b6080f2 | 3649 | tracing_start_tr(tr); |
f77d09a3 AL |
3650 | |
3651 | __trace_array_put(tr); | |
3652 | ||
bc0c38d1 SR |
3653 | mutex_unlock(&trace_types_lock); |
3654 | ||
d7350c3f | 3655 | mutex_destroy(&iter->mutex); |
b0dfa978 | 3656 | free_cpumask_var(iter->started); |
d7350c3f | 3657 | kfree(iter->trace); |
6d158a81 | 3658 | kfree(iter->buffer_iter); |
50e18b94 | 3659 | seq_release_private(inode, file); |
ff451961 | 3660 | |
bc0c38d1 SR |
3661 | return 0; |
3662 | } | |
3663 | ||
7b85af63 SRRH |
3664 | static int tracing_release_generic_tr(struct inode *inode, struct file *file) |
3665 | { | |
3666 | struct trace_array *tr = inode->i_private; | |
3667 | ||
3668 | trace_array_put(tr); | |
bc0c38d1 SR |
3669 | return 0; |
3670 | } | |
3671 | ||
7b85af63 SRRH |
3672 | static int tracing_single_release_tr(struct inode *inode, struct file *file) |
3673 | { | |
3674 | struct trace_array *tr = inode->i_private; | |
3675 | ||
3676 | trace_array_put(tr); | |
3677 | ||
3678 | return single_release(inode, file); | |
3679 | } | |
3680 | ||
bc0c38d1 SR |
3681 | static int tracing_open(struct inode *inode, struct file *file) |
3682 | { | |
6484c71c | 3683 | struct trace_array *tr = inode->i_private; |
85a2f9b4 SR |
3684 | struct trace_iterator *iter; |
3685 | int ret = 0; | |
bc0c38d1 | 3686 | |
ff451961 SRRH |
3687 | if (trace_array_get(tr) < 0) |
3688 | return -ENODEV; | |
3689 | ||
4acd4d00 | 3690 | /* If this file was open for write, then erase contents */ |
6484c71c ON |
3691 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
3692 | int cpu = tracing_get_cpu(inode); | |
3693 | ||
3694 | if (cpu == RING_BUFFER_ALL_CPUS) | |
12883efb | 3695 | tracing_reset_online_cpus(&tr->trace_buffer); |
4acd4d00 | 3696 | else |
6484c71c | 3697 | tracing_reset(&tr->trace_buffer, cpu); |
4acd4d00 | 3698 | } |
bc0c38d1 | 3699 | |
4acd4d00 | 3700 | if (file->f_mode & FMODE_READ) { |
6484c71c | 3701 | iter = __tracing_open(inode, file, false); |
4acd4d00 SR |
3702 | if (IS_ERR(iter)) |
3703 | ret = PTR_ERR(iter); | |
983f938a | 3704 | else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) |
4acd4d00 SR |
3705 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
3706 | } | |
ff451961 SRRH |
3707 | |
3708 | if (ret < 0) | |
3709 | trace_array_put(tr); | |
3710 | ||
bc0c38d1 SR |
3711 | return ret; |
3712 | } | |
3713 | ||
607e2ea1 SRRH |
3714 | /* |
3715 | * Some tracers are not suitable for instance buffers. | |
3716 | * A tracer is always available for the global array (toplevel) | |
3717 | * or if it explicitly states that it is. | |
3718 | */ | |
3719 | static bool | |
3720 | trace_ok_for_array(struct tracer *t, struct trace_array *tr) | |
3721 | { | |
3722 | return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; | |
3723 | } | |
3724 | ||
3725 | /* Find the next tracer that this trace array may use */ | |
3726 | static struct tracer * | |
3727 | get_tracer_for_array(struct trace_array *tr, struct tracer *t) | |
3728 | { | |
3729 | while (t && !trace_ok_for_array(t, tr)) | |
3730 | t = t->next; | |
3731 | ||
3732 | return t; | |
3733 | } | |
3734 | ||
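/*
 * A tracer opts in to instance buffers by setting .allow_instances in
 * its struct tracer, e.g. (hypothetical tracer, shown as a sketch):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name		 = "my_tracer",
 *		.allow_instances = true,
 *	};
 */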
e309b41d | 3735 | static void * |
bc0c38d1 SR |
3736 | t_next(struct seq_file *m, void *v, loff_t *pos) |
3737 | { | |
607e2ea1 | 3738 | struct trace_array *tr = m->private; |
f129e965 | 3739 | struct tracer *t = v; |
bc0c38d1 SR |
3740 | |
3741 | (*pos)++; | |
3742 | ||
3743 | if (t) | |
607e2ea1 | 3744 | t = get_tracer_for_array(tr, t->next); |
bc0c38d1 | 3745 | |
bc0c38d1 SR |
3746 | return t; |
3747 | } | |
3748 | ||
3749 | static void *t_start(struct seq_file *m, loff_t *pos) | |
3750 | { | |
607e2ea1 | 3751 | struct trace_array *tr = m->private; |
f129e965 | 3752 | struct tracer *t; |
bc0c38d1 SR |
3753 | loff_t l = 0; |
3754 | ||
3755 | mutex_lock(&trace_types_lock); | |
607e2ea1 SRRH |
3756 | |
3757 | t = get_tracer_for_array(tr, trace_types); | |
3758 | for (; t && l < *pos; t = t_next(m, t, &l)) | |
3759 | ; | |
bc0c38d1 SR |
3760 | |
3761 | return t; | |
3762 | } | |
3763 | ||
3764 | static void t_stop(struct seq_file *m, void *p) | |
3765 | { | |
3766 | mutex_unlock(&trace_types_lock); | |
3767 | } | |
3768 | ||
3769 | static int t_show(struct seq_file *m, void *v) | |
3770 | { | |
3771 | struct tracer *t = v; | |
3772 | ||
3773 | if (!t) | |
3774 | return 0; | |
3775 | ||
fa6f0cc7 | 3776 | seq_puts(m, t->name); |
bc0c38d1 SR |
3777 | if (t->next) |
3778 | seq_putc(m, ' '); | |
3779 | else | |
3780 | seq_putc(m, '\n'); | |
3781 | ||
3782 | return 0; | |
3783 | } | |
3784 | ||
88e9d34c | 3785 | static const struct seq_operations show_traces_seq_ops = { |
4bf39a94 IM |
3786 | .start = t_start, |
3787 | .next = t_next, | |
3788 | .stop = t_stop, | |
3789 | .show = t_show, | |
bc0c38d1 SR |
3790 | }; |
3791 | ||
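/*
 * Reading available_tracers walks the seq_ops above and yields the
 * registered tracer names on one space-separated line, e.g. (the set
 * of tracers is an example):
 *
 *	# cat available_tracers
 *	function_graph function nop
 */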
3792 | static int show_traces_open(struct inode *inode, struct file *file) | |
3793 | { | |
607e2ea1 SRRH |
3794 | struct trace_array *tr = inode->i_private; |
3795 | struct seq_file *m; | |
3796 | int ret; | |
3797 | ||
60a11774 SR |
3798 | if (tracing_disabled) |
3799 | return -ENODEV; | |
3800 | ||
607e2ea1 SRRH |
3801 | ret = seq_open(file, &show_traces_seq_ops); |
3802 | if (ret) | |
3803 | return ret; | |
3804 | ||
3805 | m = file->private_data; | |
3806 | m->private = tr; | |
3807 | ||
3808 | return 0; | |
bc0c38d1 SR |
3809 | } |
3810 | ||
4acd4d00 SR |
3811 | static ssize_t |
3812 | tracing_write_stub(struct file *filp, const char __user *ubuf, | |
3813 | size_t count, loff_t *ppos) | |
3814 | { | |
3815 | return count; | |
3816 | } | |
3817 | ||
098c879e | 3818 | loff_t tracing_lseek(struct file *file, loff_t offset, int whence) |
364829b1 | 3819 | { |
098c879e SRRH |
3820 | int ret; |
3821 | ||
364829b1 | 3822 | if (file->f_mode & FMODE_READ) |
098c879e | 3823 | ret = seq_lseek(file, offset, whence); |
364829b1 | 3824 | else |
098c879e SRRH |
3825 | file->f_pos = ret = 0; |
3826 | ||
3827 | return ret; | |
364829b1 SP |
3828 | } |
3829 | ||
5e2336a0 | 3830 | static const struct file_operations tracing_fops = { |
4bf39a94 IM |
3831 | .open = tracing_open, |
3832 | .read = seq_read, | |
4acd4d00 | 3833 | .write = tracing_write_stub, |
098c879e | 3834 | .llseek = tracing_lseek, |
4bf39a94 | 3835 | .release = tracing_release, |
bc0c38d1 SR |
3836 | }; |
3837 | ||
5e2336a0 | 3838 | static const struct file_operations show_traces_fops = { |
c7078de1 IM |
3839 | .open = show_traces_open, |
3840 | .read = seq_read, | |
3841 | .release = seq_release, | |
b444786f | 3842 | .llseek = seq_lseek, |
c7078de1 IM |
3843 | }; |
3844 | ||
36dfe925 IM |
3845 | /* |
3846 | * The tracer itself will not take this lock, but still we want | |
3847 | * to provide a consistent cpumask to user-space: | |
3848 | */ | |
3849 | static DEFINE_MUTEX(tracing_cpumask_update_lock); | |
3850 | ||
3851 | /* | |
3852 | * Temporary storage for the character representation of the | |
3853 | * CPU bitmask (and one more byte for the newline): | |
3854 | */ | |
3855 | static char mask_str[NR_CPUS + 1]; | |
3856 | ||
c7078de1 IM |
3857 | static ssize_t |
3858 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | |
3859 | size_t count, loff_t *ppos) | |
3860 | { | |
ccfe9e42 | 3861 | struct trace_array *tr = file_inode(filp)->i_private; |
36dfe925 | 3862 | int len; |
c7078de1 IM |
3863 | |
3864 | mutex_lock(&tracing_cpumask_update_lock); | |
36dfe925 | 3865 | |
1a40243b TH |
3866 | len = snprintf(mask_str, count, "%*pb\n", |
3867 | cpumask_pr_args(tr->tracing_cpumask)); | |
3868 | if (len >= count) { | |
36dfe925 IM |
3869 | count = -EINVAL; |
3870 | goto out_err; | |
3871 | } | |
36dfe925 IM |
3872 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); |
3873 | ||
3874 | out_err: | |
c7078de1 IM |
3875 | mutex_unlock(&tracing_cpumask_update_lock); |
3876 | ||
3877 | return count; | |
3878 | } | |
3879 | ||
3880 | static ssize_t | |
3881 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |
3882 | size_t count, loff_t *ppos) | |
3883 | { | |
ccfe9e42 | 3884 | struct trace_array *tr = file_inode(filp)->i_private; |
9e01c1b7 | 3885 | cpumask_var_t tracing_cpumask_new; |
2b6080f2 | 3886 | int err, cpu; |
9e01c1b7 RR |
3887 | |
3888 | if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | |
3889 | return -ENOMEM; | |
c7078de1 | 3890 | |
9e01c1b7 | 3891 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
c7078de1 | 3892 | if (err) |
36dfe925 IM |
3893 | goto err_unlock; |
3894 | ||
215368e8 LZ |
3895 | mutex_lock(&tracing_cpumask_update_lock); |
3896 | ||
a5e25883 | 3897 | local_irq_disable(); |
0b9b12c1 | 3898 | arch_spin_lock(&tr->max_lock); |
ab46428c | 3899 | for_each_tracing_cpu(cpu) { |
36dfe925 IM |
3900 | /* |
3901 | * Increase/decrease the disabled counter if we are | |
3902 | * about to flip a bit in the cpumask: | |
3903 | */ | |
ccfe9e42 | 3904 | if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
9e01c1b7 | 3905 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
12883efb SRRH |
3906 | atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
3907 | ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); | |
36dfe925 | 3908 | } |
ccfe9e42 | 3909 | if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
9e01c1b7 | 3910 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
12883efb SRRH |
3911 | atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
3912 | ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); | |
36dfe925 IM |
3913 | } |
3914 | } | |
0b9b12c1 | 3915 | arch_spin_unlock(&tr->max_lock); |
a5e25883 | 3916 | local_irq_enable(); |
36dfe925 | 3917 | |
ccfe9e42 | 3918 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); |
36dfe925 IM |
3919 | |
3920 | mutex_unlock(&tracing_cpumask_update_lock); | |
9e01c1b7 | 3921 | free_cpumask_var(tracing_cpumask_new); |
c7078de1 IM |
3922 | |
3923 | return count; | |
36dfe925 IM |
3924 | |
3925 | err_unlock: | |
215368e8 | 3926 | free_cpumask_var(tracing_cpumask_new); |
36dfe925 IM |
3927 | |
3928 | return err; | |
c7078de1 IM |
3929 | } |
3930 | ||
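/*
 * The mask is parsed by cpumask_parse_user(), i.e. as a hex mask, so
 * e.g. (value is an example):
 *
 *	# echo 3 > tracing_cpumask
 *
 * limits tracing to CPUs 0 and 1; buffers of masked-out CPUs keep
 * their contents but stop recording.
 */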
5e2336a0 | 3931 | static const struct file_operations tracing_cpumask_fops = { |
ccfe9e42 | 3932 | .open = tracing_open_generic_tr, |
c7078de1 IM |
3933 | .read = tracing_cpumask_read, |
3934 | .write = tracing_cpumask_write, | |
ccfe9e42 | 3935 | .release = tracing_release_generic_tr, |
b444786f | 3936 | .llseek = generic_file_llseek, |
bc0c38d1 SR |
3937 | }; |
3938 | ||
fdb372ed | 3939 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
bc0c38d1 | 3940 | { |
d8e83d26 | 3941 | struct tracer_opt *trace_opts; |
2b6080f2 | 3942 | struct trace_array *tr = m->private; |
d8e83d26 | 3943 | u32 tracer_flags; |
d8e83d26 | 3944 | int i; |
adf9f195 | 3945 | |
d8e83d26 | 3946 | mutex_lock(&trace_types_lock); |
2b6080f2 SR |
3947 | tracer_flags = tr->current_trace->flags->val; |
3948 | trace_opts = tr->current_trace->flags->opts; | |
d8e83d26 | 3949 | |
bc0c38d1 | 3950 | for (i = 0; trace_options[i]; i++) { |
983f938a | 3951 | if (tr->trace_flags & (1 << i)) |
fdb372ed | 3952 | seq_printf(m, "%s\n", trace_options[i]); |
bc0c38d1 | 3953 | else |
fdb372ed | 3954 | seq_printf(m, "no%s\n", trace_options[i]); |
bc0c38d1 SR |
3955 | } |
3956 | ||
adf9f195 FW |
3957 | for (i = 0; trace_opts[i].name; i++) { |
3958 | if (tracer_flags & trace_opts[i].bit) | |
fdb372ed | 3959 | seq_printf(m, "%s\n", trace_opts[i].name); |
adf9f195 | 3960 | else |
fdb372ed | 3961 | seq_printf(m, "no%s\n", trace_opts[i].name); |
adf9f195 | 3962 | } |
d8e83d26 | 3963 | mutex_unlock(&trace_types_lock); |
adf9f195 | 3964 | |
fdb372ed | 3965 | return 0; |
bc0c38d1 | 3966 | } |
bc0c38d1 | 3967 | |
8c1a49ae | 3968 | static int __set_tracer_option(struct trace_array *tr, |
8d18eaaf LZ |
3969 | struct tracer_flags *tracer_flags, |
3970 | struct tracer_opt *opts, int neg) | |
3971 | { | |
d39cdd20 | 3972 | struct tracer *trace = tracer_flags->trace; |
8d18eaaf | 3973 | int ret; |
bc0c38d1 | 3974 | |
8c1a49ae | 3975 | ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); |
8d18eaaf LZ |
3976 | if (ret) |
3977 | return ret; | |
3978 | ||
3979 | if (neg) | |
3980 | tracer_flags->val &= ~opts->bit; | |
3981 | else | |
3982 | tracer_flags->val |= opts->bit; | |
3983 | return 0; | |
bc0c38d1 SR |
3984 | } |
3985 | ||
adf9f195 | 3986 | /* Try to assign a tracer specific option */ |
8c1a49ae | 3987 | static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) |
adf9f195 | 3988 | { |
8c1a49ae | 3989 | struct tracer *trace = tr->current_trace; |
7770841e | 3990 | struct tracer_flags *tracer_flags = trace->flags; |
adf9f195 | 3991 | struct tracer_opt *opts = NULL; |
8d18eaaf | 3992 | int i; |
adf9f195 | 3993 | |
7770841e Z |
3994 | for (i = 0; tracer_flags->opts[i].name; i++) { |
3995 | opts = &tracer_flags->opts[i]; | |
adf9f195 | 3996 | |
8d18eaaf | 3997 | if (strcmp(cmp, opts->name) == 0) |
8c1a49ae | 3998 | return __set_tracer_option(tr, trace->flags, opts, neg); |
adf9f195 | 3999 | } |
adf9f195 | 4000 | |
8d18eaaf | 4001 | return -EINVAL; |
adf9f195 FW |
4002 | } |
4003 | ||
613f04a0 SRRH |
4004 | /* Some tracers require overwrite to stay enabled */ |
4005 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) | |
4006 | { | |
4007 | if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) | |
4008 | return -1; | |
4009 | ||
4010 | return 0; | |
4011 | } | |
4012 | ||
2b6080f2 | 4013 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) |
af4617bd SR |
4014 | { |
4015 | /* do nothing if flag is already set */ | |
983f938a | 4016 | if (!!(tr->trace_flags & mask) == !!enabled) |
613f04a0 SRRH |
4017 | return 0; |
4018 | ||
4019 | /* Give the tracer a chance to approve the change */ | |
2b6080f2 | 4020 | if (tr->current_trace->flag_changed) |
bf6065b5 | 4021 | if (tr->current_trace->flag_changed(tr, mask, !!enabled)) |
613f04a0 | 4022 | return -EINVAL; |
af4617bd SR |
4023 | |
4024 | if (enabled) | |
983f938a | 4025 | tr->trace_flags |= mask; |
af4617bd | 4026 | else |
983f938a | 4027 | tr->trace_flags &= ~mask; |
e870e9a1 LZ |
4028 | |
4029 | if (mask == TRACE_ITER_RECORD_CMD) | |
4030 | trace_event_enable_cmd_record(enabled); | |
750912fa | 4031 | |
c37775d5 SR |
4032 | if (mask == TRACE_ITER_EVENT_FORK) |
4033 | trace_event_follow_fork(tr, enabled); | |
4034 | ||
80902822 | 4035 | if (mask == TRACE_ITER_OVERWRITE) { |
12883efb | 4036 | ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); |
80902822 | 4037 | #ifdef CONFIG_TRACER_MAX_TRACE |
12883efb | 4038 | ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); |
80902822 SRRH |
4039 | #endif |
4040 | } | |
81698831 | 4041 | |
b9f9108c | 4042 | if (mask == TRACE_ITER_PRINTK) { |
81698831 | 4043 | trace_printk_start_stop_comm(enabled); |
b9f9108c SRRH |
4044 | trace_printk_control(enabled); |
4045 | } | |
613f04a0 SRRH |
4046 | |
4047 | return 0; | |
af4617bd SR |
4048 | } |
4049 | ||
2b6080f2 | 4050 | static int trace_set_options(struct trace_array *tr, char *option) |
bc0c38d1 | 4051 | { |
8d18eaaf | 4052 | char *cmp; |
bc0c38d1 | 4053 | int neg = 0; |
613f04a0 | 4054 | int ret = -ENODEV; |
bc0c38d1 | 4055 | int i; |
a4d1e688 | 4056 | size_t orig_len = strlen(option); |
bc0c38d1 | 4057 | |
7bcfaf54 | 4058 | cmp = strstrip(option); |
bc0c38d1 | 4059 | |
8d18eaaf | 4060 | if (strncmp(cmp, "no", 2) == 0) { |
bc0c38d1 SR |
4061 | neg = 1; |
4062 | cmp += 2; | |
4063 | } | |
4064 | ||
69d34da2 SRRH |
4065 | mutex_lock(&trace_types_lock); |
4066 | ||
bc0c38d1 | 4067 | for (i = 0; trace_options[i]; i++) { |
8d18eaaf | 4068 | if (strcmp(cmp, trace_options[i]) == 0) { |
2b6080f2 | 4069 | ret = set_tracer_flag(tr, 1 << i, !neg); |
bc0c38d1 SR |
4070 | break; |
4071 | } | |
4072 | } | |
adf9f195 FW |
4073 | |
4074 | /* If no option could be set, test the specific tracer options */ | |
69d34da2 | 4075 | if (!trace_options[i]) |
8c1a49ae | 4076 | ret = set_tracer_option(tr, cmp, neg); |
69d34da2 SRRH |
4077 | |
4078 | mutex_unlock(&trace_types_lock); | |
bc0c38d1 | 4079 | |
a4d1e688 JW |
4080 | /* |
4081 | * If the first trailing whitespace is replaced with '\0' by strstrip, | |
4082 | * turn it back into a space. | |
4083 | */ | |
4084 | if (orig_len > strlen(option)) | |
4085 | option[strlen(option)] = ' '; | |
4086 | ||
7bcfaf54 SR |
4087 | return ret; |
4088 | } | |
4089 | ||
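/*
 * Example inputs (a sketch): trace_set_options() handles both core
 * flags and the current tracer's own options, and a "no" prefix
 * clears a flag:
 *
 *	# echo stacktrace > trace_options
 *	# echo nooverwrite > trace_options
 */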
a4d1e688 JW |
4090 | static void __init apply_trace_boot_options(void) |
4091 | { | |
4092 | char *buf = trace_boot_options_buf; | |
4093 | char *option; | |
4094 | ||
4095 | while (true) { | |
4096 | option = strsep(&buf, ","); | |
4097 | ||
4098 | if (!option) | |
4099 | break; | |
a4d1e688 | 4100 | |
43ed3843 SRRH |
4101 | if (*option) |
4102 | trace_set_options(&global_trace, option); | |
a4d1e688 JW |
4103 | |
4104 | /* Put back the comma to allow this to be called again */ | |
4105 | if (buf) | |
4106 | *(buf - 1) = ','; | |
4107 | } | |
4108 | } | |
4109 | ||
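/*
 * trace_boot_options_buf is filled from the "trace_options=" boot
 * parameter, so booting with e.g. (options are examples):
 *
 *	trace_options=sym-addr,nooverwrite
 *
 * applies each comma-separated option to the global array at boot.
 */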
7bcfaf54 SR |
4110 | static ssize_t |
4111 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |
4112 | size_t cnt, loff_t *ppos) | |
4113 | { | |
2b6080f2 SR |
4114 | struct seq_file *m = filp->private_data; |
4115 | struct trace_array *tr = m->private; | |
7bcfaf54 | 4116 | char buf[64]; |
613f04a0 | 4117 | int ret; |
7bcfaf54 SR |
4118 | |
4119 | if (cnt >= sizeof(buf)) | |
4120 | return -EINVAL; | |
4121 | ||
4afe6495 | 4122 | if (copy_from_user(buf, ubuf, cnt)) |
7bcfaf54 SR |
4123 | return -EFAULT; |
4124 | ||
a8dd2176 SR |
4125 | buf[cnt] = 0; |
4126 | ||
2b6080f2 | 4127 | ret = trace_set_options(tr, buf); |
613f04a0 SRRH |
4128 | if (ret < 0) |
4129 | return ret; | |
7bcfaf54 | 4130 | |
cf8517cf | 4131 | *ppos += cnt; |
bc0c38d1 SR |
4132 | |
4133 | return cnt; | |
4134 | } | |
4135 | ||
fdb372ed LZ |
4136 | static int tracing_trace_options_open(struct inode *inode, struct file *file) |
4137 | { | |
7b85af63 | 4138 | struct trace_array *tr = inode->i_private; |
f77d09a3 | 4139 | int ret; |
7b85af63 | 4140 | |
fdb372ed LZ |
4141 | if (tracing_disabled) |
4142 | return -ENODEV; | |
2b6080f2 | 4143 | |
7b85af63 SRRH |
4144 | if (trace_array_get(tr) < 0) |
4145 | return -ENODEV; | |
4146 | ||
f77d09a3 AL |
4147 | ret = single_open(file, tracing_trace_options_show, inode->i_private); |
4148 | if (ret < 0) | |
4149 | trace_array_put(tr); | |
4150 | ||
4151 | return ret; | |
fdb372ed LZ |
4152 | } |
4153 | ||
5e2336a0 | 4154 | static const struct file_operations tracing_iter_fops = { |
fdb372ed LZ |
4155 | .open = tracing_trace_options_open, |
4156 | .read = seq_read, | |
4157 | .llseek = seq_lseek, | |
7b85af63 | 4158 | .release = tracing_single_release_tr, |
ee6bce52 | 4159 | .write = tracing_trace_options_write, |
bc0c38d1 SR |
4160 | }; |
4161 | ||
7bd2f24c IM |
4162 | static const char readme_msg[] = |
4163 | "tracing mini-HOWTO:\n\n" | |
22f45649 SRRH |
4164 | "# echo 0 > tracing_on : quick way to disable tracing\n" |
4165 | "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" | |
4166 | " Important files:\n" | |
4167 | " trace\t\t\t- The static contents of the buffer\n" | |
4168 | "\t\t\t To clear the buffer write into this file: echo > trace\n" | |
4169 | " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" | |
4170 | " current_tracer\t- function and latency tracers\n" | |
4171 | " available_tracers\t- list of configured tracers for current_tracer\n" | |
4172 | " buffer_size_kb\t- view and modify size of per cpu buffer\n" | |
4173 | " buffer_total_size_kb - view total size of all cpu buffers\n\n" | |
4174 | " trace_clock\t\t-change the clock used to order events\n" | |
4175 | " local: Per cpu clock but may not be synced across CPUs\n" | |
4176 | " global: Synced across CPUs but slows tracing down.\n" | |
4177 | " counter: Not a clock, but just an increment\n" | |
4178 | " uptime: Jiffy counter from time of boot\n" | |
4179 | " perf: Same clock that perf events use\n" | |
4180 | #ifdef CONFIG_X86_64 | |
4181 | " x86-tsc: TSC cycle counter\n" | |
4182 | #endif | |
4183 | "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" | |
fa32e855 | 4184 | "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" |
22f45649 SRRH |
4185 | " tracing_cpumask\t- Limit which CPUs to trace\n" |
4186 | " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" | |
4187 | "\t\t\t Remove sub-buffer with rmdir\n" | |
4188 | " trace_options\t\t- Set format or modify how tracing happens\n" | |
71485c45 SRRH |
4189 | "\t\t\t Disable an option by adding a suffix 'no' to the\n" |
4190 | "\t\t\t option name\n" | |
939c7a4f | 4191 | " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" |
22f45649 SRRH |
4192 | #ifdef CONFIG_DYNAMIC_FTRACE |
4193 | "\n available_filter_functions - list of functions that can be filtered on\n" | |
71485c45 SRRH |
4194 | " set_ftrace_filter\t- echo function name in here to only trace these\n" |
4195 | "\t\t\t functions\n" | |
60f1d5e3 | 4196 | "\t accepts: func_full_name or glob-matching-pattern\n" |
71485c45 SRRH |
4197 | "\t modules: Can select a group via module\n" |
4198 | "\t Format: :mod:<module-name>\n" | |
4199 | "\t example: echo :mod:ext3 > set_ftrace_filter\n" | |
4200 | "\t triggers: a command to perform when function is hit\n" | |
4201 | "\t Format: <function>:<trigger>[:count]\n" | |
4202 | "\t trigger: traceon, traceoff\n" | |
4203 | "\t\t enable_event:<system>:<event>\n" | |
4204 | "\t\t disable_event:<system>:<event>\n" | |
22f45649 | 4205 | #ifdef CONFIG_STACKTRACE |
71485c45 | 4206 | "\t\t stacktrace\n" |
22f45649 SRRH |
4207 | #endif |
4208 | #ifdef CONFIG_TRACER_SNAPSHOT | |
71485c45 | 4209 | "\t\t snapshot\n" |
22f45649 | 4210 | #endif |
17a280ea SRRH |
4211 | "\t\t dump\n" |
4212 | "\t\t cpudump\n" | |
71485c45 SRRH |
4213 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" |
4214 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" | |
4215 | "\t The first one will disable tracing every time do_fault is hit\n" | |
4216 | "\t The second will disable tracing at most 3 times when do_trap is hit\n" | |
4217 | "\t The first time do trap is hit and it disables tracing, the\n" | |
4218 | "\t counter will decrement to 2. If tracing is already disabled,\n" | |
4219 | "\t the counter will not decrement. It only decrements when the\n" | |
4220 | "\t trigger did work\n" | |
4221 | "\t To remove trigger without count:\n" | |
4222 | "\t echo '!<function>:<trigger> > set_ftrace_filter\n" | |
4223 | "\t To remove trigger with a count:\n" | |
4224 | "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" | |
22f45649 | 4225 | " set_ftrace_notrace\t- echo function name in here to never trace.\n" |
71485c45 SRRH |
4226 | "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" |
4227 | "\t modules: Can select a group via module command :mod:\n" | |
4228 | "\t Does not accept triggers\n" | |
22f45649 SRRH |
4229 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
4230 | #ifdef CONFIG_FUNCTION_TRACER | |
71485c45 SRRH |
4231 | " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" |
4232 | "\t\t (function)\n" | |
22f45649 SRRH |
4233 | #endif |
4234 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
4235 | " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" | |
d048a8c7 | 4236 | " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" |
22f45649 SRRH |
4237 | " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" |
4238 | #endif | |
4239 | #ifdef CONFIG_TRACER_SNAPSHOT | |
71485c45 SRRH |
4240 | "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" |
4241 | "\t\t\t snapshot buffer. Read the contents for more\n" | |
4242 | "\t\t\t information\n" | |
22f45649 | 4243 | #endif |
991821c8 | 4244 | #ifdef CONFIG_STACK_TRACER |
22f45649 SRRH |
4245 | " stack_trace\t\t- Shows the max stack trace when active\n" |
4246 | " stack_max_size\t- Shows current max stack size that was traced\n" | |
71485c45 SRRH |
4247 | "\t\t\t Write into this file to reset the max size (trigger a\n" |
4248 | "\t\t\t new trace)\n" | |
22f45649 | 4249 | #ifdef CONFIG_DYNAMIC_FTRACE |
71485c45 SRRH |
4250 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" |
4251 | "\t\t\t traces\n" | |
22f45649 | 4252 | #endif |
991821c8 | 4253 | #endif /* CONFIG_STACK_TRACER */ |
86425625 MH |
4254 | #ifdef CONFIG_KPROBE_EVENT |
4255 | " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" | |
4256 | "\t\t\t Write into this file to define/undefine new trace events.\n" | |
4257 | #endif | |
4258 | #ifdef CONFIG_UPROBE_EVENT | |
4259 | " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" | |
4260 | "\t\t\t Write into this file to define/undefine new trace events.\n" | |
4261 | #endif | |
4262 | #if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT) | |
4263 | "\t accepts: event-definitions (one definition per line)\n" | |
4264 | "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n" | |
4265 | "\t -:[<group>/]<event>\n" | |
4266 | #ifdef CONFIG_KPROBE_EVENT | |
4267 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" | |
4268 | #endif | |
4269 | #ifdef CONFIG_UPROBE_EVENT | |
4270 | "\t place: <path>:<offset>\n" | |
4271 | #endif | |
4272 | "\t args: <name>=fetcharg[:type]\n" | |
4273 | "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" | |
4274 | "\t $stack<index>, $stack, $retval, $comm\n" | |
4275 | "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n" | |
4276 | "\t b<bit-width>@<bit-offset>/<container-size>\n" | |
4277 | #endif | |
26f25564 TZ |
4278 | " events/\t\t- Directory containing all trace event subsystems:\n" |
4279 | " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" | |
4280 | " events/<system>/\t- Directory containing all trace events for <system>:\n" | |
71485c45 SRRH |
4281 | " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" |
4282 | "\t\t\t events\n" | |
26f25564 | 4283 | " filter\t\t- If set, only events passing filter are traced\n" |
71485c45 SRRH |
4284 | " events/<system>/<event>/\t- Directory containing control files for\n" |
4285 | "\t\t\t <event>:\n" | |
26f25564 TZ |
4286 | " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" |
4287 | " filter\t\t- If set, only events passing filter are traced\n" | |
4288 | " trigger\t\t- If set, a command to perform when event is hit\n" | |
71485c45 SRRH |
4289 | "\t Format: <trigger>[:count][if <filter>]\n" |
4290 | "\t trigger: traceon, traceoff\n" | |
4291 | "\t enable_event:<system>:<event>\n" | |
4292 | "\t disable_event:<system>:<event>\n" | |
d0bad49b TZ |
4293 | #ifdef CONFIG_HIST_TRIGGERS |
4294 | "\t enable_hist:<system>:<event>\n" | |
4295 | "\t disable_hist:<system>:<event>\n" | |
4296 | #endif | |
26f25564 | 4297 | #ifdef CONFIG_STACKTRACE |
71485c45 | 4298 | "\t\t stacktrace\n" |
26f25564 TZ |
4299 | #endif |
4300 | #ifdef CONFIG_TRACER_SNAPSHOT | |
71485c45 | 4301 | "\t\t snapshot\n" |
7ef224d1 TZ |
4302 | #endif |
4303 | #ifdef CONFIG_HIST_TRIGGERS | |
4304 | "\t\t hist (see below)\n" | |
26f25564 | 4305 | #endif |
71485c45 SRRH |
4306 | "\t example: echo traceoff > events/block/block_unplug/trigger\n" |
4307 | "\t echo traceoff:3 > events/block/block_unplug/trigger\n" | |
4308 | "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" | |
4309 | "\t events/block/block_unplug/trigger\n" | |
4310 | "\t The first disables tracing every time block_unplug is hit.\n" | |
4311 | "\t The second disables tracing the first 3 times block_unplug is hit.\n" | |
4312 | "\t The third enables the kmalloc event the first 3 times block_unplug\n" | |
4313 | "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" | |
4314 | "\t Like function triggers, the counter is only decremented if it\n" | |
4315 | "\t enabled or disabled tracing.\n" | |
4316 | "\t To remove a trigger without a count:\n" | |
4317 | "\t echo '!<trigger> > <system>/<event>/trigger\n" | |
4318 | "\t To remove a trigger with a count:\n" | |
4319 | "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" | |
4320 | "\t Filters can be ignored when removing a trigger.\n" | |
7ef224d1 TZ |
4321 | #ifdef CONFIG_HIST_TRIGGERS |
4322 | " hist trigger\t- If set, event hits are aggregated into a hash table\n" | |
76a3b0c8 | 4323 | "\t Format: hist:keys=<field1[,field2,...]>\n" |
f2606835 | 4324 | "\t [:values=<field1[,field2,...]>]\n" |
e62347d2 | 4325 | "\t [:sort=<field1[,field2,...]>]\n" |
7ef224d1 | 4326 | "\t [:size=#entries]\n" |
e86ae9ba | 4327 | "\t [:pause][:continue][:clear]\n" |
5463bfda | 4328 | "\t [:name=histname1]\n" |
7ef224d1 TZ |
4329 | "\t [if <filter>]\n\n" |
4330 | "\t When a matching event is hit, an entry is added to a hash\n" | |
f2606835 TZ |
4331 | "\t table using the key(s) and value(s) named, and the value of a\n" |
4332 | "\t sum called 'hitcount' is incremented. Keys and values\n" | |
4333 | "\t correspond to fields in the event's format description. Keys\n" | |
69a0200c TZ |
4334 | "\t can be any field, or the special string 'stacktrace'.\n" |
4335 | "\t Compound keys consisting of up to two fields can be specified\n" | |
4336 | "\t by the 'keys' keyword. Values must correspond to numeric\n" | |
4337 | "\t fields. Sort keys consisting of up to two fields can be\n" | |
4338 | "\t specified using the 'sort' keyword. The sort direction can\n" | |
4339 | "\t be modified by appending '.descending' or '.ascending' to a\n" | |
4340 | "\t sort field. The 'size' parameter can be used to specify more\n" | |
5463bfda TZ |
4341 | "\t or fewer than the default 2048 entries for the hashtable size.\n" |
4342 | "\t If a hist trigger is given a name using the 'name' parameter,\n" | |
4343 | "\t its histogram data will be shared with other triggers of the\n" | |
4344 | "\t same name, and trigger hits will update this common data.\n\n" | |
7ef224d1 | 4345 | "\t Reading the 'hist' file for the event will dump the hash\n" |
52a7f16d TZ |
4346 | "\t table in its entirety to stdout. If there are multiple hist\n" |
4347 | "\t triggers attached to an event, there will be a table for each\n" | |
5463bfda TZ |
4348 | "\t trigger in the output. The table displayed for a named\n" |
4349 | "\t trigger will be the same as any other instance having the\n" | |
4350 | "\t same name. The default format used to display a given field\n" | |
4351 | "\t can be modified by appending any of the following modifiers\n" | |
4352 | "\t to the field name, as applicable:\n\n" | |
c6afad49 TZ |
4353 | "\t .hex display a number as a hex value\n" |
4354 | "\t .sym display an address as a symbol\n" | |
6b4827ad | 4355 | "\t .sym-offset display an address as a symbol and offset\n" |
31696198 TZ |
4356 | "\t .execname display a common_pid as a program name\n" |
4357 | "\t .syscall display a syscall id as a syscall name\n\n" | |
4b94f5b7 | 4358 | "\t .log2 display log2 value rather than raw number\n\n" |
83e99914 TZ |
4359 | "\t The 'pause' parameter can be used to pause an existing hist\n" |
4360 | "\t trigger or to start a hist trigger but not log any events\n" | |
4361 | "\t until told to do so. 'continue' can be used to start or\n" | |
4362 | "\t restart a paused hist trigger.\n\n" | |
e86ae9ba TZ |
4363 | "\t The 'clear' parameter will clear the contents of a running\n" |
4364 | "\t hist trigger and leave its current paused/active state\n" | |
4365 | "\t unchanged.\n\n" | |
d0bad49b TZ |
4366 | "\t The enable_hist and disable_hist triggers can be used to\n" |
4367 | "\t have one event conditionally start and stop another event's\n" | |
4368 | "\t already-attached hist trigger. The syntax is analagous to\n" | |
4369 | "\t the enable_event and disable_event triggers.\n" | |
7ef224d1 | 4370 | #endif |
7bd2f24c IM |
4371 | ; |
4372 | ||
4373 | static ssize_t | |
4374 | tracing_readme_read(struct file *filp, char __user *ubuf, | |
4375 | size_t cnt, loff_t *ppos) | |
4376 | { | |
4377 | return simple_read_from_buffer(ubuf, cnt, ppos, | |
4378 | readme_msg, strlen(readme_msg)); | |
4379 | } | |
4380 | ||
5e2336a0 | 4381 | static const struct file_operations tracing_readme_fops = { |
c7078de1 IM |
4382 | .open = tracing_open_generic, |
4383 | .read = tracing_readme_read, | |
b444786f | 4384 | .llseek = generic_file_llseek, |
7bd2f24c IM |
4385 | }; |
4386 | ||
42584c81 YY |
4387 | static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) |
4388 | { | |
4389 | unsigned int *ptr = v; | |
69abe6a5 | 4390 | |
42584c81 YY |
4391 | if (*pos || m->count) |
4392 | ptr++; | |
69abe6a5 | 4393 | |
42584c81 | 4394 | (*pos)++; |
69abe6a5 | 4395 | |
939c7a4f YY |
4396 | for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; |
4397 | ptr++) { | |
42584c81 YY |
4398 | if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) |
4399 | continue; | |
69abe6a5 | 4400 | |
42584c81 YY |
4401 | return ptr; |
4402 | } | |
69abe6a5 | 4403 | |
42584c81 YY |
4404 | return NULL; |
4405 | } | |
4406 | ||
4407 | static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos) | |
4408 | { | |
4409 | void *v; | |
4410 | loff_t l = 0; | |
69abe6a5 | 4411 | |
4c27e756 SRRH |
4412 | preempt_disable(); |
4413 | arch_spin_lock(&trace_cmdline_lock); | |
4414 | ||
939c7a4f | 4415 | v = &savedcmd->map_cmdline_to_pid[0]; |
42584c81 YY |
4416 | while (l <= *pos) { |
4417 | v = saved_cmdlines_next(m, v, &l); | |
4418 | if (!v) | |
4419 | return NULL; | |
69abe6a5 AP |
4420 | } |
4421 | ||
42584c81 YY |
4422 | return v; |
4423 | } | |
4424 | ||
4425 | static void saved_cmdlines_stop(struct seq_file *m, void *v) | |
4426 | { | |
4c27e756 SRRH |
4427 | arch_spin_unlock(&trace_cmdline_lock); |
4428 | preempt_enable(); | |
42584c81 | 4429 | } |
69abe6a5 | 4430 | |
42584c81 YY |
4431 | static int saved_cmdlines_show(struct seq_file *m, void *v) |
4432 | { | |
4433 | char buf[TASK_COMM_LEN]; | |
4434 | unsigned int *pid = v; | |
69abe6a5 | 4435 | |
4c27e756 | 4436 | __trace_find_cmdline(*pid, buf); |
42584c81 YY |
4437 | seq_printf(m, "%d %s\n", *pid, buf); |
4438 | return 0; | |
4439 | } | |
4440 | ||
4441 | static const struct seq_operations tracing_saved_cmdlines_seq_ops = { | |
4442 | .start = saved_cmdlines_start, | |
4443 | .next = saved_cmdlines_next, | |
4444 | .stop = saved_cmdlines_stop, | |
4445 | .show = saved_cmdlines_show, | |
4446 | }; | |
4447 | ||
4448 | static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) | |
4449 | { | |
4450 | if (tracing_disabled) | |
4451 | return -ENODEV; | |
4452 | ||
4453 | return seq_open(filp, &tracing_saved_cmdlines_seq_ops); | |
69abe6a5 AP |
4454 | } |
4455 | ||
4456 | static const struct file_operations tracing_saved_cmdlines_fops = { | |
42584c81 YY |
4457 | .open = tracing_saved_cmdlines_open, |
4458 | .read = seq_read, | |
4459 | .llseek = seq_lseek, | |
4460 | .release = seq_release, | |
69abe6a5 AP |
4461 | }; |
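/*
 * A minimal sketch of how the seq_file hooks above are consumed from
 * userspace (the tracefs mount point is an assumption):
 *
 *	# cat /sys/kernel/tracing/saved_cmdlines
 *	1234 bash
 *	...
 *
 * Each read walks savedcmd->map_cmdline_to_pid[] via
 * saved_cmdlines_start()/saved_cmdlines_next(), skipping unmapped
 * slots, and saved_cmdlines_show() resolves each pid to its cached
 * comm with __trace_find_cmdline().
 */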
4462 | ||
939c7a4f YY |
4463 | static ssize_t |
4464 | tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, | |
4465 | size_t cnt, loff_t *ppos) | |
4466 | { | |
4467 | char buf[64]; | |
4468 | int r; | |
4469 | ||
4470 | arch_spin_lock(&trace_cmdline_lock); | |
a6af8fbf | 4471 | r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); |
939c7a4f YY |
4472 | arch_spin_unlock(&trace_cmdline_lock); |
4473 | ||
4474 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
4475 | } | |
4476 | ||
4477 | static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) | |
4478 | { | |
4479 | kfree(s->saved_cmdlines); | |
4480 | kfree(s->map_cmdline_to_pid); | |
4481 | kfree(s); | |
4482 | } | |
4483 | ||
4484 | static int tracing_resize_saved_cmdlines(unsigned int val) | |
4485 | { | |
4486 | struct saved_cmdlines_buffer *s, *savedcmd_temp; | |
4487 | ||
a6af8fbf | 4488 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
939c7a4f YY |
4489 | if (!s) |
4490 | return -ENOMEM; | |
4491 | ||
4492 | if (allocate_cmdlines_buffer(val, s) < 0) { | |
4493 | kfree(s); | |
4494 | return -ENOMEM; | |
4495 | } | |
4496 | ||
4497 | arch_spin_lock(&trace_cmdline_lock); | |
4498 | savedcmd_temp = savedcmd; | |
4499 | savedcmd = s; | |
4500 | arch_spin_unlock(&trace_cmdline_lock); | |
4501 | free_saved_cmdlines_buffer(savedcmd_temp); | |
4502 | ||
4503 | return 0; | |
4504 | } | |
4505 | ||
4506 | static ssize_t | |
4507 | tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf, | |
4508 | size_t cnt, loff_t *ppos) | |
4509 | { | |
4510 | unsigned long val; | |
4511 | int ret; | |
4512 | ||
4513 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | |
4514 | if (ret) | |
4515 | return ret; | |
4516 | ||
4517 | /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */ | |
4518 | if (!val || val > PID_MAX_DEFAULT) | |
4519 | return -EINVAL; | |
4520 | ||
4521 | ret = tracing_resize_saved_cmdlines((unsigned int)val); | |
4522 | if (ret < 0) | |
4523 | return ret; | |
4524 | ||
4525 | *ppos += cnt; | |
4526 | ||
4527 | return cnt; | |
4528 | } | |
4529 | ||
4530 | static const struct file_operations tracing_saved_cmdlines_size_fops = { | |
4531 | .open = tracing_open_generic, | |
4532 | .read = tracing_saved_cmdlines_size_read, | |
4533 | .write = tracing_saved_cmdlines_size_write, | |
4534 | }; | |
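/*
 * The resize above is a copy-and-swap: the replacement buffer is
 * allocated outside the lock, the savedcmd pointer is switched while
 * holding trace_cmdline_lock, and the old buffer is freed afterwards,
 * so readers never see a half-initialized table.  A usage sketch
 * (paths are assumptions):
 *
 *	# echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *	# cat /sys/kernel/tracing/saved_cmdlines_size
 *	4096
 */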
4535 | ||
9828413d SRRH |
4536 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE |
4537 | static union trace_enum_map_item * | |
4538 | update_enum_map(union trace_enum_map_item *ptr) | |
4539 | { | |
4540 | if (!ptr->map.enum_string) { | |
4541 | if (ptr->tail.next) { | |
4542 | ptr = ptr->tail.next; | |
4543 | /* Set ptr to the next real item (skip head) */ | |
4544 | ptr++; | |
4545 | } else | |
4546 | return NULL; | |
4547 | } | |
4548 | return ptr; | |
4549 | } | |
4550 | ||
4551 | static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos) | |
4552 | { | |
4553 | union trace_enum_map_item *ptr = v; | |
4554 | ||
4555 | /* | |
4556 | * Paranoid! If ptr points to end, we don't want to increment past it. | |
4557 | * This really should never happen. | |
4558 | */ | |
4559 | ptr = update_enum_map(ptr); | |
4560 | if (WARN_ON_ONCE(!ptr)) | |
4561 | return NULL; | |
4562 | ||
4563 | ptr++; | |
4564 | ||
4565 | (*pos)++; | |
4566 | ||
4567 | ptr = update_enum_map(ptr); | |
4568 | ||
4569 | return ptr; | |
4570 | } | |
4571 | ||
4572 | static void *enum_map_start(struct seq_file *m, loff_t *pos) | |
4573 | { | |
4574 | union trace_enum_map_item *v; | |
4575 | loff_t l = 0; | |
4576 | ||
4577 | mutex_lock(&trace_enum_mutex); | |
4578 | ||
4579 | v = trace_enum_maps; | |
4580 | if (v) | |
4581 | v++; | |
4582 | ||
4583 | while (v && l < *pos) { | |
4584 | v = enum_map_next(m, v, &l); | |
4585 | } | |
4586 | ||
4587 | return v; | |
4588 | } | |
4589 | ||
4590 | static void enum_map_stop(struct seq_file *m, void *v) | |
4591 | { | |
4592 | mutex_unlock(&trace_enum_mutex); | |
4593 | } | |
4594 | ||
4595 | static int enum_map_show(struct seq_file *m, void *v) | |
4596 | { | |
4597 | union trace_enum_map_item *ptr = v; | |
4598 | ||
4599 | seq_printf(m, "%s %ld (%s)\n", | |
4600 | ptr->map.enum_string, ptr->map.enum_value, | |
4601 | ptr->map.system); | |
4602 | ||
4603 | return 0; | |
4604 | } | |
4605 | ||
4606 | static const struct seq_operations tracing_enum_map_seq_ops = { | |
4607 | .start = enum_map_start, | |
4608 | .next = enum_map_next, | |
4609 | .stop = enum_map_stop, | |
4610 | .show = enum_map_show, | |
4611 | }; | |
4612 | ||
4613 | static int tracing_enum_map_open(struct inode *inode, struct file *filp) | |
4614 | { | |
4615 | if (tracing_disabled) | |
4616 | return -ENODEV; | |
4617 | ||
4618 | return seq_open(filp, &tracing_enum_map_seq_ops); | |
4619 | } | |
4620 | ||
4621 | static const struct file_operations tracing_enum_map_fops = { | |
4622 | .open = tracing_enum_map_open, | |
4623 | .read = seq_read, | |
4624 | .llseek = seq_lseek, | |
4625 | .release = seq_release, | |
4626 | }; | |
4627 | ||
4628 | static inline union trace_enum_map_item * | |
4629 | trace_enum_jmp_to_tail(union trace_enum_map_item *ptr) | |
4630 | { | |
4631 | /* Return tail of array given the head */ | |
4632 | return ptr + ptr->head.length + 1; | |
4633 | } | |
4634 | ||
4635 | static void | |
4636 | trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start, | |
4637 | int len) | |
4638 | { | |
4639 | struct trace_enum_map **stop; | |
4640 | struct trace_enum_map **map; | |
4641 | union trace_enum_map_item *map_array; | |
4642 | union trace_enum_map_item *ptr; | |
4643 | ||
4644 | stop = start + len; | |
4645 | ||
4646 | /* | |
4647 | * The trace_enum_maps contains the map plus a head and tail item, | |
4648 | * where the head holds the module and length of array, and the | |
4649 | * tail holds a pointer to the next list. | |
4650 | */ | |
4651 | map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL); | |
4652 | if (!map_array) { | |
a395d6a7 | 4653 | pr_warn("Unable to allocate trace enum mapping\n"); |
9828413d SRRH |
4654 | return; |
4655 | } | |
4656 | ||
4657 | mutex_lock(&trace_enum_mutex); | |
4658 | ||
4659 | if (!trace_enum_maps) | |
4660 | trace_enum_maps = map_array; | |
4661 | else { | |
4662 | ptr = trace_enum_maps; | |
4663 | for (;;) { | |
4664 | ptr = trace_enum_jmp_to_tail(ptr); | |
4665 | if (!ptr->tail.next) | |
4666 | break; | |
4667 | ptr = ptr->tail.next; | |
4668 | ||
4669 | } | |
4670 | ptr->tail.next = map_array; | |
4671 | } | |
4672 | map_array->head.mod = mod; | |
4673 | map_array->head.length = len; | |
4674 | map_array++; | |
4675 | ||
4676 | for (map = start; (unsigned long)map < (unsigned long)stop; map++) { | |
4677 | map_array->map = **map; | |
4678 | map_array++; | |
4679 | } | |
4680 | memset(map_array, 0, sizeof(*map_array)); | |
4681 | ||
4682 | mutex_unlock(&trace_enum_mutex); | |
4683 | } | |
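/*
 * Layout sketch of one map_array allocation for len == 3:
 *
 *	[ head: mod, length=3 ][ map 0 ][ map 1 ][ map 2 ][ tail ]
 *
 * The zeroed tail entry doubles as a terminator: its enum_string is
 * NULL, so update_enum_map() either follows tail.next to the next
 * module's chunk or reports the end of the list.
 */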
4684 | ||
4685 | static void trace_create_enum_file(struct dentry *d_tracer) | |
4686 | { | |
4687 | trace_create_file("enum_map", 0444, d_tracer, | |
4688 | NULL, &tracing_enum_map_fops); | |
4689 | } | |
4690 | ||
4691 | #else /* CONFIG_TRACE_ENUM_MAP_FILE */ | |
4692 | static inline void trace_create_enum_file(struct dentry *d_tracer) { } | |
4693 | static inline void trace_insert_enum_map_file(struct module *mod, | |
4694 | struct trace_enum_map **start, int len) { } | |
4695 | #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */ | |
4696 | ||
4697 | static void trace_insert_enum_map(struct module *mod, | |
4698 | struct trace_enum_map **start, int len) | |
0c564a53 SRRH |
4699 | { |
4700 | struct trace_enum_map **map; | |
0c564a53 SRRH |
4701 | |
4702 | if (len <= 0) | |
4703 | return; | |
4704 | ||
4705 | map = start; | |
4706 | ||
4707 | trace_event_enum_update(map, len); | |
9828413d SRRH |
4708 | |
4709 | trace_insert_enum_map_file(mod, start, len); | |
0c564a53 SRRH |
4710 | } |
4711 | ||
bc0c38d1 SR |
4712 | static ssize_t |
4713 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | |
4714 | size_t cnt, loff_t *ppos) | |
4715 | { | |
2b6080f2 | 4716 | struct trace_array *tr = filp->private_data; |
ee6c2c1b | 4717 | char buf[MAX_TRACER_SIZE+2]; |
bc0c38d1 SR |
4718 | int r; |
4719 | ||
4720 | mutex_lock(&trace_types_lock); | |
2b6080f2 | 4721 | r = sprintf(buf, "%s\n", tr->current_trace->name); |
bc0c38d1 SR |
4722 | mutex_unlock(&trace_types_lock); |
4723 | ||
4bf39a94 | 4724 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
4725 | } |
4726 | ||
b6f11df2 ACM |
4727 | int tracer_init(struct tracer *t, struct trace_array *tr) |
4728 | { | |
12883efb | 4729 | tracing_reset_online_cpus(&tr->trace_buffer); |
b6f11df2 ACM |
4730 | return t->init(tr); |
4731 | } | |
4732 | ||
12883efb | 4733 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) |
438ced17 VN |
4734 | { |
4735 | int cpu; | |
737223fb | 4736 | |
438ced17 | 4737 | for_each_tracing_cpu(cpu) |
12883efb | 4738 | per_cpu_ptr(buf->data, cpu)->entries = val; |
438ced17 VN |
4739 | } |
4740 | ||
12883efb | 4741 | #ifdef CONFIG_TRACER_MAX_TRACE |
d60da506 | 4742 | /* resize @trace_buf's entries to the size of @size_buf's entries */ |
12883efb SRRH |
4743 | static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
4744 | struct trace_buffer *size_buf, int cpu_id) | |
d60da506 HT |
4745 | { |
4746 | int cpu, ret = 0; | |
4747 | ||
4748 | if (cpu_id == RING_BUFFER_ALL_CPUS) { | |
4749 | for_each_tracing_cpu(cpu) { | |
12883efb SRRH |
4750 | ret = ring_buffer_resize(trace_buf->buffer, |
4751 | per_cpu_ptr(size_buf->data, cpu)->entries, cpu); | |
d60da506 HT |
4752 | if (ret < 0) |
4753 | break; | |
12883efb SRRH |
4754 | per_cpu_ptr(trace_buf->data, cpu)->entries = |
4755 | per_cpu_ptr(size_buf->data, cpu)->entries; | |
d60da506 HT |
4756 | } |
4757 | } else { | |
12883efb SRRH |
4758 | ret = ring_buffer_resize(trace_buf->buffer, |
4759 | per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); | |
d60da506 | 4760 | if (ret == 0) |
12883efb SRRH |
4761 | per_cpu_ptr(trace_buf->data, cpu_id)->entries = |
4762 | per_cpu_ptr(size_buf->data, cpu_id)->entries; | |
d60da506 HT |
4763 | } |
4764 | ||
4765 | return ret; | |
4766 | } | |
12883efb | 4767 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
d60da506 | 4768 | |
2b6080f2 SR |
4769 | static int __tracing_resize_ring_buffer(struct trace_array *tr, |
4770 | unsigned long size, int cpu) | |
73c5162a SR |
4771 | { |
4772 | int ret; | |
4773 | ||
4774 | /* | |
4775 | * If the kernel or the user changes the size of the ring buffer | |
a123c52b SR |
4776 | * we use the size that was given, and we can forget about |
4777 | * expanding it later. | |
73c5162a | 4778 | */ |
55034cd6 | 4779 | ring_buffer_expanded = true; |
73c5162a | 4780 | |
b382ede6 | 4781 | /* May be called before buffers are initialized */ |
12883efb | 4782 | if (!tr->trace_buffer.buffer) |
b382ede6 SR |
4783 | return 0; |
4784 | ||
12883efb | 4785 | ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); |
73c5162a SR |
4786 | if (ret < 0) |
4787 | return ret; | |
4788 | ||
12883efb | 4789 | #ifdef CONFIG_TRACER_MAX_TRACE |
2b6080f2 SR |
4790 | if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || |
4791 | !tr->current_trace->use_max_tr) | |
ef710e10 KM |
4792 | goto out; |
4793 | ||
12883efb | 4794 | ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); |
73c5162a | 4795 | if (ret < 0) { |
12883efb SRRH |
4796 | int r = resize_buffer_duplicate_size(&tr->trace_buffer, |
4797 | &tr->trace_buffer, cpu); | |
73c5162a | 4798 | if (r < 0) { |
a123c52b SR |
4799 | /* |
4800 | * AARGH! We are left with different | |
4801 | * size max buffer!!!! | |
4802 | * The max buffer is our "snapshot" buffer. | |
4803 | * When a tracer needs a snapshot (one of the | |
4804 | * latency tracers), it swaps the max buffer | |
4805 | * with the saved snapshot. We succeeded in updating | |
4806 | * the size of the main buffer, but failed to | |
4807 | * update the size of the max buffer. But when we tried | |
4808 | * to reset the main buffer to the original size, we | |
4809 | * failed there too. This is very unlikely to | |
4810 | * happen, but if it does, warn and kill all | |
4811 | * tracing. | |
4812 | */ | |
73c5162a SR |
4813 | WARN_ON(1); |
4814 | tracing_disabled = 1; | |
4815 | } | |
4816 | return ret; | |
4817 | } | |
4818 | ||
438ced17 | 4819 | if (cpu == RING_BUFFER_ALL_CPUS) |
12883efb | 4820 | set_buffer_entries(&tr->max_buffer, size); |
438ced17 | 4821 | else |
12883efb | 4822 | per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; |
438ced17 | 4823 | |
ef710e10 | 4824 | out: |
12883efb SRRH |
4825 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
4826 | ||
438ced17 | 4827 | if (cpu == RING_BUFFER_ALL_CPUS) |
12883efb | 4828 | set_buffer_entries(&tr->trace_buffer, size); |
438ced17 | 4829 | else |
12883efb | 4830 | per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; |
73c5162a SR |
4831 | |
4832 | return ret; | |
4833 | } | |
4834 | ||
2b6080f2 SR |
4835 | static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
4836 | unsigned long size, int cpu_id) | |
4f271a2a | 4837 | { |
83f40318 | 4838 | int ret = size; |
4f271a2a VN |
4839 | |
4840 | mutex_lock(&trace_types_lock); | |
4841 | ||
438ced17 VN |
4842 | if (cpu_id != RING_BUFFER_ALL_CPUS) { |
4843 | /* make sure this cpu is enabled in the mask */ | |
4844 | if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { | |
4845 | ret = -EINVAL; | |
4846 | goto out; | |
4847 | } | |
4848 | } | |
4f271a2a | 4849 | |
2b6080f2 | 4850 | ret = __tracing_resize_ring_buffer(tr, size, cpu_id); |
4f271a2a VN |
4851 | if (ret < 0) |
4852 | ret = -ENOMEM; | |
4853 | ||
438ced17 | 4854 | out: |
4f271a2a VN |
4855 | mutex_unlock(&trace_types_lock); |
4856 | ||
4857 | return ret; | |
4858 | } | |
4859 | ||
ef710e10 | 4860 | |
1852fcce SR |
4861 | /** |
4862 | * tracing_update_buffers - used by tracing facility to expand ring buffers | |
4863 | * | |
4864 | * To save memory on systems where tracing is configured in but never | |
4865 | * used, the ring buffers are initially set to a minimum size. Once a | |
4866 | * user starts to use the tracing facility, they need to grow to | |
4867 | * their default size. | |
4868 | * | |
4869 | * This function is to be called when a tracer is about to be used. | |
4870 | */ | |
4871 | int tracing_update_buffers(void) | |
4872 | { | |
4873 | int ret = 0; | |
4874 | ||
1027fcb2 | 4875 | mutex_lock(&trace_types_lock); |
1852fcce | 4876 | if (!ring_buffer_expanded) |
2b6080f2 | 4877 | ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, |
438ced17 | 4878 | RING_BUFFER_ALL_CPUS); |
1027fcb2 | 4879 | mutex_unlock(&trace_types_lock); |
1852fcce SR |
4880 | |
4881 | return ret; | |
4882 | } | |
4883 | ||
577b785f SR |
4884 | struct trace_option_dentry; |
4885 | ||
37aea98b | 4886 | static void |
2b6080f2 | 4887 | create_trace_option_files(struct trace_array *tr, struct tracer *tracer); |
577b785f | 4888 | |
6b450d25 SRRH |
4889 | /* |
4890 | * Used to clear out the tracer before deletion of an instance. | |
4891 | * Must have trace_types_lock held. | |
4892 | */ | |
4893 | static void tracing_set_nop(struct trace_array *tr) | |
4894 | { | |
4895 | if (tr->current_trace == &nop_trace) | |
4896 | return; | |
4897 | ||
50512ab5 | 4898 | tr->current_trace->enabled--; |
6b450d25 SRRH |
4899 | |
4900 | if (tr->current_trace->reset) | |
4901 | tr->current_trace->reset(tr); | |
4902 | ||
4903 | tr->current_trace = &nop_trace; | |
4904 | } | |
4905 | ||
41d9c0be | 4906 | static void add_tracer_options(struct trace_array *tr, struct tracer *t) |
bc0c38d1 | 4907 | { |
09d23a1d SRRH |
4908 | /* Only enable if the directory has been created already. */ |
4909 | if (!tr->dir) | |
4910 | return; | |
4911 | ||
37aea98b | 4912 | create_trace_option_files(tr, t); |
09d23a1d SRRH |
4913 | } |
4914 | ||
4915 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |
4916 | { | |
bc0c38d1 | 4917 | struct tracer *t; |
12883efb | 4918 | #ifdef CONFIG_TRACER_MAX_TRACE |
34600f0e | 4919 | bool had_max_tr; |
12883efb | 4920 | #endif |
d9e54076 | 4921 | int ret = 0; |
bc0c38d1 | 4922 | |
1027fcb2 SR |
4923 | mutex_lock(&trace_types_lock); |
4924 | ||
73c5162a | 4925 | if (!ring_buffer_expanded) { |
2b6080f2 | 4926 | ret = __tracing_resize_ring_buffer(tr, trace_buf_size, |
438ced17 | 4927 | RING_BUFFER_ALL_CPUS); |
73c5162a | 4928 | if (ret < 0) |
59f586db | 4929 | goto out; |
73c5162a SR |
4930 | ret = 0; |
4931 | } | |
4932 | ||
bc0c38d1 SR |
4933 | for (t = trace_types; t; t = t->next) { |
4934 | if (strcmp(t->name, buf) == 0) | |
4935 | break; | |
4936 | } | |
c2931e05 FW |
4937 | if (!t) { |
4938 | ret = -EINVAL; | |
4939 | goto out; | |
4940 | } | |
2b6080f2 | 4941 | if (t == tr->current_trace) |
bc0c38d1 SR |
4942 | goto out; |
4943 | ||
607e2ea1 SRRH |
4944 | /* Some tracers are only allowed for the top level buffer */ |
4945 | if (!trace_ok_for_array(t, tr)) { | |
4946 | ret = -EINVAL; | |
4947 | goto out; | |
4948 | } | |
4949 | ||
cf6ab6d9 SRRH |
4950 | /* If trace pipe files are being read, we can't change the tracer */ |
4951 | if (tr->current_trace->ref) { | |
4952 | ret = -EBUSY; | |
4953 | goto out; | |
4954 | } | |
4955 | ||
9f029e83 | 4956 | trace_branch_disable(); |
613f04a0 | 4957 | |
50512ab5 | 4958 | tr->current_trace->enabled--; |
613f04a0 | 4959 | |
2b6080f2 SR |
4960 | if (tr->current_trace->reset) |
4961 | tr->current_trace->reset(tr); | |
34600f0e | 4962 | |
12883efb | 4963 | /* Current trace needs to be nop_trace before synchronize_sched */ |
2b6080f2 | 4964 | tr->current_trace = &nop_trace; |
34600f0e | 4965 | |
45ad21ca SRRH |
4966 | #ifdef CONFIG_TRACER_MAX_TRACE |
4967 | had_max_tr = tr->allocated_snapshot; | |
34600f0e SR |
4968 | |
4969 | if (had_max_tr && !t->use_max_tr) { | |
4970 | /* | |
4971 | * We need to make sure that the update_max_tr sees that | |
4972 | * current_trace changed to nop_trace to keep it from | |
4973 | * swapping the buffers after we resize it. | |
4974 | * The update_max_tr is called from interrupts disabled | |
4975 | * so a synchronize_sched() is sufficient. | |
4976 | */ | |
4977 | synchronize_sched(); | |
3209cff4 | 4978 | free_snapshot(tr); |
ef710e10 | 4979 | } |
12883efb | 4980 | #endif |
12883efb SRRH |
4981 | |
4982 | #ifdef CONFIG_TRACER_MAX_TRACE | |
34600f0e | 4983 | if (t->use_max_tr && !had_max_tr) { |
3209cff4 | 4984 | ret = alloc_snapshot(tr); |
d60da506 HT |
4985 | if (ret < 0) |
4986 | goto out; | |
ef710e10 | 4987 | } |
12883efb | 4988 | #endif |
577b785f | 4989 | |
1c80025a | 4990 | if (t->init) { |
b6f11df2 | 4991 | ret = tracer_init(t, tr); |
1c80025a FW |
4992 | if (ret) |
4993 | goto out; | |
4994 | } | |
bc0c38d1 | 4995 | |
2b6080f2 | 4996 | tr->current_trace = t; |
50512ab5 | 4997 | tr->current_trace->enabled++; |
9f029e83 | 4998 | trace_branch_enable(tr); |
bc0c38d1 SR |
4999 | out: |
5000 | mutex_unlock(&trace_types_lock); | |
5001 | ||
d9e54076 PZ |
5002 | return ret; |
5003 | } | |
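/*
 * A usage sketch of the tracer switch implemented above (which tracers
 * are available depends on the kernel configuration):
 *
 *	# echo function > /sys/kernel/tracing/current_tracer
 *	# cat /sys/kernel/tracing/current_tracer
 *	function
 *
 * A name not listed in available_tracers fails with -EINVAL, and the
 * write fails with -EBUSY while trace_pipe readers hold a reference.
 */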
5004 | ||
5005 | static ssize_t | |
5006 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |
5007 | size_t cnt, loff_t *ppos) | |
5008 | { | |
607e2ea1 | 5009 | struct trace_array *tr = filp->private_data; |
ee6c2c1b | 5010 | char buf[MAX_TRACER_SIZE+1]; |
d9e54076 PZ |
5011 | int i; |
5012 | size_t ret; | |
e6e7a65a FW |
5013 | int err; |
5014 | ||
5015 | ret = cnt; | |
d9e54076 | 5016 | |
ee6c2c1b LZ |
5017 | if (cnt > MAX_TRACER_SIZE) |
5018 | cnt = MAX_TRACER_SIZE; | |
d9e54076 | 5019 | |
4afe6495 | 5020 | if (copy_from_user(buf, ubuf, cnt)) |
d9e54076 PZ |
5021 | return -EFAULT; |
5022 | ||
5023 | buf[cnt] = 0; | |
5024 | ||
5025 | /* strip ending whitespace. */ | |
5026 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | |
5027 | buf[i] = 0; | |
5028 | ||
607e2ea1 | 5029 | err = tracing_set_tracer(tr, buf); |
e6e7a65a FW |
5030 | if (err) |
5031 | return err; | |
d9e54076 | 5032 | |
cf8517cf | 5033 | *ppos += ret; |
bc0c38d1 | 5034 | |
c2931e05 | 5035 | return ret; |
bc0c38d1 SR |
5036 | } |
5037 | ||
5038 | static ssize_t | |
6508fa76 SF |
5039 | tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, |
5040 | size_t cnt, loff_t *ppos) | |
bc0c38d1 | 5041 | { |
bc0c38d1 SR |
5042 | char buf[64]; |
5043 | int r; | |
5044 | ||
cffae437 | 5045 | r = snprintf(buf, sizeof(buf), "%ld\n", |
bc0c38d1 | 5046 | *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); |
cffae437 SR |
5047 | if (r > sizeof(buf)) |
5048 | r = sizeof(buf); | |
4bf39a94 | 5049 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
5050 | } |
5051 | ||
5052 | static ssize_t | |
6508fa76 SF |
5053 | tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, |
5054 | size_t cnt, loff_t *ppos) | |
bc0c38d1 | 5055 | { |
5e39841c | 5056 | unsigned long val; |
c6caeeb1 | 5057 | int ret; |
bc0c38d1 | 5058 | |
22fe9b54 PH |
5059 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
5060 | if (ret) | |
c6caeeb1 | 5061 | return ret; |
bc0c38d1 SR |
5062 | |
5063 | *ptr = val * 1000; | |
5064 | ||
5065 | return cnt; | |
5066 | } | |
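/*
 * The files built on these helpers are expressed in microseconds; the
 * value is scaled by 1000 on write and divided back on read, e.g.:
 *
 *	# echo 100 > tracing_thresh	(stores 100000 ns)
 *	# cat tracing_thresh
 *	100
 */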
5067 | ||
6508fa76 SF |
5068 | static ssize_t |
5069 | tracing_thresh_read(struct file *filp, char __user *ubuf, | |
5070 | size_t cnt, loff_t *ppos) | |
5071 | { | |
5072 | return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); | |
5073 | } | |
5074 | ||
5075 | static ssize_t | |
5076 | tracing_thresh_write(struct file *filp, const char __user *ubuf, | |
5077 | size_t cnt, loff_t *ppos) | |
5078 | { | |
5079 | struct trace_array *tr = filp->private_data; | |
5080 | int ret; | |
5081 | ||
5082 | mutex_lock(&trace_types_lock); | |
5083 | ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); | |
5084 | if (ret < 0) | |
5085 | goto out; | |
5086 | ||
5087 | if (tr->current_trace->update_thresh) { | |
5088 | ret = tr->current_trace->update_thresh(tr); | |
5089 | if (ret < 0) | |
5090 | goto out; | |
5091 | } | |
5092 | ||
5093 | ret = cnt; | |
5094 | out: | |
5095 | mutex_unlock(&trace_types_lock); | |
5096 | ||
5097 | return ret; | |
5098 | } | |
5099 | ||
f971cc9a | 5100 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
e428abbb | 5101 | |
6508fa76 SF |
5102 | static ssize_t |
5103 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | |
5104 | size_t cnt, loff_t *ppos) | |
5105 | { | |
5106 | return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); | |
5107 | } | |
5108 | ||
5109 | static ssize_t | |
5110 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |
5111 | size_t cnt, loff_t *ppos) | |
5112 | { | |
5113 | return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); | |
5114 | } | |
5115 | ||
e428abbb CG |
5116 | #endif |
5117 | ||
b3806b43 SR |
5118 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
5119 | { | |
15544209 | 5120 | struct trace_array *tr = inode->i_private; |
b3806b43 | 5121 | struct trace_iterator *iter; |
b04cc6b1 | 5122 | int ret = 0; |
b3806b43 SR |
5123 | |
5124 | if (tracing_disabled) | |
5125 | return -ENODEV; | |
5126 | ||
7b85af63 SRRH |
5127 | if (trace_array_get(tr) < 0) |
5128 | return -ENODEV; | |
5129 | ||
b04cc6b1 FW |
5130 | mutex_lock(&trace_types_lock); |
5131 | ||
b3806b43 SR |
5132 | /* create a buffer to store the information to pass to userspace */ |
5133 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | |
b04cc6b1 FW |
5134 | if (!iter) { |
5135 | ret = -ENOMEM; | |
f77d09a3 | 5136 | __trace_array_put(tr); |
b04cc6b1 FW |
5137 | goto out; |
5138 | } | |
b3806b43 | 5139 | |
3a161d99 | 5140 | trace_seq_init(&iter->seq); |
d716ff71 | 5141 | iter->trace = tr->current_trace; |
d7350c3f | 5142 | |
4462344e | 5143 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { |
b04cc6b1 | 5144 | ret = -ENOMEM; |
d7350c3f | 5145 | goto fail; |
4462344e RR |
5146 | } |
5147 | ||
a309720c | 5148 | /* trace pipe does not show start of buffer */ |
4462344e | 5149 | cpumask_setall(iter->started); |
a309720c | 5150 | |
983f938a | 5151 | if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) |
112f38a7 SR |
5152 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
5153 | ||
8be0709f | 5154 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
58e8eedf | 5155 | if (trace_clocks[tr->clock_id].in_ns) |
8be0709f DS |
5156 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
5157 | ||
15544209 ON |
5158 | iter->tr = tr; |
5159 | iter->trace_buffer = &tr->trace_buffer; | |
5160 | iter->cpu_file = tracing_get_cpu(inode); | |
d7350c3f | 5161 | mutex_init(&iter->mutex); |
b3806b43 SR |
5162 | filp->private_data = iter; |
5163 | ||
107bad8b SR |
5164 | if (iter->trace->pipe_open) |
5165 | iter->trace->pipe_open(iter); | |
107bad8b | 5166 | |
b444786f | 5167 | nonseekable_open(inode, filp); |
cf6ab6d9 SRRH |
5168 | |
5169 | tr->current_trace->ref++; | |
b04cc6b1 FW |
5170 | out: |
5171 | mutex_unlock(&trace_types_lock); | |
5172 | return ret; | |
d7350c3f FW |
5173 | |
5174 | fail: | |
5175 | /* iter->trace points at tr->current_trace, not a private copy; do not free it */ | |
5176 | kfree(iter); | |
7b85af63 | 5177 | __trace_array_put(tr); |
d7350c3f FW |
5178 | mutex_unlock(&trace_types_lock); |
5179 | return ret; | |
b3806b43 SR |
5180 | } |
5181 | ||
5182 | static int tracing_release_pipe(struct inode *inode, struct file *file) | |
5183 | { | |
5184 | struct trace_iterator *iter = file->private_data; | |
15544209 | 5185 | struct trace_array *tr = inode->i_private; |
b3806b43 | 5186 | |
b04cc6b1 FW |
5187 | mutex_lock(&trace_types_lock); |
5188 | ||
cf6ab6d9 SRRH |
5189 | tr->current_trace->ref--; |
5190 | ||
29bf4a5e | 5191 | if (iter->trace->pipe_close) |
c521efd1 SR |
5192 | iter->trace->pipe_close(iter); |
5193 | ||
b04cc6b1 FW |
5194 | mutex_unlock(&trace_types_lock); |
5195 | ||
4462344e | 5196 | free_cpumask_var(iter->started); |
d7350c3f | 5197 | mutex_destroy(&iter->mutex); |
b3806b43 | 5198 | kfree(iter); |
b3806b43 | 5199 | |
7b85af63 SRRH |
5200 | trace_array_put(tr); |
5201 | ||
b3806b43 SR |
5202 | return 0; |
5203 | } | |
5204 | ||
2a2cc8f7 | 5205 | static unsigned int |
cc60cdc9 | 5206 | trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) |
2a2cc8f7 | 5207 | { |
983f938a SRRH |
5208 | struct trace_array *tr = iter->tr; |
5209 | ||
15693458 SRRH |
5210 | /* Iterators are static; they should be filled or empty */ |
5211 | if (trace_buffer_iter(iter, iter->cpu_file)) | |
5212 | return POLLIN | POLLRDNORM; | |
2a2cc8f7 | 5213 | |
983f938a | 5214 | if (tr->trace_flags & TRACE_ITER_BLOCK) |
2a2cc8f7 SSP |
5215 | /* |
5216 | * Always select as readable when in blocking mode | |
5217 | */ | |
5218 | return POLLIN | POLLRDNORM; | |
15693458 | 5219 | else |
12883efb | 5220 | return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, |
15693458 | 5221 | filp, poll_table); |
2a2cc8f7 | 5222 | } |
2a2cc8f7 | 5223 | |
cc60cdc9 SR |
5224 | static unsigned int |
5225 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |
5226 | { | |
5227 | struct trace_iterator *iter = filp->private_data; | |
5228 | ||
5229 | return trace_poll(iter, filp, poll_table); | |
2a2cc8f7 SSP |
5230 | } |
5231 | ||
d716ff71 | 5232 | /* Must be called with iter->mutex held. */ |
ff98781b | 5233 | static int tracing_wait_pipe(struct file *filp) |
b3806b43 SR |
5234 | { |
5235 | struct trace_iterator *iter = filp->private_data; | |
8b8b3683 | 5236 | int ret; |
b3806b43 | 5237 | |
b3806b43 | 5238 | while (trace_empty(iter)) { |
2dc8f095 | 5239 | |
107bad8b | 5240 | if ((filp->f_flags & O_NONBLOCK)) { |
ff98781b | 5241 | return -EAGAIN; |
107bad8b | 5242 | } |
2dc8f095 | 5243 | |
b3806b43 | 5244 | /* |
250bfd3d | 5245 | * We block until we read something and tracing is disabled. |
b3806b43 SR |
5246 | * We still block if tracing is disabled, but we have never |
5247 | * read anything. This allows a user to cat this file, and | |
5248 | * then enable tracing. But after we have read something, | |
5249 | * we give an EOF when tracing is again disabled. | |
5250 | * | |
5251 | * iter->pos will be 0 if we haven't read anything. | |
5252 | */ | |
10246fa3 | 5253 | if (!tracing_is_on() && iter->pos) |
b3806b43 | 5254 | break; |
f4874261 SRRH |
5255 | |
5256 | mutex_unlock(&iter->mutex); | |
5257 | ||
e30f53aa | 5258 | ret = wait_on_pipe(iter, false); |
f4874261 SRRH |
5259 | |
5260 | mutex_lock(&iter->mutex); | |
5261 | ||
8b8b3683 SRRH |
5262 | if (ret) |
5263 | return ret; | |
b3806b43 SR |
5264 | } |
5265 | ||
ff98781b EGM |
5266 | return 1; |
5267 | } | |
5268 | ||
5269 | /* | |
5270 | * Consumer reader. | |
5271 | */ | |
5272 | static ssize_t | |
5273 | tracing_read_pipe(struct file *filp, char __user *ubuf, | |
5274 | size_t cnt, loff_t *ppos) | |
5275 | { | |
5276 | struct trace_iterator *iter = filp->private_data; | |
5277 | ssize_t sret; | |
5278 | ||
d7350c3f FW |
5279 | /* |
5280 | * Avoid more than one consumer on a single file descriptor | |
5281 | * This is just a matter of traces coherency, the ring buffer itself | |
5282 | * is protected. | |
5283 | */ | |
5284 | mutex_lock(&iter->mutex); | |
1245800c SRRH |
5285 | |
5286 | /* return any leftover data */ | |
5287 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | |
5288 | if (sret != -EBUSY) | |
5289 | goto out; | |
5290 | ||
5291 | trace_seq_init(&iter->seq); | |
5292 | ||
ff98781b EGM |
5293 | if (iter->trace->read) { |
5294 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | |
5295 | if (sret) | |
5296 | goto out; | |
5297 | } | |
5298 | ||
5299 | waitagain: | |
5300 | sret = tracing_wait_pipe(filp); | |
5301 | if (sret <= 0) | |
5302 | goto out; | |
5303 | ||
b3806b43 | 5304 | /* stop when tracing is finished */ |
ff98781b EGM |
5305 | if (trace_empty(iter)) { |
5306 | sret = 0; | |
107bad8b | 5307 | goto out; |
ff98781b | 5308 | } |
b3806b43 SR |
5309 | |
5310 | if (cnt >= PAGE_SIZE) | |
5311 | cnt = PAGE_SIZE - 1; | |
5312 | ||
53d0aa77 | 5313 | /* reset all but tr, trace, and overruns */ |
53d0aa77 SR |
5314 | memset(&iter->seq, 0, |
5315 | sizeof(struct trace_iterator) - | |
5316 | offsetof(struct trace_iterator, seq)); | |
ed5467da | 5317 | cpumask_clear(iter->started); |
4823ed7e | 5318 | iter->pos = -1; |
b3806b43 | 5319 | |
4f535968 | 5320 | trace_event_read_lock(); |
7e53bd42 | 5321 | trace_access_lock(iter->cpu_file); |
955b61e5 | 5322 | while (trace_find_next_entry_inc(iter) != NULL) { |
2c4f035f | 5323 | enum print_line_t ret; |
5ac48378 | 5324 | int save_len = iter->seq.seq.len; |
088b1e42 | 5325 | |
f9896bf3 | 5326 | ret = print_trace_line(iter); |
2c4f035f | 5327 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
088b1e42 | 5328 | /* don't print partial lines */ |
5ac48378 | 5329 | iter->seq.seq.len = save_len; |
b3806b43 | 5330 | break; |
088b1e42 | 5331 | } |
b91facc3 FW |
5332 | if (ret != TRACE_TYPE_NO_CONSUME) |
5333 | trace_consume(iter); | |
b3806b43 | 5334 | |
5ac48378 | 5335 | if (trace_seq_used(&iter->seq) >= cnt) |
b3806b43 | 5336 | break; |
ee5e51f5 JO |
5337 | |
5338 | /* | |
5339 | * Setting the full flag means we reached the trace_seq buffer | |
5340 | * size and we should leave by partial output condition above. | |
5341 | * One of the trace_seq_* functions is not used properly. | |
5342 | */ | |
5343 | WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | |
5344 | iter->ent->type); | |
b3806b43 | 5345 | } |
7e53bd42 | 5346 | trace_access_unlock(iter->cpu_file); |
4f535968 | 5347 | trace_event_read_unlock(); |
b3806b43 | 5348 | |
b3806b43 | 5349 | /* Now copy what we have to the user */ |
6c6c2796 | 5350 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
5ac48378 | 5351 | if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) |
f9520750 | 5352 | trace_seq_init(&iter->seq); |
9ff4b974 PP |
5353 | |
5354 | /* | |
25985edc | 5355 | * If there was nothing to send to the user, in spite of consuming trace | |
9ff4b974 PP |
5356 | * entries, go back to wait for more entries. |
5357 | */ | |
6c6c2796 | 5358 | if (sret == -EBUSY) |
9ff4b974 | 5359 | goto waitagain; |
b3806b43 | 5360 | |
107bad8b | 5361 | out: |
d7350c3f | 5362 | mutex_unlock(&iter->mutex); |
107bad8b | 5363 | |
6c6c2796 | 5364 | return sret; |
b3806b43 SR |
5365 | } |
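/*
 * trace_pipe is a consuming reader: unlike the 'trace' file, entries
 * returned here are removed from the ring buffer by trace_consume().
 * A usage sketch (path is an assumption):
 *
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * blocks until data arrives (or returns -EAGAIN when opened with
 * O_NONBLOCK) and drains events as they are printed.
 */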
5366 | ||
3c56819b EGM |
5367 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, |
5368 | unsigned int idx) | |
5369 | { | |
5370 | __free_page(spd->pages[idx]); | |
5371 | } | |
5372 | ||
28dfef8f | 5373 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
34cd4998 | 5374 | .can_merge = 0, |
34cd4998 | 5375 | .confirm = generic_pipe_buf_confirm, |
92fdd98c | 5376 | .release = generic_pipe_buf_release, |
34cd4998 SR |
5377 | .steal = generic_pipe_buf_steal, |
5378 | .get = generic_pipe_buf_get, | |
3c56819b EGM |
5379 | }; |
5380 | ||
34cd4998 | 5381 | static size_t |
fa7c7f6e | 5382 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) |
34cd4998 SR |
5383 | { |
5384 | size_t count; | |
74f06bb7 | 5385 | int save_len; |
34cd4998 SR |
5386 | int ret; |
5387 | ||
5388 | /* Seq buffer is page-sized, exactly what we need. */ | |
5389 | for (;;) { | |
74f06bb7 | 5390 | save_len = iter->seq.seq.len; |
34cd4998 | 5391 | ret = print_trace_line(iter); |
74f06bb7 SRRH |
5392 | |
5393 | if (trace_seq_has_overflowed(&iter->seq)) { | |
5394 | iter->seq.seq.len = save_len; | |
34cd4998 SR |
5395 | break; |
5396 | } | |
74f06bb7 SRRH |
5397 | |
5398 | /* | |
5399 | * This should not be hit, because it should only | |
5400 | * be set if the iter->seq overflowed. But check it | |
5401 | * anyway to be safe. | |
5402 | */ | |
34cd4998 | 5403 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
74f06bb7 SRRH |
5404 | iter->seq.seq.len = save_len; |
5405 | break; | |
5406 | } | |
5407 | ||
5ac48378 | 5408 | count = trace_seq_used(&iter->seq) - save_len; |
74f06bb7 SRRH |
5409 | if (rem < count) { |
5410 | rem = 0; | |
5411 | iter->seq.seq.len = save_len; | |
34cd4998 SR |
5412 | break; |
5413 | } | |
5414 | ||
74e7ff8c LJ |
5415 | if (ret != TRACE_TYPE_NO_CONSUME) |
5416 | trace_consume(iter); | |
34cd4998 | 5417 | rem -= count; |
955b61e5 | 5418 | if (!trace_find_next_entry_inc(iter)) { |
34cd4998 SR |
5419 | rem = 0; |
5420 | iter->ent = NULL; | |
5421 | break; | |
5422 | } | |
5423 | } | |
5424 | ||
5425 | return rem; | |
5426 | } | |
5427 | ||
3c56819b EGM |
5428 | static ssize_t tracing_splice_read_pipe(struct file *filp, |
5429 | loff_t *ppos, | |
5430 | struct pipe_inode_info *pipe, | |
5431 | size_t len, | |
5432 | unsigned int flags) | |
5433 | { | |
35f3d14d JA |
5434 | struct page *pages_def[PIPE_DEF_BUFFERS]; |
5435 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; | |
3c56819b EGM |
5436 | struct trace_iterator *iter = filp->private_data; |
5437 | struct splice_pipe_desc spd = { | |
35f3d14d JA |
5438 | .pages = pages_def, |
5439 | .partial = partial_def, | |
34cd4998 | 5440 | .nr_pages = 0, /* This gets updated below. */ |
047fe360 | 5441 | .nr_pages_max = PIPE_DEF_BUFFERS, |
34cd4998 SR |
5442 | .flags = flags, |
5443 | .ops = &tracing_pipe_buf_ops, | |
5444 | .spd_release = tracing_spd_release_pipe, | |
3c56819b EGM |
5445 | }; |
5446 | ssize_t ret; | |
34cd4998 | 5447 | size_t rem; |
3c56819b EGM |
5448 | unsigned int i; |
5449 | ||
35f3d14d JA |
5450 | if (splice_grow_spd(pipe, &spd)) |
5451 | return -ENOMEM; | |
5452 | ||
d7350c3f | 5453 | mutex_lock(&iter->mutex); |
3c56819b EGM |
5454 | |
5455 | if (iter->trace->splice_read) { | |
5456 | ret = iter->trace->splice_read(iter, filp, | |
5457 | ppos, pipe, len, flags); | |
5458 | if (ret) | |
34cd4998 | 5459 | goto out_err; |
3c56819b EGM |
5460 | } |
5461 | ||
5462 | ret = tracing_wait_pipe(filp); | |
5463 | if (ret <= 0) | |
34cd4998 | 5464 | goto out_err; |
3c56819b | 5465 | |
955b61e5 | 5466 | if (!iter->ent && !trace_find_next_entry_inc(iter)) { |
3c56819b | 5467 | ret = -EFAULT; |
34cd4998 | 5468 | goto out_err; |
3c56819b EGM |
5469 | } |
5470 | ||
4f535968 | 5471 | trace_event_read_lock(); |
7e53bd42 | 5472 | trace_access_lock(iter->cpu_file); |
4f535968 | 5473 | |
3c56819b | 5474 | /* Fill as many pages as possible. */ |
a786c06d | 5475 | for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { |
35f3d14d JA |
5476 | spd.pages[i] = alloc_page(GFP_KERNEL); |
5477 | if (!spd.pages[i]) | |
34cd4998 | 5478 | break; |
3c56819b | 5479 | |
fa7c7f6e | 5480 | rem = tracing_fill_pipe_page(rem, iter); |
3c56819b EGM |
5481 | |
5482 | /* Copy the data into the page, so we can start over. */ | |
5483 | ret = trace_seq_to_buffer(&iter->seq, | |
35f3d14d | 5484 | page_address(spd.pages[i]), |
5ac48378 | 5485 | trace_seq_used(&iter->seq)); |
3c56819b | 5486 | if (ret < 0) { |
35f3d14d | 5487 | __free_page(spd.pages[i]); |
3c56819b EGM |
5488 | break; |
5489 | } | |
35f3d14d | 5490 | spd.partial[i].offset = 0; |
5ac48378 | 5491 | spd.partial[i].len = trace_seq_used(&iter->seq); |
3c56819b | 5492 | |
f9520750 | 5493 | trace_seq_init(&iter->seq); |
3c56819b EGM |
5494 | } |
5495 | ||
7e53bd42 | 5496 | trace_access_unlock(iter->cpu_file); |
4f535968 | 5497 | trace_event_read_unlock(); |
d7350c3f | 5498 | mutex_unlock(&iter->mutex); |
3c56819b EGM |
5499 | |
5500 | spd.nr_pages = i; | |
5501 | ||
a29054d9 SRRH |
5502 | if (i) |
5503 | ret = splice_to_pipe(pipe, &spd); | |
5504 | else | |
5505 | ret = 0; | |
35f3d14d | 5506 | out: |
047fe360 | 5507 | splice_shrink_spd(&spd); |
35f3d14d | 5508 | return ret; |
3c56819b | 5509 | |
34cd4998 | 5510 | out_err: |
d7350c3f | 5511 | mutex_unlock(&iter->mutex); |
35f3d14d | 5512 | goto out; |
3c56819b EGM |
5513 | } |
5514 | ||
a98a3c3f SR |
5515 | static ssize_t |
5516 | tracing_entries_read(struct file *filp, char __user *ubuf, | |
5517 | size_t cnt, loff_t *ppos) | |
5518 | { | |
0bc392ee ON |
5519 | struct inode *inode = file_inode(filp); |
5520 | struct trace_array *tr = inode->i_private; | |
5521 | int cpu = tracing_get_cpu(inode); | |
438ced17 VN |
5522 | char buf[64]; |
5523 | int r = 0; | |
5524 | ssize_t ret; | |
a98a3c3f | 5525 | |
db526ca3 | 5526 | mutex_lock(&trace_types_lock); |
438ced17 | 5527 | |
0bc392ee | 5528 | if (cpu == RING_BUFFER_ALL_CPUS) { |
438ced17 VN |
5529 | int cpu, buf_size_same; |
5530 | unsigned long size; | |
5531 | ||
5532 | size = 0; | |
5533 | buf_size_same = 1; | |
5534 | /* check if all cpu sizes are same */ | |
5535 | for_each_tracing_cpu(cpu) { | |
5536 | /* fill in the size from first enabled cpu */ | |
5537 | if (size == 0) | |
12883efb SRRH |
5538 | size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; |
5539 | if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { | |
438ced17 VN |
5540 | buf_size_same = 0; |
5541 | break; | |
5542 | } | |
5543 | } | |
5544 | ||
5545 | if (buf_size_same) { | |
5546 | if (!ring_buffer_expanded) | |
5547 | r = sprintf(buf, "%lu (expanded: %lu)\n", | |
5548 | size >> 10, | |
5549 | trace_buf_size >> 10); | |
5550 | else | |
5551 | r = sprintf(buf, "%lu\n", size >> 10); | |
5552 | } else | |
5553 | r = sprintf(buf, "X\n"); | |
5554 | } else | |
0bc392ee | 5555 | r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); |
438ced17 | 5556 | |
db526ca3 SR |
5557 | mutex_unlock(&trace_types_lock); |
5558 | ||
438ced17 VN |
5559 | ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
5560 | return ret; | |
a98a3c3f SR |
5561 | } |
5562 | ||
5563 | static ssize_t | |
5564 | tracing_entries_write(struct file *filp, const char __user *ubuf, | |
5565 | size_t cnt, loff_t *ppos) | |
5566 | { | |
0bc392ee ON |
5567 | struct inode *inode = file_inode(filp); |
5568 | struct trace_array *tr = inode->i_private; | |
a98a3c3f | 5569 | unsigned long val; |
4f271a2a | 5570 | int ret; |
a98a3c3f | 5571 | |
22fe9b54 PH |
5572 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
5573 | if (ret) | |
c6caeeb1 | 5574 | return ret; |
a98a3c3f SR |
5575 | |
5576 | /* must have at least 1 entry */ | |
5577 | if (!val) | |
5578 | return -EINVAL; | |
5579 | ||
1696b2b0 SR |
5580 | /* value is in KB */ |
5581 | val <<= 10; | |
0bc392ee | 5582 | ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); |
4f271a2a VN |
5583 | if (ret < 0) |
5584 | return ret; | |
a98a3c3f | 5585 | |
cf8517cf | 5586 | *ppos += cnt; |
a98a3c3f | 5587 | |
4f271a2a VN |
5588 | return cnt; |
5589 | } | |
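/*
 * buffer_size_kb takes kilobytes (hence the val <<= 10 above).  A
 * usage sketch, paths assumed:
 *
 *	# echo 1408 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes every per-cpu buffer, while the per_cpu/cpuN/buffer_size_kb
 * instances of this file resize only that CPU's buffer.
 */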
bf5e6519 | 5590 | |
f81ab074 VN |
5591 | static ssize_t |
5592 | tracing_total_entries_read(struct file *filp, char __user *ubuf, | |
5593 | size_t cnt, loff_t *ppos) | |
5594 | { | |
5595 | struct trace_array *tr = filp->private_data; | |
5596 | char buf[64]; | |
5597 | int r, cpu; | |
5598 | unsigned long size = 0, expanded_size = 0; | |
5599 | ||
5600 | mutex_lock(&trace_types_lock); | |
5601 | for_each_tracing_cpu(cpu) { | |
12883efb | 5602 | size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; |
f81ab074 VN |
5603 | if (!ring_buffer_expanded) |
5604 | expanded_size += trace_buf_size >> 10; | |
5605 | } | |
5606 | if (ring_buffer_expanded) | |
5607 | r = sprintf(buf, "%lu\n", size); | |
5608 | else | |
5609 | r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); | |
5610 | mutex_unlock(&trace_types_lock); | |
5611 | ||
5612 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
5613 | } | |
5614 | ||
4f271a2a VN |
5615 | static ssize_t |
5616 | tracing_free_buffer_write(struct file *filp, const char __user *ubuf, | |
5617 | size_t cnt, loff_t *ppos) | |
5618 | { | |
5619 | /* | |
5620 | * There is no need to read what the user has written; this function | |
5621 | * just makes sure that there is no error when "echo" is used. | |
5622 | */ | |
5623 | ||
5624 | *ppos += cnt; | |
a98a3c3f SR |
5625 | |
5626 | return cnt; | |
5627 | } | |
5628 | ||
4f271a2a VN |
5629 | static int |
5630 | tracing_free_buffer_release(struct inode *inode, struct file *filp) | |
5631 | { | |
2b6080f2 SR |
5632 | struct trace_array *tr = inode->i_private; |
5633 | ||
cf30cf67 | 5634 | /* disable tracing ? */ |
983f938a | 5635 | if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) |
711e1243 | 5636 | tracer_tracing_off(tr); |
4f271a2a | 5637 | /* resize the ring buffer to 0 */ |
2b6080f2 | 5638 | tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); |
4f271a2a | 5639 | |
7b85af63 SRRH |
5640 | trace_array_put(tr); |
5641 | ||
4f271a2a VN |
5642 | return 0; |
5643 | } | |
5644 | ||
fa32e855 SR |
5645 | static inline int lock_user_pages(const char __user *ubuf, size_t cnt, |
5646 | struct page **pages, void **map_page, | |
5647 | int *offset) | |
5bf9a1ee | 5648 | { |
d696b58c | 5649 | unsigned long addr = (unsigned long)ubuf; |
d696b58c | 5650 | int nr_pages = 1; |
d696b58c | 5651 | int ret; |
6edb2a8a | 5652 | int i; |
5bf9a1ee | 5653 | |
d696b58c SR |
5654 | /* |
5655 | * Userspace is injecting traces into the kernel trace buffer. | |
5656 | * We want to be as non-intrusive as possible. | |
5657 | * To do so, we do not want to allocate any special buffers | |
5658 | * or take any locks, but instead write the userspace data | |
5659 | * straight into the ring buffer. | |
5660 | * | |
5661 | * First we need to pin the userspace buffer into memory, | |
5662 | * which it most likely already is, because userspace just referenced it. | |
5663 | * But there's no guarantee that it is. By using get_user_pages_fast() | |
5664 | * and kmap_atomic/kunmap_atomic() we can get access to the | |
5665 | * pages directly. We then write the data directly into the | |
5666 | * ring buffer. | |
5667 | */ | |
5bf9a1ee | 5668 | |
d696b58c SR |
5669 | /* check if the buffer crosses a page boundary */ | |
5670 | if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) | |
5671 | nr_pages = 2; | |
5672 | ||
fa32e855 | 5673 | *offset = addr & (PAGE_SIZE - 1); |
d696b58c SR |
5674 | addr &= PAGE_MASK; |
5675 | ||
5676 | ret = get_user_pages_fast(addr, nr_pages, 0, pages); | |
5677 | if (ret < nr_pages) { | |
5678 | while (--ret >= 0) | |
5679 | put_page(pages[ret]); | |
fa32e855 | 5680 | return -EFAULT; |
5bf9a1ee | 5681 | } |
d696b58c | 5682 | |
6edb2a8a SR |
5683 | for (i = 0; i < nr_pages; i++) |
5684 | map_page[i] = kmap_atomic(pages[i]); | |
d696b58c | 5685 | |
fa32e855 SR |
5686 | return nr_pages; |
5687 | } | |
5688 | ||
5689 | static inline void unlock_user_pages(struct page **pages, | |
5690 | void **map_page, int nr_pages) | |
5691 | { | |
5692 | int i; | |
5693 | ||
5694 | for (i = nr_pages - 1; i >= 0; i--) { | |
5695 | kunmap_atomic(map_page[i]); | |
5696 | put_page(pages[i]); | |
5697 | } | |
5698 | } | |
5699 | ||
5700 | static ssize_t | |
5701 | tracing_mark_write(struct file *filp, const char __user *ubuf, | |
5702 | size_t cnt, loff_t *fpos) | |
5703 | { | |
5704 | struct trace_array *tr = filp->private_data; | |
5705 | struct ring_buffer_event *event; | |
5706 | struct ring_buffer *buffer; | |
5707 | struct print_entry *entry; | |
5708 | unsigned long irq_flags; | |
5709 | struct page *pages[2]; | |
5710 | void *map_page[2]; | |
5711 | int nr_pages = 1; | |
5712 | ssize_t written; | |
5713 | int offset; | |
5714 | int size; | |
5715 | int len; | |
5716 | ||
5717 | if (tracing_disabled) | |
5718 | return -EINVAL; | |
5719 | ||
5720 | if (!(tr->trace_flags & TRACE_ITER_MARKERS)) | |
5721 | return -EINVAL; | |
5722 | ||
5723 | if (cnt > TRACE_BUF_SIZE) | |
5724 | cnt = TRACE_BUF_SIZE; | |
5725 | ||
5726 | BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | |
5727 | ||
5728 | nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset); | |
5729 | if (nr_pages < 0) | |
5730 | return nr_pages; | |
5731 | ||
d696b58c SR |
5732 | local_save_flags(irq_flags); |
5733 | size = sizeof(*entry) + cnt + 2; /* possible \n added */ | |
2d71619c | 5734 | buffer = tr->trace_buffer.buffer; |
d696b58c SR |
5735 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
5736 | irq_flags, preempt_count()); | |
5737 | if (!event) { | |
5738 | /* Ring buffer disabled, return as if not open for write */ | |
5739 | written = -EBADF; | |
5740 | goto out_unlock; | |
5bf9a1ee | 5741 | } |
d696b58c SR |
5742 | |
5743 | entry = ring_buffer_event_data(event); | |
5744 | entry->ip = _THIS_IP_; | |
5745 | ||
5746 | if (nr_pages == 2) { | |
5747 | len = PAGE_SIZE - offset; | |
6edb2a8a SR |
5748 | memcpy(&entry->buf, map_page[0] + offset, len); |
5749 | memcpy(&entry->buf[len], map_page[1], cnt - len); | |
c13d2f7c | 5750 | } else |
6edb2a8a | 5751 | memcpy(&entry->buf, map_page[0] + offset, cnt); |
5bf9a1ee | 5752 | |
d696b58c SR |
5753 | if (entry->buf[cnt - 1] != '\n') { |
5754 | entry->buf[cnt] = '\n'; | |
5755 | entry->buf[cnt + 1] = '\0'; | |
5756 | } else | |
5757 | entry->buf[cnt] = '\0'; | |
5758 | ||
7ffbd48d | 5759 | __buffer_unlock_commit(buffer, event); |
5bf9a1ee | 5760 | |
d696b58c | 5761 | written = cnt; |
5bf9a1ee | 5762 | |
d696b58c | 5763 | *fpos += written; |
1aa54bca | 5764 | |
d696b58c | 5765 | out_unlock: |
fa32e855 SR |
5766 | unlock_user_pages(pages, map_page, nr_pages); |
5767 | ||
5768 | return written; | |
5769 | } | |
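/*
 * trace_marker lets userspace drop annotations into the trace.  A
 * usage sketch (path is an assumption):
 *
 *	# echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * The write path above pins the user pages and copies them straight
 * into a TRACE_PRINT event, appending a newline if one is missing.
 */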
5770 | ||
5771 | /* Limit it for now to 3K (including tag) */ | |
5772 | #define RAW_DATA_MAX_SIZE (1024*3) | |
5773 | ||
5774 | static ssize_t | |
5775 | tracing_mark_raw_write(struct file *filp, const char __user *ubuf, | |
5776 | size_t cnt, loff_t *fpos) | |
5777 | { | |
5778 | struct trace_array *tr = filp->private_data; | |
5779 | struct ring_buffer_event *event; | |
5780 | struct ring_buffer *buffer; | |
5781 | struct raw_data_entry *entry; | |
5782 | unsigned long irq_flags; | |
5783 | struct page *pages[2]; | |
5784 | void *map_page[2]; | |
5785 | int nr_pages = 1; | |
5786 | ssize_t written; | |
5787 | int offset; | |
5788 | int size; | |
5789 | int len; | |
5790 | ||
5791 | if (tracing_disabled) | |
5792 | return -EINVAL; | |
5793 | ||
5794 | if (!(tr->trace_flags & TRACE_ITER_MARKERS)) | |
5795 | return -EINVAL; | |
5796 | ||
5797 | /* The marker must at least have a tag id */ | |
5798 | if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) | |
5799 | return -EINVAL; | |
5800 | ||
5801 | if (cnt > TRACE_BUF_SIZE) | |
5802 | cnt = TRACE_BUF_SIZE; | |
5803 | ||
5804 | BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | |
5805 | ||
5806 | nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset); | |
5807 | if (nr_pages < 0) | |
5808 | return nr_pages; | |
5809 | ||
5810 | local_save_flags(irq_flags); | |
5811 | size = sizeof(*entry) + cnt; | |
5812 | buffer = tr->trace_buffer.buffer; | |
5813 | event = trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, | |
5814 | irq_flags, preempt_count()); | |
5815 | if (!event) { | |
5816 | /* Ring buffer disabled, return as if not open for write */ | |
5817 | written = -EBADF; | |
5818 | goto out_unlock; | |
6edb2a8a | 5819 | } |
fa32e855 SR |
5820 | |
5821 | entry = ring_buffer_event_data(event); | |
5822 | ||
5823 | if (nr_pages == 2) { | |
5824 | len = PAGE_SIZE - offset; | |
5825 | memcpy(&entry->id, map_page[0] + offset, len); | |
5826 | memcpy(((char *)&entry->id) + len, map_page[1], cnt - len); | |
5827 | } else | |
5828 | memcpy(&entry->id, map_page[0] + offset, cnt); | |
5829 | ||
5830 | __buffer_unlock_commit(buffer, event); | |
5831 | ||
5832 | written = cnt; | |
5833 | ||
5834 | *fpos += written; | |
5835 | ||
5836 | out_unlock: | |
5837 | unlock_user_pages(pages, map_page, nr_pages); | |
5838 | ||
1aa54bca | 5839 | return written; |
5bf9a1ee PP |
5840 | } |
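/*
 * trace_marker_raw expects a binary payload that begins with an
 * unsigned int tag id.  A minimal userspace sketch (error handling
 * elided, path is an assumption):
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	write(fd, &rec, sizeof(rec));
 */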
5841 | ||
13f16d20 | 5842 | static int tracing_clock_show(struct seq_file *m, void *v) |
5079f326 | 5843 | { |
2b6080f2 | 5844 | struct trace_array *tr = m->private; |
5079f326 Z |
5845 | int i; |
5846 | ||
5847 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | |
13f16d20 | 5848 | seq_printf(m, |
5079f326 | 5849 | "%s%s%s%s", i ? " " : "", |
2b6080f2 SR |
5850 | i == tr->clock_id ? "[" : "", trace_clocks[i].name, |
5851 | i == tr->clock_id ? "]" : ""); | |
13f16d20 | 5852 | seq_putc(m, '\n'); |
5079f326 | 5853 | |
13f16d20 | 5854 | return 0; |
5079f326 Z |
5855 | } |
5856 | ||
e1e232ca | 5857 | static int tracing_set_clock(struct trace_array *tr, const char *clockstr) |
5079f326 | 5858 | { |
5079f326 Z |
5859 | int i; |
5860 | ||
5079f326 Z |
5861 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { |
5862 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | |
5863 | break; | |
5864 | } | |
5865 | if (i == ARRAY_SIZE(trace_clocks)) | |
5866 | return -EINVAL; | |
5867 | ||
5079f326 Z |
5868 | mutex_lock(&trace_types_lock); |
5869 | ||
2b6080f2 SR |
5870 | tr->clock_id = i; |
5871 | ||
12883efb | 5872 | ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); |
5079f326 | 5873 | |
60303ed3 DS |
5874 | /* |
5875 | * New clock may not be consistent with the previous clock. | |
5876 | * Reset the buffer so that it doesn't have incomparable timestamps. | |
5877 | */ | |
9457158b | 5878 | tracing_reset_online_cpus(&tr->trace_buffer); |
12883efb SRRH |
5879 | |
5880 | #ifdef CONFIG_TRACER_MAX_TRACE | |
5881 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) | |
5882 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); | |
9457158b | 5883 | tracing_reset_online_cpus(&tr->max_buffer); |
12883efb | 5884 | #endif |
60303ed3 | 5885 | |
5079f326 Z |
5886 | mutex_unlock(&trace_types_lock); |
5887 | ||
e1e232ca SR |
5888 | return 0; |
5889 | } | |
5890 | ||
5891 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |
5892 | size_t cnt, loff_t *fpos) | |
5893 | { | |
5894 | struct seq_file *m = filp->private_data; | |
5895 | struct trace_array *tr = m->private; | |
5896 | char buf[64]; | |
5897 | const char *clockstr; | |
5898 | int ret; | |
5899 | ||
5900 | if (cnt >= sizeof(buf)) | |
5901 | return -EINVAL; | |
5902 | ||
4afe6495 | 5903 | if (copy_from_user(buf, ubuf, cnt)) |
e1e232ca SR |
5904 | return -EFAULT; |
5905 | ||
5906 | buf[cnt] = 0; | |
5907 | ||
5908 | clockstr = strstrip(buf); | |
5909 | ||
5910 | ret = tracing_set_clock(tr, clockstr); | |
5911 | if (ret) | |
5912 | return ret; | |
5913 | ||
5079f326 Z |
5914 | *fpos += cnt; |
5915 | ||
5916 | return cnt; | |
5917 | } | |
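/*
 * A usage sketch of the clock interface above (the set of available
 * clocks is architecture and config dependent):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * Switching clocks resets the buffers, because timestamps taken with
 * different clocks are not comparable.
 */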
5918 | ||
13f16d20 LZ |
5919 | static int tracing_clock_open(struct inode *inode, struct file *file) |
5920 | { | |
7b85af63 SRRH |
5921 | struct trace_array *tr = inode->i_private; |
5922 | int ret; | |
5923 | ||
13f16d20 LZ |
5924 | if (tracing_disabled) |
5925 | return -ENODEV; | |
2b6080f2 | 5926 | |
7b85af63 SRRH |
5927 | if (trace_array_get(tr)) |
5928 | return -ENODEV; | |
5929 | ||
5930 | ret = single_open(file, tracing_clock_show, inode->i_private); | |
5931 | if (ret < 0) | |
5932 | trace_array_put(tr); | |
5933 | ||
5934 | return ret; | |
13f16d20 LZ |
5935 | } |
5936 | ||
6de58e62 SRRH |
5937 | struct ftrace_buffer_info { |
5938 | struct trace_iterator iter; | |
5939 | void *spare; | |
5940 | unsigned int read; | |
5941 | }; | |
5942 | ||
debdd57f HT |
5943 | #ifdef CONFIG_TRACER_SNAPSHOT |
5944 | static int tracing_snapshot_open(struct inode *inode, struct file *file) | |
5945 | { | |
6484c71c | 5946 | struct trace_array *tr = inode->i_private; |
debdd57f | 5947 | struct trace_iterator *iter; |
2b6080f2 | 5948 | struct seq_file *m; |
debdd57f HT |
5949 | int ret = 0; |
5950 | ||
ff451961 SRRH |
5951 | if (trace_array_get(tr) < 0) |
5952 | return -ENODEV; | |
5953 | ||
debdd57f | 5954 | if (file->f_mode & FMODE_READ) { |
6484c71c | 5955 | iter = __tracing_open(inode, file, true); |
debdd57f HT |
5956 | if (IS_ERR(iter)) |
5957 | ret = PTR_ERR(iter); | |
2b6080f2 SR |
5958 | } else { |
5959 | /* Writes still need the seq_file to hold the private data */ | |
f77d09a3 | 5960 | ret = -ENOMEM; |
2b6080f2 SR |
5961 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
5962 | if (!m) | |
f77d09a3 | 5963 | goto out; |
2b6080f2 SR |
5964 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
5965 | if (!iter) { | |
5966 | kfree(m); | |
f77d09a3 | 5967 | goto out; |
2b6080f2 | 5968 | } |
f77d09a3 AL |
5969 | ret = 0; |
5970 | ||
ff451961 | 5971 | iter->tr = tr; |
6484c71c ON |
5972 | iter->trace_buffer = &tr->max_buffer; |
5973 | iter->cpu_file = tracing_get_cpu(inode); | |
2b6080f2 SR |
5974 | m->private = iter; |
5975 | file->private_data = m; | |
debdd57f | 5976 | } |
f77d09a3 | 5977 | out: |
ff451961 SRRH |
5978 | if (ret < 0) |
5979 | trace_array_put(tr); | |
5980 | ||
debdd57f HT |
5981 | return ret; |
5982 | } | |
5983 | ||
5984 | static ssize_t | |
5985 | tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | |
5986 | loff_t *ppos) | |
5987 | { | |
2b6080f2 SR |
5988 | struct seq_file *m = filp->private_data; |
5989 | struct trace_iterator *iter = m->private; | |
5990 | struct trace_array *tr = iter->tr; | |
debdd57f HT |
5991 | unsigned long val; |
5992 | int ret; | |
5993 | ||
5994 | ret = tracing_update_buffers(); | |
5995 | if (ret < 0) | |
5996 | return ret; | |
5997 | ||
5998 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | |
5999 | if (ret) | |
6000 | return ret; | |
6001 | ||
6002 | mutex_lock(&trace_types_lock); | |
6003 | ||
2b6080f2 | 6004 | if (tr->current_trace->use_max_tr) { |
debdd57f HT |
6005 | ret = -EBUSY; |
6006 | goto out; | |
6007 | } | |
6008 | ||
6009 | switch (val) { | |
6010 | case 0: | |
f1affcaa SRRH |
6011 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { |
6012 | ret = -EINVAL; | |
6013 | break; | |
debdd57f | 6014 | } |
3209cff4 SRRH |
6015 | if (tr->allocated_snapshot) |
6016 | free_snapshot(tr); | |
debdd57f HT |
6017 | break; |
6018 | case 1: | |
f1affcaa SRRH |
6019 | /* Only allow per-cpu swap if the ring buffer supports it */ |
6020 | #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP | |
6021 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { | |
6022 | ret = -EINVAL; | |
6023 | break; | |
6024 | } | |
6025 | #endif | |
45ad21ca | 6026 | if (!tr->allocated_snapshot) { |
3209cff4 | 6027 | ret = alloc_snapshot(tr); |
debdd57f HT |
6028 | if (ret < 0) |
6029 | break; | |
debdd57f | 6030 | } |
debdd57f HT |
6031 | local_irq_disable(); |
6032 | /* Now, we're going to swap */ | |
f1affcaa | 6033 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
ce9bae55 | 6034 | update_max_tr(tr, current, smp_processor_id()); |
f1affcaa | 6035 | else |
ce9bae55 | 6036 | update_max_tr_single(tr, current, iter->cpu_file); |
debdd57f HT |
6037 | local_irq_enable(); |
6038 | break; | |
6039 | default: | |
45ad21ca | 6040 | if (tr->allocated_snapshot) { |
f1affcaa SRRH |
6041 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
6042 | tracing_reset_online_cpus(&tr->max_buffer); | |
6043 | else | |
6044 | tracing_reset(&tr->max_buffer, iter->cpu_file); | |
6045 | } | |
debdd57f HT |
6046 | break; |
6047 | } | |
6048 | ||
6049 | if (ret >= 0) { | |
6050 | *ppos += cnt; | |
6051 | ret = cnt; | |
6052 | } | |
6053 | out: | |
6054 | mutex_unlock(&trace_types_lock); | |
6055 | return ret; | |
6056 | } | |
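A note on the value switch in tracing_snapshot_write() above: writing "0" frees the snapshot buffer (only valid on the all-CPUs file), "1" allocates the snapshot buffer if needed and swaps it with the live buffer, and any other number clears the snapshot buffer without freeing it. A minimal user-space sketch of driving the file; the tracefs mount point is an assumption (it may be /sys/kernel/tracing instead):

/* --- Editor's sketch, not part of the kernel source --- */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int snapshot_ctl(const char *val)
{
	/* Lands in tracing_snapshot_write(): "0" free, "1" swap, else clear */
	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (snapshot_ctl("1"))		/* allocate if needed, then swap */
		perror("snapshot");
	return 0;
}
/* --- end sketch --- */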
2b6080f2 SR |
6057 | |
6058 | static int tracing_snapshot_release(struct inode *inode, struct file *file) | |
6059 | { | |
6060 | struct seq_file *m = file->private_data; | |
ff451961 SRRH |
6061 | int ret; |
6062 | ||
6063 | ret = tracing_release(inode, file); | |
2b6080f2 SR |
6064 | |
6065 | if (file->f_mode & FMODE_READ) | |
ff451961 | 6066 | return ret; |
2b6080f2 SR |
6067 | |
6068 | /* If write only, the seq_file is just a stub */ | |
6069 | if (m) | |
6070 | kfree(m->private); | |
6071 | kfree(m); | |
6072 | ||
6073 | return 0; | |
6074 | } | |
6075 | ||
6de58e62 SRRH |
6076 | static int tracing_buffers_open(struct inode *inode, struct file *filp); |
6077 | static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, | |
6078 | size_t count, loff_t *ppos); | |
6079 | static int tracing_buffers_release(struct inode *inode, struct file *file); | |
6080 | static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |
6081 | struct pipe_inode_info *pipe, size_t len, unsigned int flags); | |
6082 | ||
6083 | static int snapshot_raw_open(struct inode *inode, struct file *filp) | |
6084 | { | |
6085 | struct ftrace_buffer_info *info; | |
6086 | int ret; | |
6087 | ||
6088 | ret = tracing_buffers_open(inode, filp); | |
6089 | if (ret < 0) | |
6090 | return ret; | |
6091 | ||
6092 | info = filp->private_data; | |
6093 | ||
6094 | if (info->iter.trace->use_max_tr) { | |
6095 | tracing_buffers_release(inode, filp); | |
6096 | return -EBUSY; | |
6097 | } | |
6098 | ||
6099 | info->iter.snapshot = true; | |
6100 | info->iter.trace_buffer = &info->iter.tr->max_buffer; | |
6101 | ||
6102 | return ret; | |
6103 | } | |
6104 | ||
debdd57f HT |
6105 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
6106 | ||
6107 | ||
6508fa76 SF |
6108 | static const struct file_operations tracing_thresh_fops = { |
6109 | .open = tracing_open_generic, | |
6110 | .read = tracing_thresh_read, | |
6111 | .write = tracing_thresh_write, | |
6112 | .llseek = generic_file_llseek, | |
6113 | }; | |
6114 | ||
f971cc9a | 6115 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
5e2336a0 | 6116 | static const struct file_operations tracing_max_lat_fops = { |
4bf39a94 IM |
6117 | .open = tracing_open_generic, |
6118 | .read = tracing_max_lat_read, | |
6119 | .write = tracing_max_lat_write, | |
b444786f | 6120 | .llseek = generic_file_llseek, |
bc0c38d1 | 6121 | }; |
e428abbb | 6122 | #endif |
bc0c38d1 | 6123 | |
5e2336a0 | 6124 | static const struct file_operations set_tracer_fops = { |
4bf39a94 IM |
6125 | .open = tracing_open_generic, |
6126 | .read = tracing_set_trace_read, | |
6127 | .write = tracing_set_trace_write, | |
b444786f | 6128 | .llseek = generic_file_llseek, |
bc0c38d1 SR |
6129 | }; |
6130 | ||
5e2336a0 | 6131 | static const struct file_operations tracing_pipe_fops = { |
4bf39a94 | 6132 | .open = tracing_open_pipe, |
2a2cc8f7 | 6133 | .poll = tracing_poll_pipe, |
4bf39a94 | 6134 | .read = tracing_read_pipe, |
3c56819b | 6135 | .splice_read = tracing_splice_read_pipe, |
4bf39a94 | 6136 | .release = tracing_release_pipe, |
b444786f | 6137 | .llseek = no_llseek, |
b3806b43 SR |
6138 | }; |
6139 | ||
5e2336a0 | 6140 | static const struct file_operations tracing_entries_fops = { |
0bc392ee | 6141 | .open = tracing_open_generic_tr, |
a98a3c3f SR |
6142 | .read = tracing_entries_read, |
6143 | .write = tracing_entries_write, | |
b444786f | 6144 | .llseek = generic_file_llseek, |
0bc392ee | 6145 | .release = tracing_release_generic_tr, |
a98a3c3f SR |
6146 | }; |
6147 | ||
f81ab074 | 6148 | static const struct file_operations tracing_total_entries_fops = { |
7b85af63 | 6149 | .open = tracing_open_generic_tr, |
f81ab074 VN |
6150 | .read = tracing_total_entries_read, |
6151 | .llseek = generic_file_llseek, | |
7b85af63 | 6152 | .release = tracing_release_generic_tr, |
f81ab074 VN |
6153 | }; |
6154 | ||
4f271a2a | 6155 | static const struct file_operations tracing_free_buffer_fops = { |
7b85af63 | 6156 | .open = tracing_open_generic_tr, |
4f271a2a VN |
6157 | .write = tracing_free_buffer_write, |
6158 | .release = tracing_free_buffer_release, | |
6159 | }; | |
6160 | ||
5e2336a0 | 6161 | static const struct file_operations tracing_mark_fops = { |
7b85af63 | 6162 | .open = tracing_open_generic_tr, |
5bf9a1ee | 6163 | .write = tracing_mark_write, |
b444786f | 6164 | .llseek = generic_file_llseek, |
7b85af63 | 6165 | .release = tracing_release_generic_tr, |
5bf9a1ee PP |
6166 | }; |
6167 | ||
fa32e855 SR |
6168 | static const struct file_operations tracing_mark_raw_fops = { |
6169 | .open = tracing_open_generic_tr, | |
6170 | .write = tracing_mark_raw_write, | |
6171 | .llseek = generic_file_llseek, | |
6172 | .release = tracing_release_generic_tr, | |
6173 | }; | |
6174 | ||
5079f326 | 6175 | static const struct file_operations trace_clock_fops = { |
13f16d20 LZ |
6176 | .open = tracing_clock_open, |
6177 | .read = seq_read, | |
6178 | .llseek = seq_lseek, | |
7b85af63 | 6179 | .release = tracing_single_release_tr, |
5079f326 Z |
6180 | .write = tracing_clock_write, |
6181 | }; | |
6182 | ||
debdd57f HT |
6183 | #ifdef CONFIG_TRACER_SNAPSHOT |
6184 | static const struct file_operations snapshot_fops = { | |
6185 | .open = tracing_snapshot_open, | |
6186 | .read = seq_read, | |
6187 | .write = tracing_snapshot_write, | |
098c879e | 6188 | .llseek = tracing_lseek, |
2b6080f2 | 6189 | .release = tracing_snapshot_release, |
debdd57f | 6190 | }; |
debdd57f | 6191 | |
6de58e62 SRRH |
6192 | static const struct file_operations snapshot_raw_fops = { |
6193 | .open = snapshot_raw_open, | |
6194 | .read = tracing_buffers_read, | |
6195 | .release = tracing_buffers_release, | |
6196 | .splice_read = tracing_buffers_splice_read, | |
6197 | .llseek = no_llseek, | |
2cadf913 SR |
6198 | }; |
6199 | ||
6de58e62 SRRH |
6200 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
6201 | ||
2cadf913 SR |
6202 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
6203 | { | |
46ef2be0 | 6204 | struct trace_array *tr = inode->i_private; |
2cadf913 | 6205 | struct ftrace_buffer_info *info; |
7b85af63 | 6206 | int ret; |
2cadf913 SR |
6207 | |
6208 | if (tracing_disabled) | |
6209 | return -ENODEV; | |
6210 | ||
7b85af63 SRRH |
6211 | if (trace_array_get(tr) < 0) |
6212 | return -ENODEV; | |
6213 | ||
2cadf913 | 6214 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
7b85af63 SRRH |
6215 | if (!info) { |
6216 | trace_array_put(tr); | |
2cadf913 | 6217 | return -ENOMEM; |
7b85af63 | 6218 | } |
2cadf913 | 6219 | |
a695cb58 SRRH |
6220 | mutex_lock(&trace_types_lock); |
6221 | ||
cc60cdc9 | 6222 | info->iter.tr = tr; |
46ef2be0 | 6223 | info->iter.cpu_file = tracing_get_cpu(inode); |
b627344f | 6224 | info->iter.trace = tr->current_trace; |
12883efb | 6225 | info->iter.trace_buffer = &tr->trace_buffer; |
cc60cdc9 | 6226 | info->spare = NULL; |
2cadf913 | 6227 | /* Force reading ring buffer for first read */ |
cc60cdc9 | 6228 | info->read = (unsigned int)-1; |
2cadf913 SR |
6229 | |
6230 | filp->private_data = info; | |
6231 | ||
cf6ab6d9 SRRH |
6232 | tr->current_trace->ref++; |
6233 | ||
a695cb58 SRRH |
6234 | mutex_unlock(&trace_types_lock); |
6235 | ||
7b85af63 SRRH |
6236 | ret = nonseekable_open(inode, filp); |
6237 | if (ret < 0) | |
6238 | trace_array_put(tr); | |
6239 | ||
6240 | return ret; | |
2cadf913 SR |
6241 | } |
6242 | ||
cc60cdc9 SR |
6243 | static unsigned int |
6244 | tracing_buffers_poll(struct file *filp, poll_table *poll_table) | |
6245 | { | |
6246 | struct ftrace_buffer_info *info = filp->private_data; | |
6247 | struct trace_iterator *iter = &info->iter; | |
6248 | ||
6249 | return trace_poll(iter, filp, poll_table); | |
6250 | } | |
6251 | ||
2cadf913 SR |
6252 | static ssize_t |
6253 | tracing_buffers_read(struct file *filp, char __user *ubuf, | |
6254 | size_t count, loff_t *ppos) | |
6255 | { | |
6256 | struct ftrace_buffer_info *info = filp->private_data; | |
cc60cdc9 | 6257 | struct trace_iterator *iter = &info->iter; |
2cadf913 | 6258 | ssize_t ret; |
6de58e62 | 6259 | ssize_t size; |
2cadf913 | 6260 | |
2dc5d12b SR |
6261 | if (!count) |
6262 | return 0; | |
6263 | ||
6de58e62 | 6264 | #ifdef CONFIG_TRACER_MAX_TRACE |
d716ff71 SRRH |
6265 | if (iter->snapshot && iter->tr->current_trace->use_max_tr) |
6266 | return -EBUSY; | |
6de58e62 SRRH |
6267 | #endif |
6268 | ||
ddd538f3 | 6269 | if (!info->spare) |
12883efb SRRH |
6270 | info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, |
6271 | iter->cpu_file); | |
ddd538f3 | 6272 | if (!info->spare) |
d716ff71 | 6273 | return -ENOMEM; |
ddd538f3 | 6274 | |
2cadf913 SR |
6275 | /* Do we have previous read data to read? */ |
6276 | if (info->read < PAGE_SIZE) | |
6277 | goto read; | |
6278 | ||
b627344f | 6279 | again: |
cc60cdc9 | 6280 | trace_access_lock(iter->cpu_file); |
12883efb | 6281 | ret = ring_buffer_read_page(iter->trace_buffer->buffer, |
2cadf913 SR |
6282 | &info->spare, |
6283 | count, | |
cc60cdc9 SR |
6284 | iter->cpu_file, 0); |
6285 | trace_access_unlock(iter->cpu_file); | |
2cadf913 | 6286 | |
b627344f SR |
6287 | if (ret < 0) { |
6288 | if (trace_empty(iter)) { | |
d716ff71 SRRH |
6289 | if ((filp->f_flags & O_NONBLOCK)) |
6290 | return -EAGAIN; | |
6291 | ||
e30f53aa | 6292 | ret = wait_on_pipe(iter, false); |
d716ff71 SRRH |
6293 | if (ret) |
6294 | return ret; | |
6295 | ||
b627344f SR |
6296 | goto again; |
6297 | } | |
d716ff71 | 6298 | return 0; |
b627344f | 6299 | } |
436fc280 | 6300 | |
436fc280 | 6301 | info->read = 0; |
b627344f | 6302 | read: |
2cadf913 SR |
6303 | size = PAGE_SIZE - info->read; |
6304 | if (size > count) | |
6305 | size = count; | |
6306 | ||
6307 | ret = copy_to_user(ubuf, info->spare + info->read, size); | |
d716ff71 SRRH |
6308 | if (ret == size) |
6309 | return -EFAULT; | |
6310 | ||
2dc5d12b SR |
6311 | size -= ret; |
6312 | ||
2cadf913 SR |
6313 | *ppos += size; |
6314 | info->read += size; | |
6315 | ||
6316 | return size; | |
6317 | } | |
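The read path above hands the ring buffer to user space one page at a time: a spare page is allocated on first use, ring_buffer_read_page() fills it, and info->read tracks how much of the current page a partial read() has already consumed. A sketch of the consuming side for the per-cpu trace_pipe_raw file served by these fops; the mount point and the 4096-byte page size are assumptions:

/* --- Editor's sketch, not part of the kernel source --- */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* at most one ring-buffer page per read */
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, page, sizeof(page))) > 0)
		fprintf(stderr, "got %zd bytes\n", n);	/* raw binary pages */
	close(fd);
	return 0;
}
/* --- end sketch --- */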
6318 | ||
6319 | static int tracing_buffers_release(struct inode *inode, struct file *file) | |
6320 | { | |
6321 | struct ftrace_buffer_info *info = file->private_data; | |
cc60cdc9 | 6322 | struct trace_iterator *iter = &info->iter; |
2cadf913 | 6323 | |
a695cb58 SRRH |
6324 | mutex_lock(&trace_types_lock); |
6325 | ||
cf6ab6d9 SRRH |
6326 | iter->tr->current_trace->ref--; |
6327 | ||
ff451961 | 6328 | __trace_array_put(iter->tr); |
2cadf913 | 6329 | |
ddd538f3 | 6330 | if (info->spare) |
12883efb | 6331 | ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); |
2cadf913 SR |
6332 | kfree(info); |
6333 | ||
a695cb58 SRRH |
6334 | mutex_unlock(&trace_types_lock); |
6335 | ||
2cadf913 SR |
6336 | return 0; |
6337 | } | |
6338 | ||
6339 | struct buffer_ref { | |
6340 | struct ring_buffer *buffer; | |
6341 | void *page; | |
6342 | int ref; | |
6343 | }; | |
6344 | ||
6345 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | |
6346 | struct pipe_buffer *buf) | |
6347 | { | |
6348 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | |
6349 | ||
6350 | if (--ref->ref) | |
6351 | return; | |
6352 | ||
6353 | ring_buffer_free_read_page(ref->buffer, ref->page); | |
6354 | kfree(ref); | |
6355 | buf->private = 0; | |
6356 | } | |
6357 | ||
2cadf913 SR |
6358 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, |
6359 | struct pipe_buffer *buf) | |
6360 | { | |
6361 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | |
6362 | ||
6363 | ref->ref++; | |
6364 | } | |
6365 | ||
6366 | /* Pipe buffer operations for a buffer. */ | |
28dfef8f | 6367 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
2cadf913 | 6368 | .can_merge = 0, |
2cadf913 SR |
6369 | .confirm = generic_pipe_buf_confirm, |
6370 | .release = buffer_pipe_buf_release, | |
d55cb6cf | 6371 | .steal = generic_pipe_buf_steal, |
2cadf913 SR |
6372 | .get = buffer_pipe_buf_get, |
6373 | }; | |
6374 | ||
6375 | /* | |
6376 | * Callback from splice_to_pipe(); releases any remaining pages
6377 | * at the end of the spd in case we errored out while filling the pipe.
6378 | */ | |
6379 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | |
6380 | { | |
6381 | struct buffer_ref *ref = | |
6382 | (struct buffer_ref *)spd->partial[i].private; | |
6383 | ||
6384 | if (--ref->ref) | |
6385 | return; | |
6386 | ||
6387 | ring_buffer_free_read_page(ref->buffer, ref->page); | |
6388 | kfree(ref); | |
6389 | spd->partial[i].private = 0; | |
6390 | } | |
6391 | ||
6392 | static ssize_t | |
6393 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |
6394 | struct pipe_inode_info *pipe, size_t len, | |
6395 | unsigned int flags) | |
6396 | { | |
6397 | struct ftrace_buffer_info *info = file->private_data; | |
cc60cdc9 | 6398 | struct trace_iterator *iter = &info->iter; |
35f3d14d JA |
6399 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; |
6400 | struct page *pages_def[PIPE_DEF_BUFFERS]; | |
2cadf913 | 6401 | struct splice_pipe_desc spd = { |
35f3d14d JA |
6402 | .pages = pages_def, |
6403 | .partial = partial_def, | |
047fe360 | 6404 | .nr_pages_max = PIPE_DEF_BUFFERS, |
2cadf913 SR |
6405 | .flags = flags, |
6406 | .ops = &buffer_pipe_buf_ops, | |
6407 | .spd_release = buffer_spd_release, | |
6408 | }; | |
6409 | struct buffer_ref *ref; | |
93459c6c | 6410 | int entries, size, i; |
07906da7 | 6411 | ssize_t ret = 0; |
2cadf913 | 6412 | |
6de58e62 | 6413 | #ifdef CONFIG_TRACER_MAX_TRACE |
d716ff71 SRRH |
6414 | if (iter->snapshot && iter->tr->current_trace->use_max_tr) |
6415 | return -EBUSY; | |
6de58e62 SRRH |
6416 | #endif |
6417 | ||
d716ff71 SRRH |
6418 | if (*ppos & (PAGE_SIZE - 1)) |
6419 | return -EINVAL; | |
93cfb3c9 LJ |
6420 | |
6421 | if (len & (PAGE_SIZE - 1)) { | |
d716ff71 SRRH |
6422 | if (len < PAGE_SIZE) |
6423 | return -EINVAL; | |
93cfb3c9 LJ |
6424 | len &= PAGE_MASK; |
6425 | } | |
6426 | ||
1ae2293d AV |
6427 | if (splice_grow_spd(pipe, &spd)) |
6428 | return -ENOMEM; | |
6429 | ||
cc60cdc9 SR |
6430 | again: |
6431 | trace_access_lock(iter->cpu_file); | |
12883efb | 6432 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
93459c6c | 6433 | |
a786c06d | 6434 | for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { |
2cadf913 SR |
6435 | struct page *page; |
6436 | int r; | |
6437 | ||
6438 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | |
07906da7 RV |
6439 | if (!ref) { |
6440 | ret = -ENOMEM; | |
2cadf913 | 6441 | break; |
07906da7 | 6442 | } |
2cadf913 | 6443 | |
7267fa68 | 6444 | ref->ref = 1; |
12883efb | 6445 | ref->buffer = iter->trace_buffer->buffer; |
cc60cdc9 | 6446 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); |
2cadf913 | 6447 | if (!ref->page) { |
07906da7 | 6448 | ret = -ENOMEM; |
2cadf913 SR |
6449 | kfree(ref); |
6450 | break; | |
6451 | } | |
6452 | ||
6453 | r = ring_buffer_read_page(ref->buffer, &ref->page, | |
cc60cdc9 | 6454 | len, iter->cpu_file, 1); |
2cadf913 | 6455 | if (r < 0) { |
7ea59064 | 6456 | ring_buffer_free_read_page(ref->buffer, ref->page); |
2cadf913 SR |
6457 | kfree(ref); |
6458 | break; | |
6459 | } | |
6460 | ||
6461 | /* | |
6462 | * Zero out any leftover data; this page is going to
6463 | * user land.
6464 | */ | |
6465 | size = ring_buffer_page_len(ref->page); | |
6466 | if (size < PAGE_SIZE) | |
6467 | memset(ref->page + size, 0, PAGE_SIZE - size); | |
6468 | ||
6469 | page = virt_to_page(ref->page); | |
6470 | ||
6471 | spd.pages[i] = page; | |
6472 | spd.partial[i].len = PAGE_SIZE; | |
6473 | spd.partial[i].offset = 0; | |
6474 | spd.partial[i].private = (unsigned long)ref; | |
6475 | spd.nr_pages++; | |
93cfb3c9 | 6476 | *ppos += PAGE_SIZE; |
93459c6c | 6477 | |
12883efb | 6478 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
2cadf913 SR |
6479 | } |
6480 | ||
cc60cdc9 | 6481 | trace_access_unlock(iter->cpu_file); |
2cadf913 SR |
6482 | spd.nr_pages = i; |
6483 | ||
6484 | /* did we read anything? */ | |
6485 | if (!spd.nr_pages) { | |
07906da7 | 6486 | if (ret) |
1ae2293d | 6487 | goto out; |
d716ff71 | 6488 | |
1ae2293d | 6489 | ret = -EAGAIN; |
d716ff71 | 6490 | if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) |
1ae2293d | 6491 | goto out; |
07906da7 | 6492 | |
e30f53aa | 6493 | ret = wait_on_pipe(iter, true); |
8b8b3683 | 6494 | if (ret) |
1ae2293d | 6495 | goto out; |
e30f53aa | 6496 | |
cc60cdc9 | 6497 | goto again; |
2cadf913 SR |
6498 | } |
6499 | ||
6500 | ret = splice_to_pipe(pipe, &spd); | |
1ae2293d | 6501 | out: |
047fe360 | 6502 | splice_shrink_spd(&spd); |
6de58e62 | 6503 | |
2cadf913 SR |
6504 | return ret; |
6505 | } | |
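tracing_buffers_splice_read() only accepts page-aligned offsets and whole-page lengths because each buffer_ref wraps exactly one ring-buffer page, which is spliced into the pipe without copying. A hedged sketch of the zero-copy consumer pattern (the path is an assumption; this mirrors how tools like trace-cmd record raw data):

/* --- Editor's sketch, not part of the kernel source --- */
#define _GNU_SOURCE		/* for splice(2) */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int src = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		       O_RDONLY);
	int dst = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (src < 0 || dst < 0 || pipe(fds) < 0)
		return 1;
	/* Whole pages only, matching the PAGE_SIZE checks above. */
	while ((n = splice(src, NULL, fds[1], NULL, 4096, SPLICE_F_MOVE)) > 0)
		if (splice(fds[0], NULL, dst, NULL, n, SPLICE_F_MOVE) < 0)
			break;
	return 0;
}
/* --- end sketch --- */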
6506 | ||
6507 | static const struct file_operations tracing_buffers_fops = { | |
6508 | .open = tracing_buffers_open, | |
6509 | .read = tracing_buffers_read, | |
cc60cdc9 | 6510 | .poll = tracing_buffers_poll, |
2cadf913 SR |
6511 | .release = tracing_buffers_release, |
6512 | .splice_read = tracing_buffers_splice_read, | |
6513 | .llseek = no_llseek, | |
6514 | }; | |
6515 | ||
c8d77183 SR |
6516 | static ssize_t |
6517 | tracing_stats_read(struct file *filp, char __user *ubuf, | |
6518 | size_t count, loff_t *ppos) | |
6519 | { | |
4d3435b8 ON |
6520 | struct inode *inode = file_inode(filp); |
6521 | struct trace_array *tr = inode->i_private; | |
12883efb | 6522 | struct trace_buffer *trace_buf = &tr->trace_buffer; |
4d3435b8 | 6523 | int cpu = tracing_get_cpu(inode); |
c8d77183 SR |
6524 | struct trace_seq *s; |
6525 | unsigned long cnt; | |
c64e148a VN |
6526 | unsigned long long t; |
6527 | unsigned long usec_rem; | |
c8d77183 | 6528 | |
e4f2d10f | 6529 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
c8d77183 | 6530 | if (!s) |
a646365c | 6531 | return -ENOMEM; |
c8d77183 SR |
6532 | |
6533 | trace_seq_init(s); | |
6534 | ||
12883efb | 6535 | cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); |
c8d77183 SR |
6536 | trace_seq_printf(s, "entries: %ld\n", cnt); |
6537 | ||
12883efb | 6538 | cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); |
c8d77183 SR |
6539 | trace_seq_printf(s, "overrun: %ld\n", cnt); |
6540 | ||
12883efb | 6541 | cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); |
c8d77183 SR |
6542 | trace_seq_printf(s, "commit overrun: %ld\n", cnt); |
6543 | ||
12883efb | 6544 | cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); |
c64e148a VN |
6545 | trace_seq_printf(s, "bytes: %ld\n", cnt); |
6546 | ||
58e8eedf | 6547 | if (trace_clocks[tr->clock_id].in_ns) { |
11043d8b | 6548 | /* local or global for trace_clock */ |
12883efb | 6549 | t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); |
11043d8b YY |
6550 | usec_rem = do_div(t, USEC_PER_SEC); |
6551 | trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", | |
6552 | t, usec_rem); | |
6553 | ||
12883efb | 6554 | t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); |
11043d8b YY |
6555 | usec_rem = do_div(t, USEC_PER_SEC); |
6556 | trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); | |
6557 | } else { | |
6558 | /* counter or tsc mode for trace_clock */ | |
6559 | trace_seq_printf(s, "oldest event ts: %llu\n", | |
12883efb | 6560 | ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); |
c64e148a | 6561 | |
11043d8b | 6562 | trace_seq_printf(s, "now ts: %llu\n", |
12883efb | 6563 | ring_buffer_time_stamp(trace_buf->buffer, cpu)); |
11043d8b | 6564 | } |
c64e148a | 6565 | |
12883efb | 6566 | cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); |
884bfe89 SP |
6567 | trace_seq_printf(s, "dropped events: %ld\n", cnt); |
6568 | ||
12883efb | 6569 | cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); |
ad964704 SRRH |
6570 | trace_seq_printf(s, "read events: %ld\n", cnt); |
6571 | ||
5ac48378 SRRH |
6572 | count = simple_read_from_buffer(ubuf, count, ppos, |
6573 | s->buffer, trace_seq_used(s)); | |
c8d77183 SR |
6574 | |
6575 | kfree(s); | |
6576 | ||
6577 | return count; | |
6578 | } | |
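The in_ns branch above turns a nanosecond timestamp into a seconds.microseconds pair: ns2usecs() divides by 1000, then do_div() divides in place by USEC_PER_SEC and hands back the remainder. The same arithmetic as a stand-alone sketch, with plain C stand-ins for the kernel helpers:

/* --- Editor's sketch, not part of the kernel source --- */
#include <inttypes.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
	uint64_t ts_ns = 5123456789ULL;		/* example timestamp, ~5.12 s */
	uint64_t t = ts_ns / 1000;		/* ns2usecs() */
	uint64_t usec_rem = t % USEC_PER_SEC;	/* what do_div() returns */

	t /= USEC_PER_SEC;			/* do_div() leaves the quotient in t */
	printf("oldest event ts: %5" PRIu64 ".%06" PRIu64 "\n", t, usec_rem);
	return 0;
}
/* --- end sketch --- */

This prints "oldest event ts:     5.123456", matching the format emitted by tracing_stats_read().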
6579 | ||
6580 | static const struct file_operations tracing_stats_fops = { | |
4d3435b8 | 6581 | .open = tracing_open_generic_tr, |
c8d77183 | 6582 | .read = tracing_stats_read, |
b444786f | 6583 | .llseek = generic_file_llseek, |
4d3435b8 | 6584 | .release = tracing_release_generic_tr, |
c8d77183 SR |
6585 | }; |
6586 | ||
bc0c38d1 SR |
6587 | #ifdef CONFIG_DYNAMIC_FTRACE |
6588 | ||
b807c3d0 SR |
6589 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) |
6590 | { | |
6591 | return 0; | |
6592 | } | |
6593 | ||
bc0c38d1 | 6594 | static ssize_t |
b807c3d0 | 6595 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
bc0c38d1 SR |
6596 | size_t cnt, loff_t *ppos) |
6597 | { | |
a26a2a27 SR |
6598 | static char ftrace_dyn_info_buffer[1024]; |
6599 | static DEFINE_MUTEX(dyn_info_mutex); | |
bc0c38d1 | 6600 | unsigned long *p = filp->private_data; |
b807c3d0 | 6601 | char *buf = ftrace_dyn_info_buffer; |
a26a2a27 | 6602 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); |
bc0c38d1 SR |
6603 | int r; |
6604 | ||
b807c3d0 SR |
6605 | mutex_lock(&dyn_info_mutex); |
6606 | r = sprintf(buf, "%ld ", *p); | |
4bf39a94 | 6607 | |
a26a2a27 | 6608 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
b807c3d0 SR |
6609 | buf[r++] = '\n'; |
6610 | ||
6611 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
6612 | ||
6613 | mutex_unlock(&dyn_info_mutex); | |
6614 | ||
6615 | return r; | |
bc0c38d1 SR |
6616 | } |
6617 | ||
5e2336a0 | 6618 | static const struct file_operations tracing_dyn_info_fops = { |
4bf39a94 | 6619 | .open = tracing_open_generic, |
b807c3d0 | 6620 | .read = tracing_read_dyn_info, |
b444786f | 6621 | .llseek = generic_file_llseek, |
bc0c38d1 | 6622 | }; |
77fd5c15 | 6623 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
bc0c38d1 | 6624 | |
77fd5c15 SRRH |
6625 | #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) |
6626 | static void | |
6627 | ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) | |
6628 | { | |
6629 | tracing_snapshot(); | |
6630 | } | |
bc0c38d1 | 6631 | |
77fd5c15 SRRH |
6632 | static void |
6633 | ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) | |
bc0c38d1 | 6634 | { |
77fd5c15 SRRH |
6635 | unsigned long *count = (long *)data; |
6636 | ||
6637 | if (!*count) | |
6638 | return; | |
bc0c38d1 | 6639 | |
77fd5c15 SRRH |
6640 | if (*count != -1) |
6641 | (*count)--; | |
6642 | ||
6643 | tracing_snapshot(); | |
6644 | } | |
6645 | ||
6646 | static int | |
6647 | ftrace_snapshot_print(struct seq_file *m, unsigned long ip, | |
6648 | struct ftrace_probe_ops *ops, void *data) | |
6649 | { | |
6650 | long count = (long)data; | |
6651 | ||
6652 | seq_printf(m, "%ps:", (void *)ip); | |
6653 | ||
fa6f0cc7 | 6654 | seq_puts(m, "snapshot"); |
77fd5c15 SRRH |
6655 | |
6656 | if (count == -1) | |
fa6f0cc7 | 6657 | seq_puts(m, ":unlimited\n"); |
77fd5c15 SRRH |
6658 | else |
6659 | seq_printf(m, ":count=%ld\n", count); | |
6660 | ||
6661 | return 0; | |
6662 | } | |
6663 | ||
6664 | static struct ftrace_probe_ops snapshot_probe_ops = { | |
6665 | .func = ftrace_snapshot, | |
6666 | .print = ftrace_snapshot_print, | |
6667 | }; | |
6668 | ||
6669 | static struct ftrace_probe_ops snapshot_count_probe_ops = { | |
6670 | .func = ftrace_count_snapshot, | |
6671 | .print = ftrace_snapshot_print, | |
6672 | }; | |
6673 | ||
6674 | static int | |
6675 | ftrace_trace_snapshot_callback(struct ftrace_hash *hash, | |
6676 | char *glob, char *cmd, char *param, int enable) | |
6677 | { | |
6678 | struct ftrace_probe_ops *ops; | |
6679 | void *count = (void *)-1; | |
6680 | char *number; | |
6681 | int ret; | |
6682 | ||
6683 | /* hash funcs only work with set_ftrace_filter */ | |
6684 | if (!enable) | |
6685 | return -EINVAL; | |
6686 | ||
6687 | ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; | |
6688 | ||
6689 | if (glob[0] == '!') { | |
6690 | unregister_ftrace_function_probe_func(glob+1, ops); | |
6691 | return 0; | |
6692 | } | |
6693 | ||
6694 | if (!param) | |
6695 | goto out_reg; | |
6696 | ||
6697 | number = strsep(¶m, ":"); | |
6698 | ||
6699 | if (!strlen(number)) | |
6700 | goto out_reg; | |
6701 | ||
6702 | /* | |
6703 | * We use the callback data field (which is a pointer) | |
6704 | * as our counter. | |
6705 | */ | |
6706 | ret = kstrtoul(number, 0, (unsigned long *)&count); | |
6707 | if (ret) | |
6708 | return ret; | |
6709 | ||
6710 | out_reg: | |
6711 | ret = register_ftrace_function_probe(glob, ops, count); | |
6712 | ||
6713 | if (ret >= 0) | |
6714 | alloc_snapshot(&global_trace); | |
6715 | ||
6716 | return ret < 0 ? ret : 0; | |
6717 | } | |
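The callback above implements the "snapshot" function command: writing "<function>:snapshot" to set_ftrace_filter arms a probe that snapshots on every hit, "<function>:snapshot:<count>" limits it to count hits, and a leading '!' disarms it. A hedged user-space sketch of arming it (the tracefs path is an assumption):

/* --- Editor's sketch, not part of the kernel source --- */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "schedule" is just an example; "!schedule:snapshot" disarms it */
	const char *cmd = "schedule:snapshot:5";
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0)
		ret = 1;
	close(fd);
	return ret;
}
/* --- end sketch --- */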
6718 | ||
6719 | static struct ftrace_func_command ftrace_snapshot_cmd = { | |
6720 | .name = "snapshot", | |
6721 | .func = ftrace_trace_snapshot_callback, | |
6722 | }; | |
6723 | ||
38de93ab | 6724 | static __init int register_snapshot_cmd(void) |
77fd5c15 SRRH |
6725 | { |
6726 | return register_ftrace_command(&ftrace_snapshot_cmd); | |
6727 | } | |
6728 | #else | |
38de93ab | 6729 | static inline __init int register_snapshot_cmd(void) { return 0; } |
77fd5c15 | 6730 | #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ |
bc0c38d1 | 6731 | |
7eeafbca | 6732 | static struct dentry *tracing_get_dentry(struct trace_array *tr) |
bc0c38d1 | 6733 | { |
8434dc93 SRRH |
6734 | if (WARN_ON(!tr->dir)) |
6735 | return ERR_PTR(-ENODEV); | |
6736 | ||
6737 | /* Top directory uses NULL as the parent */ | |
6738 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) | |
6739 | return NULL; | |
6740 | ||
6741 | /* All sub buffers have a descriptor */ | |
2b6080f2 | 6742 | return tr->dir; |
bc0c38d1 SR |
6743 | } |
6744 | ||
2b6080f2 | 6745 | static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) |
b04cc6b1 | 6746 | { |
b04cc6b1 FW |
6747 | struct dentry *d_tracer; |
6748 | ||
2b6080f2 SR |
6749 | if (tr->percpu_dir) |
6750 | return tr->percpu_dir; | |
b04cc6b1 | 6751 | |
7eeafbca | 6752 | d_tracer = tracing_get_dentry(tr); |
14a5ae40 | 6753 | if (IS_ERR(d_tracer)) |
b04cc6b1 FW |
6754 | return NULL; |
6755 | ||
8434dc93 | 6756 | tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); |
b04cc6b1 | 6757 | |
2b6080f2 | 6758 | WARN_ONCE(!tr->percpu_dir, |
8434dc93 | 6759 | "Could not create tracefs directory 'per_cpu/%d'\n", cpu); |
b04cc6b1 | 6760 | |
2b6080f2 | 6761 | return tr->percpu_dir; |
b04cc6b1 FW |
6762 | } |
6763 | ||
649e9c70 ON |
6764 | static struct dentry * |
6765 | trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, | |
6766 | void *data, long cpu, const struct file_operations *fops) | |
6767 | { | |
6768 | struct dentry *ret = trace_create_file(name, mode, parent, data, fops); | |
6769 | ||
6770 | if (ret) /* See tracing_get_cpu() */ | |
7682c918 | 6771 | d_inode(ret)->i_cdev = (void *)(cpu + 1); |
649e9c70 ON |
6772 | return ret; |
6773 | } | |
6774 | ||
2b6080f2 | 6775 | static void |
8434dc93 | 6776 | tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) |
b04cc6b1 | 6777 | { |
2b6080f2 | 6778 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); |
5452af66 | 6779 | struct dentry *d_cpu; |
dd49a38c | 6780 | char cpu_dir[30]; /* 30 characters should be more than enough */ |
b04cc6b1 | 6781 | |
0a3d7ce7 NK |
6782 | if (!d_percpu) |
6783 | return; | |
6784 | ||
dd49a38c | 6785 | snprintf(cpu_dir, 30, "cpu%ld", cpu); |
8434dc93 | 6786 | d_cpu = tracefs_create_dir(cpu_dir, d_percpu); |
8656e7a2 | 6787 | if (!d_cpu) { |
a395d6a7 | 6788 | pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); |
8656e7a2 FW |
6789 | return; |
6790 | } | |
b04cc6b1 | 6791 | |
8656e7a2 | 6792 | /* per cpu trace_pipe */ |
649e9c70 | 6793 | trace_create_cpu_file("trace_pipe", 0444, d_cpu, |
15544209 | 6794 | tr, cpu, &tracing_pipe_fops); |
b04cc6b1 FW |
6795 | |
6796 | /* per cpu trace */ | |
649e9c70 | 6797 | trace_create_cpu_file("trace", 0644, d_cpu, |
6484c71c | 6798 | tr, cpu, &tracing_fops); |
7f96f93f | 6799 | |
649e9c70 | 6800 | trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, |
46ef2be0 | 6801 | tr, cpu, &tracing_buffers_fops); |
7f96f93f | 6802 | |
649e9c70 | 6803 | trace_create_cpu_file("stats", 0444, d_cpu, |
4d3435b8 | 6804 | tr, cpu, &tracing_stats_fops); |
438ced17 | 6805 | |
649e9c70 | 6806 | trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, |
0bc392ee | 6807 | tr, cpu, &tracing_entries_fops); |
f1affcaa SRRH |
6808 | |
6809 | #ifdef CONFIG_TRACER_SNAPSHOT | |
649e9c70 | 6810 | trace_create_cpu_file("snapshot", 0644, d_cpu, |
6484c71c | 6811 | tr, cpu, &snapshot_fops); |
6de58e62 | 6812 | |
649e9c70 | 6813 | trace_create_cpu_file("snapshot_raw", 0444, d_cpu, |
46ef2be0 | 6814 | tr, cpu, &snapshot_raw_fops); |
f1affcaa | 6815 | #endif |
b04cc6b1 FW |
6816 | } |
6817 | ||
60a11774 SR |
6818 | #ifdef CONFIG_FTRACE_SELFTEST |
6819 | /* Let selftest have access to static functions in this file */ | |
6820 | #include "trace_selftest.c" | |
6821 | #endif | |
6822 | ||
577b785f SR |
6823 | static ssize_t |
6824 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | |
6825 | loff_t *ppos) | |
6826 | { | |
6827 | struct trace_option_dentry *topt = filp->private_data; | |
6828 | char *buf; | |
6829 | ||
6830 | if (topt->flags->val & topt->opt->bit) | |
6831 | buf = "1\n"; | |
6832 | else | |
6833 | buf = "0\n"; | |
6834 | ||
6835 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | |
6836 | } | |
6837 | ||
6838 | static ssize_t | |
6839 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |
6840 | loff_t *ppos) | |
6841 | { | |
6842 | struct trace_option_dentry *topt = filp->private_data; | |
6843 | unsigned long val; | |
577b785f SR |
6844 | int ret; |
6845 | ||
22fe9b54 PH |
6846 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
6847 | if (ret) | |
577b785f SR |
6848 | return ret; |
6849 | ||
8d18eaaf LZ |
6850 | if (val != 0 && val != 1) |
6851 | return -EINVAL; | |
577b785f | 6852 | |
8d18eaaf | 6853 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
577b785f | 6854 | mutex_lock(&trace_types_lock); |
8c1a49ae | 6855 | ret = __set_tracer_option(topt->tr, topt->flags, |
c757bea9 | 6856 | topt->opt, !val); |
577b785f SR |
6857 | mutex_unlock(&trace_types_lock); |
6858 | if (ret) | |
6859 | return ret; | |
577b785f SR |
6860 | } |
6861 | ||
6862 | *ppos += cnt; | |
6863 | ||
6864 | return cnt; | |
6865 | } | |
6866 | ||
6867 | ||
6868 | static const struct file_operations trace_options_fops = { | |
6869 | .open = tracing_open_generic, | |
6870 | .read = trace_options_read, | |
6871 | .write = trace_options_write, | |
b444786f | 6872 | .llseek = generic_file_llseek, |
577b785f SR |
6873 | }; |
6874 | ||
9a38a885 SRRH |
6875 | /* |
6876 | * In order to pass in both the trace_array descriptor as well as the index | |
6877 | * to the flag that the trace option file represents, the trace_array | |
6878 | * has a character array of trace_flags_index[], which holds the index | |
6879 | * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. | |
6880 | * The address of this character array is passed to the flag option file | |
6881 | * read/write callbacks. | |
6882 | * | |
6883 | * In order to extract both the index and the trace_array descriptor, | |
6884 | * get_tr_index() uses the following algorithm. | |
6885 | * | |
6886 | * idx = *ptr; | |
6887 | * | |
6888 | * As the pointer itself contains the address of the index (remember | |
6889 | * index[1] == 1). | |
6890 | * | |
6891 | * Then to get the trace_array descriptor, by subtracting that index | |
6892 | * from the ptr, we get to the start of the index itself. | |
6893 | * | |
6894 | * ptr - idx == &index[0] | |
6895 | * | |
6896 | * Then a simple container_of() from that pointer gets us to the | |
6897 | * trace_array descriptor. | |
6898 | */ | |
6899 | static void get_tr_index(void *data, struct trace_array **ptr, | |
6900 | unsigned int *pindex) | |
6901 | { | |
6902 | *pindex = *(unsigned char *)data; | |
6903 | ||
6904 | *ptr = container_of(data - *pindex, struct trace_array, | |
6905 | trace_flags_index); | |
6906 | } | |
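The trick documented above, reduced to a stand-alone program. The toy struct and names are hypothetical stand-ins for struct trace_array and trace_flags_index[]:

/* --- Editor's sketch, not part of the kernel source --- */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_array {			/* stands in for struct trace_array */
	int some_state;
	unsigned char index[8];		/* stands in for trace_flags_index[] */
};

int main(void)
{
	struct toy_array ta = { .some_state = 42 };
	unsigned int i;

	for (i = 0; i < 8; i++)		/* filled so that index[i] == i */
		ta.index[i] = i;

	/* What a flag file gets as its private data: &index[3] */
	void *data = &ta.index[3];
	unsigned int idx = *(unsigned char *)data;		/* idx == 3 */
	struct toy_array *tr = container_of((void *)((char *)data - idx),
					    struct toy_array, index);

	printf("idx=%u state=%d\n", idx, tr->some_state);	/* idx=3 state=42 */
	return 0;
}
/* --- end sketch --- */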
6907 | ||
a8259075 SR |
6908 | static ssize_t |
6909 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | |
6910 | loff_t *ppos) | |
6911 | { | |
9a38a885 SRRH |
6912 | void *tr_index = filp->private_data; |
6913 | struct trace_array *tr; | |
6914 | unsigned int index; | |
a8259075 SR |
6915 | char *buf; |
6916 | ||
9a38a885 SRRH |
6917 | get_tr_index(tr_index, &tr, &index); |
6918 | ||
6919 | if (tr->trace_flags & (1 << index)) | |
a8259075 SR |
6920 | buf = "1\n"; |
6921 | else | |
6922 | buf = "0\n"; | |
6923 | ||
6924 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | |
6925 | } | |
6926 | ||
6927 | static ssize_t | |
6928 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |
6929 | loff_t *ppos) | |
6930 | { | |
9a38a885 SRRH |
6931 | void *tr_index = filp->private_data; |
6932 | struct trace_array *tr; | |
6933 | unsigned int index; | |
a8259075 SR |
6934 | unsigned long val; |
6935 | int ret; | |
6936 | ||
9a38a885 SRRH |
6937 | get_tr_index(tr_index, &tr, &index); |
6938 | ||
22fe9b54 PH |
6939 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
6940 | if (ret) | |
a8259075 SR |
6941 | return ret; |
6942 | ||
f2d84b65 | 6943 | if (val != 0 && val != 1) |
a8259075 | 6944 | return -EINVAL; |
69d34da2 SRRH |
6945 | |
6946 | mutex_lock(&trace_types_lock); | |
2b6080f2 | 6947 | ret = set_tracer_flag(tr, 1 << index, val); |
69d34da2 | 6948 | mutex_unlock(&trace_types_lock); |
a8259075 | 6949 | |
613f04a0 SRRH |
6950 | if (ret < 0) |
6951 | return ret; | |
6952 | ||
a8259075 SR |
6953 | *ppos += cnt; |
6954 | ||
6955 | return cnt; | |
6956 | } | |
6957 | ||
a8259075 SR |
6958 | static const struct file_operations trace_options_core_fops = { |
6959 | .open = tracing_open_generic, | |
6960 | .read = trace_options_core_read, | |
6961 | .write = trace_options_core_write, | |
b444786f | 6962 | .llseek = generic_file_llseek, |
a8259075 SR |
6963 | }; |
6964 | ||
5452af66 | 6965 | struct dentry *trace_create_file(const char *name, |
f4ae40a6 | 6966 | umode_t mode, |
5452af66 FW |
6967 | struct dentry *parent, |
6968 | void *data, | |
6969 | const struct file_operations *fops) | |
6970 | { | |
6971 | struct dentry *ret; | |
6972 | ||
8434dc93 | 6973 | ret = tracefs_create_file(name, mode, parent, data, fops); |
5452af66 | 6974 | if (!ret) |
a395d6a7 | 6975 | pr_warn("Could not create tracefs '%s' entry\n", name); |
5452af66 FW |
6976 | |
6977 | return ret; | |
6978 | } | |
6979 | ||
6980 | ||
2b6080f2 | 6981 | static struct dentry *trace_options_init_dentry(struct trace_array *tr) |
a8259075 SR |
6982 | { |
6983 | struct dentry *d_tracer; | |
a8259075 | 6984 | |
2b6080f2 SR |
6985 | if (tr->options) |
6986 | return tr->options; | |
a8259075 | 6987 | |
7eeafbca | 6988 | d_tracer = tracing_get_dentry(tr); |
14a5ae40 | 6989 | if (IS_ERR(d_tracer)) |
a8259075 SR |
6990 | return NULL; |
6991 | ||
8434dc93 | 6992 | tr->options = tracefs_create_dir("options", d_tracer); |
2b6080f2 | 6993 | if (!tr->options) { |
a395d6a7 | 6994 | pr_warn("Could not create tracefs directory 'options'\n"); |
a8259075 SR |
6995 | return NULL; |
6996 | } | |
6997 | ||
2b6080f2 | 6998 | return tr->options; |
a8259075 SR |
6999 | } |
7000 | ||
577b785f | 7001 | static void |
2b6080f2 SR |
7002 | create_trace_option_file(struct trace_array *tr, |
7003 | struct trace_option_dentry *topt, | |
577b785f SR |
7004 | struct tracer_flags *flags, |
7005 | struct tracer_opt *opt) | |
7006 | { | |
7007 | struct dentry *t_options; | |
577b785f | 7008 | |
2b6080f2 | 7009 | t_options = trace_options_init_dentry(tr); |
577b785f SR |
7010 | if (!t_options) |
7011 | return; | |
7012 | ||
7013 | topt->flags = flags; | |
7014 | topt->opt = opt; | |
2b6080f2 | 7015 | topt->tr = tr; |
577b785f | 7016 | |
5452af66 | 7017 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, |
577b785f SR |
7018 | &trace_options_fops); |
7019 | ||
577b785f SR |
7020 | } |
7021 | ||
37aea98b | 7022 | static void |
2b6080f2 | 7023 | create_trace_option_files(struct trace_array *tr, struct tracer *tracer) |
577b785f SR |
7024 | { |
7025 | struct trace_option_dentry *topts; | |
37aea98b | 7026 | struct trace_options *tr_topts; |
577b785f SR |
7027 | struct tracer_flags *flags; |
7028 | struct tracer_opt *opts; | |
7029 | int cnt; | |
37aea98b | 7030 | int i; |
577b785f SR |
7031 | |
7032 | if (!tracer) | |
37aea98b | 7033 | return; |
577b785f SR |
7034 | |
7035 | flags = tracer->flags; | |
7036 | ||
7037 | if (!flags || !flags->opts) | |
37aea98b SRRH |
7038 | return; |
7039 | ||
7040 | /* | |
7041 | * If this is an instance, only create flags for tracers | |
7042 | * the instance may have. | |
7043 | */ | |
7044 | if (!trace_ok_for_array(tracer, tr)) | |
7045 | return; | |
7046 | ||
7047 | for (i = 0; i < tr->nr_topts; i++) { | |
d39cdd20 CH |
7048 | /* Make sure there are no duplicate flags. */
7049 | if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) | |
37aea98b SRRH |
7050 | return; |
7051 | } | |
577b785f SR |
7052 | |
7053 | opts = flags->opts; | |
7054 | ||
7055 | for (cnt = 0; opts[cnt].name; cnt++) | |
7056 | ; | |
7057 | ||
0cfe8245 | 7058 | topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); |
577b785f | 7059 | if (!topts) |
37aea98b SRRH |
7060 | return; |
7061 | ||
7062 | tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), | |
7063 | GFP_KERNEL); | |
7064 | if (!tr_topts) { | |
7065 | kfree(topts); | |
7066 | return; | |
7067 | } | |
7068 | ||
7069 | tr->topts = tr_topts; | |
7070 | tr->topts[tr->nr_topts].tracer = tracer; | |
7071 | tr->topts[tr->nr_topts].topts = topts; | |
7072 | tr->nr_topts++; | |
577b785f | 7073 | |
41d9c0be | 7074 | for (cnt = 0; opts[cnt].name; cnt++) { |
2b6080f2 | 7075 | create_trace_option_file(tr, &topts[cnt], flags, |
577b785f | 7076 | &opts[cnt]); |
41d9c0be SRRH |
7077 | WARN_ONCE(topts[cnt].entry == NULL, |
7078 | "Failed to create trace option: %s", | |
7079 | opts[cnt].name); | |
7080 | } | |
577b785f SR |
7081 | } |
7082 | ||
a8259075 | 7083 | static struct dentry * |
2b6080f2 SR |
7084 | create_trace_option_core_file(struct trace_array *tr, |
7085 | const char *option, long index) | |
a8259075 SR |
7086 | { |
7087 | struct dentry *t_options; | |
a8259075 | 7088 | |
2b6080f2 | 7089 | t_options = trace_options_init_dentry(tr); |
a8259075 SR |
7090 | if (!t_options) |
7091 | return NULL; | |
7092 | ||
9a38a885 SRRH |
7093 | return trace_create_file(option, 0644, t_options, |
7094 | (void *)&tr->trace_flags_index[index], | |
7095 | &trace_options_core_fops); | |
a8259075 SR |
7096 | } |
7097 | ||
16270145 | 7098 | static void create_trace_options_dir(struct trace_array *tr) |
a8259075 SR |
7099 | { |
7100 | struct dentry *t_options; | |
16270145 | 7101 | bool top_level = tr == &global_trace; |
a8259075 SR |
7102 | int i; |
7103 | ||
2b6080f2 | 7104 | t_options = trace_options_init_dentry(tr); |
a8259075 SR |
7105 | if (!t_options) |
7106 | return; | |
7107 | ||
16270145 SRRH |
7108 | for (i = 0; trace_options[i]; i++) { |
7109 | if (top_level || | |
7110 | !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) | |
7111 | create_trace_option_core_file(tr, trace_options[i], i); | |
7112 | } | |
a8259075 SR |
7113 | } |
7114 | ||
499e5470 SR |
7115 | static ssize_t |
7116 | rb_simple_read(struct file *filp, char __user *ubuf, | |
7117 | size_t cnt, loff_t *ppos) | |
7118 | { | |
348f0fc2 | 7119 | struct trace_array *tr = filp->private_data; |
499e5470 SR |
7120 | char buf[64]; |
7121 | int r; | |
7122 | ||
10246fa3 | 7123 | r = tracer_tracing_is_on(tr); |
499e5470 SR |
7124 | r = sprintf(buf, "%d\n", r); |
7125 | ||
7126 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
7127 | } | |
7128 | ||
7129 | static ssize_t | |
7130 | rb_simple_write(struct file *filp, const char __user *ubuf, | |
7131 | size_t cnt, loff_t *ppos) | |
7132 | { | |
348f0fc2 | 7133 | struct trace_array *tr = filp->private_data; |
12883efb | 7134 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
499e5470 SR |
7135 | unsigned long val; |
7136 | int ret; | |
7137 | ||
7138 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | |
7139 | if (ret) | |
7140 | return ret; | |
7141 | ||
7142 | if (buffer) { | |
2df8f8a6 SR |
7143 | mutex_lock(&trace_types_lock); |
7144 | if (val) { | |
10246fa3 | 7145 | tracer_tracing_on(tr); |
2b6080f2 SR |
7146 | if (tr->current_trace->start) |
7147 | tr->current_trace->start(tr); | |
2df8f8a6 | 7148 | } else { |
10246fa3 | 7149 | tracer_tracing_off(tr); |
2b6080f2 SR |
7150 | if (tr->current_trace->stop) |
7151 | tr->current_trace->stop(tr); | |
2df8f8a6 SR |
7152 | } |
7153 | mutex_unlock(&trace_types_lock); | |
499e5470 SR |
7154 | } |
7155 | ||
7156 | (*ppos)++; | |
7157 | ||
7158 | return cnt; | |
7159 | } | |
7160 | ||
7161 | static const struct file_operations rb_simple_fops = { | |
7b85af63 | 7162 | .open = tracing_open_generic_tr, |
499e5470 SR |
7163 | .read = rb_simple_read, |
7164 | .write = rb_simple_write, | |
7b85af63 | 7165 | .release = tracing_release_generic_tr, |
499e5470 SR |
7166 | .llseek = default_llseek, |
7167 | }; | |
7168 | ||
277ba044 SR |
7169 | struct dentry *trace_instance_dir; |
7170 | ||
7171 | static void | |
8434dc93 | 7172 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
277ba044 | 7173 | |
55034cd6 SRRH |
7174 | static int |
7175 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) | |
277ba044 SR |
7176 | { |
7177 | enum ring_buffer_flags rb_flags; | |
737223fb | 7178 | |
983f938a | 7179 | rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; |
737223fb | 7180 | |
dced341b SRRH |
7181 | buf->tr = tr; |
7182 | ||
55034cd6 SRRH |
7183 | buf->buffer = ring_buffer_alloc(size, rb_flags); |
7184 | if (!buf->buffer) | |
7185 | return -ENOMEM; | |
737223fb | 7186 | |
55034cd6 SRRH |
7187 | buf->data = alloc_percpu(struct trace_array_cpu); |
7188 | if (!buf->data) { | |
7189 | ring_buffer_free(buf->buffer); | |
7190 | return -ENOMEM; | |
7191 | } | |
737223fb | 7192 | |
737223fb SRRH |
7193 | /* Allocate the first page for all buffers */ |
7194 | set_buffer_entries(&tr->trace_buffer, | |
7195 | ring_buffer_size(tr->trace_buffer.buffer, 0)); | |
7196 | ||
55034cd6 SRRH |
7197 | return 0; |
7198 | } | |
737223fb | 7199 | |
55034cd6 SRRH |
7200 | static int allocate_trace_buffers(struct trace_array *tr, int size) |
7201 | { | |
7202 | int ret; | |
737223fb | 7203 | |
55034cd6 SRRH |
7204 | ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); |
7205 | if (ret) | |
7206 | return ret; | |
737223fb | 7207 | |
55034cd6 SRRH |
7208 | #ifdef CONFIG_TRACER_MAX_TRACE |
7209 | ret = allocate_trace_buffer(tr, &tr->max_buffer, | |
7210 | allocate_snapshot ? size : 1); | |
7211 | if (WARN_ON(ret)) { | |
737223fb | 7212 | ring_buffer_free(tr->trace_buffer.buffer); |
55034cd6 SRRH |
7213 | free_percpu(tr->trace_buffer.data); |
7214 | return -ENOMEM; | |
7215 | } | |
7216 | tr->allocated_snapshot = allocate_snapshot; | |
737223fb | 7217 | |
55034cd6 SRRH |
7218 | /* |
7219 | * Only the top level trace array gets its snapshot allocated | |
7220 | * from the kernel command line. | |
7221 | */ | |
7222 | allocate_snapshot = false; | |
737223fb | 7223 | #endif |
55034cd6 | 7224 | return 0; |
737223fb SRRH |
7225 | } |
7226 | ||
f0b70cc4 SRRH |
7227 | static void free_trace_buffer(struct trace_buffer *buf) |
7228 | { | |
7229 | if (buf->buffer) { | |
7230 | ring_buffer_free(buf->buffer); | |
7231 | buf->buffer = NULL; | |
7232 | free_percpu(buf->data); | |
7233 | buf->data = NULL; | |
7234 | } | |
7235 | } | |
7236 | ||
23aaa3c1 SRRH |
7237 | static void free_trace_buffers(struct trace_array *tr) |
7238 | { | |
7239 | if (!tr) | |
7240 | return; | |
7241 | ||
f0b70cc4 | 7242 | free_trace_buffer(&tr->trace_buffer); |
23aaa3c1 SRRH |
7243 | |
7244 | #ifdef CONFIG_TRACER_MAX_TRACE | |
f0b70cc4 | 7245 | free_trace_buffer(&tr->max_buffer); |
23aaa3c1 SRRH |
7246 | #endif |
7247 | } | |
7248 | ||
9a38a885 SRRH |
7249 | static void init_trace_flags_index(struct trace_array *tr) |
7250 | { | |
7251 | int i; | |
7252 | ||
7253 | /* Used by the trace options files */ | |
7254 | for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) | |
7255 | tr->trace_flags_index[i] = i; | |
7256 | } | |
7257 | ||
37aea98b SRRH |
7258 | static void __update_tracer_options(struct trace_array *tr) |
7259 | { | |
7260 | struct tracer *t; | |
7261 | ||
7262 | for (t = trace_types; t; t = t->next) | |
7263 | add_tracer_options(tr, t); | |
7264 | } | |
7265 | ||
7266 | static void update_tracer_options(struct trace_array *tr) | |
7267 | { | |
7268 | mutex_lock(&trace_types_lock); | |
7269 | __update_tracer_options(tr); | |
7270 | mutex_unlock(&trace_types_lock); | |
7271 | } | |
7272 | ||
eae47358 | 7273 | static int instance_mkdir(const char *name) |
737223fb | 7274 | { |
277ba044 SR |
7275 | struct trace_array *tr; |
7276 | int ret; | |
277ba044 SR |
7277 | |
7278 | mutex_lock(&trace_types_lock); | |
7279 | ||
7280 | ret = -EEXIST; | |
7281 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
7282 | if (tr->name && strcmp(tr->name, name) == 0) | |
7283 | goto out_unlock; | |
7284 | } | |
7285 | ||
7286 | ret = -ENOMEM; | |
7287 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); | |
7288 | if (!tr) | |
7289 | goto out_unlock; | |
7290 | ||
7291 | tr->name = kstrdup(name, GFP_KERNEL); | |
7292 | if (!tr->name) | |
7293 | goto out_free_tr; | |
7294 | ||
ccfe9e42 AL |
7295 | if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) |
7296 | goto out_free_tr; | |
7297 | ||
20550622 | 7298 | tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; |
983f938a | 7299 | |
ccfe9e42 AL |
7300 | cpumask_copy(tr->tracing_cpumask, cpu_all_mask); |
7301 | ||
277ba044 SR |
7302 | raw_spin_lock_init(&tr->start_lock); |
7303 | ||
0b9b12c1 SRRH |
7304 | tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
7305 | ||
277ba044 SR |
7306 | tr->current_trace = &nop_trace; |
7307 | ||
7308 | INIT_LIST_HEAD(&tr->systems); | |
7309 | INIT_LIST_HEAD(&tr->events); | |
7310 | ||
737223fb | 7311 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
277ba044 SR |
7312 | goto out_free_tr; |
7313 | ||
8434dc93 | 7314 | tr->dir = tracefs_create_dir(name, trace_instance_dir); |
277ba044 SR |
7315 | if (!tr->dir) |
7316 | goto out_free_tr; | |
7317 | ||
7318 | ret = event_trace_add_tracer(tr->dir, tr); | |
609e85a7 | 7319 | if (ret) { |
8434dc93 | 7320 | tracefs_remove_recursive(tr->dir); |
277ba044 | 7321 | goto out_free_tr; |
609e85a7 | 7322 | } |
277ba044 | 7323 | |
8434dc93 | 7324 | init_tracer_tracefs(tr, tr->dir); |
9a38a885 | 7325 | init_trace_flags_index(tr); |
37aea98b | 7326 | __update_tracer_options(tr); |
277ba044 SR |
7327 | |
7328 | list_add(&tr->list, &ftrace_trace_arrays); | |
7329 | ||
7330 | mutex_unlock(&trace_types_lock); | |
7331 | ||
7332 | return 0; | |
7333 | ||
7334 | out_free_tr: | |
23aaa3c1 | 7335 | free_trace_buffers(tr); |
ccfe9e42 | 7336 | free_cpumask_var(tr->tracing_cpumask); |
277ba044 SR |
7337 | kfree(tr->name); |
7338 | kfree(tr); | |
7339 | ||
7340 | out_unlock: | |
7341 | mutex_unlock(&trace_types_lock); | |
7342 | ||
7343 | return ret; | |
7344 | ||
7345 | } | |
7346 | ||
eae47358 | 7347 | static int instance_rmdir(const char *name) |
0c8916c3 SR |
7348 | { |
7349 | struct trace_array *tr; | |
7350 | int found = 0; | |
7351 | int ret; | |
37aea98b | 7352 | int i; |
0c8916c3 SR |
7353 | |
7354 | mutex_lock(&trace_types_lock); | |
7355 | ||
7356 | ret = -ENODEV; | |
7357 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
7358 | if (tr->name && strcmp(tr->name, name) == 0) { | |
7359 | found = 1; | |
7360 | break; | |
7361 | } | |
7362 | } | |
7363 | if (!found) | |
7364 | goto out_unlock; | |
7365 | ||
a695cb58 | 7366 | ret = -EBUSY; |
cf6ab6d9 | 7367 | if (tr->ref || (tr->current_trace && tr->current_trace->ref)) |
a695cb58 SRRH |
7368 | goto out_unlock; |
7369 | ||
0c8916c3 SR |
7370 | list_del(&tr->list); |
7371 | ||
20550622 SRRH |
7372 | /* Disable all the flags that were enabled coming in */ |
7373 | for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { | |
7374 | if ((1 << i) & ZEROED_TRACE_FLAGS) | |
7375 | set_tracer_flag(tr, 1 << i, 0); | |
7376 | } | |
7377 | ||
6b450d25 | 7378 | tracing_set_nop(tr); |
0c8916c3 | 7379 | event_trace_del_tracer(tr); |
591dffda | 7380 | ftrace_destroy_function_files(tr); |
681a4a2f | 7381 | tracefs_remove_recursive(tr->dir); |
a9fcaaac | 7382 | free_trace_buffers(tr); |
0c8916c3 | 7383 | |
37aea98b SRRH |
7384 | for (i = 0; i < tr->nr_topts; i++) { |
7385 | kfree(tr->topts[i].topts); | |
7386 | } | |
7387 | kfree(tr->topts); | |
7388 | ||
0c8916c3 SR |
7389 | kfree(tr->name); |
7390 | kfree(tr); | |
7391 | ||
7392 | ret = 0; | |
7393 | ||
7394 | out_unlock: | |
7395 | mutex_unlock(&trace_types_lock); | |
7396 | ||
7397 | return ret; | |
7398 | } | |
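Because instance_mkdir() and instance_rmdir() are wired up as the instances directory callbacks (see create_trace_instances() below), an entire trace_array can be created and torn down from user space with plain mkdir(2) and rmdir(2). A hedged sketch (mount point assumed):

/* --- Editor's sketch, not part of the kernel source --- */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *inst = "/sys/kernel/debug/tracing/instances/demo";

	if (mkdir(inst, 0755) < 0)	/* ends up in instance_mkdir("demo") */
		perror("mkdir");
	/* ... the instance now has its own trace, trace_pipe, events/ ... */
	if (rmdir(inst) < 0)		/* instance_rmdir(); -EBUSY while in use */
		perror("rmdir");
	return 0;
}
/* --- end sketch --- */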
7399 | ||
277ba044 SR |
7400 | static __init void create_trace_instances(struct dentry *d_tracer) |
7401 | { | |
eae47358 SRRH |
7402 | trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, |
7403 | instance_mkdir, | |
7404 | instance_rmdir); | |
277ba044 SR |
7405 | if (WARN_ON(!trace_instance_dir)) |
7406 | return; | |
277ba044 SR |
7407 | } |
7408 | ||
2b6080f2 | 7409 | static void |
8434dc93 | 7410 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
2b6080f2 | 7411 | { |
121aaee7 | 7412 | int cpu; |
2b6080f2 | 7413 | |
607e2ea1 SRRH |
7414 | trace_create_file("available_tracers", 0444, d_tracer, |
7415 | tr, &show_traces_fops); | |
7416 | ||
7417 | trace_create_file("current_tracer", 0644, d_tracer, | |
7418 | tr, &set_tracer_fops); | |
7419 | ||
ccfe9e42 AL |
7420 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
7421 | tr, &tracing_cpumask_fops); | |
7422 | ||
2b6080f2 SR |
7423 | trace_create_file("trace_options", 0644, d_tracer, |
7424 | tr, &tracing_iter_fops); | |
7425 | ||
7426 | trace_create_file("trace", 0644, d_tracer, | |
6484c71c | 7427 | tr, &tracing_fops); |
2b6080f2 SR |
7428 | |
7429 | trace_create_file("trace_pipe", 0444, d_tracer, | |
15544209 | 7430 | tr, &tracing_pipe_fops); |
2b6080f2 SR |
7431 | |
7432 | trace_create_file("buffer_size_kb", 0644, d_tracer, | |
0bc392ee | 7433 | tr, &tracing_entries_fops); |
2b6080f2 SR |
7434 | |
7435 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | |
7436 | tr, &tracing_total_entries_fops); | |
7437 | ||
238ae93d | 7438 | trace_create_file("free_buffer", 0200, d_tracer, |
2b6080f2 SR |
7439 | tr, &tracing_free_buffer_fops); |
7440 | ||
7441 | trace_create_file("trace_marker", 0220, d_tracer, | |
7442 | tr, &tracing_mark_fops); | |
7443 | ||
fa32e855 SR |
7444 | trace_create_file("trace_marker_raw", 0220, d_tracer, |
7445 | tr, &tracing_mark_raw_fops); | |
7446 | ||
2b6080f2 SR |
7447 | trace_create_file("trace_clock", 0644, d_tracer, tr, |
7448 | &trace_clock_fops); | |
7449 | ||
7450 | trace_create_file("tracing_on", 0644, d_tracer, | |
6484c71c | 7451 | tr, &rb_simple_fops); |
ce9bae55 | 7452 | |
16270145 SRRH |
7453 | create_trace_options_dir(tr); |
7454 | ||
f971cc9a | 7455 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
6d9b3fa5 SRRH |
7456 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
7457 | &tr->max_latency, &tracing_max_lat_fops); | |
7458 | #endif | |
7459 | ||
591dffda SRRH |
7460 | if (ftrace_create_function_files(tr, d_tracer)) |
7461 | WARN(1, "Could not allocate function filter files"); | |
7462 | ||
ce9bae55 SRRH |
7463 | #ifdef CONFIG_TRACER_SNAPSHOT |
7464 | trace_create_file("snapshot", 0644, d_tracer, | |
6484c71c | 7465 | tr, &snapshot_fops); |
ce9bae55 | 7466 | #endif |
121aaee7 SRRH |
7467 | |
7468 | for_each_tracing_cpu(cpu) | |
8434dc93 | 7469 | tracing_init_tracefs_percpu(tr, cpu); |
121aaee7 | 7470 | |
345ddcc8 | 7471 | ftrace_init_tracefs(tr, d_tracer); |
2b6080f2 SR |
7472 | } |
7473 | ||
f76180bc SRRH |
7474 | static struct vfsmount *trace_automount(void *ignore)
7475 | { | |
7476 | struct vfsmount *mnt; | |
7477 | struct file_system_type *type; | |
7478 | ||
7479 | /* | |
7480 | * To maintain backward compatibility for tools that mount | |
7481 | * debugfs to get to the tracing facility, tracefs is automatically | |
7482 | * mounted to the debugfs/tracing directory. | |
7483 | */ | |
7484 | type = get_fs_type("tracefs"); | |
7485 | if (!type) | |
7486 | return NULL; | |
7487 | mnt = vfs_kern_mount(type, 0, "tracefs", NULL); | |
7488 | put_filesystem(type); | |
7489 | if (IS_ERR(mnt)) | |
7490 | return NULL; | |
7491 | mntget(mnt); | |
7492 | ||
7493 | return mnt; | |
7494 | } | |
7495 | ||
7eeafbca SRRH |
7496 | /** |
7497 | * tracing_init_dentry - initialize top level trace array | |
7498 | * | |
7499 | * This is called when creating files or directories in the tracing | |
7500 | * directory. It is called via fs_initcall() by the boot-up code
7501 | * and is expected to return the dentry of the top level tracing directory.
7502 | */ | |
7503 | struct dentry *tracing_init_dentry(void) | |
7504 | { | |
7505 | struct trace_array *tr = &global_trace; | |
7506 | ||
f76180bc | 7507 | /* The top level trace array uses NULL as parent */ |
7eeafbca | 7508 | if (tr->dir) |
f76180bc | 7509 | return NULL; |
7eeafbca | 7510 | |
8b129199 JW |
7511 | if (WARN_ON(!tracefs_initialized()) || |
7512 | (IS_ENABLED(CONFIG_DEBUG_FS) && | |
7513 | WARN_ON(!debugfs_initialized()))) | |
7eeafbca SRRH |
7514 | return ERR_PTR(-ENODEV); |
7515 | ||
f76180bc SRRH |
7516 | /* |
7517 | * As there may still be users that expect the tracing | |
7518 | * files to exist in debugfs/tracing, we must automount | |
7519 | * the tracefs file system there, so older tools still | |
7520 | * work with the newer kernel.
7521 | */ | |
7522 | tr->dir = debugfs_create_automount("tracing", NULL, | |
7523 | trace_automount, NULL); | |
7eeafbca SRRH |
7524 | if (!tr->dir) { |
7525 | pr_warn_once("Could not create debugfs directory 'tracing'\n"); | |
7526 | return ERR_PTR(-ENOMEM); | |
7527 | } | |
7528 | ||
8434dc93 | 7529 | return NULL; |
7eeafbca SRRH |
7530 | } |
7531 | ||
0c564a53 SRRH |
7532 | extern struct trace_enum_map *__start_ftrace_enum_maps[]; |
7533 | extern struct trace_enum_map *__stop_ftrace_enum_maps[]; | |
7534 | ||
7535 | static void __init trace_enum_init(void) | |
7536 | { | |
3673b8e4 SRRH |
7537 | int len; |
7538 | ||
7539 | len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps; | |
9828413d | 7540 | trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len); |
3673b8e4 SRRH |
7541 | } |
7542 | ||
7543 | #ifdef CONFIG_MODULES | |
7544 | static void trace_module_add_enums(struct module *mod) | |
7545 | { | |
7546 | if (!mod->num_trace_enums) | |
7547 | return; | |
7548 | ||
7549 | /* | |
7550 | * Modules with bad taint do not have events created, so do
7551 | * not bother with enums either.
7552 | */ | |
7553 | if (trace_module_has_bad_taint(mod)) | |
7554 | return; | |
7555 | ||
9828413d | 7556 | trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums); |
3673b8e4 SRRH |
7557 | } |
7558 | ||
9828413d SRRH |
7559 | #ifdef CONFIG_TRACE_ENUM_MAP_FILE |
7560 | static void trace_module_remove_enums(struct module *mod) | |
7561 | { | |
7562 | union trace_enum_map_item *map; | |
7563 | union trace_enum_map_item **last = &trace_enum_maps; | |
7564 | ||
7565 | if (!mod->num_trace_enums) | |
7566 | return; | |
7567 | ||
7568 | mutex_lock(&trace_enum_mutex); | |
7569 | ||
7570 | map = trace_enum_maps; | |
7571 | ||
7572 | while (map) { | |
7573 | if (map->head.mod == mod) | |
7574 | break; | |
7575 | map = trace_enum_jmp_to_tail(map); | |
7576 | last = &map->tail.next; | |
7577 | map = map->tail.next; | |
7578 | } | |
7579 | if (!map) | |
7580 | goto out; | |
7581 | ||
7582 | *last = trace_enum_jmp_to_tail(map)->tail.next; | |
7583 | kfree(map); | |
7584 | out: | |
7585 | mutex_unlock(&trace_enum_mutex); | |
7586 | } | |
7587 | #else | |
7588 | static inline void trace_module_remove_enums(struct module *mod) { } | |
7589 | #endif /* CONFIG_TRACE_ENUM_MAP_FILE */ | |
7590 | ||
3673b8e4 SRRH |
7591 | static int trace_module_notify(struct notifier_block *self, |
7592 | unsigned long val, void *data) | |
7593 | { | |
7594 | struct module *mod = data; | |
7595 | ||
7596 | switch (val) { | |
7597 | case MODULE_STATE_COMING: | |
7598 | trace_module_add_enums(mod); | |
7599 | break; | |
9828413d SRRH |
7600 | case MODULE_STATE_GOING: |
7601 | trace_module_remove_enums(mod); | |
7602 | break; | |
3673b8e4 SRRH |
7603 | } |
7604 | ||
7605 | return 0; | |
0c564a53 SRRH |
7606 | } |
7607 | ||
3673b8e4 SRRH |
7608 | static struct notifier_block trace_module_nb = { |
7609 | .notifier_call = trace_module_notify, | |
7610 | .priority = 0, | |
7611 | }; | |
9828413d | 7612 | #endif /* CONFIG_MODULES */ |
3673b8e4 | 7613 | |
8434dc93 | 7614 | static __init int tracer_init_tracefs(void) |
bc0c38d1 SR |
7615 | { |
7616 | struct dentry *d_tracer; | |
bc0c38d1 | 7617 | |
7e53bd42 LJ |
7618 | trace_access_lock_init(); |
7619 | ||
bc0c38d1 | 7620 | d_tracer = tracing_init_dentry(); |
14a5ae40 | 7621 | if (IS_ERR(d_tracer)) |
ed6f1c99 | 7622 | return 0; |
bc0c38d1 | 7623 | |
8434dc93 | 7624 | init_tracer_tracefs(&global_trace, d_tracer); |
501c2375 | 7625 | ftrace_init_tracefs_toplevel(&global_trace, d_tracer); |
bc0c38d1 | 7626 | |
5452af66 | 7627 | trace_create_file("tracing_thresh", 0644, d_tracer, |
6508fa76 | 7628 | &global_trace, &tracing_thresh_fops); |
a8259075 | 7629 | |
339ae5d3 | 7630 | trace_create_file("README", 0444, d_tracer, |
5452af66 FW |
7631 | NULL, &tracing_readme_fops); |
7632 | ||
69abe6a5 AP |
7633 | trace_create_file("saved_cmdlines", 0444, d_tracer, |
7634 | NULL, &tracing_saved_cmdlines_fops); | |
5bf9a1ee | 7635 | |
939c7a4f YY |
7636 | trace_create_file("saved_cmdlines_size", 0644, d_tracer, |
7637 | NULL, &tracing_saved_cmdlines_size_fops); | |
7638 | ||
0c564a53 SRRH |
7639 | trace_enum_init(); |
7640 | ||
9828413d SRRH |
7641 | trace_create_enum_file(d_tracer); |
7642 | ||
3673b8e4 SRRH |
7643 | #ifdef CONFIG_MODULES |
7644 | register_module_notifier(&trace_module_nb); | |
7645 | #endif | |
7646 | ||
bc0c38d1 | 7647 | #ifdef CONFIG_DYNAMIC_FTRACE |
5452af66 FW |
7648 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
7649 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | |
bc0c38d1 | 7650 | #endif |
b04cc6b1 | 7651 | |
277ba044 | 7652 | create_trace_instances(d_tracer); |
5452af66 | 7653 | |
37aea98b | 7654 | update_tracer_options(&global_trace); |
09d23a1d | 7655 | |
b5ad384e | 7656 | return 0; |
bc0c38d1 SR |
7657 | } |
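/*
 * Hedged note (not from this file): once this fs_initcall has run, the
 * files created above are visible under the tracefs mount, e.g.
 * /sys/kernel/tracing/README and /sys/kernel/tracing/saved_cmdlines.
 */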
7658 | ||
3f5a54e3 SR |
7659 | static int trace_panic_handler(struct notifier_block *this, |
7660 | unsigned long event, void *unused) | |
7661 | { | |
944ac425 | 7662 | if (ftrace_dump_on_oops) |
cecbca96 | 7663 | ftrace_dump(ftrace_dump_on_oops); |
3f5a54e3 SR |
7664 | return NOTIFY_OK; |
7665 | } | |
7666 | ||
7667 | static struct notifier_block trace_panic_notifier = { | |
7668 | .notifier_call = trace_panic_handler, | |
7669 | .next = NULL, | |
7670 | .priority = 150 /* priority: INT_MAX >= x >= 0 */ | |
7671 | }; | |
7672 | ||
7673 | static int trace_die_handler(struct notifier_block *self, | |
7674 | unsigned long val, | |
7675 | void *data) | |
7676 | { | |
7677 | switch (val) { | |
7678 | case DIE_OOPS: | |
944ac425 | 7679 | if (ftrace_dump_on_oops) |
cecbca96 | 7680 | ftrace_dump(ftrace_dump_on_oops); |
3f5a54e3 SR |
7681 | break; |
7682 | default: | |
7683 | break; | |
7684 | } | |
7685 | return NOTIFY_OK; | |
7686 | } | |
7687 | ||
7688 | static struct notifier_block trace_die_notifier = { | |
7689 | .notifier_call = trace_die_handler, | |
7690 | .priority = 200 | |
7691 | }; | |
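/*
 * Hedged usage note (not from this file): both notifiers above only
 * act when ftrace_dump_on_oops is set, e.g. via the boot parameter
 * "ftrace_dump_on_oops" ("ftrace_dump_on_oops=orig_cpu" dumps only
 * the oopsing CPU) or at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */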
7692 | ||
7693 | /* | |
7694 | * printk is set to a max of 1024; we really don't need it that big. | |
7695 | * Nothing should be printing 1000 characters anyway. | |
7696 | */ | |
7697 | #define TRACE_MAX_PRINT 1000 | |
7698 | ||
7699 | /* | |
7700 | * Define KERN_TRACE here so that we have one place to modify | |
7701 | * it if we decide to change what log level the ftrace dump | |
7702 | * should be at. | |
7703 | */ | |
428aee14 | 7704 | #define KERN_TRACE KERN_EMERG |
3f5a54e3 | 7705 | |
955b61e5 | 7706 | void |
3f5a54e3 SR |
7707 | trace_printk_seq(struct trace_seq *s) |
7708 | { | |
7709 | /* Probably should print a warning here. */ | |
3a161d99 SRRH |
7710 | if (s->seq.len >= TRACE_MAX_PRINT) |
7711 | s->seq.len = TRACE_MAX_PRINT; | |
3f5a54e3 | 7712 | |
820b75f6 SRRH |
7713 | /* |
7714 | * More paranoid code. Although the buffer size is set to | |
7715 | * PAGE_SIZE and TRACE_MAX_PRINT is 1000, this is just | |
7716 | * an extra layer of protection. | |
7717 | */ | |
7718 | if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) | |
7719 | s->seq.len = s->seq.size - 1; | |
3f5a54e3 SR |
7720 | |
7721 | /* should be zero-terminated, but we are paranoid. */ | |
3a161d99 | 7722 | s->buffer[s->seq.len] = 0; |
3f5a54e3 SR |
7723 | |
7724 | printk(KERN_TRACE "%s", s->buffer); | |
7725 | ||
f9520750 | 7726 | trace_seq_init(s); |
3f5a54e3 SR |
7727 | } |
7728 | ||
955b61e5 JW |
7729 | void trace_init_global_iter(struct trace_iterator *iter) |
7730 | { | |
7731 | iter->tr = &global_trace; | |
2b6080f2 | 7732 | iter->trace = iter->tr->current_trace; |
ae3b5093 | 7733 | iter->cpu_file = RING_BUFFER_ALL_CPUS; |
12883efb | 7734 | iter->trace_buffer = &global_trace.trace_buffer; |
b2f974d6 CS |
7735 | |
7736 | if (iter->trace && iter->trace->open) | |
7737 | iter->trace->open(iter); | |
7738 | ||
7739 | /* Annotate start of buffers if we had overruns */ | |
7740 | if (ring_buffer_overruns(iter->trace_buffer->buffer)) | |
7741 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | |
7742 | ||
7743 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ | |
7744 | if (trace_clocks[iter->tr->clock_id].in_ns) | |
7745 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | |
955b61e5 JW |
7746 | } |
7747 | ||
7fe70b57 | 7748 | void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) |
3f5a54e3 | 7749 | { |
3f5a54e3 SR |
7750 | /* use static because iter can be a bit big for the stack */ |
7751 | static struct trace_iterator iter; | |
7fe70b57 | 7752 | static atomic_t dump_running; |
983f938a | 7753 | struct trace_array *tr = &global_trace; |
cf586b61 | 7754 | unsigned int old_userobj; |
d769041f SR |
7755 | unsigned long flags; |
7756 | int cnt = 0, cpu; | |
3f5a54e3 | 7757 | |
7fe70b57 SRRH |
7758 | /* Only allow one dump user at a time. */ |
7759 | if (atomic_inc_return(&dump_running) != 1) { | |
7760 | atomic_dec(&dump_running); | |
7761 | return; | |
7762 | } | |
3f5a54e3 | 7763 | |
7fe70b57 SRRH |
7764 | /* |
7765 | * Always turn off tracing when we dump. | |
7766 | * We don't need to show trace output of what happens | |
7767 | * between multiple crashes. | |
7768 | * | |
7769 | * If the user does a sysrq-z, then they can re-enable | |
7770 | * tracing with echo 1 > tracing_on. | |
7771 | */ | |
0ee6b6cf | 7772 | tracing_off(); |
cf586b61 | 7773 | |
7fe70b57 | 7774 | local_irq_save(flags); |
3f5a54e3 | 7775 | |
38dbe0b1 | 7776 | /* Simulate the iterator */ |
955b61e5 JW |
7777 | trace_init_global_iter(&iter); |
7778 | ||
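	/* Keep new entries from being recorded while we read the buffers. */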
d769041f | 7779 | for_each_tracing_cpu(cpu) { |
5e2d5ef8 | 7780 | atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
d769041f SR |
7781 | } |
7782 | ||
983f938a | 7783 | old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; |
cf586b61 | 7784 | |
b54d3de9 | 7785 | /* don't look at user memory in panic mode */ |
983f938a | 7786 | tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
b54d3de9 | 7787 | |
cecbca96 FW |
7788 | switch (oops_dump_mode) { |
7789 | case DUMP_ALL: | |
ae3b5093 | 7790 | iter.cpu_file = RING_BUFFER_ALL_CPUS; |
cecbca96 FW |
7791 | break; |
7792 | case DUMP_ORIG: | |
7793 | iter.cpu_file = raw_smp_processor_id(); | |
7794 | break; | |
7795 | case DUMP_NONE: | |
7796 | goto out_enable; | |
7797 | default: | |
7798 | printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); | |
ae3b5093 | 7799 | iter.cpu_file = RING_BUFFER_ALL_CPUS; |
cecbca96 FW |
7800 | } |
7801 | ||
7802 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | |
3f5a54e3 | 7803 | |
7fe70b57 SRRH |
7804 | /* Did function tracer already get disabled? */ |
7805 | if (ftrace_is_dead()) { | |
7806 | printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | |
7807 | printk("# MAY BE MISSING FUNCTION EVENTS\n"); | |
7808 | } | |
7809 | ||
3f5a54e3 SR |
7810 | /* |
7811 | * We need to stop all tracing on all CPUs to read | |
7812 | * the next buffer. This is a bit expensive, but it is | |
7813 | * not done often. We fill all we can read, | |
7814 | * and then release the locks again. | |
7815 | */ | |
7816 | ||
3f5a54e3 SR |
7817 | while (!trace_empty(&iter)) { |
7818 | ||
7819 | if (!cnt) | |
7820 | printk(KERN_TRACE "---------------------------------\n"); | |
7821 | ||
7822 | cnt++; | |
7823 | ||
7824 | /* reset all but tr, trace, and overruns */ | |
7825 | memset(&iter.seq, 0, | |
7826 | sizeof(struct trace_iterator) - | |
7827 | offsetof(struct trace_iterator, seq)); | |
7828 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | |
7829 | iter.pos = -1; | |
7830 | ||
955b61e5 | 7831 | if (trace_find_next_entry_inc(&iter) != NULL) { |
74e7ff8c LJ |
7832 | int ret; |
7833 | ||
7834 | ret = print_trace_line(&iter); | |
7835 | if (ret != TRACE_TYPE_NO_CONSUME) | |
7836 | trace_consume(&iter); | |
3f5a54e3 | 7837 | } |
b892e5c8 | 7838 | touch_nmi_watchdog(); |
3f5a54e3 SR |
7839 | |
7840 | trace_printk_seq(&iter.seq); | |
7841 | } | |
7842 | ||
7843 | if (!cnt) | |
7844 | printk(KERN_TRACE " (ftrace buffer empty)\n"); | |
7845 | else | |
7846 | printk(KERN_TRACE "---------------------------------\n"); | |
7847 | ||
cecbca96 | 7848 | out_enable: |
983f938a | 7849 | tr->trace_flags |= old_userobj; |
cf586b61 | 7850 | |
7fe70b57 SRRH |
7851 | for_each_tracing_cpu(cpu) { |
7852 | atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); | |
cf586b61 | 7853 | } |
7fe70b57 | 7854 | atomic_dec(&dump_running); |
cd891ae0 | 7855 | local_irq_restore(flags); |
3f5a54e3 | 7856 | } |
a8eecf22 | 7857 | EXPORT_SYMBOL_GPL(ftrace_dump); |
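/*
 * Hedged usage sketch (for illustration only): since ftrace_dump() is
 * exported, a module can dump the trace buffers when it detects a
 * fatal condition ("fatal_condition" below is hypothetical):
 */
#if 0
	if (WARN_ON(fatal_condition))
		ftrace_dump(DUMP_ALL);	/* or DUMP_ORIG for this CPU only */
#endif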
cf586b61 | 7858 | |
3928a8a2 | 7859 | __init static int tracer_alloc_buffers(void) |
bc0c38d1 | 7860 | { |
73c5162a | 7861 | int ring_buf_size; |
9e01c1b7 | 7862 | int ret = -ENOMEM; |
4c11d7ae | 7863 | |
b5e87c05 SRRH |
7864 | /* |
7865 | * Make sure we don't accidentally add more trace options | |
7866 | * than we have bits for. | |
7867 | */ | |
9a38a885 | 7868 | BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); |
b5e87c05 | 7869 | |
9e01c1b7 RR |
7870 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) |
7871 | goto out; | |
7872 | ||
ccfe9e42 | 7873 | if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) |
9e01c1b7 | 7874 | goto out_free_buffer_mask; |
4c11d7ae | 7875 | |
07d777fe SR |
7876 | /* Only allocate trace_printk buffers if a trace_printk exists */ |
7877 | if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) | |
81698831 | 7878 | /* Must be called before global_trace.buffer is allocated */ |
07d777fe SR |
7879 | trace_printk_init_buffers(); |
7880 | ||
73c5162a SR |
7881 | /* To save memory, keep the ring buffer size to its minimum */ |
7882 | if (ring_buffer_expanded) | |
7883 | ring_buf_size = trace_buf_size; | |
7884 | else | |
7885 | ring_buf_size = 1; | |
7886 | ||
9e01c1b7 | 7887 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
ccfe9e42 | 7888 | cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); |
9e01c1b7 | 7889 | |
2b6080f2 SR |
7890 | raw_spin_lock_init(&global_trace.start_lock); |
7891 | ||
2c4a33ab SRRH |
7892 | /* Used for event triggers */ |
7893 | temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); | |
7894 | if (!temp_buffer) | |
7895 | goto out_free_cpumask; | |
7896 | ||
939c7a4f YY |
7897 | if (trace_create_savedcmd() < 0) |
7898 | goto out_free_temp_buffer; | |
7899 | ||
9e01c1b7 | 7900 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
737223fb | 7901 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
3928a8a2 SR |
7902 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
7903 | WARN_ON(1); | |
939c7a4f | 7904 | goto out_free_savedcmd; |
4c11d7ae | 7905 | } |
a7603ff4 | 7906 | |
499e5470 SR |
7907 | if (global_trace.buffer_disabled) |
7908 | tracing_off(); | |
4c11d7ae | 7909 | |
e1e232ca SR |
7910 | if (trace_boot_clock) { |
7911 | ret = tracing_set_clock(&global_trace, trace_boot_clock); | |
7912 | if (ret < 0) | |
a395d6a7 JP |
7913 | pr_warn("Trace clock %s not defined, going back to default\n", |
7914 | trace_boot_clock); | |
e1e232ca SR |
7915 | } |
7916 | ||
ca164318 SRRH |
7917 | /* |
7918 | * register_tracer() might reference current_trace, so it | |
7919 | * needs to be set before we register anything. This is | |
7920 | * just a bootstrap of current_trace anyway. | |
7921 | */ | |
2b6080f2 SR |
7922 | global_trace.current_trace = &nop_trace; |
7923 | ||
0b9b12c1 SRRH |
7924 | global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
7925 | ||
4104d326 SRRH |
7926 | ftrace_init_global_array_ops(&global_trace); |
7927 | ||
9a38a885 SRRH |
7928 | init_trace_flags_index(&global_trace); |
7929 | ||
ca164318 SRRH |
7930 | register_tracer(&nop_trace); |
7931 | ||
60a11774 SR |
7932 | /* All seems OK, enable tracing */ |
7933 | tracing_disabled = 0; | |
3928a8a2 | 7934 | |
3f5a54e3 SR |
7935 | atomic_notifier_chain_register(&panic_notifier_list, |
7936 | &trace_panic_notifier); | |
7937 | ||
7938 | register_die_notifier(&trace_die_notifier); | |
2fc1dfbe | 7939 | |
ae63b31e SR |
7940 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; |
7941 | ||
7942 | INIT_LIST_HEAD(&global_trace.systems); | |
7943 | INIT_LIST_HEAD(&global_trace.events); | |
7944 | list_add(&global_trace.list, &ftrace_trace_arrays); | |
7945 | ||
a4d1e688 | 7946 | apply_trace_boot_options(); |
7bcfaf54 | 7947 | |
77fd5c15 SRRH |
7948 | register_snapshot_cmd(); |
7949 | ||
2fc1dfbe | 7950 | return 0; |
3f5a54e3 | 7951 | |
939c7a4f YY |
7952 | out_free_savedcmd: |
7953 | free_saved_cmdlines_buffer(savedcmd); | |
2c4a33ab SRRH |
7954 | out_free_temp_buffer: |
7955 | ring_buffer_free(temp_buffer); | |
9e01c1b7 | 7956 | out_free_cpumask: |
ccfe9e42 | 7957 | free_cpumask_var(global_trace.tracing_cpumask); |
9e01c1b7 RR |
7958 | out_free_buffer_mask: |
7959 | free_cpumask_var(tracing_buffer_mask); | |
7960 | out: | |
7961 | return ret; | |
bc0c38d1 | 7962 | } |
b2821ae6 | 7963 | |
5f893b26 SRRH |
7964 | void __init trace_init(void) |
7965 | { | |
0daa2302 SRRH |
7966 | if (tracepoint_printk) { |
7967 | tracepoint_print_iter = | |
7968 | kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); | |
7969 | if (WARN_ON(!tracepoint_print_iter)) | |
7970 | tracepoint_printk = 0; | |
7971 | } | |
5f893b26 | 7972 | tracer_alloc_buffers(); |
0c564a53 | 7973 | trace_event_init(); |
5f893b26 SRRH |
7974 | } |
7975 | ||
b2821ae6 SR |
7976 | __init static int clear_boot_tracer(void) |
7977 | { | |
7978 | /* | |
7979 | * The default bootup tracer string points into an init section. | |
7980 | * This function is called at late_initcall time. If the boot | |
7981 | * tracer was never registered, clear the pointer to prevent | |
7982 | * a later registration from accessing the buffer that is | |
7983 | * about to be freed. | |
7984 | */ | |
7985 | if (!default_bootup_tracer) | |
7986 | return 0; | |
7987 | ||
7988 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | |
7989 | default_bootup_tracer); | |
7990 | default_bootup_tracer = NULL; | |
7991 | ||
7992 | return 0; | |
7993 | } | |
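/*
 * Hedged note (not from this file): default_bootup_tracer is set from
 * the "ftrace=<tracer>" boot parameter (e.g. "ftrace=function"); that
 * string lives in init memory, hence the cleanup above.
 */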
7994 | ||
8434dc93 | 7995 | fs_initcall(tracer_init_tracefs); |
b2821ae6 | 7996 | late_initcall(clear_boot_tracer); |