]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - kernel/trace/trace.c
tracing: Add trace_export support for event trace
[mirror_ubuntu-jammy-kernel.git] / kernel / trace / trace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
44 #include <linux/fs.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
51
52 #include "trace.h"
53 #include "trace_output.h"
54
55 /*
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
58 */
59 bool ring_buffer_expanded;
60
61 /*
62 * We need to change this state when a selftest is running.
63 * A selftest will lurk into the ring-buffer to count the
64 * entries inserted during the selftest although some concurrent
65 * insertions into the ring-buffer such as trace_printk could occurred
66 * at the same time, giving false positive or negative results.
67 */
68 static bool __read_mostly tracing_selftest_running;
69
70 /*
71 * If a tracer is running, we do not want to run SELFTEST.
72 */
73 bool __read_mostly tracing_selftest_disabled;
74
75 /* Pipe tracepoints to printk */
76 struct trace_iterator *tracepoint_print_iter;
77 int tracepoint_printk;
78 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
79
80 /* For tracers that don't implement custom flags */
81 static struct tracer_opt dummy_tracer_opt[] = {
82 { }
83 };
84
85 static int
86 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
87 {
88 return 0;
89 }
90
91 /*
92 * To prevent the comm cache from being overwritten when no
93 * tracing is active, only save the comm when a trace event
94 * occurred.
95 */
96 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
97
98 /*
99 * Kill all tracing for good (never come back).
100 * It is initialized to 1 but will turn to zero if the initialization
101 * of the tracer is successful. But that is the only place that sets
102 * this back to zero.
103 */
104 static int tracing_disabled = 1;
105
106 cpumask_var_t __read_mostly tracing_buffer_mask;
107
108 /*
109 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
110 *
111 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112 * is set, then ftrace_dump is called. This will output the contents
113 * of the ftrace buffers to the console. This is very useful for
114 * capturing traces that lead to crashes and outputing it to a
115 * serial console.
116 *
117 * It is default off, but you can enable it with either specifying
118 * "ftrace_dump_on_oops" in the kernel command line, or setting
119 * /proc/sys/kernel/ftrace_dump_on_oops
120 * Set 1 if you want to dump buffers of all CPUs
121 * Set 2 if you want to dump the buffer of the CPU that triggered oops
122 */
123
124 enum ftrace_dump_mode ftrace_dump_on_oops;
125
126 /* When set, tracing will stop when a WARN*() is hit */
127 int __disable_trace_on_warning;
128
129 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
130 /* Map of enums to their values, for "eval_map" file */
131 struct trace_eval_map_head {
132 struct module *mod;
133 unsigned long length;
134 };
135
136 union trace_eval_map_item;
137
138 struct trace_eval_map_tail {
139 /*
140 * "end" is first and points to NULL as it must be different
141 * than "mod" or "eval_string"
142 */
143 union trace_eval_map_item *next;
144 const char *end; /* points to NULL */
145 };
146
147 static DEFINE_MUTEX(trace_eval_mutex);
148
149 /*
150 * The trace_eval_maps are saved in an array with two extra elements,
151 * one at the beginning, and one at the end. The beginning item contains
152 * the count of the saved maps (head.length), and the module they
153 * belong to if not built in (head.mod). The ending item contains a
154 * pointer to the next array of saved eval_map items.
155 */
156 union trace_eval_map_item {
157 struct trace_eval_map map;
158 struct trace_eval_map_head head;
159 struct trace_eval_map_tail tail;
160 };
161
162 static union trace_eval_map_item *trace_eval_maps;
163 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
164
165 int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 static void ftrace_trace_userstack(struct trace_buffer *buffer,
167 unsigned long flags, int pc);
168
169 #define MAX_TRACER_SIZE 100
170 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
171 static char *default_bootup_tracer;
172
173 static bool allocate_snapshot;
174
175 static int __init set_cmdline_ftrace(char *str)
176 {
177 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
178 default_bootup_tracer = bootup_tracer_buf;
179 /* We are using ftrace early, expand it */
180 ring_buffer_expanded = true;
181 return 1;
182 }
183 __setup("ftrace=", set_cmdline_ftrace);
184
185 static int __init set_ftrace_dump_on_oops(char *str)
186 {
187 if (*str++ != '=' || !*str) {
188 ftrace_dump_on_oops = DUMP_ALL;
189 return 1;
190 }
191
192 if (!strcmp("orig_cpu", str)) {
193 ftrace_dump_on_oops = DUMP_ORIG;
194 return 1;
195 }
196
197 return 0;
198 }
199 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
200
201 static int __init stop_trace_on_warning(char *str)
202 {
203 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
204 __disable_trace_on_warning = 1;
205 return 1;
206 }
207 __setup("traceoff_on_warning", stop_trace_on_warning);
208
209 static int __init boot_alloc_snapshot(char *str)
210 {
211 allocate_snapshot = true;
212 /* We also need the main ring buffer expanded */
213 ring_buffer_expanded = true;
214 return 1;
215 }
216 __setup("alloc_snapshot", boot_alloc_snapshot);
217
218
219 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
220
221 static int __init set_trace_boot_options(char *str)
222 {
223 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
224 return 0;
225 }
226 __setup("trace_options=", set_trace_boot_options);
227
228 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
229 static char *trace_boot_clock __initdata;
230
231 static int __init set_trace_boot_clock(char *str)
232 {
233 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
234 trace_boot_clock = trace_boot_clock_buf;
235 return 0;
236 }
237 __setup("trace_clock=", set_trace_boot_clock);
238
239 static int __init set_tracepoint_printk(char *str)
240 {
241 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
242 tracepoint_printk = 1;
243 return 1;
244 }
245 __setup("tp_printk", set_tracepoint_printk);
246
247 unsigned long long ns2usecs(u64 nsec)
248 {
249 nsec += 500;
250 do_div(nsec, 1000);
251 return nsec;
252 }
253
254 static void
255 trace_process_export(struct trace_export *export,
256 struct ring_buffer_event *event, int flag)
257 {
258 struct trace_entry *entry;
259 unsigned int size = 0;
260
261 if (export->flags & flag) {
262 entry = ring_buffer_event_data(event);
263 size = ring_buffer_event_length(event);
264 export->write(export, entry, size);
265 }
266 }
267
268 static DEFINE_MUTEX(ftrace_export_lock);
269
270 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
271
272 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
273 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
274
275 static inline void ftrace_exports_enable(struct trace_export *export)
276 {
277 if (export->flags & TRACE_EXPORT_FUNCTION)
278 static_branch_inc(&trace_function_exports_enabled);
279
280 if (export->flags & TRACE_EXPORT_EVENT)
281 static_branch_inc(&trace_event_exports_enabled);
282 }
283
284 static inline void ftrace_exports_disable(struct trace_export *export)
285 {
286 if (export->flags & TRACE_EXPORT_FUNCTION)
287 static_branch_dec(&trace_function_exports_enabled);
288
289 if (export->flags & TRACE_EXPORT_EVENT)
290 static_branch_dec(&trace_event_exports_enabled);
291 }
292
293 static void ftrace_exports(struct ring_buffer_event *event, int flag)
294 {
295 struct trace_export *export;
296
297 preempt_disable_notrace();
298
299 export = rcu_dereference_raw_check(ftrace_exports_list);
300 while (export) {
301 trace_process_export(export, event, flag);
302 export = rcu_dereference_raw_check(export->next);
303 }
304
305 preempt_enable_notrace();
306 }
307
308 static inline void
309 add_trace_export(struct trace_export **list, struct trace_export *export)
310 {
311 rcu_assign_pointer(export->next, *list);
312 /*
313 * We are entering export into the list but another
314 * CPU might be walking that list. We need to make sure
315 * the export->next pointer is valid before another CPU sees
316 * the export pointer included into the list.
317 */
318 rcu_assign_pointer(*list, export);
319 }
320
321 static inline int
322 rm_trace_export(struct trace_export **list, struct trace_export *export)
323 {
324 struct trace_export **p;
325
326 for (p = list; *p != NULL; p = &(*p)->next)
327 if (*p == export)
328 break;
329
330 if (*p != export)
331 return -1;
332
333 rcu_assign_pointer(*p, (*p)->next);
334
335 return 0;
336 }
337
338 static inline void
339 add_ftrace_export(struct trace_export **list, struct trace_export *export)
340 {
341 ftrace_exports_enable(export);
342
343 add_trace_export(list, export);
344 }
345
346 static inline int
347 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
348 {
349 int ret;
350
351 ret = rm_trace_export(list, export);
352 ftrace_exports_disable(export);
353
354 return ret;
355 }
356
357 int register_ftrace_export(struct trace_export *export)
358 {
359 if (WARN_ON_ONCE(!export->write))
360 return -1;
361
362 mutex_lock(&ftrace_export_lock);
363
364 add_ftrace_export(&ftrace_exports_list, export);
365
366 mutex_unlock(&ftrace_export_lock);
367
368 return 0;
369 }
370 EXPORT_SYMBOL_GPL(register_ftrace_export);
371
372 int unregister_ftrace_export(struct trace_export *export)
373 {
374 int ret;
375
376 mutex_lock(&ftrace_export_lock);
377
378 ret = rm_ftrace_export(&ftrace_exports_list, export);
379
380 mutex_unlock(&ftrace_export_lock);
381
382 return ret;
383 }
384 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
385
386 /* trace_flags holds trace_options default values */
387 #define TRACE_DEFAULT_FLAGS \
388 (FUNCTION_DEFAULT_FLAGS | \
389 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
390 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
391 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
392 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
393
394 /* trace_options that are only supported by global_trace */
395 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
396 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
397
398 /* trace_flags that are default zero for instances */
399 #define ZEROED_TRACE_FLAGS \
400 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
401
402 /*
403 * The global_trace is the descriptor that holds the top-level tracing
404 * buffers for the live tracing.
405 */
406 static struct trace_array global_trace = {
407 .trace_flags = TRACE_DEFAULT_FLAGS,
408 };
409
410 LIST_HEAD(ftrace_trace_arrays);
411
412 int trace_array_get(struct trace_array *this_tr)
413 {
414 struct trace_array *tr;
415 int ret = -ENODEV;
416
417 mutex_lock(&trace_types_lock);
418 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
419 if (tr == this_tr) {
420 tr->ref++;
421 ret = 0;
422 break;
423 }
424 }
425 mutex_unlock(&trace_types_lock);
426
427 return ret;
428 }
429
430 static void __trace_array_put(struct trace_array *this_tr)
431 {
432 WARN_ON(!this_tr->ref);
433 this_tr->ref--;
434 }
435
436 /**
437 * trace_array_put - Decrement the reference counter for this trace array.
438 *
439 * NOTE: Use this when we no longer need the trace array returned by
440 * trace_array_get_by_name(). This ensures the trace array can be later
441 * destroyed.
442 *
443 */
444 void trace_array_put(struct trace_array *this_tr)
445 {
446 if (!this_tr)
447 return;
448
449 mutex_lock(&trace_types_lock);
450 __trace_array_put(this_tr);
451 mutex_unlock(&trace_types_lock);
452 }
453 EXPORT_SYMBOL_GPL(trace_array_put);
454
455 int tracing_check_open_get_tr(struct trace_array *tr)
456 {
457 int ret;
458
459 ret = security_locked_down(LOCKDOWN_TRACEFS);
460 if (ret)
461 return ret;
462
463 if (tracing_disabled)
464 return -ENODEV;
465
466 if (tr && trace_array_get(tr) < 0)
467 return -ENODEV;
468
469 return 0;
470 }
471
472 int call_filter_check_discard(struct trace_event_call *call, void *rec,
473 struct trace_buffer *buffer,
474 struct ring_buffer_event *event)
475 {
476 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
477 !filter_match_preds(call->filter, rec)) {
478 __trace_event_discard_commit(buffer, event);
479 return 1;
480 }
481
482 return 0;
483 }
484
485 void trace_free_pid_list(struct trace_pid_list *pid_list)
486 {
487 vfree(pid_list->pids);
488 kfree(pid_list);
489 }
490
491 /**
492 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
493 * @filtered_pids: The list of pids to check
494 * @search_pid: The PID to find in @filtered_pids
495 *
496 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
497 */
498 bool
499 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
500 {
501 /*
502 * If pid_max changed after filtered_pids was created, we
503 * by default ignore all pids greater than the previous pid_max.
504 */
505 if (search_pid >= filtered_pids->pid_max)
506 return false;
507
508 return test_bit(search_pid, filtered_pids->pids);
509 }
510
511 /**
512 * trace_ignore_this_task - should a task be ignored for tracing
513 * @filtered_pids: The list of pids to check
514 * @task: The task that should be ignored if not filtered
515 *
516 * Checks if @task should be traced or not from @filtered_pids.
517 * Returns true if @task should *NOT* be traced.
518 * Returns false if @task should be traced.
519 */
520 bool
521 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
522 struct trace_pid_list *filtered_no_pids,
523 struct task_struct *task)
524 {
525 /*
526 * If filterd_no_pids is not empty, and the task's pid is listed
527 * in filtered_no_pids, then return true.
528 * Otherwise, if filtered_pids is empty, that means we can
529 * trace all tasks. If it has content, then only trace pids
530 * within filtered_pids.
531 */
532
533 return (filtered_pids &&
534 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
535 (filtered_no_pids &&
536 trace_find_filtered_pid(filtered_no_pids, task->pid));
537 }
538
539 /**
540 * trace_filter_add_remove_task - Add or remove a task from a pid_list
541 * @pid_list: The list to modify
542 * @self: The current task for fork or NULL for exit
543 * @task: The task to add or remove
544 *
545 * If adding a task, if @self is defined, the task is only added if @self
546 * is also included in @pid_list. This happens on fork and tasks should
547 * only be added when the parent is listed. If @self is NULL, then the
548 * @task pid will be removed from the list, which would happen on exit
549 * of a task.
550 */
551 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
552 struct task_struct *self,
553 struct task_struct *task)
554 {
555 if (!pid_list)
556 return;
557
558 /* For forks, we only add if the forking task is listed */
559 if (self) {
560 if (!trace_find_filtered_pid(pid_list, self->pid))
561 return;
562 }
563
564 /* Sorry, but we don't support pid_max changing after setting */
565 if (task->pid >= pid_list->pid_max)
566 return;
567
568 /* "self" is set for forks, and NULL for exits */
569 if (self)
570 set_bit(task->pid, pid_list->pids);
571 else
572 clear_bit(task->pid, pid_list->pids);
573 }
574
575 /**
576 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
577 * @pid_list: The pid list to show
578 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
579 * @pos: The position of the file
580 *
581 * This is used by the seq_file "next" operation to iterate the pids
582 * listed in a trace_pid_list structure.
583 *
584 * Returns the pid+1 as we want to display pid of zero, but NULL would
585 * stop the iteration.
586 */
587 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
588 {
589 unsigned long pid = (unsigned long)v;
590
591 (*pos)++;
592
593 /* pid already is +1 of the actual prevous bit */
594 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
595
596 /* Return pid + 1 to allow zero to be represented */
597 if (pid < pid_list->pid_max)
598 return (void *)(pid + 1);
599
600 return NULL;
601 }
602
603 /**
604 * trace_pid_start - Used for seq_file to start reading pid lists
605 * @pid_list: The pid list to show
606 * @pos: The position of the file
607 *
608 * This is used by seq_file "start" operation to start the iteration
609 * of listing pids.
610 *
611 * Returns the pid+1 as we want to display pid of zero, but NULL would
612 * stop the iteration.
613 */
614 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
615 {
616 unsigned long pid;
617 loff_t l = 0;
618
619 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
620 if (pid >= pid_list->pid_max)
621 return NULL;
622
623 /* Return pid + 1 so that zero can be the exit value */
624 for (pid++; pid && l < *pos;
625 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
626 ;
627 return (void *)pid;
628 }
629
630 /**
631 * trace_pid_show - show the current pid in seq_file processing
632 * @m: The seq_file structure to write into
633 * @v: A void pointer of the pid (+1) value to display
634 *
635 * Can be directly used by seq_file operations to display the current
636 * pid value.
637 */
638 int trace_pid_show(struct seq_file *m, void *v)
639 {
640 unsigned long pid = (unsigned long)v - 1;
641
642 seq_printf(m, "%lu\n", pid);
643 return 0;
644 }
645
646 /* 128 should be much more than enough */
647 #define PID_BUF_SIZE 127
648
649 int trace_pid_write(struct trace_pid_list *filtered_pids,
650 struct trace_pid_list **new_pid_list,
651 const char __user *ubuf, size_t cnt)
652 {
653 struct trace_pid_list *pid_list;
654 struct trace_parser parser;
655 unsigned long val;
656 int nr_pids = 0;
657 ssize_t read = 0;
658 ssize_t ret = 0;
659 loff_t pos;
660 pid_t pid;
661
662 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
663 return -ENOMEM;
664
665 /*
666 * Always recreate a new array. The write is an all or nothing
667 * operation. Always create a new array when adding new pids by
668 * the user. If the operation fails, then the current list is
669 * not modified.
670 */
671 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
672 if (!pid_list) {
673 trace_parser_put(&parser);
674 return -ENOMEM;
675 }
676
677 pid_list->pid_max = READ_ONCE(pid_max);
678
679 /* Only truncating will shrink pid_max */
680 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
681 pid_list->pid_max = filtered_pids->pid_max;
682
683 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
684 if (!pid_list->pids) {
685 trace_parser_put(&parser);
686 kfree(pid_list);
687 return -ENOMEM;
688 }
689
690 if (filtered_pids) {
691 /* copy the current bits to the new max */
692 for_each_set_bit(pid, filtered_pids->pids,
693 filtered_pids->pid_max) {
694 set_bit(pid, pid_list->pids);
695 nr_pids++;
696 }
697 }
698
699 while (cnt > 0) {
700
701 pos = 0;
702
703 ret = trace_get_user(&parser, ubuf, cnt, &pos);
704 if (ret < 0 || !trace_parser_loaded(&parser))
705 break;
706
707 read += ret;
708 ubuf += ret;
709 cnt -= ret;
710
711 ret = -EINVAL;
712 if (kstrtoul(parser.buffer, 0, &val))
713 break;
714 if (val >= pid_list->pid_max)
715 break;
716
717 pid = (pid_t)val;
718
719 set_bit(pid, pid_list->pids);
720 nr_pids++;
721
722 trace_parser_clear(&parser);
723 ret = 0;
724 }
725 trace_parser_put(&parser);
726
727 if (ret < 0) {
728 trace_free_pid_list(pid_list);
729 return ret;
730 }
731
732 if (!nr_pids) {
733 /* Cleared the list of pids */
734 trace_free_pid_list(pid_list);
735 read = ret;
736 pid_list = NULL;
737 }
738
739 *new_pid_list = pid_list;
740
741 return read;
742 }
743
744 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
745 {
746 u64 ts;
747
748 /* Early boot up does not have a buffer yet */
749 if (!buf->buffer)
750 return trace_clock_local();
751
752 ts = ring_buffer_time_stamp(buf->buffer, cpu);
753 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
754
755 return ts;
756 }
757
758 u64 ftrace_now(int cpu)
759 {
760 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
761 }
762
763 /**
764 * tracing_is_enabled - Show if global_trace has been disabled
765 *
766 * Shows if the global trace has been enabled or not. It uses the
767 * mirror flag "buffer_disabled" to be used in fast paths such as for
768 * the irqsoff tracer. But it may be inaccurate due to races. If you
769 * need to know the accurate state, use tracing_is_on() which is a little
770 * slower, but accurate.
771 */
772 int tracing_is_enabled(void)
773 {
774 /*
775 * For quick access (irqsoff uses this in fast path), just
776 * return the mirror variable of the state of the ring buffer.
777 * It's a little racy, but we don't really care.
778 */
779 smp_rmb();
780 return !global_trace.buffer_disabled;
781 }
782
783 /*
784 * trace_buf_size is the size in bytes that is allocated
785 * for a buffer. Note, the number of bytes is always rounded
786 * to page size.
787 *
788 * This number is purposely set to a low number of 16384.
789 * If the dump on oops happens, it will be much appreciated
790 * to not have to wait for all that output. Anyway this can be
791 * boot time and run time configurable.
792 */
793 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
794
795 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
796
797 /* trace_types holds a link list of available tracers. */
798 static struct tracer *trace_types __read_mostly;
799
800 /*
801 * trace_types_lock is used to protect the trace_types list.
802 */
803 DEFINE_MUTEX(trace_types_lock);
804
805 /*
806 * serialize the access of the ring buffer
807 *
808 * ring buffer serializes readers, but it is low level protection.
809 * The validity of the events (which returns by ring_buffer_peek() ..etc)
810 * are not protected by ring buffer.
811 *
812 * The content of events may become garbage if we allow other process consumes
813 * these events concurrently:
814 * A) the page of the consumed events may become a normal page
815 * (not reader page) in ring buffer, and this page will be rewrited
816 * by events producer.
817 * B) The page of the consumed events may become a page for splice_read,
818 * and this page will be returned to system.
819 *
820 * These primitives allow multi process access to different cpu ring buffer
821 * concurrently.
822 *
823 * These primitives don't distinguish read-only and read-consume access.
824 * Multi read-only access are also serialized.
825 */
826
827 #ifdef CONFIG_SMP
828 static DECLARE_RWSEM(all_cpu_access_lock);
829 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
830
831 static inline void trace_access_lock(int cpu)
832 {
833 if (cpu == RING_BUFFER_ALL_CPUS) {
834 /* gain it for accessing the whole ring buffer. */
835 down_write(&all_cpu_access_lock);
836 } else {
837 /* gain it for accessing a cpu ring buffer. */
838
839 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
840 down_read(&all_cpu_access_lock);
841
842 /* Secondly block other access to this @cpu ring buffer. */
843 mutex_lock(&per_cpu(cpu_access_lock, cpu));
844 }
845 }
846
847 static inline void trace_access_unlock(int cpu)
848 {
849 if (cpu == RING_BUFFER_ALL_CPUS) {
850 up_write(&all_cpu_access_lock);
851 } else {
852 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
853 up_read(&all_cpu_access_lock);
854 }
855 }
856
857 static inline void trace_access_lock_init(void)
858 {
859 int cpu;
860
861 for_each_possible_cpu(cpu)
862 mutex_init(&per_cpu(cpu_access_lock, cpu));
863 }
864
865 #else
866
867 static DEFINE_MUTEX(access_lock);
868
869 static inline void trace_access_lock(int cpu)
870 {
871 (void)cpu;
872 mutex_lock(&access_lock);
873 }
874
875 static inline void trace_access_unlock(int cpu)
876 {
877 (void)cpu;
878 mutex_unlock(&access_lock);
879 }
880
881 static inline void trace_access_lock_init(void)
882 {
883 }
884
885 #endif
886
887 #ifdef CONFIG_STACKTRACE
888 static void __ftrace_trace_stack(struct trace_buffer *buffer,
889 unsigned long flags,
890 int skip, int pc, struct pt_regs *regs);
891 static inline void ftrace_trace_stack(struct trace_array *tr,
892 struct trace_buffer *buffer,
893 unsigned long flags,
894 int skip, int pc, struct pt_regs *regs);
895
896 #else
897 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
898 unsigned long flags,
899 int skip, int pc, struct pt_regs *regs)
900 {
901 }
902 static inline void ftrace_trace_stack(struct trace_array *tr,
903 struct trace_buffer *buffer,
904 unsigned long flags,
905 int skip, int pc, struct pt_regs *regs)
906 {
907 }
908
909 #endif
910
911 static __always_inline void
912 trace_event_setup(struct ring_buffer_event *event,
913 int type, unsigned long flags, int pc)
914 {
915 struct trace_entry *ent = ring_buffer_event_data(event);
916
917 tracing_generic_entry_update(ent, type, flags, pc);
918 }
919
920 static __always_inline struct ring_buffer_event *
921 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
922 int type,
923 unsigned long len,
924 unsigned long flags, int pc)
925 {
926 struct ring_buffer_event *event;
927
928 event = ring_buffer_lock_reserve(buffer, len);
929 if (event != NULL)
930 trace_event_setup(event, type, flags, pc);
931
932 return event;
933 }
934
935 void tracer_tracing_on(struct trace_array *tr)
936 {
937 if (tr->array_buffer.buffer)
938 ring_buffer_record_on(tr->array_buffer.buffer);
939 /*
940 * This flag is looked at when buffers haven't been allocated
941 * yet, or by some tracers (like irqsoff), that just want to
942 * know if the ring buffer has been disabled, but it can handle
943 * races of where it gets disabled but we still do a record.
944 * As the check is in the fast path of the tracers, it is more
945 * important to be fast than accurate.
946 */
947 tr->buffer_disabled = 0;
948 /* Make the flag seen by readers */
949 smp_wmb();
950 }
951
952 /**
953 * tracing_on - enable tracing buffers
954 *
955 * This function enables tracing buffers that may have been
956 * disabled with tracing_off.
957 */
958 void tracing_on(void)
959 {
960 tracer_tracing_on(&global_trace);
961 }
962 EXPORT_SYMBOL_GPL(tracing_on);
963
964
965 static __always_inline void
966 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
967 {
968 __this_cpu_write(trace_taskinfo_save, true);
969
970 /* If this is the temp buffer, we need to commit fully */
971 if (this_cpu_read(trace_buffered_event) == event) {
972 /* Length is in event->array[0] */
973 ring_buffer_write(buffer, event->array[0], &event->array[1]);
974 /* Release the temp buffer */
975 this_cpu_dec(trace_buffered_event_cnt);
976 } else
977 ring_buffer_unlock_commit(buffer, event);
978 }
979
980 /**
981 * __trace_puts - write a constant string into the trace buffer.
982 * @ip: The address of the caller
983 * @str: The constant string to write
984 * @size: The size of the string.
985 */
986 int __trace_puts(unsigned long ip, const char *str, int size)
987 {
988 struct ring_buffer_event *event;
989 struct trace_buffer *buffer;
990 struct print_entry *entry;
991 unsigned long irq_flags;
992 int alloc;
993 int pc;
994
995 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
996 return 0;
997
998 pc = preempt_count();
999
1000 if (unlikely(tracing_selftest_running || tracing_disabled))
1001 return 0;
1002
1003 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1004
1005 local_save_flags(irq_flags);
1006 buffer = global_trace.array_buffer.buffer;
1007 ring_buffer_nest_start(buffer);
1008 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1009 irq_flags, pc);
1010 if (!event) {
1011 size = 0;
1012 goto out;
1013 }
1014
1015 entry = ring_buffer_event_data(event);
1016 entry->ip = ip;
1017
1018 memcpy(&entry->buf, str, size);
1019
1020 /* Add a newline if necessary */
1021 if (entry->buf[size - 1] != '\n') {
1022 entry->buf[size] = '\n';
1023 entry->buf[size + 1] = '\0';
1024 } else
1025 entry->buf[size] = '\0';
1026
1027 __buffer_unlock_commit(buffer, event);
1028 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1029 out:
1030 ring_buffer_nest_end(buffer);
1031 return size;
1032 }
1033 EXPORT_SYMBOL_GPL(__trace_puts);
1034
1035 /**
1036 * __trace_bputs - write the pointer to a constant string into trace buffer
1037 * @ip: The address of the caller
1038 * @str: The constant string to write to the buffer to
1039 */
1040 int __trace_bputs(unsigned long ip, const char *str)
1041 {
1042 struct ring_buffer_event *event;
1043 struct trace_buffer *buffer;
1044 struct bputs_entry *entry;
1045 unsigned long irq_flags;
1046 int size = sizeof(struct bputs_entry);
1047 int ret = 0;
1048 int pc;
1049
1050 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1051 return 0;
1052
1053 pc = preempt_count();
1054
1055 if (unlikely(tracing_selftest_running || tracing_disabled))
1056 return 0;
1057
1058 local_save_flags(irq_flags);
1059 buffer = global_trace.array_buffer.buffer;
1060
1061 ring_buffer_nest_start(buffer);
1062 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1063 irq_flags, pc);
1064 if (!event)
1065 goto out;
1066
1067 entry = ring_buffer_event_data(event);
1068 entry->ip = ip;
1069 entry->str = str;
1070
1071 __buffer_unlock_commit(buffer, event);
1072 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1073
1074 ret = 1;
1075 out:
1076 ring_buffer_nest_end(buffer);
1077 return ret;
1078 }
1079 EXPORT_SYMBOL_GPL(__trace_bputs);
1080
1081 #ifdef CONFIG_TRACER_SNAPSHOT
1082 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1083 void *cond_data)
1084 {
1085 struct tracer *tracer = tr->current_trace;
1086 unsigned long flags;
1087
1088 if (in_nmi()) {
1089 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1090 internal_trace_puts("*** snapshot is being ignored ***\n");
1091 return;
1092 }
1093
1094 if (!tr->allocated_snapshot) {
1095 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1096 internal_trace_puts("*** stopping trace here! ***\n");
1097 tracing_off();
1098 return;
1099 }
1100
1101 /* Note, snapshot can not be used when the tracer uses it */
1102 if (tracer->use_max_tr) {
1103 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1104 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1105 return;
1106 }
1107
1108 local_irq_save(flags);
1109 update_max_tr(tr, current, smp_processor_id(), cond_data);
1110 local_irq_restore(flags);
1111 }
1112
1113 void tracing_snapshot_instance(struct trace_array *tr)
1114 {
1115 tracing_snapshot_instance_cond(tr, NULL);
1116 }
1117
1118 /**
1119 * tracing_snapshot - take a snapshot of the current buffer.
1120 *
1121 * This causes a swap between the snapshot buffer and the current live
1122 * tracing buffer. You can use this to take snapshots of the live
1123 * trace when some condition is triggered, but continue to trace.
1124 *
1125 * Note, make sure to allocate the snapshot with either
1126 * a tracing_snapshot_alloc(), or by doing it manually
1127 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1128 *
1129 * If the snapshot buffer is not allocated, it will stop tracing.
1130 * Basically making a permanent snapshot.
1131 */
1132 void tracing_snapshot(void)
1133 {
1134 struct trace_array *tr = &global_trace;
1135
1136 tracing_snapshot_instance(tr);
1137 }
1138 EXPORT_SYMBOL_GPL(tracing_snapshot);
1139
1140 /**
1141 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1142 * @tr: The tracing instance to snapshot
1143 * @cond_data: The data to be tested conditionally, and possibly saved
1144 *
1145 * This is the same as tracing_snapshot() except that the snapshot is
1146 * conditional - the snapshot will only happen if the
1147 * cond_snapshot.update() implementation receiving the cond_data
1148 * returns true, which means that the trace array's cond_snapshot
1149 * update() operation used the cond_data to determine whether the
1150 * snapshot should be taken, and if it was, presumably saved it along
1151 * with the snapshot.
1152 */
1153 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1154 {
1155 tracing_snapshot_instance_cond(tr, cond_data);
1156 }
1157 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1158
1159 /**
1160 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1161 * @tr: The tracing instance
1162 *
1163 * When the user enables a conditional snapshot using
1164 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1165 * with the snapshot. This accessor is used to retrieve it.
1166 *
1167 * Should not be called from cond_snapshot.update(), since it takes
1168 * the tr->max_lock lock, which the code calling
1169 * cond_snapshot.update() has already done.
1170 *
1171 * Returns the cond_data associated with the trace array's snapshot.
1172 */
1173 void *tracing_cond_snapshot_data(struct trace_array *tr)
1174 {
1175 void *cond_data = NULL;
1176
1177 arch_spin_lock(&tr->max_lock);
1178
1179 if (tr->cond_snapshot)
1180 cond_data = tr->cond_snapshot->cond_data;
1181
1182 arch_spin_unlock(&tr->max_lock);
1183
1184 return cond_data;
1185 }
1186 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1187
1188 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1189 struct array_buffer *size_buf, int cpu_id);
1190 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1191
1192 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1193 {
1194 int ret;
1195
1196 if (!tr->allocated_snapshot) {
1197
1198 /* allocate spare buffer */
1199 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1200 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1201 if (ret < 0)
1202 return ret;
1203
1204 tr->allocated_snapshot = true;
1205 }
1206
1207 return 0;
1208 }
1209
1210 static void free_snapshot(struct trace_array *tr)
1211 {
1212 /*
1213 * We don't free the ring buffer. instead, resize it because
1214 * The max_tr ring buffer has some state (e.g. ring->clock) and
1215 * we want preserve it.
1216 */
1217 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1218 set_buffer_entries(&tr->max_buffer, 1);
1219 tracing_reset_online_cpus(&tr->max_buffer);
1220 tr->allocated_snapshot = false;
1221 }
1222
1223 /**
1224 * tracing_alloc_snapshot - allocate snapshot buffer.
1225 *
1226 * This only allocates the snapshot buffer if it isn't already
1227 * allocated - it doesn't also take a snapshot.
1228 *
1229 * This is meant to be used in cases where the snapshot buffer needs
1230 * to be set up for events that can't sleep but need to be able to
1231 * trigger a snapshot.
1232 */
1233 int tracing_alloc_snapshot(void)
1234 {
1235 struct trace_array *tr = &global_trace;
1236 int ret;
1237
1238 ret = tracing_alloc_snapshot_instance(tr);
1239 WARN_ON(ret < 0);
1240
1241 return ret;
1242 }
1243 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1244
1245 /**
1246 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1247 *
1248 * This is similar to tracing_snapshot(), but it will allocate the
1249 * snapshot buffer if it isn't already allocated. Use this only
1250 * where it is safe to sleep, as the allocation may sleep.
1251 *
1252 * This causes a swap between the snapshot buffer and the current live
1253 * tracing buffer. You can use this to take snapshots of the live
1254 * trace when some condition is triggered, but continue to trace.
1255 */
1256 void tracing_snapshot_alloc(void)
1257 {
1258 int ret;
1259
1260 ret = tracing_alloc_snapshot();
1261 if (ret < 0)
1262 return;
1263
1264 tracing_snapshot();
1265 }
1266 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1267
1268 /**
1269 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1270 * @tr: The tracing instance
1271 * @cond_data: User data to associate with the snapshot
1272 * @update: Implementation of the cond_snapshot update function
1273 *
1274 * Check whether the conditional snapshot for the given instance has
1275 * already been enabled, or if the current tracer is already using a
1276 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1277 * save the cond_data and update function inside.
1278 *
1279 * Returns 0 if successful, error otherwise.
1280 */
1281 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1282 cond_update_fn_t update)
1283 {
1284 struct cond_snapshot *cond_snapshot;
1285 int ret = 0;
1286
1287 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1288 if (!cond_snapshot)
1289 return -ENOMEM;
1290
1291 cond_snapshot->cond_data = cond_data;
1292 cond_snapshot->update = update;
1293
1294 mutex_lock(&trace_types_lock);
1295
1296 ret = tracing_alloc_snapshot_instance(tr);
1297 if (ret)
1298 goto fail_unlock;
1299
1300 if (tr->current_trace->use_max_tr) {
1301 ret = -EBUSY;
1302 goto fail_unlock;
1303 }
1304
1305 /*
1306 * The cond_snapshot can only change to NULL without the
1307 * trace_types_lock. We don't care if we race with it going
1308 * to NULL, but we want to make sure that it's not set to
1309 * something other than NULL when we get here, which we can
1310 * do safely with only holding the trace_types_lock and not
1311 * having to take the max_lock.
1312 */
1313 if (tr->cond_snapshot) {
1314 ret = -EBUSY;
1315 goto fail_unlock;
1316 }
1317
1318 arch_spin_lock(&tr->max_lock);
1319 tr->cond_snapshot = cond_snapshot;
1320 arch_spin_unlock(&tr->max_lock);
1321
1322 mutex_unlock(&trace_types_lock);
1323
1324 return ret;
1325
1326 fail_unlock:
1327 mutex_unlock(&trace_types_lock);
1328 kfree(cond_snapshot);
1329 return ret;
1330 }
1331 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1332
1333 /**
1334 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1335 * @tr: The tracing instance
1336 *
1337 * Check whether the conditional snapshot for the given instance is
1338 * enabled; if so, free the cond_snapshot associated with it,
1339 * otherwise return -EINVAL.
1340 *
1341 * Returns 0 if successful, error otherwise.
1342 */
1343 int tracing_snapshot_cond_disable(struct trace_array *tr)
1344 {
1345 int ret = 0;
1346
1347 arch_spin_lock(&tr->max_lock);
1348
1349 if (!tr->cond_snapshot)
1350 ret = -EINVAL;
1351 else {
1352 kfree(tr->cond_snapshot);
1353 tr->cond_snapshot = NULL;
1354 }
1355
1356 arch_spin_unlock(&tr->max_lock);
1357
1358 return ret;
1359 }
1360 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1361 #else
1362 void tracing_snapshot(void)
1363 {
1364 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1365 }
1366 EXPORT_SYMBOL_GPL(tracing_snapshot);
1367 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1368 {
1369 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1370 }
1371 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1372 int tracing_alloc_snapshot(void)
1373 {
1374 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1375 return -ENODEV;
1376 }
1377 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1378 void tracing_snapshot_alloc(void)
1379 {
1380 /* Give warning */
1381 tracing_snapshot();
1382 }
1383 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1384 void *tracing_cond_snapshot_data(struct trace_array *tr)
1385 {
1386 return NULL;
1387 }
1388 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1389 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1390 {
1391 return -ENODEV;
1392 }
1393 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1394 int tracing_snapshot_cond_disable(struct trace_array *tr)
1395 {
1396 return false;
1397 }
1398 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1399 #endif /* CONFIG_TRACER_SNAPSHOT */
1400
1401 void tracer_tracing_off(struct trace_array *tr)
1402 {
1403 if (tr->array_buffer.buffer)
1404 ring_buffer_record_off(tr->array_buffer.buffer);
1405 /*
1406 * This flag is looked at when buffers haven't been allocated
1407 * yet, or by some tracers (like irqsoff), that just want to
1408 * know if the ring buffer has been disabled, but it can handle
1409 * races of where it gets disabled but we still do a record.
1410 * As the check is in the fast path of the tracers, it is more
1411 * important to be fast than accurate.
1412 */
1413 tr->buffer_disabled = 1;
1414 /* Make the flag seen by readers */
1415 smp_wmb();
1416 }
1417
1418 /**
1419 * tracing_off - turn off tracing buffers
1420 *
1421 * This function stops the tracing buffers from recording data.
1422 * It does not disable any overhead the tracers themselves may
1423 * be causing. This function simply causes all recording to
1424 * the ring buffers to fail.
1425 */
1426 void tracing_off(void)
1427 {
1428 tracer_tracing_off(&global_trace);
1429 }
1430 EXPORT_SYMBOL_GPL(tracing_off);
1431
1432 void disable_trace_on_warning(void)
1433 {
1434 if (__disable_trace_on_warning) {
1435 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1436 "Disabling tracing due to warning\n");
1437 tracing_off();
1438 }
1439 }
1440
1441 /**
1442 * tracer_tracing_is_on - show real state of ring buffer enabled
1443 * @tr : the trace array to know if ring buffer is enabled
1444 *
1445 * Shows real state of the ring buffer if it is enabled or not.
1446 */
1447 bool tracer_tracing_is_on(struct trace_array *tr)
1448 {
1449 if (tr->array_buffer.buffer)
1450 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1451 return !tr->buffer_disabled;
1452 }
1453
1454 /**
1455 * tracing_is_on - show state of ring buffers enabled
1456 */
1457 int tracing_is_on(void)
1458 {
1459 return tracer_tracing_is_on(&global_trace);
1460 }
1461 EXPORT_SYMBOL_GPL(tracing_is_on);
1462
1463 static int __init set_buf_size(char *str)
1464 {
1465 unsigned long buf_size;
1466
1467 if (!str)
1468 return 0;
1469 buf_size = memparse(str, &str);
1470 /* nr_entries can not be zero */
1471 if (buf_size == 0)
1472 return 0;
1473 trace_buf_size = buf_size;
1474 return 1;
1475 }
1476 __setup("trace_buf_size=", set_buf_size);
1477
1478 static int __init set_tracing_thresh(char *str)
1479 {
1480 unsigned long threshold;
1481 int ret;
1482
1483 if (!str)
1484 return 0;
1485 ret = kstrtoul(str, 0, &threshold);
1486 if (ret < 0)
1487 return 0;
1488 tracing_thresh = threshold * 1000;
1489 return 1;
1490 }
1491 __setup("tracing_thresh=", set_tracing_thresh);
1492
1493 unsigned long nsecs_to_usecs(unsigned long nsecs)
1494 {
1495 return nsecs / 1000;
1496 }
1497
1498 /*
1499 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1500 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1501 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1502 * of strings in the order that the evals (enum) were defined.
1503 */
1504 #undef C
1505 #define C(a, b) b
1506
1507 /* These must match the bit postions in trace_iterator_flags */
1508 static const char *trace_options[] = {
1509 TRACE_FLAGS
1510 NULL
1511 };
1512
1513 static struct {
1514 u64 (*func)(void);
1515 const char *name;
1516 int in_ns; /* is this clock in nanoseconds? */
1517 } trace_clocks[] = {
1518 { trace_clock_local, "local", 1 },
1519 { trace_clock_global, "global", 1 },
1520 { trace_clock_counter, "counter", 0 },
1521 { trace_clock_jiffies, "uptime", 0 },
1522 { trace_clock, "perf", 1 },
1523 { ktime_get_mono_fast_ns, "mono", 1 },
1524 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1525 { ktime_get_boot_fast_ns, "boot", 1 },
1526 ARCH_TRACE_CLOCKS
1527 };
1528
1529 bool trace_clock_in_ns(struct trace_array *tr)
1530 {
1531 if (trace_clocks[tr->clock_id].in_ns)
1532 return true;
1533
1534 return false;
1535 }
1536
1537 /*
1538 * trace_parser_get_init - gets the buffer for trace parser
1539 */
1540 int trace_parser_get_init(struct trace_parser *parser, int size)
1541 {
1542 memset(parser, 0, sizeof(*parser));
1543
1544 parser->buffer = kmalloc(size, GFP_KERNEL);
1545 if (!parser->buffer)
1546 return 1;
1547
1548 parser->size = size;
1549 return 0;
1550 }
1551
1552 /*
1553 * trace_parser_put - frees the buffer for trace parser
1554 */
1555 void trace_parser_put(struct trace_parser *parser)
1556 {
1557 kfree(parser->buffer);
1558 parser->buffer = NULL;
1559 }
1560
1561 /*
1562 * trace_get_user - reads the user input string separated by space
1563 * (matched by isspace(ch))
1564 *
1565 * For each string found the 'struct trace_parser' is updated,
1566 * and the function returns.
1567 *
1568 * Returns number of bytes read.
1569 *
1570 * See kernel/trace/trace.h for 'struct trace_parser' details.
1571 */
1572 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1573 size_t cnt, loff_t *ppos)
1574 {
1575 char ch;
1576 size_t read = 0;
1577 ssize_t ret;
1578
1579 if (!*ppos)
1580 trace_parser_clear(parser);
1581
1582 ret = get_user(ch, ubuf++);
1583 if (ret)
1584 goto out;
1585
1586 read++;
1587 cnt--;
1588
1589 /*
1590 * The parser is not finished with the last write,
1591 * continue reading the user input without skipping spaces.
1592 */
1593 if (!parser->cont) {
1594 /* skip white space */
1595 while (cnt && isspace(ch)) {
1596 ret = get_user(ch, ubuf++);
1597 if (ret)
1598 goto out;
1599 read++;
1600 cnt--;
1601 }
1602
1603 parser->idx = 0;
1604
1605 /* only spaces were written */
1606 if (isspace(ch) || !ch) {
1607 *ppos += read;
1608 ret = read;
1609 goto out;
1610 }
1611 }
1612
1613 /* read the non-space input */
1614 while (cnt && !isspace(ch) && ch) {
1615 if (parser->idx < parser->size - 1)
1616 parser->buffer[parser->idx++] = ch;
1617 else {
1618 ret = -EINVAL;
1619 goto out;
1620 }
1621 ret = get_user(ch, ubuf++);
1622 if (ret)
1623 goto out;
1624 read++;
1625 cnt--;
1626 }
1627
1628 /* We either got finished input or we have to wait for another call. */
1629 if (isspace(ch) || !ch) {
1630 parser->buffer[parser->idx] = 0;
1631 parser->cont = false;
1632 } else if (parser->idx < parser->size - 1) {
1633 parser->cont = true;
1634 parser->buffer[parser->idx++] = ch;
1635 /* Make sure the parsed string always terminates with '\0'. */
1636 parser->buffer[parser->idx] = 0;
1637 } else {
1638 ret = -EINVAL;
1639 goto out;
1640 }
1641
1642 *ppos += read;
1643 ret = read;
1644
1645 out:
1646 return ret;
1647 }
1648
1649 /* TODO add a seq_buf_to_buffer() */
1650 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1651 {
1652 int len;
1653
1654 if (trace_seq_used(s) <= s->seq.readpos)
1655 return -EBUSY;
1656
1657 len = trace_seq_used(s) - s->seq.readpos;
1658 if (cnt > len)
1659 cnt = len;
1660 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1661
1662 s->seq.readpos += cnt;
1663 return cnt;
1664 }
1665
1666 unsigned long __read_mostly tracing_thresh;
1667 static const struct file_operations tracing_max_lat_fops;
1668
1669 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1670 defined(CONFIG_FSNOTIFY)
1671
1672 static struct workqueue_struct *fsnotify_wq;
1673
1674 static void latency_fsnotify_workfn(struct work_struct *work)
1675 {
1676 struct trace_array *tr = container_of(work, struct trace_array,
1677 fsnotify_work);
1678 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1679 }
1680
1681 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1682 {
1683 struct trace_array *tr = container_of(iwork, struct trace_array,
1684 fsnotify_irqwork);
1685 queue_work(fsnotify_wq, &tr->fsnotify_work);
1686 }
1687
1688 static void trace_create_maxlat_file(struct trace_array *tr,
1689 struct dentry *d_tracer)
1690 {
1691 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1692 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1693 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1694 d_tracer, &tr->max_latency,
1695 &tracing_max_lat_fops);
1696 }
1697
1698 __init static int latency_fsnotify_init(void)
1699 {
1700 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1701 WQ_UNBOUND | WQ_HIGHPRI, 0);
1702 if (!fsnotify_wq) {
1703 pr_err("Unable to allocate tr_max_lat_wq\n");
1704 return -ENOMEM;
1705 }
1706 return 0;
1707 }
1708
1709 late_initcall_sync(latency_fsnotify_init);
1710
1711 void latency_fsnotify(struct trace_array *tr)
1712 {
1713 if (!fsnotify_wq)
1714 return;
1715 /*
1716 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1717 * possible that we are called from __schedule() or do_idle(), which
1718 * could cause a deadlock.
1719 */
1720 irq_work_queue(&tr->fsnotify_irqwork);
1721 }
1722
1723 /*
1724 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1725 * defined(CONFIG_FSNOTIFY)
1726 */
1727 #else
1728
1729 #define trace_create_maxlat_file(tr, d_tracer) \
1730 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1731 &tr->max_latency, &tracing_max_lat_fops)
1732
1733 #endif
1734
1735 #ifdef CONFIG_TRACER_MAX_TRACE
1736 /*
1737 * Copy the new maximum trace into the separate maximum-trace
1738 * structure. (this way the maximum trace is permanently saved,
1739 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1740 */
1741 static void
1742 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1743 {
1744 struct array_buffer *trace_buf = &tr->array_buffer;
1745 struct array_buffer *max_buf = &tr->max_buffer;
1746 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1747 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1748
1749 max_buf->cpu = cpu;
1750 max_buf->time_start = data->preempt_timestamp;
1751
1752 max_data->saved_latency = tr->max_latency;
1753 max_data->critical_start = data->critical_start;
1754 max_data->critical_end = data->critical_end;
1755
1756 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1757 max_data->pid = tsk->pid;
1758 /*
1759 * If tsk == current, then use current_uid(), as that does not use
1760 * RCU. The irq tracer can be called out of RCU scope.
1761 */
1762 if (tsk == current)
1763 max_data->uid = current_uid();
1764 else
1765 max_data->uid = task_uid(tsk);
1766
1767 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1768 max_data->policy = tsk->policy;
1769 max_data->rt_priority = tsk->rt_priority;
1770
1771 /* record this tasks comm */
1772 tracing_record_cmdline(tsk);
1773 latency_fsnotify(tr);
1774 }
1775
1776 /**
1777 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1778 * @tr: tracer
1779 * @tsk: the task with the latency
1780 * @cpu: The cpu that initiated the trace.
1781 * @cond_data: User data associated with a conditional snapshot
1782 *
1783 * Flip the buffers between the @tr and the max_tr and record information
1784 * about which task was the cause of this latency.
1785 */
1786 void
1787 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1788 void *cond_data)
1789 {
1790 if (tr->stop_count)
1791 return;
1792
1793 WARN_ON_ONCE(!irqs_disabled());
1794
1795 if (!tr->allocated_snapshot) {
1796 /* Only the nop tracer should hit this when disabling */
1797 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1798 return;
1799 }
1800
1801 arch_spin_lock(&tr->max_lock);
1802
1803 /* Inherit the recordable setting from array_buffer */
1804 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1805 ring_buffer_record_on(tr->max_buffer.buffer);
1806 else
1807 ring_buffer_record_off(tr->max_buffer.buffer);
1808
1809 #ifdef CONFIG_TRACER_SNAPSHOT
1810 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1811 goto out_unlock;
1812 #endif
1813 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1814
1815 __update_max_tr(tr, tsk, cpu);
1816
1817 out_unlock:
1818 arch_spin_unlock(&tr->max_lock);
1819 }
1820
1821 /**
1822 * update_max_tr_single - only copy one trace over, and reset the rest
1823 * @tr: tracer
1824 * @tsk: task with the latency
1825 * @cpu: the cpu of the buffer to copy.
1826 *
1827 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1828 */
1829 void
1830 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1831 {
1832 int ret;
1833
1834 if (tr->stop_count)
1835 return;
1836
1837 WARN_ON_ONCE(!irqs_disabled());
1838 if (!tr->allocated_snapshot) {
1839 /* Only the nop tracer should hit this when disabling */
1840 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1841 return;
1842 }
1843
1844 arch_spin_lock(&tr->max_lock);
1845
1846 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1847
1848 if (ret == -EBUSY) {
1849 /*
1850 * We failed to swap the buffer due to a commit taking
1851 * place on this CPU. We fail to record, but we reset
1852 * the max trace buffer (no one writes directly to it)
1853 * and flag that it failed.
1854 */
1855 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1856 "Failed to swap buffers due to commit in progress\n");
1857 }
1858
1859 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1860
1861 __update_max_tr(tr, tsk, cpu);
1862 arch_spin_unlock(&tr->max_lock);
1863 }
1864 #endif /* CONFIG_TRACER_MAX_TRACE */
1865
1866 static int wait_on_pipe(struct trace_iterator *iter, int full)
1867 {
1868 /* Iterators are static, they should be filled or empty */
1869 if (trace_buffer_iter(iter, iter->cpu_file))
1870 return 0;
1871
1872 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1873 full);
1874 }
1875
1876 #ifdef CONFIG_FTRACE_STARTUP_TEST
1877 static bool selftests_can_run;
1878
1879 struct trace_selftests {
1880 struct list_head list;
1881 struct tracer *type;
1882 };
1883
1884 static LIST_HEAD(postponed_selftests);
1885
1886 static int save_selftest(struct tracer *type)
1887 {
1888 struct trace_selftests *selftest;
1889
1890 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1891 if (!selftest)
1892 return -ENOMEM;
1893
1894 selftest->type = type;
1895 list_add(&selftest->list, &postponed_selftests);
1896 return 0;
1897 }
1898
1899 static int run_tracer_selftest(struct tracer *type)
1900 {
1901 struct trace_array *tr = &global_trace;
1902 struct tracer *saved_tracer = tr->current_trace;
1903 int ret;
1904
1905 if (!type->selftest || tracing_selftest_disabled)
1906 return 0;
1907
1908 /*
1909 * If a tracer registers early in boot up (before scheduling is
1910 * initialized and such), then do not run its selftests yet.
1911 * Instead, run it a little later in the boot process.
1912 */
1913 if (!selftests_can_run)
1914 return save_selftest(type);
1915
1916 /*
1917 * Run a selftest on this tracer.
1918 * Here we reset the trace buffer, and set the current
1919 * tracer to be this tracer. The tracer can then run some
1920 * internal tracing to verify that everything is in order.
1921 * If we fail, we do not register this tracer.
1922 */
1923 tracing_reset_online_cpus(&tr->array_buffer);
1924
1925 tr->current_trace = type;
1926
1927 #ifdef CONFIG_TRACER_MAX_TRACE
1928 if (type->use_max_tr) {
1929 /* If we expanded the buffers, make sure the max is expanded too */
1930 if (ring_buffer_expanded)
1931 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1932 RING_BUFFER_ALL_CPUS);
1933 tr->allocated_snapshot = true;
1934 }
1935 #endif
1936
1937 /* the test is responsible for initializing and enabling */
1938 pr_info("Testing tracer %s: ", type->name);
1939 ret = type->selftest(type, tr);
1940 /* the test is responsible for resetting too */
1941 tr->current_trace = saved_tracer;
1942 if (ret) {
1943 printk(KERN_CONT "FAILED!\n");
1944 /* Add the warning after printing 'FAILED' */
1945 WARN_ON(1);
1946 return -1;
1947 }
1948 /* Only reset on passing, to avoid touching corrupted buffers */
1949 tracing_reset_online_cpus(&tr->array_buffer);
1950
1951 #ifdef CONFIG_TRACER_MAX_TRACE
1952 if (type->use_max_tr) {
1953 tr->allocated_snapshot = false;
1954
1955 /* Shrink the max buffer again */
1956 if (ring_buffer_expanded)
1957 ring_buffer_resize(tr->max_buffer.buffer, 1,
1958 RING_BUFFER_ALL_CPUS);
1959 }
1960 #endif
1961
1962 printk(KERN_CONT "PASSED\n");
1963 return 0;
1964 }
1965
1966 static __init int init_trace_selftests(void)
1967 {
1968 struct trace_selftests *p, *n;
1969 struct tracer *t, **last;
1970 int ret;
1971
1972 selftests_can_run = true;
1973
1974 mutex_lock(&trace_types_lock);
1975
1976 if (list_empty(&postponed_selftests))
1977 goto out;
1978
1979 pr_info("Running postponed tracer tests:\n");
1980
1981 tracing_selftest_running = true;
1982 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1983 /* This loop can take minutes when sanitizers are enabled, so
1984 * lets make sure we allow RCU processing.
1985 */
1986 cond_resched();
1987 ret = run_tracer_selftest(p->type);
1988 /* If the test fails, then warn and remove from available_tracers */
1989 if (ret < 0) {
1990 WARN(1, "tracer: %s failed selftest, disabling\n",
1991 p->type->name);
1992 last = &trace_types;
1993 for (t = trace_types; t; t = t->next) {
1994 if (t == p->type) {
1995 *last = t->next;
1996 break;
1997 }
1998 last = &t->next;
1999 }
2000 }
2001 list_del(&p->list);
2002 kfree(p);
2003 }
2004 tracing_selftest_running = false;
2005
2006 out:
2007 mutex_unlock(&trace_types_lock);
2008
2009 return 0;
2010 }
2011 core_initcall(init_trace_selftests);
2012 #else
2013 static inline int run_tracer_selftest(struct tracer *type)
2014 {
2015 return 0;
2016 }
2017 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2018
2019 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2020
2021 static void __init apply_trace_boot_options(void);
2022
2023 /**
2024 * register_tracer - register a tracer with the ftrace system.
2025 * @type: the plugin for the tracer
2026 *
2027 * Register a new plugin tracer.
2028 */
2029 int __init register_tracer(struct tracer *type)
2030 {
2031 struct tracer *t;
2032 int ret = 0;
2033
2034 if (!type->name) {
2035 pr_info("Tracer must have a name\n");
2036 return -1;
2037 }
2038
2039 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2040 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2041 return -1;
2042 }
2043
2044 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2045 pr_warn("Can not register tracer %s due to lockdown\n",
2046 type->name);
2047 return -EPERM;
2048 }
2049
2050 mutex_lock(&trace_types_lock);
2051
2052 tracing_selftest_running = true;
2053
2054 for (t = trace_types; t; t = t->next) {
2055 if (strcmp(type->name, t->name) == 0) {
2056 /* already found */
2057 pr_info("Tracer %s already registered\n",
2058 type->name);
2059 ret = -1;
2060 goto out;
2061 }
2062 }
2063
2064 if (!type->set_flag)
2065 type->set_flag = &dummy_set_flag;
2066 if (!type->flags) {
2067 /*allocate a dummy tracer_flags*/
2068 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2069 if (!type->flags) {
2070 ret = -ENOMEM;
2071 goto out;
2072 }
2073 type->flags->val = 0;
2074 type->flags->opts = dummy_tracer_opt;
2075 } else
2076 if (!type->flags->opts)
2077 type->flags->opts = dummy_tracer_opt;
2078
2079 /* store the tracer for __set_tracer_option */
2080 type->flags->trace = type;
2081
2082 ret = run_tracer_selftest(type);
2083 if (ret < 0)
2084 goto out;
2085
2086 type->next = trace_types;
2087 trace_types = type;
2088 add_tracer_options(&global_trace, type);
2089
2090 out:
2091 tracing_selftest_running = false;
2092 mutex_unlock(&trace_types_lock);
2093
2094 if (ret || !default_bootup_tracer)
2095 goto out_unlock;
2096
2097 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2098 goto out_unlock;
2099
2100 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2101 /* Do we want this tracer to start on bootup? */
2102 tracing_set_tracer(&global_trace, type->name);
2103 default_bootup_tracer = NULL;
2104
2105 apply_trace_boot_options();
2106
2107 /* disable other selftests, since this will break it. */
2108 tracing_selftest_disabled = true;
2109 #ifdef CONFIG_FTRACE_STARTUP_TEST
2110 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
2111 type->name);
2112 #endif
2113
2114 out_unlock:
2115 return ret;
2116 }
2117
2118 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2119 {
2120 struct trace_buffer *buffer = buf->buffer;
2121
2122 if (!buffer)
2123 return;
2124
2125 ring_buffer_record_disable(buffer);
2126
2127 /* Make sure all commits have finished */
2128 synchronize_rcu();
2129 ring_buffer_reset_cpu(buffer, cpu);
2130
2131 ring_buffer_record_enable(buffer);
2132 }
2133
2134 void tracing_reset_online_cpus(struct array_buffer *buf)
2135 {
2136 struct trace_buffer *buffer = buf->buffer;
2137
2138 if (!buffer)
2139 return;
2140
2141 ring_buffer_record_disable(buffer);
2142
2143 /* Make sure all commits have finished */
2144 synchronize_rcu();
2145
2146 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2147
2148 ring_buffer_reset_online_cpus(buffer);
2149
2150 ring_buffer_record_enable(buffer);
2151 }
2152
2153 /* Must have trace_types_lock held */
2154 void tracing_reset_all_online_cpus(void)
2155 {
2156 struct trace_array *tr;
2157
2158 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2159 if (!tr->clear_trace)
2160 continue;
2161 tr->clear_trace = false;
2162 tracing_reset_online_cpus(&tr->array_buffer);
2163 #ifdef CONFIG_TRACER_MAX_TRACE
2164 tracing_reset_online_cpus(&tr->max_buffer);
2165 #endif
2166 }
2167 }
2168
2169 static int *tgid_map;
2170
2171 #define SAVED_CMDLINES_DEFAULT 128
2172 #define NO_CMDLINE_MAP UINT_MAX
2173 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2174 struct saved_cmdlines_buffer {
2175 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2176 unsigned *map_cmdline_to_pid;
2177 unsigned cmdline_num;
2178 int cmdline_idx;
2179 char *saved_cmdlines;
2180 };
2181 static struct saved_cmdlines_buffer *savedcmd;
2182
2183 /* temporary disable recording */
2184 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2185
2186 static inline char *get_saved_cmdlines(int idx)
2187 {
2188 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2189 }
2190
2191 static inline void set_cmdline(int idx, const char *cmdline)
2192 {
2193 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2194 }
2195
2196 static int allocate_cmdlines_buffer(unsigned int val,
2197 struct saved_cmdlines_buffer *s)
2198 {
2199 s->map_cmdline_to_pid = kmalloc_array(val,
2200 sizeof(*s->map_cmdline_to_pid),
2201 GFP_KERNEL);
2202 if (!s->map_cmdline_to_pid)
2203 return -ENOMEM;
2204
2205 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2206 if (!s->saved_cmdlines) {
2207 kfree(s->map_cmdline_to_pid);
2208 return -ENOMEM;
2209 }
2210
2211 s->cmdline_idx = 0;
2212 s->cmdline_num = val;
2213 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2214 sizeof(s->map_pid_to_cmdline));
2215 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2216 val * sizeof(*s->map_cmdline_to_pid));
2217
2218 return 0;
2219 }
2220
2221 static int trace_create_savedcmd(void)
2222 {
2223 int ret;
2224
2225 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2226 if (!savedcmd)
2227 return -ENOMEM;
2228
2229 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2230 if (ret < 0) {
2231 kfree(savedcmd);
2232 savedcmd = NULL;
2233 return -ENOMEM;
2234 }
2235
2236 return 0;
2237 }
2238
2239 int is_tracing_stopped(void)
2240 {
2241 return global_trace.stop_count;
2242 }
2243
2244 /**
2245 * tracing_start - quick start of the tracer
2246 *
2247 * If tracing is enabled but was stopped by tracing_stop,
2248 * this will start the tracer back up.
2249 */
2250 void tracing_start(void)
2251 {
2252 struct trace_buffer *buffer;
2253 unsigned long flags;
2254
2255 if (tracing_disabled)
2256 return;
2257
2258 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2259 if (--global_trace.stop_count) {
2260 if (global_trace.stop_count < 0) {
2261 /* Someone screwed up their debugging */
2262 WARN_ON_ONCE(1);
2263 global_trace.stop_count = 0;
2264 }
2265 goto out;
2266 }
2267
2268 /* Prevent the buffers from switching */
2269 arch_spin_lock(&global_trace.max_lock);
2270
2271 buffer = global_trace.array_buffer.buffer;
2272 if (buffer)
2273 ring_buffer_record_enable(buffer);
2274
2275 #ifdef CONFIG_TRACER_MAX_TRACE
2276 buffer = global_trace.max_buffer.buffer;
2277 if (buffer)
2278 ring_buffer_record_enable(buffer);
2279 #endif
2280
2281 arch_spin_unlock(&global_trace.max_lock);
2282
2283 out:
2284 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2285 }
2286
2287 static void tracing_start_tr(struct trace_array *tr)
2288 {
2289 struct trace_buffer *buffer;
2290 unsigned long flags;
2291
2292 if (tracing_disabled)
2293 return;
2294
2295 /* If global, we need to also start the max tracer */
2296 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2297 return tracing_start();
2298
2299 raw_spin_lock_irqsave(&tr->start_lock, flags);
2300
2301 if (--tr->stop_count) {
2302 if (tr->stop_count < 0) {
2303 /* Someone screwed up their debugging */
2304 WARN_ON_ONCE(1);
2305 tr->stop_count = 0;
2306 }
2307 goto out;
2308 }
2309
2310 buffer = tr->array_buffer.buffer;
2311 if (buffer)
2312 ring_buffer_record_enable(buffer);
2313
2314 out:
2315 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2316 }
2317
2318 /**
2319 * tracing_stop - quick stop of the tracer
2320 *
2321 * Light weight way to stop tracing. Use in conjunction with
2322 * tracing_start.
2323 */
2324 void tracing_stop(void)
2325 {
2326 struct trace_buffer *buffer;
2327 unsigned long flags;
2328
2329 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2330 if (global_trace.stop_count++)
2331 goto out;
2332
2333 /* Prevent the buffers from switching */
2334 arch_spin_lock(&global_trace.max_lock);
2335
2336 buffer = global_trace.array_buffer.buffer;
2337 if (buffer)
2338 ring_buffer_record_disable(buffer);
2339
2340 #ifdef CONFIG_TRACER_MAX_TRACE
2341 buffer = global_trace.max_buffer.buffer;
2342 if (buffer)
2343 ring_buffer_record_disable(buffer);
2344 #endif
2345
2346 arch_spin_unlock(&global_trace.max_lock);
2347
2348 out:
2349 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2350 }
2351
2352 static void tracing_stop_tr(struct trace_array *tr)
2353 {
2354 struct trace_buffer *buffer;
2355 unsigned long flags;
2356
2357 /* If global, we need to also stop the max tracer */
2358 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2359 return tracing_stop();
2360
2361 raw_spin_lock_irqsave(&tr->start_lock, flags);
2362 if (tr->stop_count++)
2363 goto out;
2364
2365 buffer = tr->array_buffer.buffer;
2366 if (buffer)
2367 ring_buffer_record_disable(buffer);
2368
2369 out:
2370 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2371 }
2372
2373 static int trace_save_cmdline(struct task_struct *tsk)
2374 {
2375 unsigned pid, idx;
2376
2377 /* treat recording of idle task as a success */
2378 if (!tsk->pid)
2379 return 1;
2380
2381 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2382 return 0;
2383
2384 /*
2385 * It's not the end of the world if we don't get
2386 * the lock, but we also don't want to spin
2387 * nor do we want to disable interrupts,
2388 * so if we miss here, then better luck next time.
2389 */
2390 if (!arch_spin_trylock(&trace_cmdline_lock))
2391 return 0;
2392
2393 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2394 if (idx == NO_CMDLINE_MAP) {
2395 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2396
2397 /*
2398 * Check whether the cmdline buffer at idx has a pid
2399 * mapped. We are going to overwrite that entry so we
2400 * need to clear the map_pid_to_cmdline. Otherwise we
2401 * would read the new comm for the old pid.
2402 */
2403 pid = savedcmd->map_cmdline_to_pid[idx];
2404 if (pid != NO_CMDLINE_MAP)
2405 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2406
2407 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2408 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2409
2410 savedcmd->cmdline_idx = idx;
2411 }
2412
2413 set_cmdline(idx, tsk->comm);
2414
2415 arch_spin_unlock(&trace_cmdline_lock);
2416
2417 return 1;
2418 }
2419
2420 static void __trace_find_cmdline(int pid, char comm[])
2421 {
2422 unsigned map;
2423
2424 if (!pid) {
2425 strcpy(comm, "<idle>");
2426 return;
2427 }
2428
2429 if (WARN_ON_ONCE(pid < 0)) {
2430 strcpy(comm, "<XXX>");
2431 return;
2432 }
2433
2434 if (pid > PID_MAX_DEFAULT) {
2435 strcpy(comm, "<...>");
2436 return;
2437 }
2438
2439 map = savedcmd->map_pid_to_cmdline[pid];
2440 if (map != NO_CMDLINE_MAP)
2441 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2442 else
2443 strcpy(comm, "<...>");
2444 }
2445
2446 void trace_find_cmdline(int pid, char comm[])
2447 {
2448 preempt_disable();
2449 arch_spin_lock(&trace_cmdline_lock);
2450
2451 __trace_find_cmdline(pid, comm);
2452
2453 arch_spin_unlock(&trace_cmdline_lock);
2454 preempt_enable();
2455 }
2456
2457 int trace_find_tgid(int pid)
2458 {
2459 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2460 return 0;
2461
2462 return tgid_map[pid];
2463 }
2464
2465 static int trace_save_tgid(struct task_struct *tsk)
2466 {
2467 /* treat recording of idle task as a success */
2468 if (!tsk->pid)
2469 return 1;
2470
2471 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2472 return 0;
2473
2474 tgid_map[tsk->pid] = tsk->tgid;
2475 return 1;
2476 }
2477
2478 static bool tracing_record_taskinfo_skip(int flags)
2479 {
2480 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2481 return true;
2482 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2483 return true;
2484 if (!__this_cpu_read(trace_taskinfo_save))
2485 return true;
2486 return false;
2487 }
2488
2489 /**
2490 * tracing_record_taskinfo - record the task info of a task
2491 *
2492 * @task: task to record
2493 * @flags: TRACE_RECORD_CMDLINE for recording comm
2494 * TRACE_RECORD_TGID for recording tgid
2495 */
2496 void tracing_record_taskinfo(struct task_struct *task, int flags)
2497 {
2498 bool done;
2499
2500 if (tracing_record_taskinfo_skip(flags))
2501 return;
2502
2503 /*
2504 * Record as much task information as possible. If some fail, continue
2505 * to try to record the others.
2506 */
2507 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2508 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2509
2510 /* If recording any information failed, retry again soon. */
2511 if (!done)
2512 return;
2513
2514 __this_cpu_write(trace_taskinfo_save, false);
2515 }
2516
2517 /**
2518 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2519 *
2520 * @prev: previous task during sched_switch
2521 * @next: next task during sched_switch
2522 * @flags: TRACE_RECORD_CMDLINE for recording comm
2523 * TRACE_RECORD_TGID for recording tgid
2524 */
2525 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2526 struct task_struct *next, int flags)
2527 {
2528 bool done;
2529
2530 if (tracing_record_taskinfo_skip(flags))
2531 return;
2532
2533 /*
2534 * Record as much task information as possible. If some fail, continue
2535 * to try to record the others.
2536 */
2537 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2538 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2539 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2540 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2541
2542 /* If recording any information failed, retry again soon. */
2543 if (!done)
2544 return;
2545
2546 __this_cpu_write(trace_taskinfo_save, false);
2547 }
2548
2549 /* Helpers to record a specific task information */
2550 void tracing_record_cmdline(struct task_struct *task)
2551 {
2552 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2553 }
2554
2555 void tracing_record_tgid(struct task_struct *task)
2556 {
2557 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2558 }
2559
2560 /*
2561 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2562 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2563 * simplifies those functions and keeps them in sync.
2564 */
2565 enum print_line_t trace_handle_return(struct trace_seq *s)
2566 {
2567 return trace_seq_has_overflowed(s) ?
2568 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2569 }
2570 EXPORT_SYMBOL_GPL(trace_handle_return);
2571
2572 void
2573 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2574 unsigned long flags, int pc)
2575 {
2576 struct task_struct *tsk = current;
2577
2578 entry->preempt_count = pc & 0xff;
2579 entry->pid = (tsk) ? tsk->pid : 0;
2580 entry->type = type;
2581 entry->flags =
2582 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2583 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2584 #else
2585 TRACE_FLAG_IRQS_NOSUPPORT |
2586 #endif
2587 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2588 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2589 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2590 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2591 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2592 }
2593 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2594
2595 struct ring_buffer_event *
2596 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2597 int type,
2598 unsigned long len,
2599 unsigned long flags, int pc)
2600 {
2601 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2602 }
2603
2604 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2605 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2606 static int trace_buffered_event_ref;
2607
2608 /**
2609 * trace_buffered_event_enable - enable buffering events
2610 *
2611 * When events are being filtered, it is quicker to use a temporary
2612 * buffer to write the event data into if there's a likely chance
2613 * that it will not be committed. The discard of the ring buffer
2614 * is not as fast as committing, and is much slower than copying
2615 * a commit.
2616 *
2617 * When an event is to be filtered, allocate per cpu buffers to
2618 * write the event data into, and if the event is filtered and discarded
2619 * it is simply dropped, otherwise, the entire data is to be committed
2620 * in one shot.
2621 */
2622 void trace_buffered_event_enable(void)
2623 {
2624 struct ring_buffer_event *event;
2625 struct page *page;
2626 int cpu;
2627
2628 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2629
2630 if (trace_buffered_event_ref++)
2631 return;
2632
2633 for_each_tracing_cpu(cpu) {
2634 page = alloc_pages_node(cpu_to_node(cpu),
2635 GFP_KERNEL | __GFP_NORETRY, 0);
2636 if (!page)
2637 goto failed;
2638
2639 event = page_address(page);
2640 memset(event, 0, sizeof(*event));
2641
2642 per_cpu(trace_buffered_event, cpu) = event;
2643
2644 preempt_disable();
2645 if (cpu == smp_processor_id() &&
2646 this_cpu_read(trace_buffered_event) !=
2647 per_cpu(trace_buffered_event, cpu))
2648 WARN_ON_ONCE(1);
2649 preempt_enable();
2650 }
2651
2652 return;
2653 failed:
2654 trace_buffered_event_disable();
2655 }
2656
2657 static void enable_trace_buffered_event(void *data)
2658 {
2659 /* Probably not needed, but do it anyway */
2660 smp_rmb();
2661 this_cpu_dec(trace_buffered_event_cnt);
2662 }
2663
2664 static void disable_trace_buffered_event(void *data)
2665 {
2666 this_cpu_inc(trace_buffered_event_cnt);
2667 }
2668
2669 /**
2670 * trace_buffered_event_disable - disable buffering events
2671 *
2672 * When a filter is removed, it is faster to not use the buffered
2673 * events, and to commit directly into the ring buffer. Free up
2674 * the temp buffers when there are no more users. This requires
2675 * special synchronization with current events.
2676 */
2677 void trace_buffered_event_disable(void)
2678 {
2679 int cpu;
2680
2681 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2682
2683 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2684 return;
2685
2686 if (--trace_buffered_event_ref)
2687 return;
2688
2689 preempt_disable();
2690 /* For each CPU, set the buffer as used. */
2691 smp_call_function_many(tracing_buffer_mask,
2692 disable_trace_buffered_event, NULL, 1);
2693 preempt_enable();
2694
2695 /* Wait for all current users to finish */
2696 synchronize_rcu();
2697
2698 for_each_tracing_cpu(cpu) {
2699 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2700 per_cpu(trace_buffered_event, cpu) = NULL;
2701 }
2702 /*
2703 * Make sure trace_buffered_event is NULL before clearing
2704 * trace_buffered_event_cnt.
2705 */
2706 smp_wmb();
2707
2708 preempt_disable();
2709 /* Do the work on each cpu */
2710 smp_call_function_many(tracing_buffer_mask,
2711 enable_trace_buffered_event, NULL, 1);
2712 preempt_enable();
2713 }
2714
2715 static struct trace_buffer *temp_buffer;
2716
2717 struct ring_buffer_event *
2718 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2719 struct trace_event_file *trace_file,
2720 int type, unsigned long len,
2721 unsigned long flags, int pc)
2722 {
2723 struct ring_buffer_event *entry;
2724 int val;
2725
2726 *current_rb = trace_file->tr->array_buffer.buffer;
2727
2728 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2729 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2730 (entry = this_cpu_read(trace_buffered_event))) {
2731 /* Try to use the per cpu buffer first */
2732 val = this_cpu_inc_return(trace_buffered_event_cnt);
2733 if (val == 1) {
2734 trace_event_setup(entry, type, flags, pc);
2735 entry->array[0] = len;
2736 return entry;
2737 }
2738 this_cpu_dec(trace_buffered_event_cnt);
2739 }
2740
2741 entry = __trace_buffer_lock_reserve(*current_rb,
2742 type, len, flags, pc);
2743 /*
2744 * If tracing is off, but we have triggers enabled
2745 * we still need to look at the event data. Use the temp_buffer
2746 * to store the trace event for the tigger to use. It's recusive
2747 * safe and will not be recorded anywhere.
2748 */
2749 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2750 *current_rb = temp_buffer;
2751 entry = __trace_buffer_lock_reserve(*current_rb,
2752 type, len, flags, pc);
2753 }
2754 return entry;
2755 }
2756 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2757
2758 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2759 static DEFINE_MUTEX(tracepoint_printk_mutex);
2760
2761 static void output_printk(struct trace_event_buffer *fbuffer)
2762 {
2763 struct trace_event_call *event_call;
2764 struct trace_event_file *file;
2765 struct trace_event *event;
2766 unsigned long flags;
2767 struct trace_iterator *iter = tracepoint_print_iter;
2768
2769 /* We should never get here if iter is NULL */
2770 if (WARN_ON_ONCE(!iter))
2771 return;
2772
2773 event_call = fbuffer->trace_file->event_call;
2774 if (!event_call || !event_call->event.funcs ||
2775 !event_call->event.funcs->trace)
2776 return;
2777
2778 file = fbuffer->trace_file;
2779 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2780 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2781 !filter_match_preds(file->filter, fbuffer->entry)))
2782 return;
2783
2784 event = &fbuffer->trace_file->event_call->event;
2785
2786 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2787 trace_seq_init(&iter->seq);
2788 iter->ent = fbuffer->entry;
2789 event_call->event.funcs->trace(iter, 0, event);
2790 trace_seq_putc(&iter->seq, 0);
2791 printk("%s", iter->seq.buffer);
2792
2793 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2794 }
2795
2796 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2797 void *buffer, size_t *lenp,
2798 loff_t *ppos)
2799 {
2800 int save_tracepoint_printk;
2801 int ret;
2802
2803 mutex_lock(&tracepoint_printk_mutex);
2804 save_tracepoint_printk = tracepoint_printk;
2805
2806 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2807
2808 /*
2809 * This will force exiting early, as tracepoint_printk
2810 * is always zero when tracepoint_printk_iter is not allocated
2811 */
2812 if (!tracepoint_print_iter)
2813 tracepoint_printk = 0;
2814
2815 if (save_tracepoint_printk == tracepoint_printk)
2816 goto out;
2817
2818 if (tracepoint_printk)
2819 static_key_enable(&tracepoint_printk_key.key);
2820 else
2821 static_key_disable(&tracepoint_printk_key.key);
2822
2823 out:
2824 mutex_unlock(&tracepoint_printk_mutex);
2825
2826 return ret;
2827 }
2828
2829 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2830 {
2831 if (static_key_false(&tracepoint_printk_key.key))
2832 output_printk(fbuffer);
2833
2834 if (static_branch_unlikely(&trace_event_exports_enabled))
2835 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2836 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2837 fbuffer->event, fbuffer->entry,
2838 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2839 }
2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2841
2842 /*
2843 * Skip 3:
2844 *
2845 * trace_buffer_unlock_commit_regs()
2846 * trace_event_buffer_commit()
2847 * trace_event_raw_event_xxx()
2848 */
2849 # define STACK_SKIP 3
2850
2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2852 struct trace_buffer *buffer,
2853 struct ring_buffer_event *event,
2854 unsigned long flags, int pc,
2855 struct pt_regs *regs)
2856 {
2857 __buffer_unlock_commit(buffer, event);
2858
2859 /*
2860 * If regs is not set, then skip the necessary functions.
2861 * Note, we can still get here via blktrace, wakeup tracer
2862 * and mmiotrace, but that's ok if they lose a function or
2863 * two. They are not that meaningful.
2864 */
2865 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2866 ftrace_trace_userstack(buffer, flags, pc);
2867 }
2868
2869 /*
2870 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2871 */
2872 void
2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2874 struct ring_buffer_event *event)
2875 {
2876 __buffer_unlock_commit(buffer, event);
2877 }
2878
2879 void
2880 trace_function(struct trace_array *tr,
2881 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2882 int pc)
2883 {
2884 struct trace_event_call *call = &event_function;
2885 struct trace_buffer *buffer = tr->array_buffer.buffer;
2886 struct ring_buffer_event *event;
2887 struct ftrace_entry *entry;
2888
2889 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2890 flags, pc);
2891 if (!event)
2892 return;
2893 entry = ring_buffer_event_data(event);
2894 entry->ip = ip;
2895 entry->parent_ip = parent_ip;
2896
2897 if (!call_filter_check_discard(call, entry, buffer, event)) {
2898 if (static_branch_unlikely(&trace_function_exports_enabled))
2899 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2900 __buffer_unlock_commit(buffer, event);
2901 }
2902 }
2903
2904 #ifdef CONFIG_STACKTRACE
2905
2906 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2907 #define FTRACE_KSTACK_NESTING 4
2908
2909 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2910
2911 struct ftrace_stack {
2912 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2913 };
2914
2915
2916 struct ftrace_stacks {
2917 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2918 };
2919
2920 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2921 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2922
2923 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2924 unsigned long flags,
2925 int skip, int pc, struct pt_regs *regs)
2926 {
2927 struct trace_event_call *call = &event_kernel_stack;
2928 struct ring_buffer_event *event;
2929 unsigned int size, nr_entries;
2930 struct ftrace_stack *fstack;
2931 struct stack_entry *entry;
2932 int stackidx;
2933
2934 /*
2935 * Add one, for this function and the call to save_stack_trace()
2936 * If regs is set, then these functions will not be in the way.
2937 */
2938 #ifndef CONFIG_UNWINDER_ORC
2939 if (!regs)
2940 skip++;
2941 #endif
2942
2943 preempt_disable_notrace();
2944
2945 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2946
2947 /* This should never happen. If it does, yell once and skip */
2948 if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
2949 goto out;
2950
2951 /*
2952 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2953 * interrupt will either see the value pre increment or post
2954 * increment. If the interrupt happens pre increment it will have
2955 * restored the counter when it returns. We just need a barrier to
2956 * keep gcc from moving things around.
2957 */
2958 barrier();
2959
2960 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2961 size = ARRAY_SIZE(fstack->calls);
2962
2963 if (regs) {
2964 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2965 size, skip);
2966 } else {
2967 nr_entries = stack_trace_save(fstack->calls, size, skip);
2968 }
2969
2970 size = nr_entries * sizeof(unsigned long);
2971 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2972 sizeof(*entry) + size, flags, pc);
2973 if (!event)
2974 goto out;
2975 entry = ring_buffer_event_data(event);
2976
2977 memcpy(&entry->caller, fstack->calls, size);
2978 entry->size = nr_entries;
2979
2980 if (!call_filter_check_discard(call, entry, buffer, event))
2981 __buffer_unlock_commit(buffer, event);
2982
2983 out:
2984 /* Again, don't let gcc optimize things here */
2985 barrier();
2986 __this_cpu_dec(ftrace_stack_reserve);
2987 preempt_enable_notrace();
2988
2989 }
2990
2991 static inline void ftrace_trace_stack(struct trace_array *tr,
2992 struct trace_buffer *buffer,
2993 unsigned long flags,
2994 int skip, int pc, struct pt_regs *regs)
2995 {
2996 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2997 return;
2998
2999 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
3000 }
3001
3002 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3003 int pc)
3004 {
3005 struct trace_buffer *buffer = tr->array_buffer.buffer;
3006
3007 if (rcu_is_watching()) {
3008 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3009 return;
3010 }
3011
3012 /*
3013 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3014 * but if the above rcu_is_watching() failed, then the NMI
3015 * triggered someplace critical, and rcu_irq_enter() should
3016 * not be called from NMI.
3017 */
3018 if (unlikely(in_nmi()))
3019 return;
3020
3021 rcu_irq_enter_irqson();
3022 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3023 rcu_irq_exit_irqson();
3024 }
3025
3026 /**
3027 * trace_dump_stack - record a stack back trace in the trace buffer
3028 * @skip: Number of functions to skip (helper handlers)
3029 */
3030 void trace_dump_stack(int skip)
3031 {
3032 unsigned long flags;
3033
3034 if (tracing_disabled || tracing_selftest_running)
3035 return;
3036
3037 local_save_flags(flags);
3038
3039 #ifndef CONFIG_UNWINDER_ORC
3040 /* Skip 1 to skip this function. */
3041 skip++;
3042 #endif
3043 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3044 flags, skip, preempt_count(), NULL);
3045 }
3046 EXPORT_SYMBOL_GPL(trace_dump_stack);
3047
3048 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3049 static DEFINE_PER_CPU(int, user_stack_count);
3050
3051 static void
3052 ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
3053 {
3054 struct trace_event_call *call = &event_user_stack;
3055 struct ring_buffer_event *event;
3056 struct userstack_entry *entry;
3057
3058 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
3059 return;
3060
3061 /*
3062 * NMIs can not handle page faults, even with fix ups.
3063 * The save user stack can (and often does) fault.
3064 */
3065 if (unlikely(in_nmi()))
3066 return;
3067
3068 /*
3069 * prevent recursion, since the user stack tracing may
3070 * trigger other kernel events.
3071 */
3072 preempt_disable();
3073 if (__this_cpu_read(user_stack_count))
3074 goto out;
3075
3076 __this_cpu_inc(user_stack_count);
3077
3078 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3079 sizeof(*entry), flags, pc);
3080 if (!event)
3081 goto out_drop_count;
3082 entry = ring_buffer_event_data(event);
3083
3084 entry->tgid = current->tgid;
3085 memset(&entry->caller, 0, sizeof(entry->caller));
3086
3087 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3088 if (!call_filter_check_discard(call, entry, buffer, event))
3089 __buffer_unlock_commit(buffer, event);
3090
3091 out_drop_count:
3092 __this_cpu_dec(user_stack_count);
3093 out:
3094 preempt_enable();
3095 }
3096 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3097 static void ftrace_trace_userstack(struct trace_buffer *buffer,
3098 unsigned long flags, int pc)
3099 {
3100 }
3101 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3102
3103 #endif /* CONFIG_STACKTRACE */
3104
3105 /* created for use with alloc_percpu */
3106 struct trace_buffer_struct {
3107 int nesting;
3108 char buffer[4][TRACE_BUF_SIZE];
3109 };
3110
3111 static struct trace_buffer_struct *trace_percpu_buffer;
3112
3113 /*
3114 * Thise allows for lockless recording. If we're nested too deeply, then
3115 * this returns NULL.
3116 */
3117 static char *get_trace_buf(void)
3118 {
3119 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3120
3121 if (!buffer || buffer->nesting >= 4)
3122 return NULL;
3123
3124 buffer->nesting++;
3125
3126 /* Interrupts must see nesting incremented before we use the buffer */
3127 barrier();
3128 return &buffer->buffer[buffer->nesting][0];
3129 }
3130
3131 static void put_trace_buf(void)
3132 {
3133 /* Don't let the decrement of nesting leak before this */
3134 barrier();
3135 this_cpu_dec(trace_percpu_buffer->nesting);
3136 }
3137
3138 static int alloc_percpu_trace_buffer(void)
3139 {
3140 struct trace_buffer_struct *buffers;
3141
3142 if (trace_percpu_buffer)
3143 return 0;
3144
3145 buffers = alloc_percpu(struct trace_buffer_struct);
3146 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3147 return -ENOMEM;
3148
3149 trace_percpu_buffer = buffers;
3150 return 0;
3151 }
3152
3153 static int buffers_allocated;
3154
3155 void trace_printk_init_buffers(void)
3156 {
3157 if (buffers_allocated)
3158 return;
3159
3160 if (alloc_percpu_trace_buffer())
3161 return;
3162
3163 /* trace_printk() is for debug use only. Don't use it in production. */
3164
3165 pr_warn("\n");
3166 pr_warn("**********************************************************\n");
3167 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3168 pr_warn("** **\n");
3169 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3170 pr_warn("** **\n");
3171 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3172 pr_warn("** unsafe for production use. **\n");
3173 pr_warn("** **\n");
3174 pr_warn("** If you see this message and you are not debugging **\n");
3175 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3176 pr_warn("** **\n");
3177 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3178 pr_warn("**********************************************************\n");
3179
3180 /* Expand the buffers to set size */
3181 tracing_update_buffers();
3182
3183 buffers_allocated = 1;
3184
3185 /*
3186 * trace_printk_init_buffers() can be called by modules.
3187 * If that happens, then we need to start cmdline recording
3188 * directly here. If the global_trace.buffer is already
3189 * allocated here, then this was called by module code.
3190 */
3191 if (global_trace.array_buffer.buffer)
3192 tracing_start_cmdline_record();
3193 }
3194 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3195
3196 void trace_printk_start_comm(void)
3197 {
3198 /* Start tracing comms if trace printk is set */
3199 if (!buffers_allocated)
3200 return;
3201 tracing_start_cmdline_record();
3202 }
3203
3204 static void trace_printk_start_stop_comm(int enabled)
3205 {
3206 if (!buffers_allocated)
3207 return;
3208
3209 if (enabled)
3210 tracing_start_cmdline_record();
3211 else
3212 tracing_stop_cmdline_record();
3213 }
3214
3215 /**
3216 * trace_vbprintk - write binary msg to tracing buffer
3217 * @ip: The address of the caller
3218 * @fmt: The string format to write to the buffer
3219 * @args: Arguments for @fmt
3220 */
3221 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3222 {
3223 struct trace_event_call *call = &event_bprint;
3224 struct ring_buffer_event *event;
3225 struct trace_buffer *buffer;
3226 struct trace_array *tr = &global_trace;
3227 struct bprint_entry *entry;
3228 unsigned long flags;
3229 char *tbuffer;
3230 int len = 0, size, pc;
3231
3232 if (unlikely(tracing_selftest_running || tracing_disabled))
3233 return 0;
3234
3235 /* Don't pollute graph traces with trace_vprintk internals */
3236 pause_graph_tracing();
3237
3238 pc = preempt_count();
3239 preempt_disable_notrace();
3240
3241 tbuffer = get_trace_buf();
3242 if (!tbuffer) {
3243 len = 0;
3244 goto out_nobuffer;
3245 }
3246
3247 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3248
3249 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3250 goto out_put;
3251
3252 local_save_flags(flags);
3253 size = sizeof(*entry) + sizeof(u32) * len;
3254 buffer = tr->array_buffer.buffer;
3255 ring_buffer_nest_start(buffer);
3256 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3257 flags, pc);
3258 if (!event)
3259 goto out;
3260 entry = ring_buffer_event_data(event);
3261 entry->ip = ip;
3262 entry->fmt = fmt;
3263
3264 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3265 if (!call_filter_check_discard(call, entry, buffer, event)) {
3266 __buffer_unlock_commit(buffer, event);
3267 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3268 }
3269
3270 out:
3271 ring_buffer_nest_end(buffer);
3272 out_put:
3273 put_trace_buf();
3274
3275 out_nobuffer:
3276 preempt_enable_notrace();
3277 unpause_graph_tracing();
3278
3279 return len;
3280 }
3281 EXPORT_SYMBOL_GPL(trace_vbprintk);
3282
3283 __printf(3, 0)
3284 static int
3285 __trace_array_vprintk(struct trace_buffer *buffer,
3286 unsigned long ip, const char *fmt, va_list args)
3287 {
3288 struct trace_event_call *call = &event_print;
3289 struct ring_buffer_event *event;
3290 int len = 0, size, pc;
3291 struct print_entry *entry;
3292 unsigned long flags;
3293 char *tbuffer;
3294
3295 if (tracing_disabled || tracing_selftest_running)
3296 return 0;
3297
3298 /* Don't pollute graph traces with trace_vprintk internals */
3299 pause_graph_tracing();
3300
3301 pc = preempt_count();
3302 preempt_disable_notrace();
3303
3304
3305 tbuffer = get_trace_buf();
3306 if (!tbuffer) {
3307 len = 0;
3308 goto out_nobuffer;
3309 }
3310
3311 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3312
3313 local_save_flags(flags);
3314 size = sizeof(*entry) + len + 1;
3315 ring_buffer_nest_start(buffer);
3316 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3317 flags, pc);
3318 if (!event)
3319 goto out;
3320 entry = ring_buffer_event_data(event);
3321 entry->ip = ip;
3322
3323 memcpy(&entry->buf, tbuffer, len + 1);
3324 if (!call_filter_check_discard(call, entry, buffer, event)) {
3325 __buffer_unlock_commit(buffer, event);
3326 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3327 }
3328
3329 out:
3330 ring_buffer_nest_end(buffer);
3331 put_trace_buf();
3332
3333 out_nobuffer:
3334 preempt_enable_notrace();
3335 unpause_graph_tracing();
3336
3337 return len;
3338 }
3339
3340 __printf(3, 0)
3341 int trace_array_vprintk(struct trace_array *tr,
3342 unsigned long ip, const char *fmt, va_list args)
3343 {
3344 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3345 }
3346
3347 /**
3348 * trace_array_printk - Print a message to a specific instance
3349 * @tr: The instance trace_array descriptor
3350 * @ip: The instruction pointer that this is called from.
3351 * @fmt: The format to print (printf format)
3352 *
3353 * If a subsystem sets up its own instance, they have the right to
3354 * printk strings into their tracing instance buffer using this
3355 * function. Note, this function will not write into the top level
3356 * buffer (use trace_printk() for that), as writing into the top level
3357 * buffer should only have events that can be individually disabled.
3358 * trace_printk() is only used for debugging a kernel, and should not
3359 * be ever encorporated in normal use.
3360 *
3361 * trace_array_printk() can be used, as it will not add noise to the
3362 * top level tracing buffer.
3363 *
3364 * Note, trace_array_init_printk() must be called on @tr before this
3365 * can be used.
3366 */
3367 __printf(3, 0)
3368 int trace_array_printk(struct trace_array *tr,
3369 unsigned long ip, const char *fmt, ...)
3370 {
3371 int ret;
3372 va_list ap;
3373
3374 if (!tr)
3375 return -ENOENT;
3376
3377 /* This is only allowed for created instances */
3378 if (tr == &global_trace)
3379 return 0;
3380
3381 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3382 return 0;
3383
3384 va_start(ap, fmt);
3385 ret = trace_array_vprintk(tr, ip, fmt, ap);
3386 va_end(ap);
3387 return ret;
3388 }
3389 EXPORT_SYMBOL_GPL(trace_array_printk);
3390
3391 /**
3392 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3393 * @tr: The trace array to initialize the buffers for
3394 *
3395 * As trace_array_printk() only writes into instances, they are OK to
3396 * have in the kernel (unlike trace_printk()). This needs to be called
3397 * before trace_array_printk() can be used on a trace_array.
3398 */
3399 int trace_array_init_printk(struct trace_array *tr)
3400 {
3401 if (!tr)
3402 return -ENOENT;
3403
3404 /* This is only allowed for created instances */
3405 if (tr == &global_trace)
3406 return -EINVAL;
3407
3408 return alloc_percpu_trace_buffer();
3409 }
3410 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3411
3412 __printf(3, 4)
3413 int trace_array_printk_buf(struct trace_buffer *buffer,
3414 unsigned long ip, const char *fmt, ...)
3415 {
3416 int ret;
3417 va_list ap;
3418
3419 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3420 return 0;
3421
3422 va_start(ap, fmt);
3423 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3424 va_end(ap);
3425 return ret;
3426 }
3427
3428 __printf(2, 0)
3429 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3430 {
3431 return trace_array_vprintk(&global_trace, ip, fmt, args);
3432 }
3433 EXPORT_SYMBOL_GPL(trace_vprintk);
3434
3435 static void trace_iterator_increment(struct trace_iterator *iter)
3436 {
3437 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3438
3439 iter->idx++;
3440 if (buf_iter)
3441 ring_buffer_iter_advance(buf_iter);
3442 }
3443
3444 static struct trace_entry *
3445 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3446 unsigned long *lost_events)
3447 {
3448 struct ring_buffer_event *event;
3449 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3450
3451 if (buf_iter) {
3452 event = ring_buffer_iter_peek(buf_iter, ts);
3453 if (lost_events)
3454 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3455 (unsigned long)-1 : 0;
3456 } else {
3457 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3458 lost_events);
3459 }
3460
3461 if (event) {
3462 iter->ent_size = ring_buffer_event_length(event);
3463 return ring_buffer_event_data(event);
3464 }
3465 iter->ent_size = 0;
3466 return NULL;
3467 }
3468
3469 static struct trace_entry *
3470 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3471 unsigned long *missing_events, u64 *ent_ts)
3472 {
3473 struct trace_buffer *buffer = iter->array_buffer->buffer;
3474 struct trace_entry *ent, *next = NULL;
3475 unsigned long lost_events = 0, next_lost = 0;
3476 int cpu_file = iter->cpu_file;
3477 u64 next_ts = 0, ts;
3478 int next_cpu = -1;
3479 int next_size = 0;
3480 int cpu;
3481
3482 /*
3483 * If we are in a per_cpu trace file, don't bother by iterating over
3484 * all cpu and peek directly.
3485 */
3486 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3487 if (ring_buffer_empty_cpu(buffer, cpu_file))
3488 return NULL;
3489 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3490 if (ent_cpu)
3491 *ent_cpu = cpu_file;
3492
3493 return ent;
3494 }
3495
3496 for_each_tracing_cpu(cpu) {
3497
3498 if (ring_buffer_empty_cpu(buffer, cpu))
3499 continue;
3500
3501 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3502
3503 /*
3504 * Pick the entry with the smallest timestamp:
3505 */
3506 if (ent && (!next || ts < next_ts)) {
3507 next = ent;
3508 next_cpu = cpu;
3509 next_ts = ts;
3510 next_lost = lost_events;
3511 next_size = iter->ent_size;
3512 }
3513 }
3514
3515 iter->ent_size = next_size;
3516
3517 if (ent_cpu)
3518 *ent_cpu = next_cpu;
3519
3520 if (ent_ts)
3521 *ent_ts = next_ts;
3522
3523 if (missing_events)
3524 *missing_events = next_lost;
3525
3526 return next;
3527 }
3528
3529 #define STATIC_TEMP_BUF_SIZE 128
3530 static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3531
3532 /* Find the next real entry, without updating the iterator itself */
3533 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3534 int *ent_cpu, u64 *ent_ts)
3535 {
3536 /* __find_next_entry will reset ent_size */
3537 int ent_size = iter->ent_size;
3538 struct trace_entry *entry;
3539
3540 /*
3541 * If called from ftrace_dump(), then the iter->temp buffer
3542 * will be the static_temp_buf and not created from kmalloc.
3543 * If the entry size is greater than the buffer, we can
3544 * not save it. Just return NULL in that case. This is only
3545 * used to add markers when two consecutive events' time
3546 * stamps have a large delta. See trace_print_lat_context()
3547 */
3548 if (iter->temp == static_temp_buf &&
3549 STATIC_TEMP_BUF_SIZE < ent_size)
3550 return NULL;
3551
3552 /*
3553 * The __find_next_entry() may call peek_next_entry(), which may
3554 * call ring_buffer_peek() that may make the contents of iter->ent
3555 * undefined. Need to copy iter->ent now.
3556 */
3557 if (iter->ent && iter->ent != iter->temp) {
3558 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3559 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3560 kfree(iter->temp);
3561 iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
3562 if (!iter->temp)
3563 return NULL;
3564 }
3565 memcpy(iter->temp, iter->ent, iter->ent_size);
3566 iter->temp_size = iter->ent_size;
3567 iter->ent = iter->temp;
3568 }
3569 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3570 /* Put back the original ent_size */
3571 iter->ent_size = ent_size;
3572
3573 return entry;
3574 }
3575
3576 /* Find the next real entry, and increment the iterator to the next entry */
3577 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3578 {
3579 iter->ent = __find_next_entry(iter, &iter->cpu,
3580 &iter->lost_events, &iter->ts);
3581
3582 if (iter->ent)
3583 trace_iterator_increment(iter);
3584
3585 return iter->ent ? iter : NULL;
3586 }
3587
3588 static void trace_consume(struct trace_iterator *iter)
3589 {
3590 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3591 &iter->lost_events);
3592 }
3593
3594 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3595 {
3596 struct trace_iterator *iter = m->private;
3597 int i = (int)*pos;
3598 void *ent;
3599
3600 WARN_ON_ONCE(iter->leftover);
3601
3602 (*pos)++;
3603
3604 /* can't go backwards */
3605 if (iter->idx > i)
3606 return NULL;
3607
3608 if (iter->idx < 0)
3609 ent = trace_find_next_entry_inc(iter);
3610 else
3611 ent = iter;
3612
3613 while (ent && iter->idx < i)
3614 ent = trace_find_next_entry_inc(iter);
3615
3616 iter->pos = *pos;
3617
3618 return ent;
3619 }
3620
3621 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3622 {
3623 struct ring_buffer_iter *buf_iter;
3624 unsigned long entries = 0;
3625 u64 ts;
3626
3627 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3628
3629 buf_iter = trace_buffer_iter(iter, cpu);
3630 if (!buf_iter)
3631 return;
3632
3633 ring_buffer_iter_reset(buf_iter);
3634
3635 /*
3636 * We could have the case with the max latency tracers
3637 * that a reset never took place on a cpu. This is evident
3638 * by the timestamp being before the start of the buffer.
3639 */
3640 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3641 if (ts >= iter->array_buffer->time_start)
3642 break;
3643 entries++;
3644 ring_buffer_iter_advance(buf_iter);
3645 }
3646
3647 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3648 }
3649
3650 /*
3651 * The current tracer is copied to avoid a global locking
3652 * all around.
3653 */
3654 static void *s_start(struct seq_file *m, loff_t *pos)
3655 {
3656 struct trace_iterator *iter = m->private;
3657 struct trace_array *tr = iter->tr;
3658 int cpu_file = iter->cpu_file;
3659 void *p = NULL;
3660 loff_t l = 0;
3661 int cpu;
3662
3663 /*
3664 * copy the tracer to avoid using a global lock all around.
3665 * iter->trace is a copy of current_trace, the pointer to the
3666 * name may be used instead of a strcmp(), as iter->trace->name
3667 * will point to the same string as current_trace->name.
3668 */
3669 mutex_lock(&trace_types_lock);
3670 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3671 *iter->trace = *tr->current_trace;
3672 mutex_unlock(&trace_types_lock);
3673
3674 #ifdef CONFIG_TRACER_MAX_TRACE
3675 if (iter->snapshot && iter->trace->use_max_tr)
3676 return ERR_PTR(-EBUSY);
3677 #endif
3678
3679 if (!iter->snapshot)
3680 atomic_inc(&trace_record_taskinfo_disabled);
3681
3682 if (*pos != iter->pos) {
3683 iter->ent = NULL;
3684 iter->cpu = 0;
3685 iter->idx = -1;
3686
3687 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3688 for_each_tracing_cpu(cpu)
3689 tracing_iter_reset(iter, cpu);
3690 } else
3691 tracing_iter_reset(iter, cpu_file);
3692
3693 iter->leftover = 0;
3694 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3695 ;
3696
3697 } else {
3698 /*
3699 * If we overflowed the seq_file before, then we want
3700 * to just reuse the trace_seq buffer again.
3701 */
3702 if (iter->leftover)
3703 p = iter;
3704 else {
3705 l = *pos - 1;
3706 p = s_next(m, p, &l);
3707 }
3708 }
3709
3710 trace_event_read_lock();
3711 trace_access_lock(cpu_file);
3712 return p;
3713 }
3714
3715 static void s_stop(struct seq_file *m, void *p)
3716 {
3717 struct trace_iterator *iter = m->private;
3718
3719 #ifdef CONFIG_TRACER_MAX_TRACE
3720 if (iter->snapshot && iter->trace->use_max_tr)
3721 return;
3722 #endif
3723
3724 if (!iter->snapshot)
3725 atomic_dec(&trace_record_taskinfo_disabled);
3726
3727 trace_access_unlock(iter->cpu_file);
3728 trace_event_read_unlock();
3729 }
3730
3731 static void
3732 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3733 unsigned long *entries, int cpu)
3734 {
3735 unsigned long count;
3736
3737 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3738 /*
3739 * If this buffer has skipped entries, then we hold all
3740 * entries for the trace and we need to ignore the
3741 * ones before the time stamp.
3742 */
3743 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3744 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3745 /* total is the same as the entries */
3746 *total = count;
3747 } else
3748 *total = count +
3749 ring_buffer_overrun_cpu(buf->buffer, cpu);
3750 *entries = count;
3751 }
3752
3753 static void
3754 get_total_entries(struct array_buffer *buf,
3755 unsigned long *total, unsigned long *entries)
3756 {
3757 unsigned long t, e;
3758 int cpu;
3759
3760 *total = 0;
3761 *entries = 0;
3762
3763 for_each_tracing_cpu(cpu) {
3764 get_total_entries_cpu(buf, &t, &e, cpu);
3765 *total += t;
3766 *entries += e;
3767 }
3768 }
3769
3770 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3771 {
3772 unsigned long total, entries;
3773
3774 if (!tr)
3775 tr = &global_trace;
3776
3777 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3778
3779 return entries;
3780 }
3781
3782 unsigned long trace_total_entries(struct trace_array *tr)
3783 {
3784 unsigned long total, entries;
3785
3786 if (!tr)
3787 tr = &global_trace;
3788
3789 get_total_entries(&tr->array_buffer, &total, &entries);
3790
3791 return entries;
3792 }
3793
3794 static void print_lat_help_header(struct seq_file *m)
3795 {
3796 seq_puts(m, "# _------=> CPU# \n"
3797 "# / _-----=> irqs-off \n"
3798 "# | / _----=> need-resched \n"
3799 "# || / _---=> hardirq/softirq \n"
3800 "# ||| / _--=> preempt-depth \n"
3801 "# |||| / delay \n"
3802 "# cmd pid ||||| time | caller \n"
3803 "# \\ / ||||| \\ | / \n");
3804 }
3805
3806 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3807 {
3808 unsigned long total;
3809 unsigned long entries;
3810
3811 get_total_entries(buf, &total, &entries);
3812 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3813 entries, total, num_online_cpus());
3814 seq_puts(m, "#\n");
3815 }
3816
3817 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3818 unsigned int flags)
3819 {
3820 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3821
3822 print_event_info(buf, m);
3823
3824 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3825 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3826 }
3827
3828 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3829 unsigned int flags)
3830 {
3831 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3832 const char *space = " ";
3833 int prec = tgid ? 10 : 2;
3834
3835 print_event_info(buf, m);
3836
3837 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3838 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3839 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3840 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3841 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3842 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3843 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3844 }
3845
3846 void
3847 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3848 {
3849 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3850 struct array_buffer *buf = iter->array_buffer;
3851 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3852 struct tracer *type = iter->trace;
3853 unsigned long entries;
3854 unsigned long total;
3855 const char *name = "preemption";
3856
3857 name = type->name;
3858
3859 get_total_entries(buf, &total, &entries);
3860
3861 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3862 name, UTS_RELEASE);
3863 seq_puts(m, "# -----------------------------------"
3864 "---------------------------------\n");
3865 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3866 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3867 nsecs_to_usecs(data->saved_latency),
3868 entries,
3869 total,
3870 buf->cpu,
3871 #if defined(CONFIG_PREEMPT_NONE)
3872 "server",
3873 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3874 "desktop",
3875 #elif defined(CONFIG_PREEMPT)
3876 "preempt",
3877 #elif defined(CONFIG_PREEMPT_RT)
3878 "preempt_rt",
3879 #else
3880 "unknown",
3881 #endif
3882 /* These are reserved for later use */
3883 0, 0, 0, 0);
3884 #ifdef CONFIG_SMP
3885 seq_printf(m, " #P:%d)\n", num_online_cpus());
3886 #else
3887 seq_puts(m, ")\n");
3888 #endif
3889 seq_puts(m, "# -----------------\n");
3890 seq_printf(m, "# | task: %.16s-%d "
3891 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3892 data->comm, data->pid,
3893 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3894 data->policy, data->rt_priority);
3895 seq_puts(m, "# -----------------\n");
3896
3897 if (data->critical_start) {
3898 seq_puts(m, "# => started at: ");
3899 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3900 trace_print_seq(m, &iter->seq);
3901 seq_puts(m, "\n# => ended at: ");
3902 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3903 trace_print_seq(m, &iter->seq);
3904 seq_puts(m, "\n#\n");
3905 }
3906
3907 seq_puts(m, "#\n");
3908 }
3909
3910 static void test_cpu_buff_start(struct trace_iterator *iter)
3911 {
3912 struct trace_seq *s = &iter->seq;
3913 struct trace_array *tr = iter->tr;
3914
3915 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3916 return;
3917
3918 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3919 return;
3920
3921 if (cpumask_available(iter->started) &&
3922 cpumask_test_cpu(iter->cpu, iter->started))
3923 return;
3924
3925 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3926 return;
3927
3928 if (cpumask_available(iter->started))
3929 cpumask_set_cpu(iter->cpu, iter->started);
3930
3931 /* Don't print started cpu buffer for the first entry of the trace */
3932 if (iter->idx > 1)
3933 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3934 iter->cpu);
3935 }
3936
3937 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3938 {
3939 struct trace_array *tr = iter->tr;
3940 struct trace_seq *s = &iter->seq;
3941 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3942 struct trace_entry *entry;
3943 struct trace_event *event;
3944
3945 entry = iter->ent;
3946
3947 test_cpu_buff_start(iter);
3948
3949 event = ftrace_find_event(entry->type);
3950
3951 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3952 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3953 trace_print_lat_context(iter);
3954 else
3955 trace_print_context(iter);
3956 }
3957
3958 if (trace_seq_has_overflowed(s))
3959 return TRACE_TYPE_PARTIAL_LINE;
3960
3961 if (event)
3962 return event->funcs->trace(iter, sym_flags, event);
3963
3964 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3965
3966 return trace_handle_return(s);
3967 }
3968
3969 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3970 {
3971 struct trace_array *tr = iter->tr;
3972 struct trace_seq *s = &iter->seq;
3973 struct trace_entry *entry;
3974 struct trace_event *event;
3975
3976 entry = iter->ent;
3977
3978 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3979 trace_seq_printf(s, "%d %d %llu ",
3980 entry->pid, iter->cpu, iter->ts);
3981
3982 if (trace_seq_has_overflowed(s))
3983 return TRACE_TYPE_PARTIAL_LINE;
3984
3985 event = ftrace_find_event(entry->type);
3986 if (event)
3987 return event->funcs->raw(iter, 0, event);
3988
3989 trace_seq_printf(s, "%d ?\n", entry->type);
3990
3991 return trace_handle_return(s);
3992 }
3993
3994 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3995 {
3996 struct trace_array *tr = iter->tr;
3997 struct trace_seq *s = &iter->seq;
3998 unsigned char newline = '\n';
3999 struct trace_entry *entry;
4000 struct trace_event *event;
4001
4002 entry = iter->ent;
4003
4004 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4005 SEQ_PUT_HEX_FIELD(s, entry->pid);
4006 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4007 SEQ_PUT_HEX_FIELD(s, iter->ts);
4008 if (trace_seq_has_overflowed(s))
4009 return TRACE_TYPE_PARTIAL_LINE;
4010 }
4011
4012 event = ftrace_find_event(entry->type);
4013 if (event) {
4014 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4015 if (ret != TRACE_TYPE_HANDLED)
4016 return ret;
4017 }
4018
4019 SEQ_PUT_FIELD(s, newline);
4020
4021 return trace_handle_return(s);
4022 }
4023
4024 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4025 {
4026 struct trace_array *tr = iter->tr;
4027 struct trace_seq *s = &iter->seq;
4028 struct trace_entry *entry;
4029 struct trace_event *event;
4030
4031 entry = iter->ent;
4032
4033 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4034 SEQ_PUT_FIELD(s, entry->pid);
4035 SEQ_PUT_FIELD(s, iter->cpu);
4036 SEQ_PUT_FIELD(s, iter->ts);
4037 if (trace_seq_has_overflowed(s))
4038 return TRACE_TYPE_PARTIAL_LINE;
4039 }
4040
4041 event = ftrace_find_event(entry->type);
4042 return event ? event->funcs->binary(iter, 0, event) :
4043 TRACE_TYPE_HANDLED;
4044 }
4045
4046 int trace_empty(struct trace_iterator *iter)
4047 {
4048 struct ring_buffer_iter *buf_iter;
4049 int cpu;
4050
4051 /* If we are looking at one CPU buffer, only check that one */
4052 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4053 cpu = iter->cpu_file;
4054 buf_iter = trace_buffer_iter(iter, cpu);
4055 if (buf_iter) {
4056 if (!ring_buffer_iter_empty(buf_iter))
4057 return 0;
4058 } else {
4059 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4060 return 0;
4061 }
4062 return 1;
4063 }
4064
4065 for_each_tracing_cpu(cpu) {
4066 buf_iter = trace_buffer_iter(iter, cpu);
4067 if (buf_iter) {
4068 if (!ring_buffer_iter_empty(buf_iter))
4069 return 0;
4070 } else {
4071 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4072 return 0;
4073 }
4074 }
4075
4076 return 1;
4077 }
4078
4079 /* Called with trace_event_read_lock() held. */
4080 enum print_line_t print_trace_line(struct trace_iterator *iter)
4081 {
4082 struct trace_array *tr = iter->tr;
4083 unsigned long trace_flags = tr->trace_flags;
4084 enum print_line_t ret;
4085
4086 if (iter->lost_events) {
4087 if (iter->lost_events == (unsigned long)-1)
4088 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4089 iter->cpu);
4090 else
4091 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4092 iter->cpu, iter->lost_events);
4093 if (trace_seq_has_overflowed(&iter->seq))
4094 return TRACE_TYPE_PARTIAL_LINE;
4095 }
4096
4097 if (iter->trace && iter->trace->print_line) {
4098 ret = iter->trace->print_line(iter);
4099 if (ret != TRACE_TYPE_UNHANDLED)
4100 return ret;
4101 }
4102
4103 if (iter->ent->type == TRACE_BPUTS &&
4104 trace_flags & TRACE_ITER_PRINTK &&
4105 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4106 return trace_print_bputs_msg_only(iter);
4107
4108 if (iter->ent->type == TRACE_BPRINT &&
4109 trace_flags & TRACE_ITER_PRINTK &&
4110 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4111 return trace_print_bprintk_msg_only(iter);
4112
4113 if (iter->ent->type == TRACE_PRINT &&
4114 trace_flags & TRACE_ITER_PRINTK &&
4115 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4116 return trace_print_printk_msg_only(iter);
4117
4118 if (trace_flags & TRACE_ITER_BIN)
4119 return print_bin_fmt(iter);
4120
4121 if (trace_flags & TRACE_ITER_HEX)
4122 return print_hex_fmt(iter);
4123
4124 if (trace_flags & TRACE_ITER_RAW)
4125 return print_raw_fmt(iter);
4126
4127 return print_trace_fmt(iter);
4128 }
4129
4130 void trace_latency_header(struct seq_file *m)
4131 {
4132 struct trace_iterator *iter = m->private;
4133 struct trace_array *tr = iter->tr;
4134
4135 /* print nothing if the buffers are empty */
4136 if (trace_empty(iter))
4137 return;
4138
4139 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4140 print_trace_header(m, iter);
4141
4142 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4143 print_lat_help_header(m);
4144 }
4145
4146 void trace_default_header(struct seq_file *m)
4147 {
4148 struct trace_iterator *iter = m->private;
4149 struct trace_array *tr = iter->tr;
4150 unsigned long trace_flags = tr->trace_flags;
4151
4152 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4153 return;
4154
4155 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4156 /* print nothing if the buffers are empty */
4157 if (trace_empty(iter))
4158 return;
4159 print_trace_header(m, iter);
4160 if (!(trace_flags & TRACE_ITER_VERBOSE))
4161 print_lat_help_header(m);
4162 } else {
4163 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4164 if (trace_flags & TRACE_ITER_IRQ_INFO)
4165 print_func_help_header_irq(iter->array_buffer,
4166 m, trace_flags);
4167 else
4168 print_func_help_header(iter->array_buffer, m,
4169 trace_flags);
4170 }
4171 }
4172 }
4173
4174 static void test_ftrace_alive(struct seq_file *m)
4175 {
4176 if (!ftrace_is_dead())
4177 return;
4178 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4179 "# MAY BE MISSING FUNCTION EVENTS\n");
4180 }
4181
4182 #ifdef CONFIG_TRACER_MAX_TRACE
4183 static void show_snapshot_main_help(struct seq_file *m)
4184 {
4185 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4186 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4187 "# Takes a snapshot of the main buffer.\n"
4188 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4189 "# (Doesn't have to be '2' works with any number that\n"
4190 "# is not a '0' or '1')\n");
4191 }
4192
4193 static void show_snapshot_percpu_help(struct seq_file *m)
4194 {
4195 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4196 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4197 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4198 "# Takes a snapshot of the main buffer for this cpu.\n");
4199 #else
4200 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4201 "# Must use main snapshot file to allocate.\n");
4202 #endif
4203 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4204 "# (Doesn't have to be '2' works with any number that\n"
4205 "# is not a '0' or '1')\n");
4206 }
4207
4208 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4209 {
4210 if (iter->tr->allocated_snapshot)
4211 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4212 else
4213 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4214
4215 seq_puts(m, "# Snapshot commands:\n");
4216 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4217 show_snapshot_main_help(m);
4218 else
4219 show_snapshot_percpu_help(m);
4220 }
4221 #else
4222 /* Should never be called */
4223 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4224 #endif
4225
4226 static int s_show(struct seq_file *m, void *v)
4227 {
4228 struct trace_iterator *iter = v;
4229 int ret;
4230
4231 if (iter->ent == NULL) {
4232 if (iter->tr) {
4233 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4234 seq_puts(m, "#\n");
4235 test_ftrace_alive(m);
4236 }
4237 if (iter->snapshot && trace_empty(iter))
4238 print_snapshot_help(m, iter);
4239 else if (iter->trace && iter->trace->print_header)
4240 iter->trace->print_header(m);
4241 else
4242 trace_default_header(m);
4243
4244 } else if (iter->leftover) {
4245 /*
4246 * If we filled the seq_file buffer earlier, we
4247 * want to just show it now.
4248 */
4249 ret = trace_print_seq(m, &iter->seq);
4250
4251 /* ret should this time be zero, but you never know */
4252 iter->leftover = ret;
4253
4254 } else {
4255 print_trace_line(iter);
4256 ret = trace_print_seq(m, &iter->seq);
4257 /*
4258 * If we overflow the seq_file buffer, then it will
4259 * ask us for this data again at start up.
4260 * Use that instead.
4261 * ret is 0 if seq_file write succeeded.
4262 * -1 otherwise.
4263 */
4264 iter->leftover = ret;
4265 }
4266
4267 return 0;
4268 }
4269
4270 /*
4271 * Should be used after trace_array_get(), trace_types_lock
4272 * ensures that i_cdev was already initialized.
4273 */
4274 static inline int tracing_get_cpu(struct inode *inode)
4275 {
4276 if (inode->i_cdev) /* See trace_create_cpu_file() */
4277 return (long)inode->i_cdev - 1;
4278 return RING_BUFFER_ALL_CPUS;
4279 }
4280
4281 static const struct seq_operations tracer_seq_ops = {
4282 .start = s_start,
4283 .next = s_next,
4284 .stop = s_stop,
4285 .show = s_show,
4286 };
4287
4288 static struct trace_iterator *
4289 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4290 {
4291 struct trace_array *tr = inode->i_private;
4292 struct trace_iterator *iter;
4293 int cpu;
4294
4295 if (tracing_disabled)
4296 return ERR_PTR(-ENODEV);
4297
4298 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4299 if (!iter)
4300 return ERR_PTR(-ENOMEM);
4301
4302 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4303 GFP_KERNEL);
4304 if (!iter->buffer_iter)
4305 goto release;
4306
4307 /*
4308 * trace_find_next_entry() may need to save off iter->ent.
4309 * It will place it into the iter->temp buffer. As most
4310 * events are less than 128, allocate a buffer of that size.
4311 * If one is greater, then trace_find_next_entry() will
4312 * allocate a new buffer to adjust for the bigger iter->ent.
4313 * It's not critical if it fails to get allocated here.
4314 */
4315 iter->temp = kmalloc(128, GFP_KERNEL);
4316 if (iter->temp)
4317 iter->temp_size = 128;
4318
4319 /*
4320 * We make a copy of the current tracer to avoid concurrent
4321 * changes on it while we are reading.
4322 */
4323 mutex_lock(&trace_types_lock);
4324 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4325 if (!iter->trace)
4326 goto fail;
4327
4328 *iter->trace = *tr->current_trace;
4329
4330 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4331 goto fail;
4332
4333 iter->tr = tr;
4334
4335 #ifdef CONFIG_TRACER_MAX_TRACE
4336 /* Currently only the top directory has a snapshot */
4337 if (tr->current_trace->print_max || snapshot)
4338 iter->array_buffer = &tr->max_buffer;
4339 else
4340 #endif
4341 iter->array_buffer = &tr->array_buffer;
4342 iter->snapshot = snapshot;
4343 iter->pos = -1;
4344 iter->cpu_file = tracing_get_cpu(inode);
4345 mutex_init(&iter->mutex);
4346
4347 /* Notify the tracer early; before we stop tracing. */
4348 if (iter->trace->open)
4349 iter->trace->open(iter);
4350
4351 /* Annotate start of buffers if we had overruns */
4352 if (ring_buffer_overruns(iter->array_buffer->buffer))
4353 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4354
4355 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4356 if (trace_clocks[tr->clock_id].in_ns)
4357 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4358
4359 /*
4360 * If pause-on-trace is enabled, then stop the trace while
4361 * dumping, unless this is the "snapshot" file
4362 */
4363 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4364 tracing_stop_tr(tr);
4365
4366 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4367 for_each_tracing_cpu(cpu) {
4368 iter->buffer_iter[cpu] =
4369 ring_buffer_read_prepare(iter->array_buffer->buffer,
4370 cpu, GFP_KERNEL);
4371 }
4372 ring_buffer_read_prepare_sync();
4373 for_each_tracing_cpu(cpu) {
4374 ring_buffer_read_start(iter->buffer_iter[cpu]);
4375 tracing_iter_reset(iter, cpu);
4376 }
4377 } else {
4378 cpu = iter->cpu_file;
4379 iter->buffer_iter[cpu] =
4380 ring_buffer_read_prepare(iter->array_buffer->buffer,
4381 cpu, GFP_KERNEL);
4382 ring_buffer_read_prepare_sync();
4383 ring_buffer_read_start(iter->buffer_iter[cpu]);
4384 tracing_iter_reset(iter, cpu);
4385 }
4386
4387 mutex_unlock(&trace_types_lock);
4388
4389 return iter;
4390
4391 fail:
4392 mutex_unlock(&trace_types_lock);
4393 kfree(iter->trace);
4394 kfree(iter->temp);
4395 kfree(iter->buffer_iter);
4396 release:
4397 seq_release_private(inode, file);
4398 return ERR_PTR(-ENOMEM);
4399 }
4400
4401 int tracing_open_generic(struct inode *inode, struct file *filp)
4402 {
4403 int ret;
4404
4405 ret = tracing_check_open_get_tr(NULL);
4406 if (ret)
4407 return ret;
4408
4409 filp->private_data = inode->i_private;
4410 return 0;
4411 }
4412
4413 bool tracing_is_disabled(void)
4414 {
4415 return (tracing_disabled) ? true: false;
4416 }
4417
4418 /*
4419 * Open and update trace_array ref count.
4420 * Must have the current trace_array passed to it.
4421 */
4422 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4423 {
4424 struct trace_array *tr = inode->i_private;
4425 int ret;
4426
4427 ret = tracing_check_open_get_tr(tr);
4428 if (ret)
4429 return ret;
4430
4431 filp->private_data = inode->i_private;
4432
4433 return 0;
4434 }
4435
4436 static int tracing_release(struct inode *inode, struct file *file)
4437 {
4438 struct trace_array *tr = inode->i_private;
4439 struct seq_file *m = file->private_data;
4440 struct trace_iterator *iter;
4441 int cpu;
4442
4443 if (!(file->f_mode & FMODE_READ)) {
4444 trace_array_put(tr);
4445 return 0;
4446 }
4447
4448 /* Writes do not use seq_file */
4449 iter = m->private;
4450 mutex_lock(&trace_types_lock);
4451
4452 for_each_tracing_cpu(cpu) {
4453 if (iter->buffer_iter[cpu])
4454 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4455 }
4456
4457 if (iter->trace && iter->trace->close)
4458 iter->trace->close(iter);
4459
4460 if (!iter->snapshot && tr->stop_count)
4461 /* reenable tracing if it was previously enabled */
4462 tracing_start_tr(tr);
4463
4464 __trace_array_put(tr);
4465
4466 mutex_unlock(&trace_types_lock);
4467
4468 mutex_destroy(&iter->mutex);
4469 free_cpumask_var(iter->started);
4470 kfree(iter->temp);
4471 kfree(iter->trace);
4472 kfree(iter->buffer_iter);
4473 seq_release_private(inode, file);
4474
4475 return 0;
4476 }
4477
4478 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4479 {
4480 struct trace_array *tr = inode->i_private;
4481
4482 trace_array_put(tr);
4483 return 0;
4484 }
4485
4486 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4487 {
4488 struct trace_array *tr = inode->i_private;
4489
4490 trace_array_put(tr);
4491
4492 return single_release(inode, file);
4493 }
4494
4495 static int tracing_open(struct inode *inode, struct file *file)
4496 {
4497 struct trace_array *tr = inode->i_private;
4498 struct trace_iterator *iter;
4499 int ret;
4500
4501 ret = tracing_check_open_get_tr(tr);
4502 if (ret)
4503 return ret;
4504
4505 /* If this file was open for write, then erase contents */
4506 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4507 int cpu = tracing_get_cpu(inode);
4508 struct array_buffer *trace_buf = &tr->array_buffer;
4509
4510 #ifdef CONFIG_TRACER_MAX_TRACE
4511 if (tr->current_trace->print_max)
4512 trace_buf = &tr->max_buffer;
4513 #endif
4514
4515 if (cpu == RING_BUFFER_ALL_CPUS)
4516 tracing_reset_online_cpus(trace_buf);
4517 else
4518 tracing_reset_cpu(trace_buf, cpu);
4519 }
4520
4521 if (file->f_mode & FMODE_READ) {
4522 iter = __tracing_open(inode, file, false);
4523 if (IS_ERR(iter))
4524 ret = PTR_ERR(iter);
4525 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4526 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4527 }
4528
4529 if (ret < 0)
4530 trace_array_put(tr);
4531
4532 return ret;
4533 }
4534
4535 /*
4536 * Some tracers are not suitable for instance buffers.
4537 * A tracer is always available for the global array (toplevel)
4538 * or if it explicitly states that it is.
4539 */
4540 static bool
4541 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4542 {
4543 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4544 }
4545
4546 /* Find the next tracer that this trace array may use */
4547 static struct tracer *
4548 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4549 {
4550 while (t && !trace_ok_for_array(t, tr))
4551 t = t->next;
4552
4553 return t;
4554 }
4555
4556 static void *
4557 t_next(struct seq_file *m, void *v, loff_t *pos)
4558 {
4559 struct trace_array *tr = m->private;
4560 struct tracer *t = v;
4561
4562 (*pos)++;
4563
4564 if (t)
4565 t = get_tracer_for_array(tr, t->next);
4566
4567 return t;
4568 }
4569
4570 static void *t_start(struct seq_file *m, loff_t *pos)
4571 {
4572 struct trace_array *tr = m->private;
4573 struct tracer *t;
4574 loff_t l = 0;
4575
4576 mutex_lock(&trace_types_lock);
4577
4578 t = get_tracer_for_array(tr, trace_types);
4579 for (; t && l < *pos; t = t_next(m, t, &l))
4580 ;
4581
4582 return t;
4583 }
4584
4585 static void t_stop(struct seq_file *m, void *p)
4586 {
4587 mutex_unlock(&trace_types_lock);
4588 }
4589
4590 static int t_show(struct seq_file *m, void *v)
4591 {
4592 struct tracer *t = v;
4593
4594 if (!t)
4595 return 0;
4596
4597 seq_puts(m, t->name);
4598 if (t->next)
4599 seq_putc(m, ' ');
4600 else
4601 seq_putc(m, '\n');
4602
4603 return 0;
4604 }
4605
4606 static const struct seq_operations show_traces_seq_ops = {
4607 .start = t_start,
4608 .next = t_next,
4609 .stop = t_stop,
4610 .show = t_show,
4611 };
4612
4613 static int show_traces_open(struct inode *inode, struct file *file)
4614 {
4615 struct trace_array *tr = inode->i_private;
4616 struct seq_file *m;
4617 int ret;
4618
4619 ret = tracing_check_open_get_tr(tr);
4620 if (ret)
4621 return ret;
4622
4623 ret = seq_open(file, &show_traces_seq_ops);
4624 if (ret) {
4625 trace_array_put(tr);
4626 return ret;
4627 }
4628
4629 m = file->private_data;
4630 m->private = tr;
4631
4632 return 0;
4633 }
4634
4635 static int show_traces_release(struct inode *inode, struct file *file)
4636 {
4637 struct trace_array *tr = inode->i_private;
4638
4639 trace_array_put(tr);
4640 return seq_release(inode, file);
4641 }
4642
4643 static ssize_t
4644 tracing_write_stub(struct file *filp, const char __user *ubuf,
4645 size_t count, loff_t *ppos)
4646 {
4647 return count;
4648 }
4649
4650 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4651 {
4652 int ret;
4653
4654 if (file->f_mode & FMODE_READ)
4655 ret = seq_lseek(file, offset, whence);
4656 else
4657 file->f_pos = ret = 0;
4658
4659 return ret;
4660 }
4661
4662 static const struct file_operations tracing_fops = {
4663 .open = tracing_open,
4664 .read = seq_read,
4665 .write = tracing_write_stub,
4666 .llseek = tracing_lseek,
4667 .release = tracing_release,
4668 };
4669
4670 static const struct file_operations show_traces_fops = {
4671 .open = show_traces_open,
4672 .read = seq_read,
4673 .llseek = seq_lseek,
4674 .release = show_traces_release,
4675 };
4676
4677 static ssize_t
4678 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4679 size_t count, loff_t *ppos)
4680 {
4681 struct trace_array *tr = file_inode(filp)->i_private;
4682 char *mask_str;
4683 int len;
4684
4685 len = snprintf(NULL, 0, "%*pb\n",
4686 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4687 mask_str = kmalloc(len, GFP_KERNEL);
4688 if (!mask_str)
4689 return -ENOMEM;
4690
4691 len = snprintf(mask_str, len, "%*pb\n",
4692 cpumask_pr_args(tr->tracing_cpumask));
4693 if (len >= count) {
4694 count = -EINVAL;
4695 goto out_err;
4696 }
4697 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4698
4699 out_err:
4700 kfree(mask_str);
4701
4702 return count;
4703 }
4704
4705 int tracing_set_cpumask(struct trace_array *tr,
4706 cpumask_var_t tracing_cpumask_new)
4707 {
4708 int cpu;
4709
4710 if (!tr)
4711 return -EINVAL;
4712
4713 local_irq_disable();
4714 arch_spin_lock(&tr->max_lock);
4715 for_each_tracing_cpu(cpu) {
4716 /*
4717 * Increase/decrease the disabled counter if we are
4718 * about to flip a bit in the cpumask:
4719 */
4720 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4721 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4722 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4723 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4724 }
4725 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4726 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4727 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4728 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4729 }
4730 }
4731 arch_spin_unlock(&tr->max_lock);
4732 local_irq_enable();
4733
4734 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4735
4736 return 0;
4737 }
4738
4739 static ssize_t
4740 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4741 size_t count, loff_t *ppos)
4742 {
4743 struct trace_array *tr = file_inode(filp)->i_private;
4744 cpumask_var_t tracing_cpumask_new;
4745 int err;
4746
4747 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4748 return -ENOMEM;
4749
4750 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4751 if (err)
4752 goto err_free;
4753
4754 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4755 if (err)
4756 goto err_free;
4757
4758 free_cpumask_var(tracing_cpumask_new);
4759
4760 return count;
4761
4762 err_free:
4763 free_cpumask_var(tracing_cpumask_new);
4764
4765 return err;
4766 }
4767
4768 static const struct file_operations tracing_cpumask_fops = {
4769 .open = tracing_open_generic_tr,
4770 .read = tracing_cpumask_read,
4771 .write = tracing_cpumask_write,
4772 .release = tracing_release_generic_tr,
4773 .llseek = generic_file_llseek,
4774 };
4775
4776 static int tracing_trace_options_show(struct seq_file *m, void *v)
4777 {
4778 struct tracer_opt *trace_opts;
4779 struct trace_array *tr = m->private;
4780 u32 tracer_flags;
4781 int i;
4782
4783 mutex_lock(&trace_types_lock);
4784 tracer_flags = tr->current_trace->flags->val;
4785 trace_opts = tr->current_trace->flags->opts;
4786
4787 for (i = 0; trace_options[i]; i++) {
4788 if (tr->trace_flags & (1 << i))
4789 seq_printf(m, "%s\n", trace_options[i]);
4790 else
4791 seq_printf(m, "no%s\n", trace_options[i]);
4792 }
4793
4794 for (i = 0; trace_opts[i].name; i++) {
4795 if (tracer_flags & trace_opts[i].bit)
4796 seq_printf(m, "%s\n", trace_opts[i].name);
4797 else
4798 seq_printf(m, "no%s\n", trace_opts[i].name);
4799 }
4800 mutex_unlock(&trace_types_lock);
4801
4802 return 0;
4803 }
4804
4805 static int __set_tracer_option(struct trace_array *tr,
4806 struct tracer_flags *tracer_flags,
4807 struct tracer_opt *opts, int neg)
4808 {
4809 struct tracer *trace = tracer_flags->trace;
4810 int ret;
4811
4812 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4813 if (ret)
4814 return ret;
4815
4816 if (neg)
4817 tracer_flags->val &= ~opts->bit;
4818 else
4819 tracer_flags->val |= opts->bit;
4820 return 0;
4821 }
4822
4823 /* Try to assign a tracer specific option */
4824 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4825 {
4826 struct tracer *trace = tr->current_trace;
4827 struct tracer_flags *tracer_flags = trace->flags;
4828 struct tracer_opt *opts = NULL;
4829 int i;
4830
4831 for (i = 0; tracer_flags->opts[i].name; i++) {
4832 opts = &tracer_flags->opts[i];
4833
4834 if (strcmp(cmp, opts->name) == 0)
4835 return __set_tracer_option(tr, trace->flags, opts, neg);
4836 }
4837
4838 return -EINVAL;
4839 }
4840
4841 /* Some tracers require overwrite to stay enabled */
4842 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4843 {
4844 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4845 return -1;
4846
4847 return 0;
4848 }
4849
4850 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4851 {
4852 if ((mask == TRACE_ITER_RECORD_TGID) ||
4853 (mask == TRACE_ITER_RECORD_CMD))
4854 lockdep_assert_held(&event_mutex);
4855
4856 /* do nothing if flag is already set */
4857 if (!!(tr->trace_flags & mask) == !!enabled)
4858 return 0;
4859
4860 /* Give the tracer a chance to approve the change */
4861 if (tr->current_trace->flag_changed)
4862 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4863 return -EINVAL;
4864
4865 if (enabled)
4866 tr->trace_flags |= mask;
4867 else
4868 tr->trace_flags &= ~mask;
4869
4870 if (mask == TRACE_ITER_RECORD_CMD)
4871 trace_event_enable_cmd_record(enabled);
4872
4873 if (mask == TRACE_ITER_RECORD_TGID) {
4874 if (!tgid_map)
4875 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4876 sizeof(*tgid_map),
4877 GFP_KERNEL);
4878 if (!tgid_map) {
4879 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4880 return -ENOMEM;
4881 }
4882
4883 trace_event_enable_tgid_record(enabled);
4884 }
4885
4886 if (mask == TRACE_ITER_EVENT_FORK)
4887 trace_event_follow_fork(tr, enabled);
4888
4889 if (mask == TRACE_ITER_FUNC_FORK)
4890 ftrace_pid_follow_fork(tr, enabled);
4891
4892 if (mask == TRACE_ITER_OVERWRITE) {
4893 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4894 #ifdef CONFIG_TRACER_MAX_TRACE
4895 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4896 #endif
4897 }
4898
4899 if (mask == TRACE_ITER_PRINTK) {
4900 trace_printk_start_stop_comm(enabled);
4901 trace_printk_control(enabled);
4902 }
4903
4904 return 0;
4905 }
4906
4907 int trace_set_options(struct trace_array *tr, char *option)
4908 {
4909 char *cmp;
4910 int neg = 0;
4911 int ret;
4912 size_t orig_len = strlen(option);
4913 int len;
4914
4915 cmp = strstrip(option);
4916
4917 len = str_has_prefix(cmp, "no");
4918 if (len)
4919 neg = 1;
4920
4921 cmp += len;
4922
4923 mutex_lock(&event_mutex);
4924 mutex_lock(&trace_types_lock);
4925
4926 ret = match_string(trace_options, -1, cmp);
4927 /* If no option could be set, test the specific tracer options */
4928 if (ret < 0)
4929 ret = set_tracer_option(tr, cmp, neg);
4930 else
4931 ret = set_tracer_flag(tr, 1 << ret, !neg);
4932
4933 mutex_unlock(&trace_types_lock);
4934 mutex_unlock(&event_mutex);
4935
4936 /*
4937 * If the first trailing whitespace is replaced with '\0' by strstrip,
4938 * turn it back into a space.
4939 */
4940 if (orig_len > strlen(option))
4941 option[strlen(option)] = ' ';
4942
4943 return ret;
4944 }
4945
4946 static void __init apply_trace_boot_options(void)
4947 {
4948 char *buf = trace_boot_options_buf;
4949 char *option;
4950
4951 while (true) {
4952 option = strsep(&buf, ",");
4953
4954 if (!option)
4955 break;
4956
4957 if (*option)
4958 trace_set_options(&global_trace, option);
4959
4960 /* Put back the comma to allow this to be called again */
4961 if (buf)
4962 *(buf - 1) = ',';
4963 }
4964 }
4965
4966 static ssize_t
4967 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4968 size_t cnt, loff_t *ppos)
4969 {
4970 struct seq_file *m = filp->private_data;
4971 struct trace_array *tr = m->private;
4972 char buf[64];
4973 int ret;
4974
4975 if (cnt >= sizeof(buf))
4976 return -EINVAL;
4977
4978 if (copy_from_user(buf, ubuf, cnt))
4979 return -EFAULT;
4980
4981 buf[cnt] = 0;
4982
4983 ret = trace_set_options(tr, buf);
4984 if (ret < 0)
4985 return ret;
4986
4987 *ppos += cnt;
4988
4989 return cnt;
4990 }
4991
4992 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4993 {
4994 struct trace_array *tr = inode->i_private;
4995 int ret;
4996
4997 ret = tracing_check_open_get_tr(tr);
4998 if (ret)
4999 return ret;
5000
5001 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5002 if (ret < 0)
5003 trace_array_put(tr);
5004
5005 return ret;
5006 }
5007
5008 static const struct file_operations tracing_iter_fops = {
5009 .open = tracing_trace_options_open,
5010 .read = seq_read,
5011 .llseek = seq_lseek,
5012 .release = tracing_single_release_tr,
5013 .write = tracing_trace_options_write,
5014 };
5015
5016 static const char readme_msg[] =
5017 "tracing mini-HOWTO:\n\n"
5018 "# echo 0 > tracing_on : quick way to disable tracing\n"
5019 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5020 " Important files:\n"
5021 " trace\t\t\t- The static contents of the buffer\n"
5022 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5023 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5024 " current_tracer\t- function and latency tracers\n"
5025 " available_tracers\t- list of configured tracers for current_tracer\n"
5026 " error_log\t- error log for failed commands (that support it)\n"
5027 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5028 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5029 " trace_clock\t\t-change the clock used to order events\n"
5030 " local: Per cpu clock but may not be synced across CPUs\n"
5031 " global: Synced across CPUs but slows tracing down.\n"
5032 " counter: Not a clock, but just an increment\n"
5033 " uptime: Jiffy counter from time of boot\n"
5034 " perf: Same clock that perf events use\n"
5035 #ifdef CONFIG_X86_64
5036 " x86-tsc: TSC cycle counter\n"
5037 #endif
5038 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5039 " delta: Delta difference against a buffer-wide timestamp\n"
5040 " absolute: Absolute (standalone) timestamp\n"
5041 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5042 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5043 " tracing_cpumask\t- Limit which CPUs to trace\n"
5044 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5045 "\t\t\t Remove sub-buffer with rmdir\n"
5046 " trace_options\t\t- Set format or modify how tracing happens\n"
5047 "\t\t\t Disable an option by prefixing 'no' to the\n"
5048 "\t\t\t option name\n"
5049 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5050 #ifdef CONFIG_DYNAMIC_FTRACE
5051 "\n available_filter_functions - list of functions that can be filtered on\n"
5052 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5053 "\t\t\t functions\n"
5054 "\t accepts: func_full_name or glob-matching-pattern\n"
5055 "\t modules: Can select a group via module\n"
5056 "\t Format: :mod:<module-name>\n"
5057 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5058 "\t triggers: a command to perform when function is hit\n"
5059 "\t Format: <function>:<trigger>[:count]\n"
5060 "\t trigger: traceon, traceoff\n"
5061 "\t\t enable_event:<system>:<event>\n"
5062 "\t\t disable_event:<system>:<event>\n"
5063 #ifdef CONFIG_STACKTRACE
5064 "\t\t stacktrace\n"
5065 #endif
5066 #ifdef CONFIG_TRACER_SNAPSHOT
5067 "\t\t snapshot\n"
5068 #endif
5069 "\t\t dump\n"
5070 "\t\t cpudump\n"
5071 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5072 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5073 "\t The first one will disable tracing every time do_fault is hit\n"
5074 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5075 "\t The first time do trap is hit and it disables tracing, the\n"
5076 "\t counter will decrement to 2. If tracing is already disabled,\n"
5077 "\t the counter will not decrement. It only decrements when the\n"
5078 "\t trigger did work\n"
5079 "\t To remove trigger without count:\n"
5080 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5081 "\t To remove trigger with a count:\n"
5082 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5083 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5084 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5085 "\t modules: Can select a group via module command :mod:\n"
5086 "\t Does not accept triggers\n"
5087 #endif /* CONFIG_DYNAMIC_FTRACE */
5088 #ifdef CONFIG_FUNCTION_TRACER
5089 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5090 "\t\t (function)\n"
5091 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5092 "\t\t (function)\n"
5093 #endif
5094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5095 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5096 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5097 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5098 #endif
5099 #ifdef CONFIG_TRACER_SNAPSHOT
5100 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5101 "\t\t\t snapshot buffer. Read the contents for more\n"
5102 "\t\t\t information\n"
5103 #endif
5104 #ifdef CONFIG_STACK_TRACER
5105 " stack_trace\t\t- Shows the max stack trace when active\n"
5106 " stack_max_size\t- Shows current max stack size that was traced\n"
5107 "\t\t\t Write into this file to reset the max size (trigger a\n"
5108 "\t\t\t new trace)\n"
5109 #ifdef CONFIG_DYNAMIC_FTRACE
5110 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5111 "\t\t\t traces\n"
5112 #endif
5113 #endif /* CONFIG_STACK_TRACER */
5114 #ifdef CONFIG_DYNAMIC_EVENTS
5115 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5116 "\t\t\t Write into this file to define/undefine new trace events.\n"
5117 #endif
5118 #ifdef CONFIG_KPROBE_EVENTS
5119 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5120 "\t\t\t Write into this file to define/undefine new trace events.\n"
5121 #endif
5122 #ifdef CONFIG_UPROBE_EVENTS
5123 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5124 "\t\t\t Write into this file to define/undefine new trace events.\n"
5125 #endif
5126 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5127 "\t accepts: event-definitions (one definition per line)\n"
5128 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5129 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5130 #ifdef CONFIG_HIST_TRIGGERS
5131 "\t s:[synthetic/]<event> <field> [<field>]\n"
5132 #endif
5133 "\t -:[<group>/]<event>\n"
5134 #ifdef CONFIG_KPROBE_EVENTS
5135 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5136 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5137 #endif
5138 #ifdef CONFIG_UPROBE_EVENTS
5139 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
5140 #endif
5141 "\t args: <name>=fetcharg[:type]\n"
5142 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5143 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5144 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5145 #else
5146 "\t $stack<index>, $stack, $retval, $comm,\n"
5147 #endif
5148 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5149 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5150 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5151 "\t <type>\\[<array-size>\\]\n"
5152 #ifdef CONFIG_HIST_TRIGGERS
5153 "\t field: <stype> <name>;\n"
5154 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5155 "\t [unsigned] char/int/long\n"
5156 #endif
5157 #endif
5158 " events/\t\t- Directory containing all trace event subsystems:\n"
5159 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5160 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5161 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5162 "\t\t\t events\n"
5163 " filter\t\t- If set, only events passing filter are traced\n"
5164 " events/<system>/<event>/\t- Directory containing control files for\n"
5165 "\t\t\t <event>:\n"
5166 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5167 " filter\t\t- If set, only events passing filter are traced\n"
5168 " trigger\t\t- If set, a command to perform when event is hit\n"
5169 "\t Format: <trigger>[:count][if <filter>]\n"
5170 "\t trigger: traceon, traceoff\n"
5171 "\t enable_event:<system>:<event>\n"
5172 "\t disable_event:<system>:<event>\n"
5173 #ifdef CONFIG_HIST_TRIGGERS
5174 "\t enable_hist:<system>:<event>\n"
5175 "\t disable_hist:<system>:<event>\n"
5176 #endif
5177 #ifdef CONFIG_STACKTRACE
5178 "\t\t stacktrace\n"
5179 #endif
5180 #ifdef CONFIG_TRACER_SNAPSHOT
5181 "\t\t snapshot\n"
5182 #endif
5183 #ifdef CONFIG_HIST_TRIGGERS
5184 "\t\t hist (see below)\n"
5185 #endif
5186 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5187 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5188 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5189 "\t events/block/block_unplug/trigger\n"
5190 "\t The first disables tracing every time block_unplug is hit.\n"
5191 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5192 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5193 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5194 "\t Like function triggers, the counter is only decremented if it\n"
5195 "\t enabled or disabled tracing.\n"
5196 "\t To remove a trigger without a count:\n"
5197 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5198 "\t To remove a trigger with a count:\n"
5199 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5200 "\t Filters can be ignored when removing a trigger.\n"
5201 #ifdef CONFIG_HIST_TRIGGERS
5202 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5203 "\t Format: hist:keys=<field1[,field2,...]>\n"
5204 "\t [:values=<field1[,field2,...]>]\n"
5205 "\t [:sort=<field1[,field2,...]>]\n"
5206 "\t [:size=#entries]\n"
5207 "\t [:pause][:continue][:clear]\n"
5208 "\t [:name=histname1]\n"
5209 "\t [:<handler>.<action>]\n"
5210 "\t [if <filter>]\n\n"
5211 "\t When a matching event is hit, an entry is added to a hash\n"
5212 "\t table using the key(s) and value(s) named, and the value of a\n"
5213 "\t sum called 'hitcount' is incremented. Keys and values\n"
5214 "\t correspond to fields in the event's format description. Keys\n"
5215 "\t can be any field, or the special string 'stacktrace'.\n"
5216 "\t Compound keys consisting of up to two fields can be specified\n"
5217 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5218 "\t fields. Sort keys consisting of up to two fields can be\n"
5219 "\t specified using the 'sort' keyword. The sort direction can\n"
5220 "\t be modified by appending '.descending' or '.ascending' to a\n"
5221 "\t sort field. The 'size' parameter can be used to specify more\n"
5222 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5223 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5224 "\t its histogram data will be shared with other triggers of the\n"
5225 "\t same name, and trigger hits will update this common data.\n\n"
5226 "\t Reading the 'hist' file for the event will dump the hash\n"
5227 "\t table in its entirety to stdout. If there are multiple hist\n"
5228 "\t triggers attached to an event, there will be a table for each\n"
5229 "\t trigger in the output. The table displayed for a named\n"
5230 "\t trigger will be the same as any other instance having the\n"
5231 "\t same name. The default format used to display a given field\n"
5232 "\t can be modified by appending any of the following modifiers\n"
5233 "\t to the field name, as applicable:\n\n"
5234 "\t .hex display a number as a hex value\n"
5235 "\t .sym display an address as a symbol\n"
5236 "\t .sym-offset display an address as a symbol and offset\n"
5237 "\t .execname display a common_pid as a program name\n"
5238 "\t .syscall display a syscall id as a syscall name\n"
5239 "\t .log2 display log2 value rather than raw number\n"
5240 "\t .usecs display a common_timestamp in microseconds\n\n"
5241 "\t The 'pause' parameter can be used to pause an existing hist\n"
5242 "\t trigger or to start a hist trigger but not log any events\n"
5243 "\t until told to do so. 'continue' can be used to start or\n"
5244 "\t restart a paused hist trigger.\n\n"
5245 "\t The 'clear' parameter will clear the contents of a running\n"
5246 "\t hist trigger and leave its current paused/active state\n"
5247 "\t unchanged.\n\n"
5248 "\t The enable_hist and disable_hist triggers can be used to\n"
5249 "\t have one event conditionally start and stop another event's\n"
5250 "\t already-attached hist trigger. The syntax is analogous to\n"
5251 "\t the enable_event and disable_event triggers.\n\n"
5252 "\t Hist trigger handlers and actions are executed whenever a\n"
5253 "\t a histogram entry is added or updated. They take the form:\n\n"
5254 "\t <handler>.<action>\n\n"
5255 "\t The available handlers are:\n\n"
5256 "\t onmatch(matching.event) - invoke on addition or update\n"
5257 "\t onmax(var) - invoke if var exceeds current max\n"
5258 "\t onchange(var) - invoke action if var changes\n\n"
5259 "\t The available actions are:\n\n"
5260 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5261 "\t save(field,...) - save current event fields\n"
5262 #ifdef CONFIG_TRACER_SNAPSHOT
5263 "\t snapshot() - snapshot the trace buffer\n"
5264 #endif
5265 #endif
5266 ;
5267
5268 static ssize_t
5269 tracing_readme_read(struct file *filp, char __user *ubuf,
5270 size_t cnt, loff_t *ppos)
5271 {
5272 return simple_read_from_buffer(ubuf, cnt, ppos,
5273 readme_msg, strlen(readme_msg));
5274 }
5275
5276 static const struct file_operations tracing_readme_fops = {
5277 .open = tracing_open_generic,
5278 .read = tracing_readme_read,
5279 .llseek = generic_file_llseek,
5280 };
5281
5282 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5283 {
5284 int *ptr = v;
5285
5286 if (*pos || m->count)
5287 ptr++;
5288
5289 (*pos)++;
5290
5291 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5292 if (trace_find_tgid(*ptr))
5293 return ptr;
5294 }
5295
5296 return NULL;
5297 }
5298
5299 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5300 {
5301 void *v;
5302 loff_t l = 0;
5303
5304 if (!tgid_map)
5305 return NULL;
5306
5307 v = &tgid_map[0];
5308 while (l <= *pos) {
5309 v = saved_tgids_next(m, v, &l);
5310 if (!v)
5311 return NULL;
5312 }
5313
5314 return v;
5315 }
5316
5317 static void saved_tgids_stop(struct seq_file *m, void *v)
5318 {
5319 }
5320
5321 static int saved_tgids_show(struct seq_file *m, void *v)
5322 {
5323 int pid = (int *)v - tgid_map;
5324
5325 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5326 return 0;
5327 }
5328
5329 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5330 .start = saved_tgids_start,
5331 .stop = saved_tgids_stop,
5332 .next = saved_tgids_next,
5333 .show = saved_tgids_show,
5334 };
5335
5336 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5337 {
5338 int ret;
5339
5340 ret = tracing_check_open_get_tr(NULL);
5341 if (ret)
5342 return ret;
5343
5344 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5345 }
5346
5347
5348 static const struct file_operations tracing_saved_tgids_fops = {
5349 .open = tracing_saved_tgids_open,
5350 .read = seq_read,
5351 .llseek = seq_lseek,
5352 .release = seq_release,
5353 };
5354
5355 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5356 {
5357 unsigned int *ptr = v;
5358
5359 if (*pos || m->count)
5360 ptr++;
5361
5362 (*pos)++;
5363
5364 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5365 ptr++) {
5366 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5367 continue;
5368
5369 return ptr;
5370 }
5371
5372 return NULL;
5373 }
5374
5375 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5376 {
5377 void *v;
5378 loff_t l = 0;
5379
5380 preempt_disable();
5381 arch_spin_lock(&trace_cmdline_lock);
5382
5383 v = &savedcmd->map_cmdline_to_pid[0];
5384 while (l <= *pos) {
5385 v = saved_cmdlines_next(m, v, &l);
5386 if (!v)
5387 return NULL;
5388 }
5389
5390 return v;
5391 }
5392
5393 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5394 {
5395 arch_spin_unlock(&trace_cmdline_lock);
5396 preempt_enable();
5397 }
5398
5399 static int saved_cmdlines_show(struct seq_file *m, void *v)
5400 {
5401 char buf[TASK_COMM_LEN];
5402 unsigned int *pid = v;
5403
5404 __trace_find_cmdline(*pid, buf);
5405 seq_printf(m, "%d %s\n", *pid, buf);
5406 return 0;
5407 }
5408
5409 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5410 .start = saved_cmdlines_start,
5411 .next = saved_cmdlines_next,
5412 .stop = saved_cmdlines_stop,
5413 .show = saved_cmdlines_show,
5414 };
5415
5416 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5417 {
5418 int ret;
5419
5420 ret = tracing_check_open_get_tr(NULL);
5421 if (ret)
5422 return ret;
5423
5424 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5425 }
5426
5427 static const struct file_operations tracing_saved_cmdlines_fops = {
5428 .open = tracing_saved_cmdlines_open,
5429 .read = seq_read,
5430 .llseek = seq_lseek,
5431 .release = seq_release,
5432 };
5433
5434 static ssize_t
5435 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5436 size_t cnt, loff_t *ppos)
5437 {
5438 char buf[64];
5439 int r;
5440
5441 arch_spin_lock(&trace_cmdline_lock);
5442 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5443 arch_spin_unlock(&trace_cmdline_lock);
5444
5445 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5446 }
5447
5448 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5449 {
5450 kfree(s->saved_cmdlines);
5451 kfree(s->map_cmdline_to_pid);
5452 kfree(s);
5453 }
5454
5455 static int tracing_resize_saved_cmdlines(unsigned int val)
5456 {
5457 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5458
5459 s = kmalloc(sizeof(*s), GFP_KERNEL);
5460 if (!s)
5461 return -ENOMEM;
5462
5463 if (allocate_cmdlines_buffer(val, s) < 0) {
5464 kfree(s);
5465 return -ENOMEM;
5466 }
5467
5468 arch_spin_lock(&trace_cmdline_lock);
5469 savedcmd_temp = savedcmd;
5470 savedcmd = s;
5471 arch_spin_unlock(&trace_cmdline_lock);
5472 free_saved_cmdlines_buffer(savedcmd_temp);
5473
5474 return 0;
5475 }
5476
5477 static ssize_t
5478 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5479 size_t cnt, loff_t *ppos)
5480 {
5481 unsigned long val;
5482 int ret;
5483
5484 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5485 if (ret)
5486 return ret;
5487
5488 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5489 if (!val || val > PID_MAX_DEFAULT)
5490 return -EINVAL;
5491
5492 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5493 if (ret < 0)
5494 return ret;
5495
5496 *ppos += cnt;
5497
5498 return cnt;
5499 }
5500
5501 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5502 .open = tracing_open_generic,
5503 .read = tracing_saved_cmdlines_size_read,
5504 .write = tracing_saved_cmdlines_size_write,
5505 };
5506
5507 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5508 static union trace_eval_map_item *
5509 update_eval_map(union trace_eval_map_item *ptr)
5510 {
5511 if (!ptr->map.eval_string) {
5512 if (ptr->tail.next) {
5513 ptr = ptr->tail.next;
5514 /* Set ptr to the next real item (skip head) */
5515 ptr++;
5516 } else
5517 return NULL;
5518 }
5519 return ptr;
5520 }
5521
5522 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5523 {
5524 union trace_eval_map_item *ptr = v;
5525
5526 /*
5527 * Paranoid! If ptr points to end, we don't want to increment past it.
5528 * This really should never happen.
5529 */
5530 (*pos)++;
5531 ptr = update_eval_map(ptr);
5532 if (WARN_ON_ONCE(!ptr))
5533 return NULL;
5534
5535 ptr++;
5536 ptr = update_eval_map(ptr);
5537
5538 return ptr;
5539 }
5540
5541 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5542 {
5543 union trace_eval_map_item *v;
5544 loff_t l = 0;
5545
5546 mutex_lock(&trace_eval_mutex);
5547
5548 v = trace_eval_maps;
5549 if (v)
5550 v++;
5551
5552 while (v && l < *pos) {
5553 v = eval_map_next(m, v, &l);
5554 }
5555
5556 return v;
5557 }
5558
5559 static void eval_map_stop(struct seq_file *m, void *v)
5560 {
5561 mutex_unlock(&trace_eval_mutex);
5562 }
5563
5564 static int eval_map_show(struct seq_file *m, void *v)
5565 {
5566 union trace_eval_map_item *ptr = v;
5567
5568 seq_printf(m, "%s %ld (%s)\n",
5569 ptr->map.eval_string, ptr->map.eval_value,
5570 ptr->map.system);
5571
5572 return 0;
5573 }
5574
5575 static const struct seq_operations tracing_eval_map_seq_ops = {
5576 .start = eval_map_start,
5577 .next = eval_map_next,
5578 .stop = eval_map_stop,
5579 .show = eval_map_show,
5580 };
5581
5582 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5583 {
5584 int ret;
5585
5586 ret = tracing_check_open_get_tr(NULL);
5587 if (ret)
5588 return ret;
5589
5590 return seq_open(filp, &tracing_eval_map_seq_ops);
5591 }
5592
5593 static const struct file_operations tracing_eval_map_fops = {
5594 .open = tracing_eval_map_open,
5595 .read = seq_read,
5596 .llseek = seq_lseek,
5597 .release = seq_release,
5598 };
5599
5600 static inline union trace_eval_map_item *
5601 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5602 {
5603 /* Return tail of array given the head */
5604 return ptr + ptr->head.length + 1;
5605 }
5606
5607 static void
5608 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5609 int len)
5610 {
5611 struct trace_eval_map **stop;
5612 struct trace_eval_map **map;
5613 union trace_eval_map_item *map_array;
5614 union trace_eval_map_item *ptr;
5615
5616 stop = start + len;
5617
5618 /*
5619 * The trace_eval_maps contains the map plus a head and tail item,
5620 * where the head holds the module and length of array, and the
5621 * tail holds a pointer to the next list.
5622 */
5623 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5624 if (!map_array) {
5625 pr_warn("Unable to allocate trace eval mapping\n");
5626 return;
5627 }
5628
5629 mutex_lock(&trace_eval_mutex);
5630
5631 if (!trace_eval_maps)
5632 trace_eval_maps = map_array;
5633 else {
5634 ptr = trace_eval_maps;
5635 for (;;) {
5636 ptr = trace_eval_jmp_to_tail(ptr);
5637 if (!ptr->tail.next)
5638 break;
5639 ptr = ptr->tail.next;
5640
5641 }
5642 ptr->tail.next = map_array;
5643 }
5644 map_array->head.mod = mod;
5645 map_array->head.length = len;
5646 map_array++;
5647
5648 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5649 map_array->map = **map;
5650 map_array++;
5651 }
5652 memset(map_array, 0, sizeof(*map_array));
5653
5654 mutex_unlock(&trace_eval_mutex);
5655 }
5656
5657 static void trace_create_eval_file(struct dentry *d_tracer)
5658 {
5659 trace_create_file("eval_map", 0444, d_tracer,
5660 NULL, &tracing_eval_map_fops);
5661 }
5662
5663 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5664 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5665 static inline void trace_insert_eval_map_file(struct module *mod,
5666 struct trace_eval_map **start, int len) { }
5667 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5668
5669 static void trace_insert_eval_map(struct module *mod,
5670 struct trace_eval_map **start, int len)
5671 {
5672 struct trace_eval_map **map;
5673
5674 if (len <= 0)
5675 return;
5676
5677 map = start;
5678
5679 trace_event_eval_update(map, len);
5680
5681 trace_insert_eval_map_file(mod, start, len);
5682 }
5683
5684 static ssize_t
5685 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5686 size_t cnt, loff_t *ppos)
5687 {
5688 struct trace_array *tr = filp->private_data;
5689 char buf[MAX_TRACER_SIZE+2];
5690 int r;
5691
5692 mutex_lock(&trace_types_lock);
5693 r = sprintf(buf, "%s\n", tr->current_trace->name);
5694 mutex_unlock(&trace_types_lock);
5695
5696 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5697 }
5698
5699 int tracer_init(struct tracer *t, struct trace_array *tr)
5700 {
5701 tracing_reset_online_cpus(&tr->array_buffer);
5702 return t->init(tr);
5703 }
5704
5705 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5706 {
5707 int cpu;
5708
5709 for_each_tracing_cpu(cpu)
5710 per_cpu_ptr(buf->data, cpu)->entries = val;
5711 }
5712
5713 #ifdef CONFIG_TRACER_MAX_TRACE
5714 /* resize @tr's buffer to the size of @size_tr's entries */
5715 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5716 struct array_buffer *size_buf, int cpu_id)
5717 {
5718 int cpu, ret = 0;
5719
5720 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5721 for_each_tracing_cpu(cpu) {
5722 ret = ring_buffer_resize(trace_buf->buffer,
5723 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5724 if (ret < 0)
5725 break;
5726 per_cpu_ptr(trace_buf->data, cpu)->entries =
5727 per_cpu_ptr(size_buf->data, cpu)->entries;
5728 }
5729 } else {
5730 ret = ring_buffer_resize(trace_buf->buffer,
5731 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5732 if (ret == 0)
5733 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5734 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5735 }
5736
5737 return ret;
5738 }
5739 #endif /* CONFIG_TRACER_MAX_TRACE */
5740
5741 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5742 unsigned long size, int cpu)
5743 {
5744 int ret;
5745
5746 /*
5747 * If kernel or user changes the size of the ring buffer
5748 * we use the size that was given, and we can forget about
5749 * expanding it later.
5750 */
5751 ring_buffer_expanded = true;
5752
5753 /* May be called before buffers are initialized */
5754 if (!tr->array_buffer.buffer)
5755 return 0;
5756
5757 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5758 if (ret < 0)
5759 return ret;
5760
5761 #ifdef CONFIG_TRACER_MAX_TRACE
5762 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5763 !tr->current_trace->use_max_tr)
5764 goto out;
5765
5766 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5767 if (ret < 0) {
5768 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5769 &tr->array_buffer, cpu);
5770 if (r < 0) {
5771 /*
5772 * AARGH! We are left with different
5773 * size max buffer!!!!
5774 * The max buffer is our "snapshot" buffer.
5775 * When a tracer needs a snapshot (one of the
5776 * latency tracers), it swaps the max buffer
5777 * with the saved snap shot. We succeeded to
5778 * update the size of the main buffer, but failed to
5779 * update the size of the max buffer. But when we tried
5780 * to reset the main buffer to the original size, we
5781 * failed there too. This is very unlikely to
5782 * happen, but if it does, warn and kill all
5783 * tracing.
5784 */
5785 WARN_ON(1);
5786 tracing_disabled = 1;
5787 }
5788 return ret;
5789 }
5790
5791 if (cpu == RING_BUFFER_ALL_CPUS)
5792 set_buffer_entries(&tr->max_buffer, size);
5793 else
5794 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5795
5796 out:
5797 #endif /* CONFIG_TRACER_MAX_TRACE */
5798
5799 if (cpu == RING_BUFFER_ALL_CPUS)
5800 set_buffer_entries(&tr->array_buffer, size);
5801 else
5802 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5803
5804 return ret;
5805 }
5806
5807 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5808 unsigned long size, int cpu_id)
5809 {
5810 int ret = size;
5811
5812 mutex_lock(&trace_types_lock);
5813
5814 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5815 /* make sure, this cpu is enabled in the mask */
5816 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5817 ret = -EINVAL;
5818 goto out;
5819 }
5820 }
5821
5822 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5823 if (ret < 0)
5824 ret = -ENOMEM;
5825
5826 out:
5827 mutex_unlock(&trace_types_lock);
5828
5829 return ret;
5830 }
5831
5832
5833 /**
5834 * tracing_update_buffers - used by tracing facility to expand ring buffers
5835 *
5836 * To save on memory when the tracing is never used on a system with it
5837 * configured in. The ring buffers are set to a minimum size. But once
5838 * a user starts to use the tracing facility, then they need to grow
5839 * to their default size.
5840 *
5841 * This function is to be called when a tracer is about to be used.
5842 */
5843 int tracing_update_buffers(void)
5844 {
5845 int ret = 0;
5846
5847 mutex_lock(&trace_types_lock);
5848 if (!ring_buffer_expanded)
5849 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5850 RING_BUFFER_ALL_CPUS);
5851 mutex_unlock(&trace_types_lock);
5852
5853 return ret;
5854 }
5855
5856 struct trace_option_dentry;
5857
5858 static void
5859 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5860
5861 /*
5862 * Used to clear out the tracer before deletion of an instance.
5863 * Must have trace_types_lock held.
5864 */
5865 static void tracing_set_nop(struct trace_array *tr)
5866 {
5867 if (tr->current_trace == &nop_trace)
5868 return;
5869
5870 tr->current_trace->enabled--;
5871
5872 if (tr->current_trace->reset)
5873 tr->current_trace->reset(tr);
5874
5875 tr->current_trace = &nop_trace;
5876 }
5877
5878 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5879 {
5880 /* Only enable if the directory has been created already. */
5881 if (!tr->dir)
5882 return;
5883
5884 create_trace_option_files(tr, t);
5885 }
5886
5887 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5888 {
5889 struct tracer *t;
5890 #ifdef CONFIG_TRACER_MAX_TRACE
5891 bool had_max_tr;
5892 #endif
5893 int ret = 0;
5894
5895 mutex_lock(&trace_types_lock);
5896
5897 if (!ring_buffer_expanded) {
5898 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5899 RING_BUFFER_ALL_CPUS);
5900 if (ret < 0)
5901 goto out;
5902 ret = 0;
5903 }
5904
5905 for (t = trace_types; t; t = t->next) {
5906 if (strcmp(t->name, buf) == 0)
5907 break;
5908 }
5909 if (!t) {
5910 ret = -EINVAL;
5911 goto out;
5912 }
5913 if (t == tr->current_trace)
5914 goto out;
5915
5916 #ifdef CONFIG_TRACER_SNAPSHOT
5917 if (t->use_max_tr) {
5918 arch_spin_lock(&tr->max_lock);
5919 if (tr->cond_snapshot)
5920 ret = -EBUSY;
5921 arch_spin_unlock(&tr->max_lock);
5922 if (ret)
5923 goto out;
5924 }
5925 #endif
5926 /* Some tracers won't work on kernel command line */
5927 if (system_state < SYSTEM_RUNNING && t->noboot) {
5928 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5929 t->name);
5930 goto out;
5931 }
5932
5933 /* Some tracers are only allowed for the top level buffer */
5934 if (!trace_ok_for_array(t, tr)) {
5935 ret = -EINVAL;
5936 goto out;
5937 }
5938
5939 /* If trace pipe files are being read, we can't change the tracer */
5940 if (tr->trace_ref) {
5941 ret = -EBUSY;
5942 goto out;
5943 }
5944
5945 trace_branch_disable();
5946
5947 tr->current_trace->enabled--;
5948
5949 if (tr->current_trace->reset)
5950 tr->current_trace->reset(tr);
5951
5952 /* Current trace needs to be nop_trace before synchronize_rcu */
5953 tr->current_trace = &nop_trace;
5954
5955 #ifdef CONFIG_TRACER_MAX_TRACE
5956 had_max_tr = tr->allocated_snapshot;
5957
5958 if (had_max_tr && !t->use_max_tr) {
5959 /*
5960 * We need to make sure that the update_max_tr sees that
5961 * current_trace changed to nop_trace to keep it from
5962 * swapping the buffers after we resize it.
5963 * The update_max_tr is called from interrupts disabled
5964 * so a synchronized_sched() is sufficient.
5965 */
5966 synchronize_rcu();
5967 free_snapshot(tr);
5968 }
5969 #endif
5970
5971 #ifdef CONFIG_TRACER_MAX_TRACE
5972 if (t->use_max_tr && !had_max_tr) {
5973 ret = tracing_alloc_snapshot_instance(tr);
5974 if (ret < 0)
5975 goto out;
5976 }
5977 #endif
5978
5979 if (t->init) {
5980 ret = tracer_init(t, tr);
5981 if (ret)
5982 goto out;
5983 }
5984
5985 tr->current_trace = t;
5986 tr->current_trace->enabled++;
5987 trace_branch_enable(tr);
5988 out:
5989 mutex_unlock(&trace_types_lock);
5990
5991 return ret;
5992 }
5993
5994 static ssize_t
5995 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5996 size_t cnt, loff_t *ppos)
5997 {
5998 struct trace_array *tr = filp->private_data;
5999 char buf[MAX_TRACER_SIZE+1];
6000 int i;
6001 size_t ret;
6002 int err;
6003
6004 ret = cnt;
6005
6006 if (cnt > MAX_TRACER_SIZE)
6007 cnt = MAX_TRACER_SIZE;
6008
6009 if (copy_from_user(buf, ubuf, cnt))
6010 return -EFAULT;
6011
6012 buf[cnt] = 0;
6013
6014 /* strip ending whitespace. */
6015 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6016 buf[i] = 0;
6017
6018 err = tracing_set_tracer(tr, buf);
6019 if (err)
6020 return err;
6021
6022 *ppos += ret;
6023
6024 return ret;
6025 }
6026
6027 static ssize_t
6028 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6029 size_t cnt, loff_t *ppos)
6030 {
6031 char buf[64];
6032 int r;
6033
6034 r = snprintf(buf, sizeof(buf), "%ld\n",
6035 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6036 if (r > sizeof(buf))
6037 r = sizeof(buf);
6038 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6039 }
6040
6041 static ssize_t
6042 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6043 size_t cnt, loff_t *ppos)
6044 {
6045 unsigned long val;
6046 int ret;
6047
6048 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6049 if (ret)
6050 return ret;
6051
6052 *ptr = val * 1000;
6053
6054 return cnt;
6055 }
6056
6057 static ssize_t
6058 tracing_thresh_read(struct file *filp, char __user *ubuf,
6059 size_t cnt, loff_t *ppos)
6060 {
6061 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6062 }
6063
6064 static ssize_t
6065 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6066 size_t cnt, loff_t *ppos)
6067 {
6068 struct trace_array *tr = filp->private_data;
6069 int ret;
6070
6071 mutex_lock(&trace_types_lock);
6072 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6073 if (ret < 0)
6074 goto out;
6075
6076 if (tr->current_trace->update_thresh) {
6077 ret = tr->current_trace->update_thresh(tr);
6078 if (ret < 0)
6079 goto out;
6080 }
6081
6082 ret = cnt;
6083 out:
6084 mutex_unlock(&trace_types_lock);
6085
6086 return ret;
6087 }
6088
6089 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6090
6091 static ssize_t
6092 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6093 size_t cnt, loff_t *ppos)
6094 {
6095 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6096 }
6097
6098 static ssize_t
6099 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6100 size_t cnt, loff_t *ppos)
6101 {
6102 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6103 }
6104
6105 #endif
6106
6107 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6108 {
6109 struct trace_array *tr = inode->i_private;
6110 struct trace_iterator *iter;
6111 int ret;
6112
6113 ret = tracing_check_open_get_tr(tr);
6114 if (ret)
6115 return ret;
6116
6117 mutex_lock(&trace_types_lock);
6118
6119 /* create a buffer to store the information to pass to userspace */
6120 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6121 if (!iter) {
6122 ret = -ENOMEM;
6123 __trace_array_put(tr);
6124 goto out;
6125 }
6126
6127 trace_seq_init(&iter->seq);
6128 iter->trace = tr->current_trace;
6129
6130 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6131 ret = -ENOMEM;
6132 goto fail;
6133 }
6134
6135 /* trace pipe does not show start of buffer */
6136 cpumask_setall(iter->started);
6137
6138 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6139 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6140
6141 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6142 if (trace_clocks[tr->clock_id].in_ns)
6143 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6144
6145 iter->tr = tr;
6146 iter->array_buffer = &tr->array_buffer;
6147 iter->cpu_file = tracing_get_cpu(inode);
6148 mutex_init(&iter->mutex);
6149 filp->private_data = iter;
6150
6151 if (iter->trace->pipe_open)
6152 iter->trace->pipe_open(iter);
6153
6154 nonseekable_open(inode, filp);
6155
6156 tr->trace_ref++;
6157 out:
6158 mutex_unlock(&trace_types_lock);
6159 return ret;
6160
6161 fail:
6162 kfree(iter);
6163 __trace_array_put(tr);
6164 mutex_unlock(&trace_types_lock);
6165 return ret;
6166 }
6167
6168 static int tracing_release_pipe(struct inode *inode, struct file *file)
6169 {
6170 struct trace_iterator *iter = file->private_data;
6171 struct trace_array *tr = inode->i_private;
6172
6173 mutex_lock(&trace_types_lock);
6174
6175 tr->trace_ref--;
6176
6177 if (iter->trace->pipe_close)
6178 iter->trace->pipe_close(iter);
6179
6180 mutex_unlock(&trace_types_lock);
6181
6182 free_cpumask_var(iter->started);
6183 mutex_destroy(&iter->mutex);
6184 kfree(iter);
6185
6186 trace_array_put(tr);
6187
6188 return 0;
6189 }
6190
6191 static __poll_t
6192 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6193 {
6194 struct trace_array *tr = iter->tr;
6195
6196 /* Iterators are static, they should be filled or empty */
6197 if (trace_buffer_iter(iter, iter->cpu_file))
6198 return EPOLLIN | EPOLLRDNORM;
6199
6200 if (tr->trace_flags & TRACE_ITER_BLOCK)
6201 /*
6202 * Always select as readable when in blocking mode
6203 */
6204 return EPOLLIN | EPOLLRDNORM;
6205 else
6206 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6207 filp, poll_table);
6208 }
6209
6210 static __poll_t
6211 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6212 {
6213 struct trace_iterator *iter = filp->private_data;
6214
6215 return trace_poll(iter, filp, poll_table);
6216 }
6217
6218 /* Must be called with iter->mutex held. */
6219 static int tracing_wait_pipe(struct file *filp)
6220 {
6221 struct trace_iterator *iter = filp->private_data;
6222 int ret;
6223
6224 while (trace_empty(iter)) {
6225
6226 if ((filp->f_flags & O_NONBLOCK)) {
6227 return -EAGAIN;
6228 }
6229
6230 /*
6231 * We block until we read something and tracing is disabled.
6232 * We still block if tracing is disabled, but we have never
6233 * read anything. This allows a user to cat this file, and
6234 * then enable tracing. But after we have read something,
6235 * we give an EOF when tracing is again disabled.
6236 *
6237 * iter->pos will be 0 if we haven't read anything.
6238 */
6239 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6240 break;
6241
6242 mutex_unlock(&iter->mutex);
6243
6244 ret = wait_on_pipe(iter, 0);
6245
6246 mutex_lock(&iter->mutex);
6247
6248 if (ret)
6249 return ret;
6250 }
6251
6252 return 1;
6253 }
6254
6255 /*
6256 * Consumer reader.
6257 */
6258 static ssize_t
6259 tracing_read_pipe(struct file *filp, char __user *ubuf,
6260 size_t cnt, loff_t *ppos)
6261 {
6262 struct trace_iterator *iter = filp->private_data;
6263 ssize_t sret;
6264
6265 /*
6266 * Avoid more than one consumer on a single file descriptor
6267 * This is just a matter of traces coherency, the ring buffer itself
6268 * is protected.
6269 */
6270 mutex_lock(&iter->mutex);
6271
6272 /* return any leftover data */
6273 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6274 if (sret != -EBUSY)
6275 goto out;
6276
6277 trace_seq_init(&iter->seq);
6278
6279 if (iter->trace->read) {
6280 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6281 if (sret)
6282 goto out;
6283 }
6284
6285 waitagain:
6286 sret = tracing_wait_pipe(filp);
6287 if (sret <= 0)
6288 goto out;
6289
6290 /* stop when tracing is finished */
6291 if (trace_empty(iter)) {
6292 sret = 0;
6293 goto out;
6294 }
6295
6296 if (cnt >= PAGE_SIZE)
6297 cnt = PAGE_SIZE - 1;
6298
6299 /* reset all but tr, trace, and overruns */
6300 memset(&iter->seq, 0,
6301 sizeof(struct trace_iterator) -
6302 offsetof(struct trace_iterator, seq));
6303 cpumask_clear(iter->started);
6304 trace_seq_init(&iter->seq);
6305 iter->pos = -1;
6306
6307 trace_event_read_lock();
6308 trace_access_lock(iter->cpu_file);
6309 while (trace_find_next_entry_inc(iter) != NULL) {
6310 enum print_line_t ret;
6311 int save_len = iter->seq.seq.len;
6312
6313 ret = print_trace_line(iter);
6314 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6315 /* don't print partial lines */
6316 iter->seq.seq.len = save_len;
6317 break;
6318 }
6319 if (ret != TRACE_TYPE_NO_CONSUME)
6320 trace_consume(iter);
6321
6322 if (trace_seq_used(&iter->seq) >= cnt)
6323 break;
6324
6325 /*
6326 * Setting the full flag means we reached the trace_seq buffer
6327 * size and we should leave by partial output condition above.
6328 * One of the trace_seq_* functions is not used properly.
6329 */
6330 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6331 iter->ent->type);
6332 }
6333 trace_access_unlock(iter->cpu_file);
6334 trace_event_read_unlock();
6335
6336 /* Now copy what we have to the user */
6337 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6338 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6339 trace_seq_init(&iter->seq);
6340
6341 /*
6342 * If there was nothing to send to user, in spite of consuming trace
6343 * entries, go back to wait for more entries.
6344 */
6345 if (sret == -EBUSY)
6346 goto waitagain;
6347
6348 out:
6349 mutex_unlock(&iter->mutex);
6350
6351 return sret;
6352 }
6353
6354 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6355 unsigned int idx)
6356 {
6357 __free_page(spd->pages[idx]);
6358 }
6359
6360 static size_t
6361 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6362 {
6363 size_t count;
6364 int save_len;
6365 int ret;
6366
6367 /* Seq buffer is page-sized, exactly what we need. */
6368 for (;;) {
6369 save_len = iter->seq.seq.len;
6370 ret = print_trace_line(iter);
6371
6372 if (trace_seq_has_overflowed(&iter->seq)) {
6373 iter->seq.seq.len = save_len;
6374 break;
6375 }
6376
6377 /*
6378 * This should not be hit, because it should only
6379 * be set if the iter->seq overflowed. But check it
6380 * anyway to be safe.
6381 */
6382 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6383 iter->seq.seq.len = save_len;
6384 break;
6385 }
6386
6387 count = trace_seq_used(&iter->seq) - save_len;
6388 if (rem < count) {
6389 rem = 0;
6390 iter->seq.seq.len = save_len;
6391 break;
6392 }
6393
6394 if (ret != TRACE_TYPE_NO_CONSUME)
6395 trace_consume(iter);
6396 rem -= count;
6397 if (!trace_find_next_entry_inc(iter)) {
6398 rem = 0;
6399 iter->ent = NULL;
6400 break;
6401 }
6402 }
6403
6404 return rem;
6405 }
6406
6407 static ssize_t tracing_splice_read_pipe(struct file *filp,
6408 loff_t *ppos,
6409 struct pipe_inode_info *pipe,
6410 size_t len,
6411 unsigned int flags)
6412 {
6413 struct page *pages_def[PIPE_DEF_BUFFERS];
6414 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6415 struct trace_iterator *iter = filp->private_data;
6416 struct splice_pipe_desc spd = {
6417 .pages = pages_def,
6418 .partial = partial_def,
6419 .nr_pages = 0, /* This gets updated below. */
6420 .nr_pages_max = PIPE_DEF_BUFFERS,
6421 .ops = &default_pipe_buf_ops,
6422 .spd_release = tracing_spd_release_pipe,
6423 };
6424 ssize_t ret;
6425 size_t rem;
6426 unsigned int i;
6427
6428 if (splice_grow_spd(pipe, &spd))
6429 return -ENOMEM;
6430
6431 mutex_lock(&iter->mutex);
6432
6433 if (iter->trace->splice_read) {
6434 ret = iter->trace->splice_read(iter, filp,
6435 ppos, pipe, len, flags);
6436 if (ret)
6437 goto out_err;
6438 }
6439
6440 ret = tracing_wait_pipe(filp);
6441 if (ret <= 0)
6442 goto out_err;
6443
6444 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6445 ret = -EFAULT;
6446 goto out_err;
6447 }
6448
6449 trace_event_read_lock();
6450 trace_access_lock(iter->cpu_file);
6451
6452 /* Fill as many pages as possible. */
6453 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6454 spd.pages[i] = alloc_page(GFP_KERNEL);
6455 if (!spd.pages[i])
6456 break;
6457
6458 rem = tracing_fill_pipe_page(rem, iter);
6459
6460 /* Copy the data into the page, so we can start over. */
6461 ret = trace_seq_to_buffer(&iter->seq,
6462 page_address(spd.pages[i]),
6463 trace_seq_used(&iter->seq));
6464 if (ret < 0) {
6465 __free_page(spd.pages[i]);
6466 break;
6467 }
6468 spd.partial[i].offset = 0;
6469 spd.partial[i].len = trace_seq_used(&iter->seq);
6470
6471 trace_seq_init(&iter->seq);
6472 }
6473
6474 trace_access_unlock(iter->cpu_file);
6475 trace_event_read_unlock();
6476 mutex_unlock(&iter->mutex);
6477
6478 spd.nr_pages = i;
6479
6480 if (i)
6481 ret = splice_to_pipe(pipe, &spd);
6482 else
6483 ret = 0;
6484 out:
6485 splice_shrink_spd(&spd);
6486 return ret;
6487
6488 out_err:
6489 mutex_unlock(&iter->mutex);
6490 goto out;
6491 }
6492
6493 static ssize_t
6494 tracing_entries_read(struct file *filp, char __user *ubuf,
6495 size_t cnt, loff_t *ppos)
6496 {
6497 struct inode *inode = file_inode(filp);
6498 struct trace_array *tr = inode->i_private;
6499 int cpu = tracing_get_cpu(inode);
6500 char buf[64];
6501 int r = 0;
6502 ssize_t ret;
6503
6504 mutex_lock(&trace_types_lock);
6505
6506 if (cpu == RING_BUFFER_ALL_CPUS) {
6507 int cpu, buf_size_same;
6508 unsigned long size;
6509
6510 size = 0;
6511 buf_size_same = 1;
6512 /* check if all cpu sizes are same */
6513 for_each_tracing_cpu(cpu) {
6514 /* fill in the size from first enabled cpu */
6515 if (size == 0)
6516 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6517 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6518 buf_size_same = 0;
6519 break;
6520 }
6521 }
6522
6523 if (buf_size_same) {
6524 if (!ring_buffer_expanded)
6525 r = sprintf(buf, "%lu (expanded: %lu)\n",
6526 size >> 10,
6527 trace_buf_size >> 10);
6528 else
6529 r = sprintf(buf, "%lu\n", size >> 10);
6530 } else
6531 r = sprintf(buf, "X\n");
6532 } else
6533 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6534
6535 mutex_unlock(&trace_types_lock);
6536
6537 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6538 return ret;
6539 }
6540
6541 static ssize_t
6542 tracing_entries_write(struct file *filp, const char __user *ubuf,
6543 size_t cnt, loff_t *ppos)
6544 {
6545 struct inode *inode = file_inode(filp);
6546 struct trace_array *tr = inode->i_private;
6547 unsigned long val;
6548 int ret;
6549
6550 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6551 if (ret)
6552 return ret;
6553
6554 /* must have at least 1 entry */
6555 if (!val)
6556 return -EINVAL;
6557
6558 /* value is in KB */
6559 val <<= 10;
6560 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6561 if (ret < 0)
6562 return ret;
6563
6564 *ppos += cnt;
6565
6566 return cnt;
6567 }
6568
6569 static ssize_t
6570 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6571 size_t cnt, loff_t *ppos)
6572 {
6573 struct trace_array *tr = filp->private_data;
6574 char buf[64];
6575 int r, cpu;
6576 unsigned long size = 0, expanded_size = 0;
6577
6578 mutex_lock(&trace_types_lock);
6579 for_each_tracing_cpu(cpu) {
6580 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6581 if (!ring_buffer_expanded)
6582 expanded_size += trace_buf_size >> 10;
6583 }
6584 if (ring_buffer_expanded)
6585 r = sprintf(buf, "%lu\n", size);
6586 else
6587 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6588 mutex_unlock(&trace_types_lock);
6589
6590 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6591 }
6592
6593 static ssize_t
6594 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6595 size_t cnt, loff_t *ppos)
6596 {
6597 /*
6598 * There is no need to read what the user has written, this function
6599 * is just to make sure that there is no error when "echo" is used
6600 */
6601
6602 *ppos += cnt;
6603
6604 return cnt;
6605 }
6606
6607 static int
6608 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6609 {
6610 struct trace_array *tr = inode->i_private;
6611
6612 /* disable tracing ? */
6613 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6614 tracer_tracing_off(tr);
6615 /* resize the ring buffer to 0 */
6616 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6617
6618 trace_array_put(tr);
6619
6620 return 0;
6621 }
6622
6623 static ssize_t
6624 tracing_mark_write(struct file *filp, const char __user *ubuf,
6625 size_t cnt, loff_t *fpos)
6626 {
6627 struct trace_array *tr = filp->private_data;
6628 struct ring_buffer_event *event;
6629 enum event_trigger_type tt = ETT_NONE;
6630 struct trace_buffer *buffer;
6631 struct print_entry *entry;
6632 unsigned long irq_flags;
6633 ssize_t written;
6634 int size;
6635 int len;
6636
6637 /* Used in tracing_mark_raw_write() as well */
6638 #define FAULTED_STR "<faulted>"
6639 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6640
6641 if (tracing_disabled)
6642 return -EINVAL;
6643
6644 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6645 return -EINVAL;
6646
6647 if (cnt > TRACE_BUF_SIZE)
6648 cnt = TRACE_BUF_SIZE;
6649
6650 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6651
6652 local_save_flags(irq_flags);
6653 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6654
6655 /* If less than "<faulted>", then make sure we can still add that */
6656 if (cnt < FAULTED_SIZE)
6657 size += FAULTED_SIZE - cnt;
6658
6659 buffer = tr->array_buffer.buffer;
6660 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6661 irq_flags, preempt_count());
6662 if (unlikely(!event))
6663 /* Ring buffer disabled, return as if not open for write */
6664 return -EBADF;
6665
6666 entry = ring_buffer_event_data(event);
6667 entry->ip = _THIS_IP_;
6668
6669 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6670 if (len) {
6671 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6672 cnt = FAULTED_SIZE;
6673 written = -EFAULT;
6674 } else
6675 written = cnt;
6676 len = cnt;
6677
6678 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6679 /* do not add \n before testing triggers, but add \0 */
6680 entry->buf[cnt] = '\0';
6681 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6682 }
6683
6684 if (entry->buf[cnt - 1] != '\n') {
6685 entry->buf[cnt] = '\n';
6686 entry->buf[cnt + 1] = '\0';
6687 } else
6688 entry->buf[cnt] = '\0';
6689
6690 __buffer_unlock_commit(buffer, event);
6691
6692 if (tt)
6693 event_triggers_post_call(tr->trace_marker_file, tt);
6694
6695 if (written > 0)
6696 *fpos += written;
6697
6698 return written;
6699 }
6700
6701 /* Limit it for now to 3K (including tag) */
6702 #define RAW_DATA_MAX_SIZE (1024*3)
6703
6704 static ssize_t
6705 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6706 size_t cnt, loff_t *fpos)
6707 {
6708 struct trace_array *tr = filp->private_data;
6709 struct ring_buffer_event *event;
6710 struct trace_buffer *buffer;
6711 struct raw_data_entry *entry;
6712 unsigned long irq_flags;
6713 ssize_t written;
6714 int size;
6715 int len;
6716
6717 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6718
6719 if (tracing_disabled)
6720 return -EINVAL;
6721
6722 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6723 return -EINVAL;
6724
6725 /* The marker must at least have a tag id */
6726 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6727 return -EINVAL;
6728
6729 if (cnt > TRACE_BUF_SIZE)
6730 cnt = TRACE_BUF_SIZE;
6731
6732 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6733
6734 local_save_flags(irq_flags);
6735 size = sizeof(*entry) + cnt;
6736 if (cnt < FAULT_SIZE_ID)
6737 size += FAULT_SIZE_ID - cnt;
6738
6739 buffer = tr->array_buffer.buffer;
6740 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6741 irq_flags, preempt_count());
6742 if (!event)
6743 /* Ring buffer disabled, return as if not open for write */
6744 return -EBADF;
6745
6746 entry = ring_buffer_event_data(event);
6747
6748 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6749 if (len) {
6750 entry->id = -1;
6751 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6752 written = -EFAULT;
6753 } else
6754 written = cnt;
6755
6756 __buffer_unlock_commit(buffer, event);
6757
6758 if (written > 0)
6759 *fpos += written;
6760
6761 return written;
6762 }
6763
6764 static int tracing_clock_show(struct seq_file *m, void *v)
6765 {
6766 struct trace_array *tr = m->private;
6767 int i;
6768
6769 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6770 seq_printf(m,
6771 "%s%s%s%s", i ? " " : "",
6772 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6773 i == tr->clock_id ? "]" : "");
6774 seq_putc(m, '\n');
6775
6776 return 0;
6777 }
6778
6779 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6780 {
6781 int i;
6782
6783 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6784 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6785 break;
6786 }
6787 if (i == ARRAY_SIZE(trace_clocks))
6788 return -EINVAL;
6789
6790 mutex_lock(&trace_types_lock);
6791
6792 tr->clock_id = i;
6793
6794 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6795
6796 /*
6797 * New clock may not be consistent with the previous clock.
6798 * Reset the buffer so that it doesn't have incomparable timestamps.
6799 */
6800 tracing_reset_online_cpus(&tr->array_buffer);
6801
6802 #ifdef CONFIG_TRACER_MAX_TRACE
6803 if (tr->max_buffer.buffer)
6804 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6805 tracing_reset_online_cpus(&tr->max_buffer);
6806 #endif
6807
6808 mutex_unlock(&trace_types_lock);
6809
6810 return 0;
6811 }
6812
6813 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6814 size_t cnt, loff_t *fpos)
6815 {
6816 struct seq_file *m = filp->private_data;
6817 struct trace_array *tr = m->private;
6818 char buf[64];
6819 const char *clockstr;
6820 int ret;
6821
6822 if (cnt >= sizeof(buf))
6823 return -EINVAL;
6824
6825 if (copy_from_user(buf, ubuf, cnt))
6826 return -EFAULT;
6827
6828 buf[cnt] = 0;
6829
6830 clockstr = strstrip(buf);
6831
6832 ret = tracing_set_clock(tr, clockstr);
6833 if (ret)
6834 return ret;
6835
6836 *fpos += cnt;
6837
6838 return cnt;
6839 }
6840
6841 static int tracing_clock_open(struct inode *inode, struct file *file)
6842 {
6843 struct trace_array *tr = inode->i_private;
6844 int ret;
6845
6846 ret = tracing_check_open_get_tr(tr);
6847 if (ret)
6848 return ret;
6849
6850 ret = single_open(file, tracing_clock_show, inode->i_private);
6851 if (ret < 0)
6852 trace_array_put(tr);
6853
6854 return ret;
6855 }
6856
6857 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6858 {
6859 struct trace_array *tr = m->private;
6860
6861 mutex_lock(&trace_types_lock);
6862
6863 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6864 seq_puts(m, "delta [absolute]\n");
6865 else
6866 seq_puts(m, "[delta] absolute\n");
6867
6868 mutex_unlock(&trace_types_lock);
6869
6870 return 0;
6871 }
6872
6873 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6874 {
6875 struct trace_array *tr = inode->i_private;
6876 int ret;
6877
6878 ret = tracing_check_open_get_tr(tr);
6879 if (ret)
6880 return ret;
6881
6882 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6883 if (ret < 0)
6884 trace_array_put(tr);
6885
6886 return ret;
6887 }
6888
6889 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6890 {
6891 int ret = 0;
6892
6893 mutex_lock(&trace_types_lock);
6894
6895 if (abs && tr->time_stamp_abs_ref++)
6896 goto out;
6897
6898 if (!abs) {
6899 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6900 ret = -EINVAL;
6901 goto out;
6902 }
6903
6904 if (--tr->time_stamp_abs_ref)
6905 goto out;
6906 }
6907
6908 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6909
6910 #ifdef CONFIG_TRACER_MAX_TRACE
6911 if (tr->max_buffer.buffer)
6912 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6913 #endif
6914 out:
6915 mutex_unlock(&trace_types_lock);
6916
6917 return ret;
6918 }
6919
6920 struct ftrace_buffer_info {
6921 struct trace_iterator iter;
6922 void *spare;
6923 unsigned int spare_cpu;
6924 unsigned int read;
6925 };
6926
6927 #ifdef CONFIG_TRACER_SNAPSHOT
6928 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6929 {
6930 struct trace_array *tr = inode->i_private;
6931 struct trace_iterator *iter;
6932 struct seq_file *m;
6933 int ret;
6934
6935 ret = tracing_check_open_get_tr(tr);
6936 if (ret)
6937 return ret;
6938
6939 if (file->f_mode & FMODE_READ) {
6940 iter = __tracing_open(inode, file, true);
6941 if (IS_ERR(iter))
6942 ret = PTR_ERR(iter);
6943 } else {
6944 /* Writes still need the seq_file to hold the private data */
6945 ret = -ENOMEM;
6946 m = kzalloc(sizeof(*m), GFP_KERNEL);
6947 if (!m)
6948 goto out;
6949 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6950 if (!iter) {
6951 kfree(m);
6952 goto out;
6953 }
6954 ret = 0;
6955
6956 iter->tr = tr;
6957 iter->array_buffer = &tr->max_buffer;
6958 iter->cpu_file = tracing_get_cpu(inode);
6959 m->private = iter;
6960 file->private_data = m;
6961 }
6962 out:
6963 if (ret < 0)
6964 trace_array_put(tr);
6965
6966 return ret;
6967 }
6968
6969 static ssize_t
6970 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6971 loff_t *ppos)
6972 {
6973 struct seq_file *m = filp->private_data;
6974 struct trace_iterator *iter = m->private;
6975 struct trace_array *tr = iter->tr;
6976 unsigned long val;
6977 int ret;
6978
6979 ret = tracing_update_buffers();
6980 if (ret < 0)
6981 return ret;
6982
6983 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6984 if (ret)
6985 return ret;
6986
6987 mutex_lock(&trace_types_lock);
6988
6989 if (tr->current_trace->use_max_tr) {
6990 ret = -EBUSY;
6991 goto out;
6992 }
6993
6994 arch_spin_lock(&tr->max_lock);
6995 if (tr->cond_snapshot)
6996 ret = -EBUSY;
6997 arch_spin_unlock(&tr->max_lock);
6998 if (ret)
6999 goto out;
7000
7001 switch (val) {
7002 case 0:
7003 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7004 ret = -EINVAL;
7005 break;
7006 }
7007 if (tr->allocated_snapshot)
7008 free_snapshot(tr);
7009 break;
7010 case 1:
7011 /* Only allow per-cpu swap if the ring buffer supports it */
7012 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7013 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7014 ret = -EINVAL;
7015 break;
7016 }
7017 #endif
7018 if (tr->allocated_snapshot)
7019 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7020 &tr->array_buffer, iter->cpu_file);
7021 else
7022 ret = tracing_alloc_snapshot_instance(tr);
7023 if (ret < 0)
7024 break;
7025 local_irq_disable();
7026 /* Now, we're going to swap */
7027 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7028 update_max_tr(tr, current, smp_processor_id(), NULL);
7029 else
7030 update_max_tr_single(tr, current, iter->cpu_file);
7031 local_irq_enable();
7032 break;
7033 default:
7034 if (tr->allocated_snapshot) {
7035 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7036 tracing_reset_online_cpus(&tr->max_buffer);
7037 else
7038 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7039 }
7040 break;
7041 }
7042
7043 if (ret >= 0) {
7044 *ppos += cnt;
7045 ret = cnt;
7046 }
7047 out:
7048 mutex_unlock(&trace_types_lock);
7049 return ret;
7050 }
7051
7052 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7053 {
7054 struct seq_file *m = file->private_data;
7055 int ret;
7056
7057 ret = tracing_release(inode, file);
7058
7059 if (file->f_mode & FMODE_READ)
7060 return ret;
7061
7062 /* If write only, the seq_file is just a stub */
7063 if (m)
7064 kfree(m->private);
7065 kfree(m);
7066
7067 return 0;
7068 }
7069
7070 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7071 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7072 size_t count, loff_t *ppos);
7073 static int tracing_buffers_release(struct inode *inode, struct file *file);
7074 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7075 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7076
7077 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7078 {
7079 struct ftrace_buffer_info *info;
7080 int ret;
7081
7082 /* The following checks for tracefs lockdown */
7083 ret = tracing_buffers_open(inode, filp);
7084 if (ret < 0)
7085 return ret;
7086
7087 info = filp->private_data;
7088
7089 if (info->iter.trace->use_max_tr) {
7090 tracing_buffers_release(inode, filp);
7091 return -EBUSY;
7092 }
7093
7094 info->iter.snapshot = true;
7095 info->iter.array_buffer = &info->iter.tr->max_buffer;
7096
7097 return ret;
7098 }
7099
7100 #endif /* CONFIG_TRACER_SNAPSHOT */
7101
7102
7103 static const struct file_operations tracing_thresh_fops = {
7104 .open = tracing_open_generic,
7105 .read = tracing_thresh_read,
7106 .write = tracing_thresh_write,
7107 .llseek = generic_file_llseek,
7108 };
7109
7110 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7111 static const struct file_operations tracing_max_lat_fops = {
7112 .open = tracing_open_generic,
7113 .read = tracing_max_lat_read,
7114 .write = tracing_max_lat_write,
7115 .llseek = generic_file_llseek,
7116 };
7117 #endif
7118
7119 static const struct file_operations set_tracer_fops = {
7120 .open = tracing_open_generic,
7121 .read = tracing_set_trace_read,
7122 .write = tracing_set_trace_write,
7123 .llseek = generic_file_llseek,
7124 };
7125
7126 static const struct file_operations tracing_pipe_fops = {
7127 .open = tracing_open_pipe,
7128 .poll = tracing_poll_pipe,
7129 .read = tracing_read_pipe,
7130 .splice_read = tracing_splice_read_pipe,
7131 .release = tracing_release_pipe,
7132 .llseek = no_llseek,
7133 };
7134
7135 static const struct file_operations tracing_entries_fops = {
7136 .open = tracing_open_generic_tr,
7137 .read = tracing_entries_read,
7138 .write = tracing_entries_write,
7139 .llseek = generic_file_llseek,
7140 .release = tracing_release_generic_tr,
7141 };
7142
7143 static const struct file_operations tracing_total_entries_fops = {
7144 .open = tracing_open_generic_tr,
7145 .read = tracing_total_entries_read,
7146 .llseek = generic_file_llseek,
7147 .release = tracing_release_generic_tr,
7148 };
7149
7150 static const struct file_operations tracing_free_buffer_fops = {
7151 .open = tracing_open_generic_tr,
7152 .write = tracing_free_buffer_write,
7153 .release = tracing_free_buffer_release,
7154 };
7155
7156 static const struct file_operations tracing_mark_fops = {
7157 .open = tracing_open_generic_tr,
7158 .write = tracing_mark_write,
7159 .llseek = generic_file_llseek,
7160 .release = tracing_release_generic_tr,
7161 };
7162
7163 static const struct file_operations tracing_mark_raw_fops = {
7164 .open = tracing_open_generic_tr,
7165 .write = tracing_mark_raw_write,
7166 .llseek = generic_file_llseek,
7167 .release = tracing_release_generic_tr,
7168 };
7169
7170 static const struct file_operations trace_clock_fops = {
7171 .open = tracing_clock_open,
7172 .read = seq_read,
7173 .llseek = seq_lseek,
7174 .release = tracing_single_release_tr,
7175 .write = tracing_clock_write,
7176 };
7177
7178 static const struct file_operations trace_time_stamp_mode_fops = {
7179 .open = tracing_time_stamp_mode_open,
7180 .read = seq_read,
7181 .llseek = seq_lseek,
7182 .release = tracing_single_release_tr,
7183 };
7184
7185 #ifdef CONFIG_TRACER_SNAPSHOT
7186 static const struct file_operations snapshot_fops = {
7187 .open = tracing_snapshot_open,
7188 .read = seq_read,
7189 .write = tracing_snapshot_write,
7190 .llseek = tracing_lseek,
7191 .release = tracing_snapshot_release,
7192 };
7193
7194 static const struct file_operations snapshot_raw_fops = {
7195 .open = snapshot_raw_open,
7196 .read = tracing_buffers_read,
7197 .release = tracing_buffers_release,
7198 .splice_read = tracing_buffers_splice_read,
7199 .llseek = no_llseek,
7200 };
7201
7202 #endif /* CONFIG_TRACER_SNAPSHOT */
7203
7204 #define TRACING_LOG_ERRS_MAX 8
7205 #define TRACING_LOG_LOC_MAX 128
7206
7207 #define CMD_PREFIX " Command: "
7208
7209 struct err_info {
7210 const char **errs; /* ptr to loc-specific array of err strings */
7211 u8 type; /* index into errs -> specific err string */
7212 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7213 u64 ts;
7214 };
7215
7216 struct tracing_log_err {
7217 struct list_head list;
7218 struct err_info info;
7219 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7220 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7221 };
7222
7223 static DEFINE_MUTEX(tracing_err_log_lock);
7224
7225 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7226 {
7227 struct tracing_log_err *err;
7228
7229 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7230 err = kzalloc(sizeof(*err), GFP_KERNEL);
7231 if (!err)
7232 err = ERR_PTR(-ENOMEM);
7233 tr->n_err_log_entries++;
7234
7235 return err;
7236 }
7237
7238 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7239 list_del(&err->list);
7240
7241 return err;
7242 }
7243
7244 /**
7245 * err_pos - find the position of a string within a command for error careting
7246 * @cmd: The tracing command that caused the error
7247 * @str: The string to position the caret at within @cmd
7248 *
7249 * Finds the position of the first occurence of @str within @cmd. The
7250 * return value can be passed to tracing_log_err() for caret placement
7251 * within @cmd.
7252 *
7253 * Returns the index within @cmd of the first occurence of @str or 0
7254 * if @str was not found.
7255 */
7256 unsigned int err_pos(char *cmd, const char *str)
7257 {
7258 char *found;
7259
7260 if (WARN_ON(!strlen(cmd)))
7261 return 0;
7262
7263 found = strstr(cmd, str);
7264 if (found)
7265 return found - cmd;
7266
7267 return 0;
7268 }
7269
7270 /**
7271 * tracing_log_err - write an error to the tracing error log
7272 * @tr: The associated trace array for the error (NULL for top level array)
7273 * @loc: A string describing where the error occurred
7274 * @cmd: The tracing command that caused the error
7275 * @errs: The array of loc-specific static error strings
7276 * @type: The index into errs[], which produces the specific static err string
7277 * @pos: The position the caret should be placed in the cmd
7278 *
7279 * Writes an error into tracing/error_log of the form:
7280 *
7281 * <loc>: error: <text>
7282 * Command: <cmd>
7283 * ^
7284 *
7285 * tracing/error_log is a small log file containing the last
7286 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7287 * unless there has been a tracing error, and the error log can be
7288 * cleared and have its memory freed by writing the empty string in
7289 * truncation mode to it i.e. echo > tracing/error_log.
7290 *
7291 * NOTE: the @errs array along with the @type param are used to
7292 * produce a static error string - this string is not copied and saved
7293 * when the error is logged - only a pointer to it is saved. See
7294 * existing callers for examples of how static strings are typically
7295 * defined for use with tracing_log_err().
7296 */
7297 void tracing_log_err(struct trace_array *tr,
7298 const char *loc, const char *cmd,
7299 const char **errs, u8 type, u8 pos)
7300 {
7301 struct tracing_log_err *err;
7302
7303 if (!tr)
7304 tr = &global_trace;
7305
7306 mutex_lock(&tracing_err_log_lock);
7307 err = get_tracing_log_err(tr);
7308 if (PTR_ERR(err) == -ENOMEM) {
7309 mutex_unlock(&tracing_err_log_lock);
7310 return;
7311 }
7312
7313 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7314 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7315
7316 err->info.errs = errs;
7317 err->info.type = type;
7318 err->info.pos = pos;
7319 err->info.ts = local_clock();
7320
7321 list_add_tail(&err->list, &tr->err_log);
7322 mutex_unlock(&tracing_err_log_lock);
7323 }
7324
7325 static void clear_tracing_err_log(struct trace_array *tr)
7326 {
7327 struct tracing_log_err *err, *next;
7328
7329 mutex_lock(&tracing_err_log_lock);
7330 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7331 list_del(&err->list);
7332 kfree(err);
7333 }
7334
7335 tr->n_err_log_entries = 0;
7336 mutex_unlock(&tracing_err_log_lock);
7337 }
7338
7339 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7340 {
7341 struct trace_array *tr = m->private;
7342
7343 mutex_lock(&tracing_err_log_lock);
7344
7345 return seq_list_start(&tr->err_log, *pos);
7346 }
7347
7348 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7349 {
7350 struct trace_array *tr = m->private;
7351
7352 return seq_list_next(v, &tr->err_log, pos);
7353 }
7354
7355 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7356 {
7357 mutex_unlock(&tracing_err_log_lock);
7358 }
7359
7360 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7361 {
7362 u8 i;
7363
7364 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7365 seq_putc(m, ' ');
7366 for (i = 0; i < pos; i++)
7367 seq_putc(m, ' ');
7368 seq_puts(m, "^\n");
7369 }
7370
7371 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7372 {
7373 struct tracing_log_err *err = v;
7374
7375 if (err) {
7376 const char *err_text = err->info.errs[err->info.type];
7377 u64 sec = err->info.ts;
7378 u32 nsec;
7379
7380 nsec = do_div(sec, NSEC_PER_SEC);
7381 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7382 err->loc, err_text);
7383 seq_printf(m, "%s", err->cmd);
7384 tracing_err_log_show_pos(m, err->info.pos);
7385 }
7386
7387 return 0;
7388 }
7389
7390 static const struct seq_operations tracing_err_log_seq_ops = {
7391 .start = tracing_err_log_seq_start,
7392 .next = tracing_err_log_seq_next,
7393 .stop = tracing_err_log_seq_stop,
7394 .show = tracing_err_log_seq_show
7395 };
7396
7397 static int tracing_err_log_open(struct inode *inode, struct file *file)
7398 {
7399 struct trace_array *tr = inode->i_private;
7400 int ret = 0;
7401
7402 ret = tracing_check_open_get_tr(tr);
7403 if (ret)
7404 return ret;
7405
7406 /* If this file was opened for write, then erase contents */
7407 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7408 clear_tracing_err_log(tr);
7409
7410 if (file->f_mode & FMODE_READ) {
7411 ret = seq_open(file, &tracing_err_log_seq_ops);
7412 if (!ret) {
7413 struct seq_file *m = file->private_data;
7414 m->private = tr;
7415 } else {
7416 trace_array_put(tr);
7417 }
7418 }
7419 return ret;
7420 }
7421
7422 static ssize_t tracing_err_log_write(struct file *file,
7423 const char __user *buffer,
7424 size_t count, loff_t *ppos)
7425 {
7426 return count;
7427 }
7428
7429 static int tracing_err_log_release(struct inode *inode, struct file *file)
7430 {
7431 struct trace_array *tr = inode->i_private;
7432
7433 trace_array_put(tr);
7434
7435 if (file->f_mode & FMODE_READ)
7436 seq_release(inode, file);
7437
7438 return 0;
7439 }
7440
7441 static const struct file_operations tracing_err_log_fops = {
7442 .open = tracing_err_log_open,
7443 .write = tracing_err_log_write,
7444 .read = seq_read,
7445 .llseek = seq_lseek,
7446 .release = tracing_err_log_release,
7447 };
7448
7449 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7450 {
7451 struct trace_array *tr = inode->i_private;
7452 struct ftrace_buffer_info *info;
7453 int ret;
7454
7455 ret = tracing_check_open_get_tr(tr);
7456 if (ret)
7457 return ret;
7458
7459 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7460 if (!info) {
7461 trace_array_put(tr);
7462 return -ENOMEM;
7463 }
7464
7465 mutex_lock(&trace_types_lock);
7466
7467 info->iter.tr = tr;
7468 info->iter.cpu_file = tracing_get_cpu(inode);
7469 info->iter.trace = tr->current_trace;
7470 info->iter.array_buffer = &tr->array_buffer;
7471 info->spare = NULL;
7472 /* Force reading ring buffer for first read */
7473 info->read = (unsigned int)-1;
7474
7475 filp->private_data = info;
7476
7477 tr->trace_ref++;
7478
7479 mutex_unlock(&trace_types_lock);
7480
7481 ret = nonseekable_open(inode, filp);
7482 if (ret < 0)
7483 trace_array_put(tr);
7484
7485 return ret;
7486 }
7487
7488 static __poll_t
7489 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7490 {
7491 struct ftrace_buffer_info *info = filp->private_data;
7492 struct trace_iterator *iter = &info->iter;
7493
7494 return trace_poll(iter, filp, poll_table);
7495 }
7496
7497 static ssize_t
7498 tracing_buffers_read(struct file *filp, char __user *ubuf,
7499 size_t count, loff_t *ppos)
7500 {
7501 struct ftrace_buffer_info *info = filp->private_data;
7502 struct trace_iterator *iter = &info->iter;
7503 ssize_t ret = 0;
7504 ssize_t size;
7505
7506 if (!count)
7507 return 0;
7508
7509 #ifdef CONFIG_TRACER_MAX_TRACE
7510 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7511 return -EBUSY;
7512 #endif
7513
7514 if (!info->spare) {
7515 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7516 iter->cpu_file);
7517 if (IS_ERR(info->spare)) {
7518 ret = PTR_ERR(info->spare);
7519 info->spare = NULL;
7520 } else {
7521 info->spare_cpu = iter->cpu_file;
7522 }
7523 }
7524 if (!info->spare)
7525 return ret;
7526
7527 /* Do we have previous read data to read? */
7528 if (info->read < PAGE_SIZE)
7529 goto read;
7530
7531 again:
7532 trace_access_lock(iter->cpu_file);
7533 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7534 &info->spare,
7535 count,
7536 iter->cpu_file, 0);
7537 trace_access_unlock(iter->cpu_file);
7538
7539 if (ret < 0) {
7540 if (trace_empty(iter)) {
7541 if ((filp->f_flags & O_NONBLOCK))
7542 return -EAGAIN;
7543
7544 ret = wait_on_pipe(iter, 0);
7545 if (ret)
7546 return ret;
7547
7548 goto again;
7549 }
7550 return 0;
7551 }
7552
7553 info->read = 0;
7554 read:
7555 size = PAGE_SIZE - info->read;
7556 if (size > count)
7557 size = count;
7558
7559 ret = copy_to_user(ubuf, info->spare + info->read, size);
7560 if (ret == size)
7561 return -EFAULT;
7562
7563 size -= ret;
7564
7565 *ppos += size;
7566 info->read += size;
7567
7568 return size;
7569 }
7570
7571 static int tracing_buffers_release(struct inode *inode, struct file *file)
7572 {
7573 struct ftrace_buffer_info *info = file->private_data;
7574 struct trace_iterator *iter = &info->iter;
7575
7576 mutex_lock(&trace_types_lock);
7577
7578 iter->tr->trace_ref--;
7579
7580 __trace_array_put(iter->tr);
7581
7582 if (info->spare)
7583 ring_buffer_free_read_page(iter->array_buffer->buffer,
7584 info->spare_cpu, info->spare);
7585 kvfree(info);
7586
7587 mutex_unlock(&trace_types_lock);
7588
7589 return 0;
7590 }
7591
7592 struct buffer_ref {
7593 struct trace_buffer *buffer;
7594 void *page;
7595 int cpu;
7596 refcount_t refcount;
7597 };
7598
7599 static void buffer_ref_release(struct buffer_ref *ref)
7600 {
7601 if (!refcount_dec_and_test(&ref->refcount))
7602 return;
7603 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7604 kfree(ref);
7605 }
7606
7607 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7608 struct pipe_buffer *buf)
7609 {
7610 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7611
7612 buffer_ref_release(ref);
7613 buf->private = 0;
7614 }
7615
7616 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7617 struct pipe_buffer *buf)
7618 {
7619 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7620
7621 if (refcount_read(&ref->refcount) > INT_MAX/2)
7622 return false;
7623
7624 refcount_inc(&ref->refcount);
7625 return true;
7626 }
7627
7628 /* Pipe buffer operations for a buffer. */
7629 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7630 .release = buffer_pipe_buf_release,
7631 .get = buffer_pipe_buf_get,
7632 };
7633
7634 /*
7635 * Callback from splice_to_pipe(), if we need to release some pages
7636 * at the end of the spd in case we error'ed out in filling the pipe.
7637 */
7638 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7639 {
7640 struct buffer_ref *ref =
7641 (struct buffer_ref *)spd->partial[i].private;
7642
7643 buffer_ref_release(ref);
7644 spd->partial[i].private = 0;
7645 }
7646
7647 static ssize_t
7648 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7649 struct pipe_inode_info *pipe, size_t len,
7650 unsigned int flags)
7651 {
7652 struct ftrace_buffer_info *info = file->private_data;
7653 struct trace_iterator *iter = &info->iter;
7654 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7655 struct page *pages_def[PIPE_DEF_BUFFERS];
7656 struct splice_pipe_desc spd = {
7657 .pages = pages_def,
7658 .partial = partial_def,
7659 .nr_pages_max = PIPE_DEF_BUFFERS,
7660 .ops = &buffer_pipe_buf_ops,
7661 .spd_release = buffer_spd_release,
7662 };
7663 struct buffer_ref *ref;
7664 int entries, i;
7665 ssize_t ret = 0;
7666
7667 #ifdef CONFIG_TRACER_MAX_TRACE
7668 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7669 return -EBUSY;
7670 #endif
7671
7672 if (*ppos & (PAGE_SIZE - 1))
7673 return -EINVAL;
7674
7675 if (len & (PAGE_SIZE - 1)) {
7676 if (len < PAGE_SIZE)
7677 return -EINVAL;
7678 len &= PAGE_MASK;
7679 }
7680
7681 if (splice_grow_spd(pipe, &spd))
7682 return -ENOMEM;
7683
7684 again:
7685 trace_access_lock(iter->cpu_file);
7686 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7687
7688 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7689 struct page *page;
7690 int r;
7691
7692 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7693 if (!ref) {
7694 ret = -ENOMEM;
7695 break;
7696 }
7697
7698 refcount_set(&ref->refcount, 1);
7699 ref->buffer = iter->array_buffer->buffer;
7700 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7701 if (IS_ERR(ref->page)) {
7702 ret = PTR_ERR(ref->page);
7703 ref->page = NULL;
7704 kfree(ref);
7705 break;
7706 }
7707 ref->cpu = iter->cpu_file;
7708
7709 r = ring_buffer_read_page(ref->buffer, &ref->page,
7710 len, iter->cpu_file, 1);
7711 if (r < 0) {
7712 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7713 ref->page);
7714 kfree(ref);
7715 break;
7716 }
7717
7718 page = virt_to_page(ref->page);
7719
7720 spd.pages[i] = page;
7721 spd.partial[i].len = PAGE_SIZE;
7722 spd.partial[i].offset = 0;
7723 spd.partial[i].private = (unsigned long)ref;
7724 spd.nr_pages++;
7725 *ppos += PAGE_SIZE;
7726
7727 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7728 }
7729
7730 trace_access_unlock(iter->cpu_file);
7731 spd.nr_pages = i;
7732
7733 /* did we read anything? */
7734 if (!spd.nr_pages) {
7735 if (ret)
7736 goto out;
7737
7738 ret = -EAGAIN;
7739 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7740 goto out;
7741
7742 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7743 if (ret)
7744 goto out;
7745
7746 goto again;
7747 }
7748
7749 ret = splice_to_pipe(pipe, &spd);
7750 out:
7751 splice_shrink_spd(&spd);
7752
7753 return ret;
7754 }
7755
7756 static const struct file_operations tracing_buffers_fops = {
7757 .open = tracing_buffers_open,
7758 .read = tracing_buffers_read,
7759 .poll = tracing_buffers_poll,
7760 .release = tracing_buffers_release,
7761 .splice_read = tracing_buffers_splice_read,
7762 .llseek = no_llseek,
7763 };
7764
7765 static ssize_t
7766 tracing_stats_read(struct file *filp, char __user *ubuf,
7767 size_t count, loff_t *ppos)
7768 {
7769 struct inode *inode = file_inode(filp);
7770 struct trace_array *tr = inode->i_private;
7771 struct array_buffer *trace_buf = &tr->array_buffer;
7772 int cpu = tracing_get_cpu(inode);
7773 struct trace_seq *s;
7774 unsigned long cnt;
7775 unsigned long long t;
7776 unsigned long usec_rem;
7777
7778 s = kmalloc(sizeof(*s), GFP_KERNEL);
7779 if (!s)
7780 return -ENOMEM;
7781
7782 trace_seq_init(s);
7783
7784 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7785 trace_seq_printf(s, "entries: %ld\n", cnt);
7786
7787 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7788 trace_seq_printf(s, "overrun: %ld\n", cnt);
7789
7790 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7791 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7792
7793 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7794 trace_seq_printf(s, "bytes: %ld\n", cnt);
7795
7796 if (trace_clocks[tr->clock_id].in_ns) {
7797 /* local or global for trace_clock */
7798 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7799 usec_rem = do_div(t, USEC_PER_SEC);
7800 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7801 t, usec_rem);
7802
7803 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7804 usec_rem = do_div(t, USEC_PER_SEC);
7805 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7806 } else {
7807 /* counter or tsc mode for trace_clock */
7808 trace_seq_printf(s, "oldest event ts: %llu\n",
7809 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7810
7811 trace_seq_printf(s, "now ts: %llu\n",
7812 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7813 }
7814
7815 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7816 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7817
7818 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7819 trace_seq_printf(s, "read events: %ld\n", cnt);
7820
7821 count = simple_read_from_buffer(ubuf, count, ppos,
7822 s->buffer, trace_seq_used(s));
7823
7824 kfree(s);
7825
7826 return count;
7827 }
7828
7829 static const struct file_operations tracing_stats_fops = {
7830 .open = tracing_open_generic_tr,
7831 .read = tracing_stats_read,
7832 .llseek = generic_file_llseek,
7833 .release = tracing_release_generic_tr,
7834 };
7835
7836 #ifdef CONFIG_DYNAMIC_FTRACE
7837
7838 static ssize_t
7839 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7840 size_t cnt, loff_t *ppos)
7841 {
7842 ssize_t ret;
7843 char *buf;
7844 int r;
7845
7846 /* 256 should be plenty to hold the amount needed */
7847 buf = kmalloc(256, GFP_KERNEL);
7848 if (!buf)
7849 return -ENOMEM;
7850
7851 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7852 ftrace_update_tot_cnt,
7853 ftrace_number_of_pages,
7854 ftrace_number_of_groups);
7855
7856 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7857 kfree(buf);
7858 return ret;
7859 }
7860
7861 static const struct file_operations tracing_dyn_info_fops = {
7862 .open = tracing_open_generic,
7863 .read = tracing_read_dyn_info,
7864 .llseek = generic_file_llseek,
7865 };
7866 #endif /* CONFIG_DYNAMIC_FTRACE */
7867
7868 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7869 static void
7870 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7871 struct trace_array *tr, struct ftrace_probe_ops *ops,
7872 void *data)
7873 {
7874 tracing_snapshot_instance(tr);
7875 }
7876
7877 static void
7878 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7879 struct trace_array *tr, struct ftrace_probe_ops *ops,
7880 void *data)
7881 {
7882 struct ftrace_func_mapper *mapper = data;
7883 long *count = NULL;
7884
7885 if (mapper)
7886 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7887
7888 if (count) {
7889
7890 if (*count <= 0)
7891 return;
7892
7893 (*count)--;
7894 }
7895
7896 tracing_snapshot_instance(tr);
7897 }
7898
7899 static int
7900 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7901 struct ftrace_probe_ops *ops, void *data)
7902 {
7903 struct ftrace_func_mapper *mapper = data;
7904 long *count = NULL;
7905
7906 seq_printf(m, "%ps:", (void *)ip);
7907
7908 seq_puts(m, "snapshot");
7909
7910 if (mapper)
7911 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7912
7913 if (count)
7914 seq_printf(m, ":count=%ld\n", *count);
7915 else
7916 seq_puts(m, ":unlimited\n");
7917
7918 return 0;
7919 }
7920
7921 static int
7922 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7923 unsigned long ip, void *init_data, void **data)
7924 {
7925 struct ftrace_func_mapper *mapper = *data;
7926
7927 if (!mapper) {
7928 mapper = allocate_ftrace_func_mapper();
7929 if (!mapper)
7930 return -ENOMEM;
7931 *data = mapper;
7932 }
7933
7934 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7935 }
7936
7937 static void
7938 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7939 unsigned long ip, void *data)
7940 {
7941 struct ftrace_func_mapper *mapper = data;
7942
7943 if (!ip) {
7944 if (!mapper)
7945 return;
7946 free_ftrace_func_mapper(mapper, NULL);
7947 return;
7948 }
7949
7950 ftrace_func_mapper_remove_ip(mapper, ip);
7951 }
7952
7953 static struct ftrace_probe_ops snapshot_probe_ops = {
7954 .func = ftrace_snapshot,
7955 .print = ftrace_snapshot_print,
7956 };
7957
7958 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7959 .func = ftrace_count_snapshot,
7960 .print = ftrace_snapshot_print,
7961 .init = ftrace_snapshot_init,
7962 .free = ftrace_snapshot_free,
7963 };
7964
7965 static int
7966 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7967 char *glob, char *cmd, char *param, int enable)
7968 {
7969 struct ftrace_probe_ops *ops;
7970 void *count = (void *)-1;
7971 char *number;
7972 int ret;
7973
7974 if (!tr)
7975 return -ENODEV;
7976
7977 /* hash funcs only work with set_ftrace_filter */
7978 if (!enable)
7979 return -EINVAL;
7980
7981 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7982
7983 if (glob[0] == '!')
7984 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7985
7986 if (!param)
7987 goto out_reg;
7988
7989 number = strsep(&param, ":");
7990
7991 if (!strlen(number))
7992 goto out_reg;
7993
7994 /*
7995 * We use the callback data field (which is a pointer)
7996 * as our counter.
7997 */
7998 ret = kstrtoul(number, 0, (unsigned long *)&count);
7999 if (ret)
8000 return ret;
8001
8002 out_reg:
8003 ret = tracing_alloc_snapshot_instance(tr);
8004 if (ret < 0)
8005 goto out;
8006
8007 ret = register_ftrace_function_probe(glob, tr, ops, count);
8008
8009 out:
8010 return ret < 0 ? ret : 0;
8011 }
8012
8013 static struct ftrace_func_command ftrace_snapshot_cmd = {
8014 .name = "snapshot",
8015 .func = ftrace_trace_snapshot_callback,
8016 };
8017
8018 static __init int register_snapshot_cmd(void)
8019 {
8020 return register_ftrace_command(&ftrace_snapshot_cmd);
8021 }
8022 #else
8023 static inline __init int register_snapshot_cmd(void) { return 0; }
8024 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8025
8026 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8027 {
8028 if (WARN_ON(!tr->dir))
8029 return ERR_PTR(-ENODEV);
8030
8031 /* Top directory uses NULL as the parent */
8032 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8033 return NULL;
8034
8035 /* All sub buffers have a descriptor */
8036 return tr->dir;
8037 }
8038
8039 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8040 {
8041 struct dentry *d_tracer;
8042
8043 if (tr->percpu_dir)
8044 return tr->percpu_dir;
8045
8046 d_tracer = tracing_get_dentry(tr);
8047 if (IS_ERR(d_tracer))
8048 return NULL;
8049
8050 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8051
8052 MEM_FAIL(!tr->percpu_dir,
8053 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8054
8055 return tr->percpu_dir;
8056 }
8057
8058 static struct dentry *
8059 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8060 void *data, long cpu, const struct file_operations *fops)
8061 {
8062 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8063
8064 if (ret) /* See tracing_get_cpu() */
8065 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8066 return ret;
8067 }
8068
8069 static void
8070 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8071 {
8072 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8073 struct dentry *d_cpu;
8074 char cpu_dir[30]; /* 30 characters should be more than enough */
8075
8076 if (!d_percpu)
8077 return;
8078
8079 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8080 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8081 if (!d_cpu) {
8082 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8083 return;
8084 }
8085
8086 /* per cpu trace_pipe */
8087 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8088 tr, cpu, &tracing_pipe_fops);
8089
8090 /* per cpu trace */
8091 trace_create_cpu_file("trace", 0644, d_cpu,
8092 tr, cpu, &tracing_fops);
8093
8094 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8095 tr, cpu, &tracing_buffers_fops);
8096
8097 trace_create_cpu_file("stats", 0444, d_cpu,
8098 tr, cpu, &tracing_stats_fops);
8099
8100 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8101 tr, cpu, &tracing_entries_fops);
8102
8103 #ifdef CONFIG_TRACER_SNAPSHOT
8104 trace_create_cpu_file("snapshot", 0644, d_cpu,
8105 tr, cpu, &snapshot_fops);
8106
8107 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8108 tr, cpu, &snapshot_raw_fops);
8109 #endif
8110 }
8111
8112 #ifdef CONFIG_FTRACE_SELFTEST
8113 /* Let selftest have access to static functions in this file */
8114 #include "trace_selftest.c"
8115 #endif
8116
8117 static ssize_t
8118 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8119 loff_t *ppos)
8120 {
8121 struct trace_option_dentry *topt = filp->private_data;
8122 char *buf;
8123
8124 if (topt->flags->val & topt->opt->bit)
8125 buf = "1\n";
8126 else
8127 buf = "0\n";
8128
8129 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8130 }
8131
8132 static ssize_t
8133 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8134 loff_t *ppos)
8135 {
8136 struct trace_option_dentry *topt = filp->private_data;
8137 unsigned long val;
8138 int ret;
8139
8140 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8141 if (ret)
8142 return ret;
8143
8144 if (val != 0 && val != 1)
8145 return -EINVAL;
8146
8147 if (!!(topt->flags->val & topt->opt->bit) != val) {
8148 mutex_lock(&trace_types_lock);
8149 ret = __set_tracer_option(topt->tr, topt->flags,
8150 topt->opt, !val);
8151 mutex_unlock(&trace_types_lock);
8152 if (ret)
8153 return ret;
8154 }
8155
8156 *ppos += cnt;
8157
8158 return cnt;
8159 }
8160
8161
8162 static const struct file_operations trace_options_fops = {
8163 .open = tracing_open_generic,
8164 .read = trace_options_read,
8165 .write = trace_options_write,
8166 .llseek = generic_file_llseek,
8167 };
8168
8169 /*
8170 * In order to pass in both the trace_array descriptor as well as the index
8171 * to the flag that the trace option file represents, the trace_array
8172 * has a character array of trace_flags_index[], which holds the index
8173 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8174 * The address of this character array is passed to the flag option file
8175 * read/write callbacks.
8176 *
8177 * In order to extract both the index and the trace_array descriptor,
8178 * get_tr_index() uses the following algorithm.
8179 *
8180 * idx = *ptr;
8181 *
8182 * As the pointer itself contains the address of the index (remember
8183 * index[1] == 1).
8184 *
8185 * Then to get the trace_array descriptor, by subtracting that index
8186 * from the ptr, we get to the start of the index itself.
8187 *
8188 * ptr - idx == &index[0]
8189 *
8190 * Then a simple container_of() from that pointer gets us to the
8191 * trace_array descriptor.
8192 */
8193 static void get_tr_index(void *data, struct trace_array **ptr,
8194 unsigned int *pindex)
8195 {
8196 *pindex = *(unsigned char *)data;
8197
8198 *ptr = container_of(data - *pindex, struct trace_array,
8199 trace_flags_index);
8200 }
8201
8202 static ssize_t
8203 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8204 loff_t *ppos)
8205 {
8206 void *tr_index = filp->private_data;
8207 struct trace_array *tr;
8208 unsigned int index;
8209 char *buf;
8210
8211 get_tr_index(tr_index, &tr, &index);
8212
8213 if (tr->trace_flags & (1 << index))
8214 buf = "1\n";
8215 else
8216 buf = "0\n";
8217
8218 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8219 }
8220
8221 static ssize_t
8222 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8223 loff_t *ppos)
8224 {
8225 void *tr_index = filp->private_data;
8226 struct trace_array *tr;
8227 unsigned int index;
8228 unsigned long val;
8229 int ret;
8230
8231 get_tr_index(tr_index, &tr, &index);
8232
8233 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8234 if (ret)
8235 return ret;
8236
8237 if (val != 0 && val != 1)
8238 return -EINVAL;
8239
8240 mutex_lock(&event_mutex);
8241 mutex_lock(&trace_types_lock);
8242 ret = set_tracer_flag(tr, 1 << index, val);
8243 mutex_unlock(&trace_types_lock);
8244 mutex_unlock(&event_mutex);
8245
8246 if (ret < 0)
8247 return ret;
8248
8249 *ppos += cnt;
8250
8251 return cnt;
8252 }
8253
8254 static const struct file_operations trace_options_core_fops = {
8255 .open = tracing_open_generic,
8256 .read = trace_options_core_read,
8257 .write = trace_options_core_write,
8258 .llseek = generic_file_llseek,
8259 };
8260
8261 struct dentry *trace_create_file(const char *name,
8262 umode_t mode,
8263 struct dentry *parent,
8264 void *data,
8265 const struct file_operations *fops)
8266 {
8267 struct dentry *ret;
8268
8269 ret = tracefs_create_file(name, mode, parent, data, fops);
8270 if (!ret)
8271 pr_warn("Could not create tracefs '%s' entry\n", name);
8272
8273 return ret;
8274 }
8275
8276
8277 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8278 {
8279 struct dentry *d_tracer;
8280
8281 if (tr->options)
8282 return tr->options;
8283
8284 d_tracer = tracing_get_dentry(tr);
8285 if (IS_ERR(d_tracer))
8286 return NULL;
8287
8288 tr->options = tracefs_create_dir("options", d_tracer);
8289 if (!tr->options) {
8290 pr_warn("Could not create tracefs directory 'options'\n");
8291 return NULL;
8292 }
8293
8294 return tr->options;
8295 }
8296
8297 static void
8298 create_trace_option_file(struct trace_array *tr,
8299 struct trace_option_dentry *topt,
8300 struct tracer_flags *flags,
8301 struct tracer_opt *opt)
8302 {
8303 struct dentry *t_options;
8304
8305 t_options = trace_options_init_dentry(tr);
8306 if (!t_options)
8307 return;
8308
8309 topt->flags = flags;
8310 topt->opt = opt;
8311 topt->tr = tr;
8312
8313 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8314 &trace_options_fops);
8315
8316 }
8317
8318 static void
8319 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8320 {
8321 struct trace_option_dentry *topts;
8322 struct trace_options *tr_topts;
8323 struct tracer_flags *flags;
8324 struct tracer_opt *opts;
8325 int cnt;
8326 int i;
8327
8328 if (!tracer)
8329 return;
8330
8331 flags = tracer->flags;
8332
8333 if (!flags || !flags->opts)
8334 return;
8335
8336 /*
8337 * If this is an instance, only create flags for tracers
8338 * the instance may have.
8339 */
8340 if (!trace_ok_for_array(tracer, tr))
8341 return;
8342
8343 for (i = 0; i < tr->nr_topts; i++) {
8344 /* Make sure there's no duplicate flags. */
8345 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8346 return;
8347 }
8348
8349 opts = flags->opts;
8350
8351 for (cnt = 0; opts[cnt].name; cnt++)
8352 ;
8353
8354 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8355 if (!topts)
8356 return;
8357
8358 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8359 GFP_KERNEL);
8360 if (!tr_topts) {
8361 kfree(topts);
8362 return;
8363 }
8364
8365 tr->topts = tr_topts;
8366 tr->topts[tr->nr_topts].tracer = tracer;
8367 tr->topts[tr->nr_topts].topts = topts;
8368 tr->nr_topts++;
8369
8370 for (cnt = 0; opts[cnt].name; cnt++) {
8371 create_trace_option_file(tr, &topts[cnt], flags,
8372 &opts[cnt]);
8373 MEM_FAIL(topts[cnt].entry == NULL,
8374 "Failed to create trace option: %s",
8375 opts[cnt].name);
8376 }
8377 }
8378
8379 static struct dentry *
8380 create_trace_option_core_file(struct trace_array *tr,
8381 const char *option, long index)
8382 {
8383 struct dentry *t_options;
8384
8385 t_options = trace_options_init_dentry(tr);
8386 if (!t_options)
8387 return NULL;
8388
8389 return trace_create_file(option, 0644, t_options,
8390 (void *)&tr->trace_flags_index[index],
8391 &trace_options_core_fops);
8392 }
8393
8394 static void create_trace_options_dir(struct trace_array *tr)
8395 {
8396 struct dentry *t_options;
8397 bool top_level = tr == &global_trace;
8398 int i;
8399
8400 t_options = trace_options_init_dentry(tr);
8401 if (!t_options)
8402 return;
8403
8404 for (i = 0; trace_options[i]; i++) {
8405 if (top_level ||
8406 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8407 create_trace_option_core_file(tr, trace_options[i], i);
8408 }
8409 }
8410
8411 static ssize_t
8412 rb_simple_read(struct file *filp, char __user *ubuf,
8413 size_t cnt, loff_t *ppos)
8414 {
8415 struct trace_array *tr = filp->private_data;
8416 char buf[64];
8417 int r;
8418
8419 r = tracer_tracing_is_on(tr);
8420 r = sprintf(buf, "%d\n", r);
8421
8422 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8423 }
8424
8425 static ssize_t
8426 rb_simple_write(struct file *filp, const char __user *ubuf,
8427 size_t cnt, loff_t *ppos)
8428 {
8429 struct trace_array *tr = filp->private_data;
8430 struct trace_buffer *buffer = tr->array_buffer.buffer;
8431 unsigned long val;
8432 int ret;
8433
8434 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8435 if (ret)
8436 return ret;
8437
8438 if (buffer) {
8439 mutex_lock(&trace_types_lock);
8440 if (!!val == tracer_tracing_is_on(tr)) {
8441 val = 0; /* do nothing */
8442 } else if (val) {
8443 tracer_tracing_on(tr);
8444 if (tr->current_trace->start)
8445 tr->current_trace->start(tr);
8446 } else {
8447 tracer_tracing_off(tr);
8448 if (tr->current_trace->stop)
8449 tr->current_trace->stop(tr);
8450 }
8451 mutex_unlock(&trace_types_lock);
8452 }
8453
8454 (*ppos)++;
8455
8456 return cnt;
8457 }
8458
8459 static const struct file_operations rb_simple_fops = {
8460 .open = tracing_open_generic_tr,
8461 .read = rb_simple_read,
8462 .write = rb_simple_write,
8463 .release = tracing_release_generic_tr,
8464 .llseek = default_llseek,
8465 };
8466
8467 static ssize_t
8468 buffer_percent_read(struct file *filp, char __user *ubuf,
8469 size_t cnt, loff_t *ppos)
8470 {
8471 struct trace_array *tr = filp->private_data;
8472 char buf[64];
8473 int r;
8474
8475 r = tr->buffer_percent;
8476 r = sprintf(buf, "%d\n", r);
8477
8478 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8479 }
8480
8481 static ssize_t
8482 buffer_percent_write(struct file *filp, const char __user *ubuf,
8483 size_t cnt, loff_t *ppos)
8484 {
8485 struct trace_array *tr = filp->private_data;
8486 unsigned long val;
8487 int ret;
8488
8489 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8490 if (ret)
8491 return ret;
8492
8493 if (val > 100)
8494 return -EINVAL;
8495
8496 if (!val)
8497 val = 1;
8498
8499 tr->buffer_percent = val;
8500
8501 (*ppos)++;
8502
8503 return cnt;
8504 }
8505
8506 static const struct file_operations buffer_percent_fops = {
8507 .open = tracing_open_generic_tr,
8508 .read = buffer_percent_read,
8509 .write = buffer_percent_write,
8510 .release = tracing_release_generic_tr,
8511 .llseek = default_llseek,
8512 };
8513
8514 static struct dentry *trace_instance_dir;
8515
8516 static void
8517 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8518
8519 static int
8520 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8521 {
8522 enum ring_buffer_flags rb_flags;
8523
8524 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8525
8526 buf->tr = tr;
8527
8528 buf->buffer = ring_buffer_alloc(size, rb_flags);
8529 if (!buf->buffer)
8530 return -ENOMEM;
8531
8532 buf->data = alloc_percpu(struct trace_array_cpu);
8533 if (!buf->data) {
8534 ring_buffer_free(buf->buffer);
8535 buf->buffer = NULL;
8536 return -ENOMEM;
8537 }
8538
8539 /* Allocate the first page for all buffers */
8540 set_buffer_entries(&tr->array_buffer,
8541 ring_buffer_size(tr->array_buffer.buffer, 0));
8542
8543 return 0;
8544 }
8545
8546 static int allocate_trace_buffers(struct trace_array *tr, int size)
8547 {
8548 int ret;
8549
8550 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8551 if (ret)
8552 return ret;
8553
8554 #ifdef CONFIG_TRACER_MAX_TRACE
8555 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8556 allocate_snapshot ? size : 1);
8557 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8558 ring_buffer_free(tr->array_buffer.buffer);
8559 tr->array_buffer.buffer = NULL;
8560 free_percpu(tr->array_buffer.data);
8561 tr->array_buffer.data = NULL;
8562 return -ENOMEM;
8563 }
8564 tr->allocated_snapshot = allocate_snapshot;
8565
8566 /*
8567 * Only the top level trace array gets its snapshot allocated
8568 * from the kernel command line.
8569 */
8570 allocate_snapshot = false;
8571 #endif
8572
8573 return 0;
8574 }
8575
8576 static void free_trace_buffer(struct array_buffer *buf)
8577 {
8578 if (buf->buffer) {
8579 ring_buffer_free(buf->buffer);
8580 buf->buffer = NULL;
8581 free_percpu(buf->data);
8582 buf->data = NULL;
8583 }
8584 }
8585
8586 static void free_trace_buffers(struct trace_array *tr)
8587 {
8588 if (!tr)
8589 return;
8590
8591 free_trace_buffer(&tr->array_buffer);
8592
8593 #ifdef CONFIG_TRACER_MAX_TRACE
8594 free_trace_buffer(&tr->max_buffer);
8595 #endif
8596 }
8597
8598 static void init_trace_flags_index(struct trace_array *tr)
8599 {
8600 int i;
8601
8602 /* Used by the trace options files */
8603 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8604 tr->trace_flags_index[i] = i;
8605 }
8606
8607 static void __update_tracer_options(struct trace_array *tr)
8608 {
8609 struct tracer *t;
8610
8611 for (t = trace_types; t; t = t->next)
8612 add_tracer_options(tr, t);
8613 }
8614
8615 static void update_tracer_options(struct trace_array *tr)
8616 {
8617 mutex_lock(&trace_types_lock);
8618 __update_tracer_options(tr);
8619 mutex_unlock(&trace_types_lock);
8620 }
8621
8622 /* Must have trace_types_lock held */
8623 struct trace_array *trace_array_find(const char *instance)
8624 {
8625 struct trace_array *tr, *found = NULL;
8626
8627 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8628 if (tr->name && strcmp(tr->name, instance) == 0) {
8629 found = tr;
8630 break;
8631 }
8632 }
8633
8634 return found;
8635 }
8636
8637 struct trace_array *trace_array_find_get(const char *instance)
8638 {
8639 struct trace_array *tr;
8640
8641 mutex_lock(&trace_types_lock);
8642 tr = trace_array_find(instance);
8643 if (tr)
8644 tr->ref++;
8645 mutex_unlock(&trace_types_lock);
8646
8647 return tr;
8648 }
8649
8650 static struct trace_array *trace_array_create(const char *name)
8651 {
8652 struct trace_array *tr;
8653 int ret;
8654
8655 ret = -ENOMEM;
8656 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8657 if (!tr)
8658 return ERR_PTR(ret);
8659
8660 tr->name = kstrdup(name, GFP_KERNEL);
8661 if (!tr->name)
8662 goto out_free_tr;
8663
8664 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8665 goto out_free_tr;
8666
8667 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8668
8669 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8670
8671 raw_spin_lock_init(&tr->start_lock);
8672
8673 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8674
8675 tr->current_trace = &nop_trace;
8676
8677 INIT_LIST_HEAD(&tr->systems);
8678 INIT_LIST_HEAD(&tr->events);
8679 INIT_LIST_HEAD(&tr->hist_vars);
8680 INIT_LIST_HEAD(&tr->err_log);
8681
8682 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8683 goto out_free_tr;
8684
8685 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8686 if (!tr->dir)
8687 goto out_free_tr;
8688
8689 ret = event_trace_add_tracer(tr->dir, tr);
8690 if (ret) {
8691 tracefs_remove(tr->dir);
8692 goto out_free_tr;
8693 }
8694
8695 ftrace_init_trace_array(tr);
8696
8697 init_tracer_tracefs(tr, tr->dir);
8698 init_trace_flags_index(tr);
8699 __update_tracer_options(tr);
8700
8701 list_add(&tr->list, &ftrace_trace_arrays);
8702
8703 tr->ref++;
8704
8705
8706 return tr;
8707
8708 out_free_tr:
8709 free_trace_buffers(tr);
8710 free_cpumask_var(tr->tracing_cpumask);
8711 kfree(tr->name);
8712 kfree(tr);
8713
8714 return ERR_PTR(ret);
8715 }
8716
8717 static int instance_mkdir(const char *name)
8718 {
8719 struct trace_array *tr;
8720 int ret;
8721
8722 mutex_lock(&event_mutex);
8723 mutex_lock(&trace_types_lock);
8724
8725 ret = -EEXIST;
8726 if (trace_array_find(name))
8727 goto out_unlock;
8728
8729 tr = trace_array_create(name);
8730
8731 ret = PTR_ERR_OR_ZERO(tr);
8732
8733 out_unlock:
8734 mutex_unlock(&trace_types_lock);
8735 mutex_unlock(&event_mutex);
8736 return ret;
8737 }
8738
8739 /**
8740 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8741 * @name: The name of the trace array to be looked up/created.
8742 *
8743 * Returns pointer to trace array with given name.
8744 * NULL, if it cannot be created.
8745 *
8746 * NOTE: This function increments the reference counter associated with the
8747 * trace array returned. This makes sure it cannot be freed while in use.
8748 * Use trace_array_put() once the trace array is no longer needed.
8749 * If the trace_array is to be freed, trace_array_destroy() needs to
8750 * be called after the trace_array_put(), or simply let user space delete
8751 * it from the tracefs instances directory. But until the
8752 * trace_array_put() is called, user space can not delete it.
8753 *
8754 */
8755 struct trace_array *trace_array_get_by_name(const char *name)
8756 {
8757 struct trace_array *tr;
8758
8759 mutex_lock(&event_mutex);
8760 mutex_lock(&trace_types_lock);
8761
8762 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8763 if (tr->name && strcmp(tr->name, name) == 0)
8764 goto out_unlock;
8765 }
8766
8767 tr = trace_array_create(name);
8768
8769 if (IS_ERR(tr))
8770 tr = NULL;
8771 out_unlock:
8772 if (tr)
8773 tr->ref++;
8774
8775 mutex_unlock(&trace_types_lock);
8776 mutex_unlock(&event_mutex);
8777 return tr;
8778 }
8779 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
8780
8781 static int __remove_instance(struct trace_array *tr)
8782 {
8783 int i;
8784
8785 /* Reference counter for a newly created trace array = 1. */
8786 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8787 return -EBUSY;
8788
8789 list_del(&tr->list);
8790
8791 /* Disable all the flags that were enabled coming in */
8792 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8793 if ((1 << i) & ZEROED_TRACE_FLAGS)
8794 set_tracer_flag(tr, 1 << i, 0);
8795 }
8796
8797 tracing_set_nop(tr);
8798 clear_ftrace_function_probes(tr);
8799 event_trace_del_tracer(tr);
8800 ftrace_clear_pids(tr);
8801 ftrace_destroy_function_files(tr);
8802 tracefs_remove(tr->dir);
8803 free_trace_buffers(tr);
8804
8805 for (i = 0; i < tr->nr_topts; i++) {
8806 kfree(tr->topts[i].topts);
8807 }
8808 kfree(tr->topts);
8809
8810 free_cpumask_var(tr->tracing_cpumask);
8811 kfree(tr->name);
8812 kfree(tr);
8813 tr = NULL;
8814
8815 return 0;
8816 }
8817
8818 int trace_array_destroy(struct trace_array *this_tr)
8819 {
8820 struct trace_array *tr;
8821 int ret;
8822
8823 if (!this_tr)
8824 return -EINVAL;
8825
8826 mutex_lock(&event_mutex);
8827 mutex_lock(&trace_types_lock);
8828
8829 ret = -ENODEV;
8830
8831 /* Making sure trace array exists before destroying it. */
8832 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8833 if (tr == this_tr) {
8834 ret = __remove_instance(tr);
8835 break;
8836 }
8837 }
8838
8839 mutex_unlock(&trace_types_lock);
8840 mutex_unlock(&event_mutex);
8841
8842 return ret;
8843 }
8844 EXPORT_SYMBOL_GPL(trace_array_destroy);
8845
8846 static int instance_rmdir(const char *name)
8847 {
8848 struct trace_array *tr;
8849 int ret;
8850
8851 mutex_lock(&event_mutex);
8852 mutex_lock(&trace_types_lock);
8853
8854 ret = -ENODEV;
8855 tr = trace_array_find(name);
8856 if (tr)
8857 ret = __remove_instance(tr);
8858
8859 mutex_unlock(&trace_types_lock);
8860 mutex_unlock(&event_mutex);
8861
8862 return ret;
8863 }
8864
8865 static __init void create_trace_instances(struct dentry *d_tracer)
8866 {
8867 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8868 instance_mkdir,
8869 instance_rmdir);
8870 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8871 return;
8872 }
8873
8874 static void
8875 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8876 {
8877 struct trace_event_file *file;
8878 int cpu;
8879
8880 trace_create_file("available_tracers", 0444, d_tracer,
8881 tr, &show_traces_fops);
8882
8883 trace_create_file("current_tracer", 0644, d_tracer,
8884 tr, &set_tracer_fops);
8885
8886 trace_create_file("tracing_cpumask", 0644, d_tracer,
8887 tr, &tracing_cpumask_fops);
8888
8889 trace_create_file("trace_options", 0644, d_tracer,
8890 tr, &tracing_iter_fops);
8891
8892 trace_create_file("trace", 0644, d_tracer,
8893 tr, &tracing_fops);
8894
8895 trace_create_file("trace_pipe", 0444, d_tracer,
8896 tr, &tracing_pipe_fops);
8897
8898 trace_create_file("buffer_size_kb", 0644, d_tracer,
8899 tr, &tracing_entries_fops);
8900
8901 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8902 tr, &tracing_total_entries_fops);
8903
8904 trace_create_file("free_buffer", 0200, d_tracer,
8905 tr, &tracing_free_buffer_fops);
8906
8907 trace_create_file("trace_marker", 0220, d_tracer,
8908 tr, &tracing_mark_fops);
8909
8910 file = __find_event_file(tr, "ftrace", "print");
8911 if (file && file->dir)
8912 trace_create_file("trigger", 0644, file->dir, file,
8913 &event_trigger_fops);
8914 tr->trace_marker_file = file;
8915
8916 trace_create_file("trace_marker_raw", 0220, d_tracer,
8917 tr, &tracing_mark_raw_fops);
8918
8919 trace_create_file("trace_clock", 0644, d_tracer, tr,
8920 &trace_clock_fops);
8921
8922 trace_create_file("tracing_on", 0644, d_tracer,
8923 tr, &rb_simple_fops);
8924
8925 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8926 &trace_time_stamp_mode_fops);
8927
8928 tr->buffer_percent = 50;
8929
8930 trace_create_file("buffer_percent", 0444, d_tracer,
8931 tr, &buffer_percent_fops);
8932
8933 create_trace_options_dir(tr);
8934
8935 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8936 trace_create_maxlat_file(tr, d_tracer);
8937 #endif
8938
8939 if (ftrace_create_function_files(tr, d_tracer))
8940 MEM_FAIL(1, "Could not allocate function filter files");
8941
8942 #ifdef CONFIG_TRACER_SNAPSHOT
8943 trace_create_file("snapshot", 0644, d_tracer,
8944 tr, &snapshot_fops);
8945 #endif
8946
8947 trace_create_file("error_log", 0644, d_tracer,
8948 tr, &tracing_err_log_fops);
8949
8950 for_each_tracing_cpu(cpu)
8951 tracing_init_tracefs_percpu(tr, cpu);
8952
8953 ftrace_init_tracefs(tr, d_tracer);
8954 }
8955
8956 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
8957 {
8958 struct vfsmount *mnt;
8959 struct file_system_type *type;
8960
8961 /*
8962 * To maintain backward compatibility for tools that mount
8963 * debugfs to get to the tracing facility, tracefs is automatically
8964 * mounted to the debugfs/tracing directory.
8965 */
8966 type = get_fs_type("tracefs");
8967 if (!type)
8968 return NULL;
8969 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8970 put_filesystem(type);
8971 if (IS_ERR(mnt))
8972 return NULL;
8973 mntget(mnt);
8974
8975 return mnt;
8976 }
8977
8978 /**
8979 * tracing_init_dentry - initialize top level trace array
8980 *
8981 * This is called when creating files or directories in the tracing
8982 * directory. It is called via fs_initcall() by any of the boot up code
8983 * and expects to return the dentry of the top level tracing directory.
8984 */
8985 struct dentry *tracing_init_dentry(void)
8986 {
8987 struct trace_array *tr = &global_trace;
8988
8989 if (security_locked_down(LOCKDOWN_TRACEFS)) {
8990 pr_warn("Tracing disabled due to lockdown\n");
8991 return ERR_PTR(-EPERM);
8992 }
8993
8994 /* The top level trace array uses NULL as parent */
8995 if (tr->dir)
8996 return NULL;
8997
8998 if (WARN_ON(!tracefs_initialized()))
8999 return ERR_PTR(-ENODEV);
9000
9001 /*
9002 * As there may still be users that expect the tracing
9003 * files to exist in debugfs/tracing, we must automount
9004 * the tracefs file system there, so older tools still
9005 * work with the newer kerenl.
9006 */
9007 tr->dir = debugfs_create_automount("tracing", NULL,
9008 trace_automount, NULL);
9009
9010 return NULL;
9011 }
9012
9013 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9014 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9015
9016 static void __init trace_eval_init(void)
9017 {
9018 int len;
9019
9020 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9021 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9022 }
9023
9024 #ifdef CONFIG_MODULES
9025 static void trace_module_add_evals(struct module *mod)
9026 {
9027 if (!mod->num_trace_evals)
9028 return;
9029
9030 /*
9031 * Modules with bad taint do not have events created, do
9032 * not bother with enums either.
9033 */
9034 if (trace_module_has_bad_taint(mod))
9035 return;
9036
9037 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9038 }
9039
9040 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9041 static void trace_module_remove_evals(struct module *mod)
9042 {
9043 union trace_eval_map_item *map;
9044 union trace_eval_map_item **last = &trace_eval_maps;
9045
9046 if (!mod->num_trace_evals)
9047 return;
9048
9049 mutex_lock(&trace_eval_mutex);
9050
9051 map = trace_eval_maps;
9052
9053 while (map) {
9054 if (map->head.mod == mod)
9055 break;
9056 map = trace_eval_jmp_to_tail(map);
9057 last = &map->tail.next;
9058 map = map->tail.next;
9059 }
9060 if (!map)
9061 goto out;
9062
9063 *last = trace_eval_jmp_to_tail(map)->tail.next;
9064 kfree(map);
9065 out:
9066 mutex_unlock(&trace_eval_mutex);
9067 }
9068 #else
9069 static inline void trace_module_remove_evals(struct module *mod) { }
9070 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9071
9072 static int trace_module_notify(struct notifier_block *self,
9073 unsigned long val, void *data)
9074 {
9075 struct module *mod = data;
9076
9077 switch (val) {
9078 case MODULE_STATE_COMING:
9079 trace_module_add_evals(mod);
9080 break;
9081 case MODULE_STATE_GOING:
9082 trace_module_remove_evals(mod);
9083 break;
9084 }
9085
9086 return 0;
9087 }
9088
9089 static struct notifier_block trace_module_nb = {
9090 .notifier_call = trace_module_notify,
9091 .priority = 0,
9092 };
9093 #endif /* CONFIG_MODULES */
9094
9095 static __init int tracer_init_tracefs(void)
9096 {
9097 struct dentry *d_tracer;
9098
9099 trace_access_lock_init();
9100
9101 d_tracer = tracing_init_dentry();
9102 if (IS_ERR(d_tracer))
9103 return 0;
9104
9105 event_trace_init();
9106
9107 init_tracer_tracefs(&global_trace, d_tracer);
9108 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
9109
9110 trace_create_file("tracing_thresh", 0644, d_tracer,
9111 &global_trace, &tracing_thresh_fops);
9112
9113 trace_create_file("README", 0444, d_tracer,
9114 NULL, &tracing_readme_fops);
9115
9116 trace_create_file("saved_cmdlines", 0444, d_tracer,
9117 NULL, &tracing_saved_cmdlines_fops);
9118
9119 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9120 NULL, &tracing_saved_cmdlines_size_fops);
9121
9122 trace_create_file("saved_tgids", 0444, d_tracer,
9123 NULL, &tracing_saved_tgids_fops);
9124
9125 trace_eval_init();
9126
9127 trace_create_eval_file(d_tracer);
9128
9129 #ifdef CONFIG_MODULES
9130 register_module_notifier(&trace_module_nb);
9131 #endif
9132
9133 #ifdef CONFIG_DYNAMIC_FTRACE
9134 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
9135 NULL, &tracing_dyn_info_fops);
9136 #endif
9137
9138 create_trace_instances(d_tracer);
9139
9140 update_tracer_options(&global_trace);
9141
9142 return 0;
9143 }
9144
9145 static int trace_panic_handler(struct notifier_block *this,
9146 unsigned long event, void *unused)
9147 {
9148 if (ftrace_dump_on_oops)
9149 ftrace_dump(ftrace_dump_on_oops);
9150 return NOTIFY_OK;
9151 }
9152
9153 static struct notifier_block trace_panic_notifier = {
9154 .notifier_call = trace_panic_handler,
9155 .next = NULL,
9156 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9157 };
9158
9159 static int trace_die_handler(struct notifier_block *self,
9160 unsigned long val,
9161 void *data)
9162 {
9163 switch (val) {
9164 case DIE_OOPS:
9165 if (ftrace_dump_on_oops)
9166 ftrace_dump(ftrace_dump_on_oops);
9167 break;
9168 default:
9169 break;
9170 }
9171 return NOTIFY_OK;
9172 }
9173
9174 static struct notifier_block trace_die_notifier = {
9175 .notifier_call = trace_die_handler,
9176 .priority = 200
9177 };
9178
9179 /*
9180 * printk is set to max of 1024, we really don't need it that big.
9181 * Nothing should be printing 1000 characters anyway.
9182 */
9183 #define TRACE_MAX_PRINT 1000
9184
9185 /*
9186 * Define here KERN_TRACE so that we have one place to modify
9187 * it if we decide to change what log level the ftrace dump
9188 * should be at.
9189 */
9190 #define KERN_TRACE KERN_EMERG
9191
9192 void
9193 trace_printk_seq(struct trace_seq *s)
9194 {
9195 /* Probably should print a warning here. */
9196 if (s->seq.len >= TRACE_MAX_PRINT)
9197 s->seq.len = TRACE_MAX_PRINT;
9198
9199 /*
9200 * More paranoid code. Although the buffer size is set to
9201 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9202 * an extra layer of protection.
9203 */
9204 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9205 s->seq.len = s->seq.size - 1;
9206
9207 /* should be zero ended, but we are paranoid. */
9208 s->buffer[s->seq.len] = 0;
9209
9210 printk(KERN_TRACE "%s", s->buffer);
9211
9212 trace_seq_init(s);
9213 }
9214
9215 void trace_init_global_iter(struct trace_iterator *iter)
9216 {
9217 iter->tr = &global_trace;
9218 iter->trace = iter->tr->current_trace;
9219 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9220 iter->array_buffer = &global_trace.array_buffer;
9221
9222 if (iter->trace && iter->trace->open)
9223 iter->trace->open(iter);
9224
9225 /* Annotate start of buffers if we had overruns */
9226 if (ring_buffer_overruns(iter->array_buffer->buffer))
9227 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9228
9229 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9230 if (trace_clocks[iter->tr->clock_id].in_ns)
9231 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9232 }
9233
9234 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9235 {
9236 /* use static because iter can be a bit big for the stack */
9237 static struct trace_iterator iter;
9238 static atomic_t dump_running;
9239 struct trace_array *tr = &global_trace;
9240 unsigned int old_userobj;
9241 unsigned long flags;
9242 int cnt = 0, cpu;
9243
9244 /* Only allow one dump user at a time. */
9245 if (atomic_inc_return(&dump_running) != 1) {
9246 atomic_dec(&dump_running);
9247 return;
9248 }
9249
9250 /*
9251 * Always turn off tracing when we dump.
9252 * We don't need to show trace output of what happens
9253 * between multiple crashes.
9254 *
9255 * If the user does a sysrq-z, then they can re-enable
9256 * tracing with echo 1 > tracing_on.
9257 */
9258 tracing_off();
9259
9260 local_irq_save(flags);
9261 printk_nmi_direct_enter();
9262
9263 /* Simulate the iterator */
9264 trace_init_global_iter(&iter);
9265 /* Can not use kmalloc for iter.temp */
9266 iter.temp = static_temp_buf;
9267 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9268
9269 for_each_tracing_cpu(cpu) {
9270 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9271 }
9272
9273 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9274
9275 /* don't look at user memory in panic mode */
9276 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9277
9278 switch (oops_dump_mode) {
9279 case DUMP_ALL:
9280 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9281 break;
9282 case DUMP_ORIG:
9283 iter.cpu_file = raw_smp_processor_id();
9284 break;
9285 case DUMP_NONE:
9286 goto out_enable;
9287 default:
9288 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9289 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9290 }
9291
9292 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9293
9294 /* Did function tracer already get disabled? */
9295 if (ftrace_is_dead()) {
9296 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9297 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9298 }
9299
9300 /*
9301 * We need to stop all tracing on all CPUS to read the
9302 * the next buffer. This is a bit expensive, but is
9303 * not done often. We fill all what we can read,
9304 * and then release the locks again.
9305 */
9306
9307 while (!trace_empty(&iter)) {
9308
9309 if (!cnt)
9310 printk(KERN_TRACE "---------------------------------\n");
9311
9312 cnt++;
9313
9314 trace_iterator_reset(&iter);
9315 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9316
9317 if (trace_find_next_entry_inc(&iter) != NULL) {
9318 int ret;
9319
9320 ret = print_trace_line(&iter);
9321 if (ret != TRACE_TYPE_NO_CONSUME)
9322 trace_consume(&iter);
9323 }
9324 touch_nmi_watchdog();
9325
9326 trace_printk_seq(&iter.seq);
9327 }
9328
9329 if (!cnt)
9330 printk(KERN_TRACE " (ftrace buffer empty)\n");
9331 else
9332 printk(KERN_TRACE "---------------------------------\n");
9333
9334 out_enable:
9335 tr->trace_flags |= old_userobj;
9336
9337 for_each_tracing_cpu(cpu) {
9338 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9339 }
9340 atomic_dec(&dump_running);
9341 printk_nmi_direct_exit();
9342 local_irq_restore(flags);
9343 }
9344 EXPORT_SYMBOL_GPL(ftrace_dump);
9345
9346 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9347 {
9348 char **argv;
9349 int argc, ret;
9350
9351 argc = 0;
9352 ret = 0;
9353 argv = argv_split(GFP_KERNEL, buf, &argc);
9354 if (!argv)
9355 return -ENOMEM;
9356
9357 if (argc)
9358 ret = createfn(argc, argv);
9359
9360 argv_free(argv);
9361
9362 return ret;
9363 }
9364
9365 #define WRITE_BUFSIZE 4096
9366
9367 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9368 size_t count, loff_t *ppos,
9369 int (*createfn)(int, char **))
9370 {
9371 char *kbuf, *buf, *tmp;
9372 int ret = 0;
9373 size_t done = 0;
9374 size_t size;
9375
9376 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9377 if (!kbuf)
9378 return -ENOMEM;
9379
9380 while (done < count) {
9381 size = count - done;
9382
9383 if (size >= WRITE_BUFSIZE)
9384 size = WRITE_BUFSIZE - 1;
9385
9386 if (copy_from_user(kbuf, buffer + done, size)) {
9387 ret = -EFAULT;
9388 goto out;
9389 }
9390 kbuf[size] = '\0';
9391 buf = kbuf;
9392 do {
9393 tmp = strchr(buf, '\n');
9394 if (tmp) {
9395 *tmp = '\0';
9396 size = tmp - buf + 1;
9397 } else {
9398 size = strlen(buf);
9399 if (done + size < count) {
9400 if (buf != kbuf)
9401 break;
9402 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9403 pr_warn("Line length is too long: Should be less than %d\n",
9404 WRITE_BUFSIZE - 2);
9405 ret = -EINVAL;
9406 goto out;
9407 }
9408 }
9409 done += size;
9410
9411 /* Remove comments */
9412 tmp = strchr(buf, '#');
9413
9414 if (tmp)
9415 *tmp = '\0';
9416
9417 ret = trace_run_command(buf, createfn);
9418 if (ret)
9419 goto out;
9420 buf += size;
9421
9422 } while (done < count);
9423 }
9424 ret = done;
9425
9426 out:
9427 kfree(kbuf);
9428
9429 return ret;
9430 }
9431
9432 __init static int tracer_alloc_buffers(void)
9433 {
9434 int ring_buf_size;
9435 int ret = -ENOMEM;
9436
9437
9438 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9439 pr_warn("Tracing disabled due to lockdown\n");
9440 return -EPERM;
9441 }
9442
9443 /*
9444 * Make sure we don't accidently add more trace options
9445 * than we have bits for.
9446 */
9447 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9448
9449 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9450 goto out;
9451
9452 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9453 goto out_free_buffer_mask;
9454
9455 /* Only allocate trace_printk buffers if a trace_printk exists */
9456 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9457 /* Must be called before global_trace.buffer is allocated */
9458 trace_printk_init_buffers();
9459
9460 /* To save memory, keep the ring buffer size to its minimum */
9461 if (ring_buffer_expanded)
9462 ring_buf_size = trace_buf_size;
9463 else
9464 ring_buf_size = 1;
9465
9466 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9467 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9468
9469 raw_spin_lock_init(&global_trace.start_lock);
9470
9471 /*
9472 * The prepare callbacks allocates some memory for the ring buffer. We
9473 * don't free the buffer if the if the CPU goes down. If we were to free
9474 * the buffer, then the user would lose any trace that was in the
9475 * buffer. The memory will be removed once the "instance" is removed.
9476 */
9477 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9478 "trace/RB:preapre", trace_rb_cpu_prepare,
9479 NULL);
9480 if (ret < 0)
9481 goto out_free_cpumask;
9482 /* Used for event triggers */
9483 ret = -ENOMEM;
9484 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9485 if (!temp_buffer)
9486 goto out_rm_hp_state;
9487
9488 if (trace_create_savedcmd() < 0)
9489 goto out_free_temp_buffer;
9490
9491 /* TODO: make the number of buffers hot pluggable with CPUS */
9492 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9493 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9494 goto out_free_savedcmd;
9495 }
9496
9497 if (global_trace.buffer_disabled)
9498 tracing_off();
9499
9500 if (trace_boot_clock) {
9501 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9502 if (ret < 0)
9503 pr_warn("Trace clock %s not defined, going back to default\n",
9504 trace_boot_clock);
9505 }
9506
9507 /*
9508 * register_tracer() might reference current_trace, so it
9509 * needs to be set before we register anything. This is
9510 * just a bootstrap of current_trace anyway.
9511 */
9512 global_trace.current_trace = &nop_trace;
9513
9514 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9515
9516 ftrace_init_global_array_ops(&global_trace);
9517
9518 init_trace_flags_index(&global_trace);
9519
9520 register_tracer(&nop_trace);
9521
9522 /* Function tracing may start here (via kernel command line) */
9523 init_function_trace();
9524
9525 /* All seems OK, enable tracing */
9526 tracing_disabled = 0;
9527
9528 atomic_notifier_chain_register(&panic_notifier_list,
9529 &trace_panic_notifier);
9530
9531 register_die_notifier(&trace_die_notifier);
9532
9533 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9534
9535 INIT_LIST_HEAD(&global_trace.systems);
9536 INIT_LIST_HEAD(&global_trace.events);
9537 INIT_LIST_HEAD(&global_trace.hist_vars);
9538 INIT_LIST_HEAD(&global_trace.err_log);
9539 list_add(&global_trace.list, &ftrace_trace_arrays);
9540
9541 apply_trace_boot_options();
9542
9543 register_snapshot_cmd();
9544
9545 return 0;
9546
9547 out_free_savedcmd:
9548 free_saved_cmdlines_buffer(savedcmd);
9549 out_free_temp_buffer:
9550 ring_buffer_free(temp_buffer);
9551 out_rm_hp_state:
9552 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9553 out_free_cpumask:
9554 free_cpumask_var(global_trace.tracing_cpumask);
9555 out_free_buffer_mask:
9556 free_cpumask_var(tracing_buffer_mask);
9557 out:
9558 return ret;
9559 }
9560
9561 void __init early_trace_init(void)
9562 {
9563 if (tracepoint_printk) {
9564 tracepoint_print_iter =
9565 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9566 if (MEM_FAIL(!tracepoint_print_iter,
9567 "Failed to allocate trace iterator\n"))
9568 tracepoint_printk = 0;
9569 else
9570 static_key_enable(&tracepoint_printk_key.key);
9571 }
9572 tracer_alloc_buffers();
9573 }
9574
9575 void __init trace_init(void)
9576 {
9577 trace_event_init();
9578 }
9579
9580 __init static int clear_boot_tracer(void)
9581 {
9582 /*
9583 * The default tracer at boot buffer is an init section.
9584 * This function is called in lateinit. If we did not
9585 * find the boot tracer, then clear it out, to prevent
9586 * later registration from accessing the buffer that is
9587 * about to be freed.
9588 */
9589 if (!default_bootup_tracer)
9590 return 0;
9591
9592 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9593 default_bootup_tracer);
9594 default_bootup_tracer = NULL;
9595
9596 return 0;
9597 }
9598
9599 fs_initcall(tracer_init_tracefs);
9600 late_initcall_sync(clear_boot_tracer);
9601
9602 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9603 __init static int tracing_set_default_clock(void)
9604 {
9605 /* sched_clock_stable() is determined in late_initcall */
9606 if (!trace_boot_clock && !sched_clock_stable()) {
9607 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9608 pr_warn("Can not set tracing clock due to lockdown\n");
9609 return -EPERM;
9610 }
9611
9612 printk(KERN_WARNING
9613 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9614 "If you want to keep using the local clock, then add:\n"
9615 " \"trace_clock=local\"\n"
9616 "on the kernel command line\n");
9617 tracing_set_clock(&global_trace, "global");
9618 }
9619
9620 return 0;
9621 }
9622 late_initcall_sync(tracing_set_default_clock);
9623 #endif