1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
43 #include <linux/fs.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
47
48 #include "trace.h"
49 #include "trace_output.h"
50
51 /*
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
54 */
55 bool ring_buffer_expanded;
56
57 /*
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring-buffer to count the
60 * entries inserted during the selftest, although some concurrent
61 * insertions into the ring-buffer, such as trace_printk, could occur
62 * at the same time, giving false positive or negative results.
63 */
64 static bool __read_mostly tracing_selftest_running;
65
66 /*
67 * If a tracer is running, we do not want to run SELFTEST.
68 */
69 bool __read_mostly tracing_selftest_disabled;
70
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
75
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
78 { }
79 };
80
81 static int
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
83 {
84 return 0;
85 }
86
87 /*
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
90 * occurred.
91 */
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
93
94 /*
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
99 */
100 static int tracing_disabled = 1;
101
102 cpumask_var_t __read_mostly tracing_buffer_mask;
103
104 /*
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
106 *
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a
111 * serial console.
112 *
113 * It is off by default, but you can enable it either by specifying
114 * "ftrace_dump_on_oops" on the kernel command line, or by setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
118 */
119
120 enum ftrace_dump_mode ftrace_dump_on_oops;
121
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
124
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
128 struct module *mod;
129 unsigned long length;
130 };
131
132 union trace_eval_map_item;
133
134 struct trace_eval_map_tail {
135 /*
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
138 */
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
141 };
142
143 static DEFINE_MUTEX(trace_eval_mutex);
144
145 /*
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
151 */
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
156 };
157
158 static union trace_eval_map_item *trace_eval_maps;
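/*
 * Illustrative sketch (an assumption for documentation purposes, not
 * code used by this file): given the layout described above, a saved
 * chain of eval maps could be walked roughly like this:
 *
 *	union trace_eval_map_item *ptr = trace_eval_maps;
 *
 *	while (ptr) {
 *		unsigned long i, len = ptr->head.length;
 *
 *		for (i = 0; i < len; i++)
 *			pr_info("%s: %s = %lu\n", ptr[i + 1].map.system,
 *				ptr[i + 1].map.eval_string,
 *				ptr[i + 1].map.eval_value);
 *		ptr = ptr[len + 1].tail.next;
 *	}
 *
 * The real iteration lives in the eval_map seq_file code further down
 * in this file and must also hold trace_eval_mutex.
 */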
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
160
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162
163 #define MAX_TRACER_SIZE 100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
166
167 static bool allocate_snapshot;
168
169 static int __init set_cmdline_ftrace(char *str)
170 {
171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172 default_bootup_tracer = bootup_tracer_buf;
173 /* We are using ftrace early, expand it */
174 ring_buffer_expanded = true;
175 return 1;
176 }
177 __setup("ftrace=", set_cmdline_ftrace);
178
179 static int __init set_ftrace_dump_on_oops(char *str)
180 {
181 if (*str++ != '=' || !*str) {
182 ftrace_dump_on_oops = DUMP_ALL;
183 return 1;
184 }
185
186 if (!strcmp("orig_cpu", str)) {
187 ftrace_dump_on_oops = DUMP_ORIG;
188 return 1;
189 }
190
191 return 0;
192 }
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
194
195 static int __init stop_trace_on_warning(char *str)
196 {
197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198 __disable_trace_on_warning = 1;
199 return 1;
200 }
201 __setup("traceoff_on_warning", stop_trace_on_warning);
202
203 static int __init boot_alloc_snapshot(char *str)
204 {
205 allocate_snapshot = true;
206 /* We also need the main ring buffer expanded */
207 ring_buffer_expanded = true;
208 return 1;
209 }
210 __setup("alloc_snapshot", boot_alloc_snapshot);
211
212
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
214
215 static int __init set_trace_boot_options(char *str)
216 {
217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218 return 0;
219 }
220 __setup("trace_options=", set_trace_boot_options);
221
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
224
225 static int __init set_trace_boot_clock(char *str)
226 {
227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228 trace_boot_clock = trace_boot_clock_buf;
229 return 0;
230 }
231 __setup("trace_clock=", set_trace_boot_clock);
232
233 static int __init set_tracepoint_printk(char *str)
234 {
235 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236 tracepoint_printk = 1;
237 return 1;
238 }
239 __setup("tp_printk", set_tracepoint_printk);
240
241 unsigned long long ns2usecs(u64 nsec)
242 {
243 nsec += 500;
244 do_div(nsec, 1000);
245 return nsec;
246 }
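/*
 * Worked example of the rounding above (illustrative only): the +500
 * added before the divide rounds to the nearest microsecond, so
 *
 *	ns2usecs(1499) == (1499 + 500) / 1000 == 1
 *	ns2usecs(1500) == (1500 + 500) / 1000 == 2
 */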
247
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS \
250 (FUNCTION_DEFAULT_FLAGS | \
251 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
252 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
253 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
254 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
255
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
258 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
259
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
263
264 /*
265 * The global_trace is the descriptor that holds the top-level tracing
266 * buffers for the live tracing.
267 */
268 static struct trace_array global_trace = {
269 .trace_flags = TRACE_DEFAULT_FLAGS,
270 };
271
272 LIST_HEAD(ftrace_trace_arrays);
273
274 int trace_array_get(struct trace_array *this_tr)
275 {
276 struct trace_array *tr;
277 int ret = -ENODEV;
278
279 mutex_lock(&trace_types_lock);
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
281 if (tr == this_tr) {
282 tr->ref++;
283 ret = 0;
284 break;
285 }
286 }
287 mutex_unlock(&trace_types_lock);
288
289 return ret;
290 }
291
292 static void __trace_array_put(struct trace_array *this_tr)
293 {
294 WARN_ON(!this_tr->ref);
295 this_tr->ref--;
296 }
297
298 void trace_array_put(struct trace_array *this_tr)
299 {
300 mutex_lock(&trace_types_lock);
301 __trace_array_put(this_tr);
302 mutex_unlock(&trace_types_lock);
303 }
304
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306 struct ring_buffer *buffer,
307 struct ring_buffer_event *event)
308 {
309 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310 !filter_match_preds(call->filter, rec)) {
311 __trace_event_discard_commit(buffer, event);
312 return 1;
313 }
314
315 return 0;
316 }
317
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
319 {
320 vfree(pid_list->pids);
321 kfree(pid_list);
322 }
323
324 /**
325 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326 * @filtered_pids: The list of pids to check
327 * @search_pid: The PID to find in @filtered_pids
328 *
329 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
330 */
331 bool
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
333 {
334 /*
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
337 */
338 if (search_pid >= filtered_pids->pid_max)
339 return false;
340
341 return test_bit(search_pid, filtered_pids->pids);
342 }
343
344 /**
345 * trace_ignore_this_task - should a task be ignored for tracing
346 * @filtered_pids: The list of pids to check
347 * @task: The task that should be ignored if not filtered
348 *
349 * Checks if @task should be traced or not from @filtered_pids.
350 * Returns true if @task should *NOT* be traced.
351 * Returns false if @task should be traced.
352 */
353 bool
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
355 {
356 /*
357 * Return false, because if filtered_pids does not exist,
358 * all pids are good to trace.
359 */
360 if (!filtered_pids)
361 return false;
362
363 return !trace_find_filtered_pid(filtered_pids, task->pid);
364 }
365
366 /**
367 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
368 * @pid_list: The list to modify
369 * @self: The current task for fork or NULL for exit
370 * @task: The task to add or remove
371 *
372 * If adding a task, if @self is defined, the task is only added if @self
373 * is also included in @pid_list. This happens on fork and tasks should
374 * only be added when the parent is listed. If @self is NULL, then the
375 * @task pid will be removed from the list, which would happen on exit
376 * of a task.
377 */
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379 struct task_struct *self,
380 struct task_struct *task)
381 {
382 if (!pid_list)
383 return;
384
385 /* For forks, we only add if the forking task is listed */
386 if (self) {
387 if (!trace_find_filtered_pid(pid_list, self->pid))
388 return;
389 }
390
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task->pid >= pid_list->pid_max)
393 return;
394
395 /* "self" is set for forks, and NULL for exits */
396 if (self)
397 set_bit(task->pid, pid_list->pids);
398 else
399 clear_bit(task->pid, pid_list->pids);
400 }
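/*
 * Usage sketch (hypothetical callers, shown only for illustration):
 * a fork tracepoint handler would add the child only if the parent is
 * already filtered, while an exit handler passes NULL for @self:
 *
 *	trace_filter_add_remove_task(pid_list, parent, child);	// fork
 *	trace_filter_add_remove_task(pid_list, NULL, task);	// exit
 */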
401
402 /**
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406 * @pos: The position of the file
407 *
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
410 *
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
413 */
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
415 {
416 unsigned long pid = (unsigned long)v;
417
418 (*pos)++;
419
420 /* pid already is +1 of the actual previous bit */
421 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
422
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid < pid_list->pid_max)
425 return (void *)(pid + 1);
426
427 return NULL;
428 }
429
430 /**
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
434 *
435 * This is used by seq_file "start" operation to start the iteration
436 * of listing pids.
437 *
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
440 */
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
442 {
443 unsigned long pid;
444 loff_t l = 0;
445
446 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447 if (pid >= pid_list->pid_max)
448 return NULL;
449
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid++; pid && l < *pos;
452 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
453 ;
454 return (void *)pid;
455 }
456
457 /**
458 * trace_pid_show - show the current pid in seq_file processing
459 * @m: The seq_file structure to write into
460 * @v: A void pointer of the pid (+1) value to display
461 *
462 * Can be directly used by seq_file operations to display the current
463 * pid value.
464 */
465 int trace_pid_show(struct seq_file *m, void *v)
466 {
467 unsigned long pid = (unsigned long)v - 1;
468
469 seq_printf(m, "%lu\n", pid);
470 return 0;
471 }
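/*
 * Minimal sketch of how the three helpers above slot into a seq_file
 * (the example_pid_start/next/stop wrappers are assumptions; real users
 * resolve the pid_list from their own private data first):
 *
 *	static const struct seq_operations example_pid_seq_ops = {
 *		.start	= example_pid_start,	// wraps trace_pid_start()
 *		.next	= example_pid_next,	// wraps trace_pid_next()
 *		.stop	= example_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */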
472
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
475
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477 struct trace_pid_list **new_pid_list,
478 const char __user *ubuf, size_t cnt)
479 {
480 struct trace_pid_list *pid_list;
481 struct trace_parser parser;
482 unsigned long val;
483 int nr_pids = 0;
484 ssize_t read = 0;
485 ssize_t ret = 0;
486 loff_t pos;
487 pid_t pid;
488
489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
490 return -ENOMEM;
491
492 /*
493 * Always recreate the array. The write is an all or nothing
494 * operation: a new array is always created when the user adds
495 * pids, and if the operation fails, the current list is
496 * not modified.
497 */
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
499 if (!pid_list)
500 return -ENOMEM;
501
502 pid_list->pid_max = READ_ONCE(pid_max);
503
504 /* Only truncating will shrink pid_max */
505 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
506 pid_list->pid_max = filtered_pids->pid_max;
507
508 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
509 if (!pid_list->pids) {
510 kfree(pid_list);
511 return -ENOMEM;
512 }
513
514 if (filtered_pids) {
515 /* copy the current bits to the new max */
516 for_each_set_bit(pid, filtered_pids->pids,
517 filtered_pids->pid_max) {
518 set_bit(pid, pid_list->pids);
519 nr_pids++;
520 }
521 }
522
523 while (cnt > 0) {
524
525 pos = 0;
526
527 ret = trace_get_user(&parser, ubuf, cnt, &pos);
528 if (ret < 0 || !trace_parser_loaded(&parser))
529 break;
530
531 read += ret;
532 ubuf += ret;
533 cnt -= ret;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566 }
567
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
569 {
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
573 if (!buf->buffer)
574 return trace_clock_local();
575
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
578
579 return ts;
580 }
581
582 u64 ftrace_now(int cpu)
583 {
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585 }
586
587 /**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
596 int tracing_is_enabled(void)
597 {
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
605 }
606
607 /*
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
611 *
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it is much appreciated not to
614 * have to wait for all that output. Anyway, this is configurable
615 * at both boot time and run time.
616 */
617 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
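/*
 * Worked out: 16384 entries * 88 bytes = 1441792 bytes, i.e. a bit
 * under 1.4 MB, before any boot time or run time resizing.
 */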
618
619 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
620
621 /* trace_types holds a link list of available tracers. */
622 static struct tracer *trace_types __read_mostly;
623
624 /*
625 * trace_types_lock is used to protect the trace_types list.
626 */
627 DEFINE_MUTEX(trace_types_lock);
628
629 /*
630 * serialize the access of the ring buffer
631 *
632 * The ring buffer serializes readers, but that is low level protection.
633 * The validity of the events (which are returned by ring_buffer_peek() etc.)
634 * is not protected by the ring buffer.
635 *
636 * The content of events may become garbage if we allow other processes to
637 * consume these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not a reader page) in the ring buffer, and this page will be rewritten
640 * by the event producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to the system.
643 *
644 * These primitives allow multiple processes to access different cpu ring
645 * buffers concurrently.
646 *
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multiple read-only accesses are also serialized.
649 */
650
651 #ifdef CONFIG_SMP
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655 static inline void trace_access_lock(int cpu)
656 {
657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669 }
670
671 static inline void trace_access_unlock(int cpu)
672 {
673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679 }
680
681 static inline void trace_access_lock_init(void)
682 {
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687 }
688
689 #else
690
691 static DEFINE_MUTEX(access_lock);
692
693 static inline void trace_access_lock(int cpu)
694 {
695 (void)cpu;
696 mutex_lock(&access_lock);
697 }
698
699 static inline void trace_access_unlock(int cpu)
700 {
701 (void)cpu;
702 mutex_unlock(&access_lock);
703 }
704
705 static inline void trace_access_lock_init(void)
706 {
707 }
708
709 #endif
710
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
719
720 #else
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724 {
725 }
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
730 {
731 }
732
733 #endif
734
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738 {
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743 }
744
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750 {
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758 }
759
760 void tracer_tracing_on(struct trace_array *tr)
761 {
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775 }
776
777 /**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783 void tracing_on(void)
784 {
785 tracer_tracing_on(&global_trace);
786 }
787 EXPORT_SYMBOL_GPL(tracing_on);
788
789
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792 {
793 __this_cpu_write(trace_taskinfo_save, true);
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803 }
804
805 /**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811 int __trace_puts(unsigned long ip, const char *str, int size)
812 {
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
818 int pc;
819
820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
821 return 0;
822
823 pc = preempt_count();
824
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
851
852 return size;
853 }
854 EXPORT_SYMBOL_GPL(__trace_puts);
855
856 /**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861 int __trace_bputs(unsigned long ip, const char *str)
862 {
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
868 int pc;
869
870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
871 return 0;
872
873 pc = preempt_count();
874
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
891
892 return 1;
893 }
894 EXPORT_SYMBOL_GPL(__trace_bputs);
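/*
 * Note on intended use (a summary, see the trace_puts() macro in the
 * kernel headers): callers normally go through trace_puts(), which is
 * expected to pick __trace_bputs() for string literals, so only the
 * pointer is recorded, and __trace_puts() otherwise, where the string
 * contents are copied into the ring buffer.
 */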
895
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
898 {
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
911 tracing_off();
912 return;
913 }
914
915 /* Note, a snapshot cannot be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id(), cond_data);
924 local_irq_restore(flags);
925 }
926
927 void tracing_snapshot_instance(struct trace_array *tr)
928 {
929 tracing_snapshot_instance_cond(tr, NULL);
930 }
931
932 /**
933 * tracing_snapshot - take a snapshot of the current buffer.
934 *
935 * This causes a swap between the snapshot buffer and the current live
936 * tracing buffer. You can use this to take snapshots of the live
937 * trace when some condition is triggered, but continue to trace.
938 *
939 * Note, make sure to allocate the snapshot with either
940 * a tracing_snapshot_alloc(), or by doing it manually
941 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
942 *
943 * If the snapshot buffer is not allocated, it will stop tracing.
944 * Basically making a permanent snapshot.
945 */
946 void tracing_snapshot(void)
947 {
948 struct trace_array *tr = &global_trace;
949
950 tracing_snapshot_instance(tr);
951 }
952 EXPORT_SYMBOL_GPL(tracing_snapshot);
953
954 /**
955 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
956 * @tr: The tracing instance to snapshot
957 * @cond_data: The data to be tested conditionally, and possibly saved
958 *
959 * This is the same as tracing_snapshot() except that the snapshot is
960 * conditional - the snapshot will only happen if the
961 * cond_snapshot.update() implementation receiving the cond_data
962 * returns true, which means that the trace array's cond_snapshot
963 * update() operation used the cond_data to determine whether the
964 * snapshot should be taken, and if it was, presumably saved it along
965 * with the snapshot.
966 */
967 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
968 {
969 tracing_snapshot_instance_cond(tr, cond_data);
970 }
971 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
972
973 /**
974 * tracing_snapshot_cond_data - get the user data associated with a snapshot
975 * @tr: The tracing instance
976 *
977 * When the user enables a conditional snapshot using
978 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
979 * with the snapshot. This accessor is used to retrieve it.
980 *
981 * Should not be called from cond_snapshot.update(), since it takes
982 * the tr->max_lock lock, which the code calling
983 * cond_snapshot.update() already holds.
984 *
985 * Returns the cond_data associated with the trace array's snapshot.
986 */
987 void *tracing_cond_snapshot_data(struct trace_array *tr)
988 {
989 void *cond_data = NULL;
990
991 arch_spin_lock(&tr->max_lock);
992
993 if (tr->cond_snapshot)
994 cond_data = tr->cond_snapshot->cond_data;
995
996 arch_spin_unlock(&tr->max_lock);
997
998 return cond_data;
999 }
1000 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1001
1002 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1003 struct trace_buffer *size_buf, int cpu_id);
1004 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1005
1006 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1007 {
1008 int ret;
1009
1010 if (!tr->allocated_snapshot) {
1011
1012 /* allocate spare buffer */
1013 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1014 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1015 if (ret < 0)
1016 return ret;
1017
1018 tr->allocated_snapshot = true;
1019 }
1020
1021 return 0;
1022 }
1023
1024 static void free_snapshot(struct trace_array *tr)
1025 {
1026 /*
1027 * We don't free the ring buffer; instead, we resize it because
1028 * the max_tr ring buffer has some state (e.g. ring->clock) and
1029 * we want to preserve it.
1030 */
1031 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1032 set_buffer_entries(&tr->max_buffer, 1);
1033 tracing_reset_online_cpus(&tr->max_buffer);
1034 tr->allocated_snapshot = false;
1035 }
1036
1037 /**
1038 * tracing_alloc_snapshot - allocate snapshot buffer.
1039 *
1040 * This only allocates the snapshot buffer if it isn't already
1041 * allocated - it doesn't also take a snapshot.
1042 *
1043 * This is meant to be used in cases where the snapshot buffer needs
1044 * to be set up for events that can't sleep but need to be able to
1045 * trigger a snapshot.
1046 */
1047 int tracing_alloc_snapshot(void)
1048 {
1049 struct trace_array *tr = &global_trace;
1050 int ret;
1051
1052 ret = tracing_alloc_snapshot_instance(tr);
1053 WARN_ON(ret < 0);
1054
1055 return ret;
1056 }
1057 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1058
1059 /**
1060 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1061 *
1062 * This is similar to tracing_snapshot(), but it will allocate the
1063 * snapshot buffer if it isn't already allocated. Use this only
1064 * where it is safe to sleep, as the allocation may sleep.
1065 *
1066 * This causes a swap between the snapshot buffer and the current live
1067 * tracing buffer. You can use this to take snapshots of the live
1068 * trace when some condition is triggered, but continue to trace.
1069 */
1070 void tracing_snapshot_alloc(void)
1071 {
1072 int ret;
1073
1074 ret = tracing_alloc_snapshot();
1075 if (ret < 0)
1076 return;
1077
1078 tracing_snapshot();
1079 }
1080 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1081
1082 /**
1083 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1084 * @tr: The tracing instance
1085 * @cond_data: User data to associate with the snapshot
1086 * @update: Implementation of the cond_snapshot update function
1087 *
1088 * Check whether the conditional snapshot for the given instance has
1089 * already been enabled, or if the current tracer is already using a
1090 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1091 * save the cond_data and update function inside.
1092 *
1093 * Returns 0 if successful, error otherwise.
1094 */
1095 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1096 cond_update_fn_t update)
1097 {
1098 struct cond_snapshot *cond_snapshot;
1099 int ret = 0;
1100
1101 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1102 if (!cond_snapshot)
1103 return -ENOMEM;
1104
1105 cond_snapshot->cond_data = cond_data;
1106 cond_snapshot->update = update;
1107
1108 mutex_lock(&trace_types_lock);
1109
1110 ret = tracing_alloc_snapshot_instance(tr);
1111 if (ret)
1112 goto fail_unlock;
1113
1114 if (tr->current_trace->use_max_tr) {
1115 ret = -EBUSY;
1116 goto fail_unlock;
1117 }
1118
1119 /*
1120 * The cond_snapshot can only change to NULL without the
1121 * trace_types_lock. We don't care if we race with it going
1122 * to NULL, but we want to make sure that it's not set to
1123 * something other than NULL when we get here, which we can
1124 * do safely with only holding the trace_types_lock and not
1125 * having to take the max_lock.
1126 */
1127 if (tr->cond_snapshot) {
1128 ret = -EBUSY;
1129 goto fail_unlock;
1130 }
1131
1132 arch_spin_lock(&tr->max_lock);
1133 tr->cond_snapshot = cond_snapshot;
1134 arch_spin_unlock(&tr->max_lock);
1135
1136 mutex_unlock(&trace_types_lock);
1137
1138 return ret;
1139
1140 fail_unlock:
1141 mutex_unlock(&trace_types_lock);
1142 kfree(cond_snapshot);
1143 return ret;
1144 }
1145 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
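/*
 * Usage sketch (hypothetical caller, assuming the cond_update_fn_t
 * signature of bool (*)(struct trace_array *tr, void *cond_data)):
 *
 *	static struct my_state state;
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->over_threshold;	// snapshot only when true
 *	}
 *
 *	ret = tracing_snapshot_cond_enable(tr, &state, my_update);
 */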
1146
1147 /**
1148 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1149 * @tr: The tracing instance
1150 *
1151 * Check whether the conditional snapshot for the given instance is
1152 * enabled; if so, free the cond_snapshot associated with it,
1153 * otherwise return -EINVAL.
1154 *
1155 * Returns 0 if successful, error otherwise.
1156 */
1157 int tracing_snapshot_cond_disable(struct trace_array *tr)
1158 {
1159 int ret = 0;
1160
1161 arch_spin_lock(&tr->max_lock);
1162
1163 if (!tr->cond_snapshot)
1164 ret = -EINVAL;
1165 else {
1166 kfree(tr->cond_snapshot);
1167 tr->cond_snapshot = NULL;
1168 }
1169
1170 arch_spin_unlock(&tr->max_lock);
1171
1172 return ret;
1173 }
1174 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1175 #else
1176 void tracing_snapshot(void)
1177 {
1178 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1179 }
1180 EXPORT_SYMBOL_GPL(tracing_snapshot);
1181 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1182 {
1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1184 }
1185 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1186 int tracing_alloc_snapshot(void)
1187 {
1188 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1189 return -ENODEV;
1190 }
1191 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1192 void tracing_snapshot_alloc(void)
1193 {
1194 /* Give warning */
1195 tracing_snapshot();
1196 }
1197 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1198 void *tracing_cond_snapshot_data(struct trace_array *tr)
1199 {
1200 return NULL;
1201 }
1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1203 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1204 {
1205 return -ENODEV;
1206 }
1207 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1208 int tracing_snapshot_cond_disable(struct trace_array *tr)
1209 {
1210 return false;
1211 }
1212 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1213 #endif /* CONFIG_TRACER_SNAPSHOT */
1214
1215 void tracer_tracing_off(struct trace_array *tr)
1216 {
1217 if (tr->trace_buffer.buffer)
1218 ring_buffer_record_off(tr->trace_buffer.buffer);
1219 /*
1220 * This flag is looked at when buffers haven't been allocated
1221 * yet, or by some tracers (like irqsoff), that just want to
1222 * know if the ring buffer has been disabled, but it can handle
1223 * races where it gets disabled but we still do a record.
1224 * As the check is in the fast path of the tracers, it is more
1225 * important to be fast than accurate.
1226 */
1227 tr->buffer_disabled = 1;
1228 /* Make the flag seen by readers */
1229 smp_wmb();
1230 }
1231
1232 /**
1233 * tracing_off - turn off tracing buffers
1234 *
1235 * This function stops the tracing buffers from recording data.
1236 * It does not disable any overhead the tracers themselves may
1237 * be causing. This function simply causes all recording to
1238 * the ring buffers to fail.
1239 */
1240 void tracing_off(void)
1241 {
1242 tracer_tracing_off(&global_trace);
1243 }
1244 EXPORT_SYMBOL_GPL(tracing_off);
1245
1246 void disable_trace_on_warning(void)
1247 {
1248 if (__disable_trace_on_warning)
1249 tracing_off();
1250 }
1251
1252 /**
1253 * tracer_tracing_is_on - show real state of ring buffer enabled
1254 * @tr : the trace array to know if ring buffer is enabled
1255 *
1256 * Shows real state of the ring buffer if it is enabled or not.
1257 */
1258 bool tracer_tracing_is_on(struct trace_array *tr)
1259 {
1260 if (tr->trace_buffer.buffer)
1261 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1262 return !tr->buffer_disabled;
1263 }
1264
1265 /**
1266 * tracing_is_on - show state of ring buffers enabled
1267 */
1268 int tracing_is_on(void)
1269 {
1270 return tracer_tracing_is_on(&global_trace);
1271 }
1272 EXPORT_SYMBOL_GPL(tracing_is_on);
1273
1274 static int __init set_buf_size(char *str)
1275 {
1276 unsigned long buf_size;
1277
1278 if (!str)
1279 return 0;
1280 buf_size = memparse(str, &str);
1281 /* nr_entries can not be zero */
1282 if (buf_size == 0)
1283 return 0;
1284 trace_buf_size = buf_size;
1285 return 1;
1286 }
1287 __setup("trace_buf_size=", set_buf_size);
1288
1289 static int __init set_tracing_thresh(char *str)
1290 {
1291 unsigned long threshold;
1292 int ret;
1293
1294 if (!str)
1295 return 0;
1296 ret = kstrtoul(str, 0, &threshold);
1297 if (ret < 0)
1298 return 0;
1299 tracing_thresh = threshold * 1000;
1300 return 1;
1301 }
1302 __setup("tracing_thresh=", set_tracing_thresh);
1303
1304 unsigned long nsecs_to_usecs(unsigned long nsecs)
1305 {
1306 return nsecs / 1000;
1307 }
1308
1309 /*
1310 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1311 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1312 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1313 * of strings in the order that the evals (enum) were defined.
1314 */
1315 #undef C
1316 #define C(a, b) b
1317
1318 /* These must match the bit positions in trace_iterator_flags */
1319 static const char *trace_options[] = {
1320 TRACE_FLAGS
1321 NULL
1322 };
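/*
 * Expansion example (a hypothetical two-entry TRACE_FLAGS, shown only
 * to illustrate the C(a, b) trick): if TRACE_FLAGS were
 *
 *	C(PRINT_PARENT, "print-parent"), C(SYM_OFFSET, "sym-offset"),
 *
 * then with "#define C(a, b) b" the array above becomes
 *
 *	{ "print-parent", "sym-offset", NULL };
 */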
1323
1324 static struct {
1325 u64 (*func)(void);
1326 const char *name;
1327 int in_ns; /* is this clock in nanoseconds? */
1328 } trace_clocks[] = {
1329 { trace_clock_local, "local", 1 },
1330 { trace_clock_global, "global", 1 },
1331 { trace_clock_counter, "counter", 0 },
1332 { trace_clock_jiffies, "uptime", 0 },
1333 { trace_clock, "perf", 1 },
1334 { ktime_get_mono_fast_ns, "mono", 1 },
1335 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1336 { ktime_get_boot_fast_ns, "boot", 1 },
1337 ARCH_TRACE_CLOCKS
1338 };
1339
1340 bool trace_clock_in_ns(struct trace_array *tr)
1341 {
1342 if (trace_clocks[tr->clock_id].in_ns)
1343 return true;
1344
1345 return false;
1346 }
1347
1348 /*
1349 * trace_parser_get_init - gets the buffer for trace parser
1350 */
1351 int trace_parser_get_init(struct trace_parser *parser, int size)
1352 {
1353 memset(parser, 0, sizeof(*parser));
1354
1355 parser->buffer = kmalloc(size, GFP_KERNEL);
1356 if (!parser->buffer)
1357 return 1;
1358
1359 parser->size = size;
1360 return 0;
1361 }
1362
1363 /*
1364 * trace_parser_put - frees the buffer for trace parser
1365 */
1366 void trace_parser_put(struct trace_parser *parser)
1367 {
1368 kfree(parser->buffer);
1369 parser->buffer = NULL;
1370 }
1371
1372 /*
1373 * trace_get_user - reads the user input string separated by space
1374 * (matched by isspace(ch))
1375 *
1376 * For each string found the 'struct trace_parser' is updated,
1377 * and the function returns.
1378 *
1379 * Returns number of bytes read.
1380 *
1381 * See kernel/trace/trace.h for 'struct trace_parser' details.
1382 */
1383 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1384 size_t cnt, loff_t *ppos)
1385 {
1386 char ch;
1387 size_t read = 0;
1388 ssize_t ret;
1389
1390 if (!*ppos)
1391 trace_parser_clear(parser);
1392
1393 ret = get_user(ch, ubuf++);
1394 if (ret)
1395 goto out;
1396
1397 read++;
1398 cnt--;
1399
1400 /*
1401 * The parser is not finished with the last write,
1402 * continue reading the user input without skipping spaces.
1403 */
1404 if (!parser->cont) {
1405 /* skip white space */
1406 while (cnt && isspace(ch)) {
1407 ret = get_user(ch, ubuf++);
1408 if (ret)
1409 goto out;
1410 read++;
1411 cnt--;
1412 }
1413
1414 parser->idx = 0;
1415
1416 /* only spaces were written */
1417 if (isspace(ch) || !ch) {
1418 *ppos += read;
1419 ret = read;
1420 goto out;
1421 }
1422 }
1423
1424 /* read the non-space input */
1425 while (cnt && !isspace(ch) && ch) {
1426 if (parser->idx < parser->size - 1)
1427 parser->buffer[parser->idx++] = ch;
1428 else {
1429 ret = -EINVAL;
1430 goto out;
1431 }
1432 ret = get_user(ch, ubuf++);
1433 if (ret)
1434 goto out;
1435 read++;
1436 cnt--;
1437 }
1438
1439 /* We either got finished input or we have to wait for another call. */
1440 if (isspace(ch) || !ch) {
1441 parser->buffer[parser->idx] = 0;
1442 parser->cont = false;
1443 } else if (parser->idx < parser->size - 1) {
1444 parser->cont = true;
1445 parser->buffer[parser->idx++] = ch;
1446 /* Make sure the parsed string always terminates with '\0'. */
1447 parser->buffer[parser->idx] = 0;
1448 } else {
1449 ret = -EINVAL;
1450 goto out;
1451 }
1452
1453 *ppos += read;
1454 ret = read;
1455
1456 out:
1457 return ret;
1458 }
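/*
 * Behavioural sketch (illustrative, assuming a parser sized large
 * enough): writing "12 345" returns 3 on the first call with "12" in
 * parser->buffer (the delimiting space is consumed), and a later call
 * picks up "345". parser->cont is set when a token is cut off by the
 * end of the write so that the next call can continue it.
 */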
1459
1460 /* TODO add a seq_buf_to_buffer() */
1461 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1462 {
1463 int len;
1464
1465 if (trace_seq_used(s) <= s->seq.readpos)
1466 return -EBUSY;
1467
1468 len = trace_seq_used(s) - s->seq.readpos;
1469 if (cnt > len)
1470 cnt = len;
1471 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1472
1473 s->seq.readpos += cnt;
1474 return cnt;
1475 }
1476
1477 unsigned long __read_mostly tracing_thresh;
1478
1479 #ifdef CONFIG_TRACER_MAX_TRACE
1480 /*
1481 * Copy the new maximum trace into the separate maximum-trace
1482 * structure. (this way the maximum trace is permanently saved,
1483 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1484 */
1485 static void
1486 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1487 {
1488 struct trace_buffer *trace_buf = &tr->trace_buffer;
1489 struct trace_buffer *max_buf = &tr->max_buffer;
1490 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1491 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1492
1493 max_buf->cpu = cpu;
1494 max_buf->time_start = data->preempt_timestamp;
1495
1496 max_data->saved_latency = tr->max_latency;
1497 max_data->critical_start = data->critical_start;
1498 max_data->critical_end = data->critical_end;
1499
1500 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1501 max_data->pid = tsk->pid;
1502 /*
1503 * If tsk == current, then use current_uid(), as that does not use
1504 * RCU. The irq tracer can be called out of RCU scope.
1505 */
1506 if (tsk == current)
1507 max_data->uid = current_uid();
1508 else
1509 max_data->uid = task_uid(tsk);
1510
1511 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1512 max_data->policy = tsk->policy;
1513 max_data->rt_priority = tsk->rt_priority;
1514
1515 /* record this task's comm */
1516 tracing_record_cmdline(tsk);
1517 }
1518
1519 /**
1520 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1521 * @tr: tracer
1522 * @tsk: the task with the latency
1523 * @cpu: The cpu that initiated the trace.
1524 * @cond_data: User data associated with a conditional snapshot
1525 *
1526 * Flip the buffers between the @tr and the max_tr and record information
1527 * about which task was the cause of this latency.
1528 */
1529 void
1530 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1531 void *cond_data)
1532 {
1533 if (tr->stop_count)
1534 return;
1535
1536 WARN_ON_ONCE(!irqs_disabled());
1537
1538 if (!tr->allocated_snapshot) {
1539 /* Only the nop tracer should hit this when disabling */
1540 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1541 return;
1542 }
1543
1544 arch_spin_lock(&tr->max_lock);
1545
1546 /* Inherit the recordable setting from trace_buffer */
1547 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1548 ring_buffer_record_on(tr->max_buffer.buffer);
1549 else
1550 ring_buffer_record_off(tr->max_buffer.buffer);
1551
1552 #ifdef CONFIG_TRACER_SNAPSHOT
1553 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1554 goto out_unlock;
1555 #endif
1556 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1557
1558 __update_max_tr(tr, tsk, cpu);
1559
1560 out_unlock:
1561 arch_spin_unlock(&tr->max_lock);
1562 }
1563
1564 /**
1565 * update_max_tr_single - only copy one trace over, and reset the rest
1566 * @tr: tracer
1567 * @tsk: the task with the latency
1568 * @cpu: the cpu of the buffer to copy.
1569 *
1570 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1571 */
1572 void
1573 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1574 {
1575 int ret;
1576
1577 if (tr->stop_count)
1578 return;
1579
1580 WARN_ON_ONCE(!irqs_disabled());
1581 if (!tr->allocated_snapshot) {
1582 /* Only the nop tracer should hit this when disabling */
1583 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1584 return;
1585 }
1586
1587 arch_spin_lock(&tr->max_lock);
1588
1589 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1590
1591 if (ret == -EBUSY) {
1592 /*
1593 * We failed to swap the buffer due to a commit taking
1594 * place on this CPU. We fail to record, but we reset
1595 * the max trace buffer (no one writes directly to it)
1596 * and flag that it failed.
1597 */
1598 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1599 "Failed to swap buffers due to commit in progress\n");
1600 }
1601
1602 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1603
1604 __update_max_tr(tr, tsk, cpu);
1605 arch_spin_unlock(&tr->max_lock);
1606 }
1607 #endif /* CONFIG_TRACER_MAX_TRACE */
1608
1609 static int wait_on_pipe(struct trace_iterator *iter, int full)
1610 {
1611 /* Iterators are static, they should be filled or empty */
1612 if (trace_buffer_iter(iter, iter->cpu_file))
1613 return 0;
1614
1615 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1616 full);
1617 }
1618
1619 #ifdef CONFIG_FTRACE_STARTUP_TEST
1620 static bool selftests_can_run;
1621
1622 struct trace_selftests {
1623 struct list_head list;
1624 struct tracer *type;
1625 };
1626
1627 static LIST_HEAD(postponed_selftests);
1628
1629 static int save_selftest(struct tracer *type)
1630 {
1631 struct trace_selftests *selftest;
1632
1633 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1634 if (!selftest)
1635 return -ENOMEM;
1636
1637 selftest->type = type;
1638 list_add(&selftest->list, &postponed_selftests);
1639 return 0;
1640 }
1641
1642 static int run_tracer_selftest(struct tracer *type)
1643 {
1644 struct trace_array *tr = &global_trace;
1645 struct tracer *saved_tracer = tr->current_trace;
1646 int ret;
1647
1648 if (!type->selftest || tracing_selftest_disabled)
1649 return 0;
1650
1651 /*
1652 * If a tracer registers early in boot up (before scheduling is
1653 * initialized and such), then do not run its selftests yet.
1654 * Instead, run it a little later in the boot process.
1655 */
1656 if (!selftests_can_run)
1657 return save_selftest(type);
1658
1659 /*
1660 * Run a selftest on this tracer.
1661 * Here we reset the trace buffer, and set the current
1662 * tracer to be this tracer. The tracer can then run some
1663 * internal tracing to verify that everything is in order.
1664 * If we fail, we do not register this tracer.
1665 */
1666 tracing_reset_online_cpus(&tr->trace_buffer);
1667
1668 tr->current_trace = type;
1669
1670 #ifdef CONFIG_TRACER_MAX_TRACE
1671 if (type->use_max_tr) {
1672 /* If we expanded the buffers, make sure the max is expanded too */
1673 if (ring_buffer_expanded)
1674 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1675 RING_BUFFER_ALL_CPUS);
1676 tr->allocated_snapshot = true;
1677 }
1678 #endif
1679
1680 /* the test is responsible for initializing and enabling */
1681 pr_info("Testing tracer %s: ", type->name);
1682 ret = type->selftest(type, tr);
1683 /* the test is responsible for resetting too */
1684 tr->current_trace = saved_tracer;
1685 if (ret) {
1686 printk(KERN_CONT "FAILED!\n");
1687 /* Add the warning after printing 'FAILED' */
1688 WARN_ON(1);
1689 return -1;
1690 }
1691 /* Only reset on passing, to avoid touching corrupted buffers */
1692 tracing_reset_online_cpus(&tr->trace_buffer);
1693
1694 #ifdef CONFIG_TRACER_MAX_TRACE
1695 if (type->use_max_tr) {
1696 tr->allocated_snapshot = false;
1697
1698 /* Shrink the max buffer again */
1699 if (ring_buffer_expanded)
1700 ring_buffer_resize(tr->max_buffer.buffer, 1,
1701 RING_BUFFER_ALL_CPUS);
1702 }
1703 #endif
1704
1705 printk(KERN_CONT "PASSED\n");
1706 return 0;
1707 }
1708
1709 static __init int init_trace_selftests(void)
1710 {
1711 struct trace_selftests *p, *n;
1712 struct tracer *t, **last;
1713 int ret;
1714
1715 selftests_can_run = true;
1716
1717 mutex_lock(&trace_types_lock);
1718
1719 if (list_empty(&postponed_selftests))
1720 goto out;
1721
1722 pr_info("Running postponed tracer tests:\n");
1723
1724 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1725 ret = run_tracer_selftest(p->type);
1726 /* If the test fails, then warn and remove from available_tracers */
1727 if (ret < 0) {
1728 WARN(1, "tracer: %s failed selftest, disabling\n",
1729 p->type->name);
1730 last = &trace_types;
1731 for (t = trace_types; t; t = t->next) {
1732 if (t == p->type) {
1733 *last = t->next;
1734 break;
1735 }
1736 last = &t->next;
1737 }
1738 }
1739 list_del(&p->list);
1740 kfree(p);
1741 }
1742
1743 out:
1744 mutex_unlock(&trace_types_lock);
1745
1746 return 0;
1747 }
1748 core_initcall(init_trace_selftests);
1749 #else
1750 static inline int run_tracer_selftest(struct tracer *type)
1751 {
1752 return 0;
1753 }
1754 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1755
1756 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1757
1758 static void __init apply_trace_boot_options(void);
1759
1760 /**
1761 * register_tracer - register a tracer with the ftrace system.
1762 * @type: the plugin for the tracer
1763 *
1764 * Register a new plugin tracer.
1765 */
1766 int __init register_tracer(struct tracer *type)
1767 {
1768 struct tracer *t;
1769 int ret = 0;
1770
1771 if (!type->name) {
1772 pr_info("Tracer must have a name\n");
1773 return -1;
1774 }
1775
1776 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1777 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1778 return -1;
1779 }
1780
1781 mutex_lock(&trace_types_lock);
1782
1783 tracing_selftest_running = true;
1784
1785 for (t = trace_types; t; t = t->next) {
1786 if (strcmp(type->name, t->name) == 0) {
1787 /* already found */
1788 pr_info("Tracer %s already registered\n",
1789 type->name);
1790 ret = -1;
1791 goto out;
1792 }
1793 }
1794
1795 if (!type->set_flag)
1796 type->set_flag = &dummy_set_flag;
1797 if (!type->flags) {
1798 /*allocate a dummy tracer_flags*/
1799 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1800 if (!type->flags) {
1801 ret = -ENOMEM;
1802 goto out;
1803 }
1804 type->flags->val = 0;
1805 type->flags->opts = dummy_tracer_opt;
1806 } else
1807 if (!type->flags->opts)
1808 type->flags->opts = dummy_tracer_opt;
1809
1810 /* store the tracer for __set_tracer_option */
1811 type->flags->trace = type;
1812
1813 ret = run_tracer_selftest(type);
1814 if (ret < 0)
1815 goto out;
1816
1817 type->next = trace_types;
1818 trace_types = type;
1819 add_tracer_options(&global_trace, type);
1820
1821 out:
1822 tracing_selftest_running = false;
1823 mutex_unlock(&trace_types_lock);
1824
1825 if (ret || !default_bootup_tracer)
1826 goto out_unlock;
1827
1828 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1829 goto out_unlock;
1830
1831 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1832 /* Do we want this tracer to start on bootup? */
1833 tracing_set_tracer(&global_trace, type->name);
1834 default_bootup_tracer = NULL;
1835
1836 apply_trace_boot_options();
1837
1838 /* disable other selftests, since this will break them. */
1839 tracing_selftest_disabled = true;
1840 #ifdef CONFIG_FTRACE_STARTUP_TEST
1841 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1842 type->name);
1843 #endif
1844
1845 out_unlock:
1846 return ret;
1847 }
1848
1849 void tracing_reset(struct trace_buffer *buf, int cpu)
1850 {
1851 struct ring_buffer *buffer = buf->buffer;
1852
1853 if (!buffer)
1854 return;
1855
1856 ring_buffer_record_disable(buffer);
1857
1858 /* Make sure all commits have finished */
1859 synchronize_rcu();
1860 ring_buffer_reset_cpu(buffer, cpu);
1861
1862 ring_buffer_record_enable(buffer);
1863 }
1864
1865 void tracing_reset_online_cpus(struct trace_buffer *buf)
1866 {
1867 struct ring_buffer *buffer = buf->buffer;
1868 int cpu;
1869
1870 if (!buffer)
1871 return;
1872
1873 ring_buffer_record_disable(buffer);
1874
1875 /* Make sure all commits have finished */
1876 synchronize_rcu();
1877
1878 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1879
1880 for_each_online_cpu(cpu)
1881 ring_buffer_reset_cpu(buffer, cpu);
1882
1883 ring_buffer_record_enable(buffer);
1884 }
1885
1886 /* Must have trace_types_lock held */
1887 void tracing_reset_all_online_cpus(void)
1888 {
1889 struct trace_array *tr;
1890
1891 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1892 if (!tr->clear_trace)
1893 continue;
1894 tr->clear_trace = false;
1895 tracing_reset_online_cpus(&tr->trace_buffer);
1896 #ifdef CONFIG_TRACER_MAX_TRACE
1897 tracing_reset_online_cpus(&tr->max_buffer);
1898 #endif
1899 }
1900 }
1901
1902 static int *tgid_map;
1903
1904 #define SAVED_CMDLINES_DEFAULT 128
1905 #define NO_CMDLINE_MAP UINT_MAX
1906 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1907 struct saved_cmdlines_buffer {
1908 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1909 unsigned *map_cmdline_to_pid;
1910 unsigned cmdline_num;
1911 int cmdline_idx;
1912 char *saved_cmdlines;
1913 };
1914 static struct saved_cmdlines_buffer *savedcmd;
1915
1916 /* temporarily disable recording */
1917 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1918
1919 static inline char *get_saved_cmdlines(int idx)
1920 {
1921 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1922 }
1923
1924 static inline void set_cmdline(int idx, const char *cmdline)
1925 {
1926 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1927 }
1928
1929 static int allocate_cmdlines_buffer(unsigned int val,
1930 struct saved_cmdlines_buffer *s)
1931 {
1932 s->map_cmdline_to_pid = kmalloc_array(val,
1933 sizeof(*s->map_cmdline_to_pid),
1934 GFP_KERNEL);
1935 if (!s->map_cmdline_to_pid)
1936 return -ENOMEM;
1937
1938 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1939 if (!s->saved_cmdlines) {
1940 kfree(s->map_cmdline_to_pid);
1941 return -ENOMEM;
1942 }
1943
1944 s->cmdline_idx = 0;
1945 s->cmdline_num = val;
1946 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1947 sizeof(s->map_pid_to_cmdline));
1948 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1949 val * sizeof(*s->map_cmdline_to_pid));
1950
1951 return 0;
1952 }
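/*
 * Rough sizing (assuming TASK_COMM_LEN is 16, as on mainline): the
 * default of 128 entries costs 128 * 16 = 2048 bytes for saved_cmdlines
 * plus 128 * sizeof(unsigned) for map_cmdline_to_pid.
 */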
1953
1954 static int trace_create_savedcmd(void)
1955 {
1956 int ret;
1957
1958 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1959 if (!savedcmd)
1960 return -ENOMEM;
1961
1962 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1963 if (ret < 0) {
1964 kfree(savedcmd);
1965 savedcmd = NULL;
1966 return -ENOMEM;
1967 }
1968
1969 return 0;
1970 }
1971
1972 int is_tracing_stopped(void)
1973 {
1974 return global_trace.stop_count;
1975 }
1976
1977 /**
1978 * tracing_start - quick start of the tracer
1979 *
1980 * If tracing is enabled but was stopped by tracing_stop,
1981 * this will start the tracer back up.
1982 */
1983 void tracing_start(void)
1984 {
1985 struct ring_buffer *buffer;
1986 unsigned long flags;
1987
1988 if (tracing_disabled)
1989 return;
1990
1991 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1992 if (--global_trace.stop_count) {
1993 if (global_trace.stop_count < 0) {
1994 /* Someone screwed up their debugging */
1995 WARN_ON_ONCE(1);
1996 global_trace.stop_count = 0;
1997 }
1998 goto out;
1999 }
2000
2001 /* Prevent the buffers from switching */
2002 arch_spin_lock(&global_trace.max_lock);
2003
2004 buffer = global_trace.trace_buffer.buffer;
2005 if (buffer)
2006 ring_buffer_record_enable(buffer);
2007
2008 #ifdef CONFIG_TRACER_MAX_TRACE
2009 buffer = global_trace.max_buffer.buffer;
2010 if (buffer)
2011 ring_buffer_record_enable(buffer);
2012 #endif
2013
2014 arch_spin_unlock(&global_trace.max_lock);
2015
2016 out:
2017 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2018 }
2019
2020 static void tracing_start_tr(struct trace_array *tr)
2021 {
2022 struct ring_buffer *buffer;
2023 unsigned long flags;
2024
2025 if (tracing_disabled)
2026 return;
2027
2028 /* If global, we need to also start the max tracer */
2029 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2030 return tracing_start();
2031
2032 raw_spin_lock_irqsave(&tr->start_lock, flags);
2033
2034 if (--tr->stop_count) {
2035 if (tr->stop_count < 0) {
2036 /* Someone screwed up their debugging */
2037 WARN_ON_ONCE(1);
2038 tr->stop_count = 0;
2039 }
2040 goto out;
2041 }
2042
2043 buffer = tr->trace_buffer.buffer;
2044 if (buffer)
2045 ring_buffer_record_enable(buffer);
2046
2047 out:
2048 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2049 }
2050
2051 /**
2052 * tracing_stop - quick stop of the tracer
2053 *
2054 * Light weight way to stop tracing. Use in conjunction with
2055 * tracing_start.
2056 */
2057 void tracing_stop(void)
2058 {
2059 struct ring_buffer *buffer;
2060 unsigned long flags;
2061
2062 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2063 if (global_trace.stop_count++)
2064 goto out;
2065
2066 /* Prevent the buffers from switching */
2067 arch_spin_lock(&global_trace.max_lock);
2068
2069 buffer = global_trace.trace_buffer.buffer;
2070 if (buffer)
2071 ring_buffer_record_disable(buffer);
2072
2073 #ifdef CONFIG_TRACER_MAX_TRACE
2074 buffer = global_trace.max_buffer.buffer;
2075 if (buffer)
2076 ring_buffer_record_disable(buffer);
2077 #endif
2078
2079 arch_spin_unlock(&global_trace.max_lock);
2080
2081 out:
2082 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2083 }
2084
2085 static void tracing_stop_tr(struct trace_array *tr)
2086 {
2087 struct ring_buffer *buffer;
2088 unsigned long flags;
2089
2090 /* If global, we need to also stop the max tracer */
2091 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2092 return tracing_stop();
2093
2094 raw_spin_lock_irqsave(&tr->start_lock, flags);
2095 if (tr->stop_count++)
2096 goto out;
2097
2098 buffer = tr->trace_buffer.buffer;
2099 if (buffer)
2100 ring_buffer_record_disable(buffer);
2101
2102 out:
2103 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2104 }
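
/*
 * Illustrative sketch (editorial addition, not part of trace.c): callers
 * typically bracket a region they want to inspect with tracing_stop() and
 * tracing_start(), so the ring buffers keep the events leading up to the
 * condition of interest. The helper name below is hypothetical.
 */
static void example_freeze_trace_on_error(bool error_detected)
{
	if (!error_detected)
		return;

	tracing_stop();		/* freeze the ring buffers for inspection */
	/* ... dump state, read the trace, etc. ... */
	tracing_start();	/* resume recording */
}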
2105
2106 static int trace_save_cmdline(struct task_struct *tsk)
2107 {
2108 unsigned pid, idx;
2109
2110 /* treat recording of idle task as a success */
2111 if (!tsk->pid)
2112 return 1;
2113
2114 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2115 return 0;
2116
2117 /*
2118 * It's not the end of the world if we don't get
2119 * the lock, but we also don't want to spin
2120 * nor do we want to disable interrupts,
2121 * so if we miss here, then better luck next time.
2122 */
2123 if (!arch_spin_trylock(&trace_cmdline_lock))
2124 return 0;
2125
2126 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2127 if (idx == NO_CMDLINE_MAP) {
2128 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2129
2130 /*
2131 * Check whether the cmdline buffer at idx has a pid
2132 * mapped. We are going to overwrite that entry so we
2133 * need to clear the map_pid_to_cmdline. Otherwise we
2134 * would read the new comm for the old pid.
2135 */
2136 pid = savedcmd->map_cmdline_to_pid[idx];
2137 if (pid != NO_CMDLINE_MAP)
2138 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2139
2140 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2141 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2142
2143 savedcmd->cmdline_idx = idx;
2144 }
2145
2146 set_cmdline(idx, tsk->comm);
2147
2148 arch_spin_unlock(&trace_cmdline_lock);
2149
2150 return 1;
2151 }
2152
2153 static void __trace_find_cmdline(int pid, char comm[])
2154 {
2155 unsigned map;
2156
2157 if (!pid) {
2158 strcpy(comm, "<idle>");
2159 return;
2160 }
2161
2162 if (WARN_ON_ONCE(pid < 0)) {
2163 strcpy(comm, "<XXX>");
2164 return;
2165 }
2166
2167 if (pid > PID_MAX_DEFAULT) {
2168 strcpy(comm, "<...>");
2169 return;
2170 }
2171
2172 map = savedcmd->map_pid_to_cmdline[pid];
2173 if (map != NO_CMDLINE_MAP)
2174 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2175 else
2176 strcpy(comm, "<...>");
2177 }
2178
2179 void trace_find_cmdline(int pid, char comm[])
2180 {
2181 preempt_disable();
2182 arch_spin_lock(&trace_cmdline_lock);
2183
2184 __trace_find_cmdline(pid, comm);
2185
2186 arch_spin_unlock(&trace_cmdline_lock);
2187 preempt_enable();
2188 }
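
/*
 * Illustrative sketch (editorial addition): resolving a recorded pid back
 * to a comm with trace_find_cmdline(). The destination buffer must be at
 * least TASK_COMM_LEN bytes; unknown pids come back as "<...>".
 */
static void example_print_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d last ran as %s\n", pid, comm);
}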
2189
2190 int trace_find_tgid(int pid)
2191 {
2192 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2193 return 0;
2194
2195 return tgid_map[pid];
2196 }
2197
2198 static int trace_save_tgid(struct task_struct *tsk)
2199 {
2200 /* treat recording of idle task as a success */
2201 if (!tsk->pid)
2202 return 1;
2203
2204 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2205 return 0;
2206
2207 tgid_map[tsk->pid] = tsk->tgid;
2208 return 1;
2209 }
2210
2211 static bool tracing_record_taskinfo_skip(int flags)
2212 {
2213 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2214 return true;
2215 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2216 return true;
2217 if (!__this_cpu_read(trace_taskinfo_save))
2218 return true;
2219 return false;
2220 }
2221
2222 /**
2223 * tracing_record_taskinfo - record the task info of a task
2224 *
2225 * @task: task to record
2226 * @flags: TRACE_RECORD_CMDLINE for recording comm
2227 *         TRACE_RECORD_TGID for recording tgid
2228 */
2229 void tracing_record_taskinfo(struct task_struct *task, int flags)
2230 {
2231 bool done;
2232
2233 if (tracing_record_taskinfo_skip(flags))
2234 return;
2235
2236 /*
2237 * Record as much task information as possible. If some fail, continue
2238 * to try to record the others.
2239 */
2240 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2241 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2242
2243 /* If recording any information failed, retry again soon. */
2244 if (!done)
2245 return;
2246
2247 __this_cpu_write(trace_taskinfo_save, false);
2248 }
2249
2250 /**
2251 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2252 *
2253 * @prev: previous task during sched_switch
2254 * @next: next task during sched_switch
2255 * @flags: TRACE_RECORD_CMDLINE for recording comm
2256 *         TRACE_RECORD_TGID for recording tgid
2257 */
2258 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2259 struct task_struct *next, int flags)
2260 {
2261 bool done;
2262
2263 if (tracing_record_taskinfo_skip(flags))
2264 return;
2265
2266 /*
2267 * Record as much task information as possible. If some fail, continue
2268 * to try to record the others.
2269 */
2270 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2271 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2272 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2273 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2274
2275 /* If recording any information failed, retry again soon. */
2276 if (!done)
2277 return;
2278
2279 __this_cpu_write(trace_taskinfo_save, false);
2280 }
2281
2282 /* Helpers to record a specific task information */
2283 void tracing_record_cmdline(struct task_struct *task)
2284 {
2285 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2286 }
2287
2288 void tracing_record_tgid(struct task_struct *task)
2289 {
2290 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2291 }
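
/*
 * Illustrative sketch (editorial addition): the sched_switch path is the
 * typical caller, recording both comm and tgid for the outgoing and
 * incoming tasks in one call. The hook name is hypothetical.
 */
static void example_sched_switch_hook(struct task_struct *prev,
				      struct task_struct *next)
{
	tracing_record_taskinfo_sched_switch(prev, next,
					     TRACE_RECORD_CMDLINE |
					     TRACE_RECORD_TGID);
}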
2292
2293 /*
2294 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2295 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2296 * simplifies those functions and keeps them in sync.
2297 */
2298 enum print_line_t trace_handle_return(struct trace_seq *s)
2299 {
2300 return trace_seq_has_overflowed(s) ?
2301 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2302 }
2303 EXPORT_SYMBOL_GPL(trace_handle_return);
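
/*
 * Illustrative sketch (editorial addition): a typical event output callback
 * formats into iter->seq and ends with trace_handle_return(), so a
 * trace_seq overflow is reported as TRACE_TYPE_PARTIAL_LINE without an
 * explicit check after every print. The callback itself is hypothetical.
 */
static enum print_line_t example_trace_output(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example: cpu=%d ts=%llu\n",
			 iter->cpu, (unsigned long long)iter->ts);

	return trace_handle_return(s);
}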
2304
2305 void
2306 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2307 int pc)
2308 {
2309 struct task_struct *tsk = current;
2310
2311 entry->preempt_count = pc & 0xff;
2312 entry->pid = (tsk) ? tsk->pid : 0;
2313 entry->flags =
2314 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2315 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2316 #else
2317 TRACE_FLAG_IRQS_NOSUPPORT |
2318 #endif
2319 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2320 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2321 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2322 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2323 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2324 }
2325 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2326
2327 struct ring_buffer_event *
2328 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2329 int type,
2330 unsigned long len,
2331 unsigned long flags, int pc)
2332 {
2333 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2334 }
2335
2336 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2337 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2338 static int trace_buffered_event_ref;
2339
2340 /**
2341 * trace_buffered_event_enable - enable buffering events
2342 *
2343 * When events are being filtered, it is quicker to write the event
2344 * data into a temporary buffer if there is a good chance the event
2345 * will not be committed. Discarding an event from the ring buffer
2346 * is not as fast as committing one, and is much slower than copying
2347 * the data and committing the copy.
2348 *
2349 * When events are being filtered, allocate per-CPU buffers to write
2350 * the event data into. If the event is filtered, it is simply
2351 * dropped; otherwise the entire data is committed to the ring buffer
2352 * in one shot.
2353 */
2354 void trace_buffered_event_enable(void)
2355 {
2356 struct ring_buffer_event *event;
2357 struct page *page;
2358 int cpu;
2359
2360 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2361
2362 if (trace_buffered_event_ref++)
2363 return;
2364
2365 for_each_tracing_cpu(cpu) {
2366 page = alloc_pages_node(cpu_to_node(cpu),
2367 GFP_KERNEL | __GFP_NORETRY, 0);
2368 if (!page)
2369 goto failed;
2370
2371 event = page_address(page);
2372 memset(event, 0, sizeof(*event));
2373
2374 per_cpu(trace_buffered_event, cpu) = event;
2375
2376 preempt_disable();
2377 if (cpu == smp_processor_id() &&
2378 this_cpu_read(trace_buffered_event) !=
2379 per_cpu(trace_buffered_event, cpu))
2380 WARN_ON_ONCE(1);
2381 preempt_enable();
2382 }
2383
2384 return;
2385 failed:
2386 trace_buffered_event_disable();
2387 }
2388
2389 static void enable_trace_buffered_event(void *data)
2390 {
2391 /* Probably not needed, but do it anyway */
2392 smp_rmb();
2393 this_cpu_dec(trace_buffered_event_cnt);
2394 }
2395
2396 static void disable_trace_buffered_event(void *data)
2397 {
2398 this_cpu_inc(trace_buffered_event_cnt);
2399 }
2400
2401 /**
2402 * trace_buffered_event_disable - disable buffering events
2403 *
2404 * When a filter is removed, it is faster to not use the buffered
2405 * events, and to commit directly into the ring buffer. Free up
2406 * the temp buffers when there are no more users. This requires
2407 * special synchronization with current events.
2408 */
2409 void trace_buffered_event_disable(void)
2410 {
2411 int cpu;
2412
2413 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2414
2415 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2416 return;
2417
2418 if (--trace_buffered_event_ref)
2419 return;
2420
2421 preempt_disable();
2422 /* For each CPU, set the buffer as used. */
2423 smp_call_function_many(tracing_buffer_mask,
2424 disable_trace_buffered_event, NULL, 1);
2425 preempt_enable();
2426
2427 /* Wait for all current users to finish */
2428 synchronize_rcu();
2429
2430 for_each_tracing_cpu(cpu) {
2431 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2432 per_cpu(trace_buffered_event, cpu) = NULL;
2433 }
2434 /*
2435 * Make sure trace_buffered_event is NULL before clearing
2436 * trace_buffered_event_cnt.
2437 */
2438 smp_wmb();
2439
2440 preempt_disable();
2441 /* Do the work on each cpu */
2442 smp_call_function_many(tracing_buffer_mask,
2443 enable_trace_buffered_event, NULL, 1);
2444 preempt_enable();
2445 }
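
/*
 * Illustrative sketch (editorial addition): the enable/disable pair is
 * reference counted and must be called under event_mutex, typically around
 * attaching and detaching an event filter. The helper and its callbacks
 * are hypothetical.
 */
static void example_with_buffered_events(void (*attach_filter)(void),
					 void (*detach_filter)(void))
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* allocate per-CPU staging pages */
	attach_filter();
	mutex_unlock(&event_mutex);

	/* ... later, when the filter is removed ... */

	mutex_lock(&event_mutex);
	detach_filter();
	trace_buffered_event_disable();	/* drop our reference, maybe free pages */
	mutex_unlock(&event_mutex);
}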
2446
2447 static struct ring_buffer *temp_buffer;
2448
2449 struct ring_buffer_event *
2450 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2451 struct trace_event_file *trace_file,
2452 int type, unsigned long len,
2453 unsigned long flags, int pc)
2454 {
2455 struct ring_buffer_event *entry;
2456 int val;
2457
2458 *current_rb = trace_file->tr->trace_buffer.buffer;
2459
2460 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2461 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2462 (entry = this_cpu_read(trace_buffered_event))) {
2463 /* Try to use the per cpu buffer first */
2464 val = this_cpu_inc_return(trace_buffered_event_cnt);
2465 if (val == 1) {
2466 trace_event_setup(entry, type, flags, pc);
2467 entry->array[0] = len;
2468 return entry;
2469 }
2470 this_cpu_dec(trace_buffered_event_cnt);
2471 }
2472
2473 entry = __trace_buffer_lock_reserve(*current_rb,
2474 type, len, flags, pc);
2475 /*
2476 * If tracing is off, but we have triggers enabled
2477 * we still need to look at the event data. Use the temp_buffer
2478 * to store the trace event for the trigger to use. It's recursion
2479 * safe and will not be recorded anywhere.
2480 */
2481 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2482 *current_rb = temp_buffer;
2483 entry = __trace_buffer_lock_reserve(*current_rb,
2484 type, len, flags, pc);
2485 }
2486 return entry;
2487 }
2488 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2489
2490 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2491 static DEFINE_MUTEX(tracepoint_printk_mutex);
2492
2493 static void output_printk(struct trace_event_buffer *fbuffer)
2494 {
2495 struct trace_event_call *event_call;
2496 struct trace_event *event;
2497 unsigned long flags;
2498 struct trace_iterator *iter = tracepoint_print_iter;
2499
2500 /* We should never get here if iter is NULL */
2501 if (WARN_ON_ONCE(!iter))
2502 return;
2503
2504 event_call = fbuffer->trace_file->event_call;
2505 if (!event_call || !event_call->event.funcs ||
2506 !event_call->event.funcs->trace)
2507 return;
2508
2509 event = &fbuffer->trace_file->event_call->event;
2510
2511 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2512 trace_seq_init(&iter->seq);
2513 iter->ent = fbuffer->entry;
2514 event_call->event.funcs->trace(iter, 0, event);
2515 trace_seq_putc(&iter->seq, 0);
2516 printk("%s", iter->seq.buffer);
2517
2518 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2519 }
2520
2521 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2522 void __user *buffer, size_t *lenp,
2523 loff_t *ppos)
2524 {
2525 int save_tracepoint_printk;
2526 int ret;
2527
2528 mutex_lock(&tracepoint_printk_mutex);
2529 save_tracepoint_printk = tracepoint_printk;
2530
2531 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2532
2533 /*
2534 * This will force exiting early, as tracepoint_printk
2535 * is always zero when tracepoint_print_iter is not allocated
2536 */
2537 if (!tracepoint_print_iter)
2538 tracepoint_printk = 0;
2539
2540 if (save_tracepoint_printk == tracepoint_printk)
2541 goto out;
2542
2543 if (tracepoint_printk)
2544 static_key_enable(&tracepoint_printk_key.key);
2545 else
2546 static_key_disable(&tracepoint_printk_key.key);
2547
2548 out:
2549 mutex_unlock(&tracepoint_printk_mutex);
2550
2551 return ret;
2552 }
2553
2554 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2555 {
2556 if (static_key_false(&tracepoint_printk_key.key))
2557 output_printk(fbuffer);
2558
2559 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2560 fbuffer->event, fbuffer->entry,
2561 fbuffer->flags, fbuffer->pc);
2562 }
2563 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
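
/*
 * Illustrative sketch (editorial addition): the reserve/commit pair as used
 * by the generated trace event code. The struct trace_event_buffer field
 * names follow their uses above; the event "type" and payload layout here
 * are hypothetical.
 */
static void example_emit_event(struct trace_event_file *trace_file,
			       int type, unsigned long len)
{
	struct trace_event_buffer fbuffer;

	fbuffer.trace_file = trace_file;
	local_save_flags(fbuffer.flags);
	fbuffer.pc = preempt_count();

	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
							trace_file, type, len,
							fbuffer.flags,
							fbuffer.pc);
	if (!fbuffer.event)
		return;

	fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	/* fill in the type-specific fields of fbuffer.entry here */

	trace_event_buffer_commit(&fbuffer);
}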
2564
2565 /*
2566 * Skip 3:
2567 *
2568 * trace_buffer_unlock_commit_regs()
2569 * trace_event_buffer_commit()
2570 * trace_event_raw_event_xxx()
2571 */
2572 # define STACK_SKIP 3
2573
2574 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2575 struct ring_buffer *buffer,
2576 struct ring_buffer_event *event,
2577 unsigned long flags, int pc,
2578 struct pt_regs *regs)
2579 {
2580 __buffer_unlock_commit(buffer, event);
2581
2582 /*
2583 * If regs is not set, then skip the necessary functions.
2584 * Note, we can still get here via blktrace, wakeup tracer
2585 * and mmiotrace, but that's ok if they lose a function or
2586 * two. They are not that meaningful.
2587 */
2588 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2589 ftrace_trace_userstack(buffer, flags, pc);
2590 }
2591
2592 /*
2593 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2594 */
2595 void
2596 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2597 struct ring_buffer_event *event)
2598 {
2599 __buffer_unlock_commit(buffer, event);
2600 }
2601
2602 static void
2603 trace_process_export(struct trace_export *export,
2604 struct ring_buffer_event *event)
2605 {
2606 struct trace_entry *entry;
2607 unsigned int size = 0;
2608
2609 entry = ring_buffer_event_data(event);
2610 size = ring_buffer_event_length(event);
2611 export->write(export, entry, size);
2612 }
2613
2614 static DEFINE_MUTEX(ftrace_export_lock);
2615
2616 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2617
2618 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2619
2620 static inline void ftrace_exports_enable(void)
2621 {
2622 static_branch_enable(&ftrace_exports_enabled);
2623 }
2624
2625 static inline void ftrace_exports_disable(void)
2626 {
2627 static_branch_disable(&ftrace_exports_enabled);
2628 }
2629
2630 static void ftrace_exports(struct ring_buffer_event *event)
2631 {
2632 struct trace_export *export;
2633
2634 preempt_disable_notrace();
2635
2636 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2637 while (export) {
2638 trace_process_export(export, event);
2639 export = rcu_dereference_raw_notrace(export->next);
2640 }
2641
2642 preempt_enable_notrace();
2643 }
2644
2645 static inline void
2646 add_trace_export(struct trace_export **list, struct trace_export *export)
2647 {
2648 rcu_assign_pointer(export->next, *list);
2649 /*
2650 * We are entering export into the list but another
2651 * CPU might be walking that list. We need to make sure
2652 * the export->next pointer is valid before another CPU sees
2653 * the export pointer included into the list.
2654 */
2655 rcu_assign_pointer(*list, export);
2656 }
2657
2658 static inline int
2659 rm_trace_export(struct trace_export **list, struct trace_export *export)
2660 {
2661 struct trace_export **p;
2662
2663 for (p = list; *p != NULL; p = &(*p)->next)
2664 if (*p == export)
2665 break;
2666
2667 if (*p != export)
2668 return -1;
2669
2670 rcu_assign_pointer(*p, (*p)->next);
2671
2672 return 0;
2673 }
2674
2675 static inline void
2676 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2677 {
2678 if (*list == NULL)
2679 ftrace_exports_enable();
2680
2681 add_trace_export(list, export);
2682 }
2683
2684 static inline int
2685 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2686 {
2687 int ret;
2688
2689 ret = rm_trace_export(list, export);
2690 if (*list == NULL)
2691 ftrace_exports_disable();
2692
2693 return ret;
2694 }
2695
2696 int register_ftrace_export(struct trace_export *export)
2697 {
2698 if (WARN_ON_ONCE(!export->write))
2699 return -1;
2700
2701 mutex_lock(&ftrace_export_lock);
2702
2703 add_ftrace_export(&ftrace_exports_list, export);
2704
2705 mutex_unlock(&ftrace_export_lock);
2706
2707 return 0;
2708 }
2709 EXPORT_SYMBOL_GPL(register_ftrace_export);
2710
2711 int unregister_ftrace_export(struct trace_export *export)
2712 {
2713 int ret;
2714
2715 mutex_lock(&ftrace_export_lock);
2716
2717 ret = rm_ftrace_export(&ftrace_exports_list, export);
2718
2719 mutex_unlock(&ftrace_export_lock);
2720
2721 return ret;
2722 }
2723 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
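
/*
 * Illustrative sketch (editorial addition): a minimal trace_export client.
 * The write() callback receives the raw trace entry and its length; the
 * prototype follows the call in trace_process_export() above and the
 * struct trace_export definition in <linux/trace.h> (treat the exact
 * signature as an assumption for this kernel version).
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int len)
{
	/* e.g. forward the raw entry to a device, a link, or a log */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static void example_export_setup(bool enable)
{
	if (enable)
		register_ftrace_export(&example_export);
	else
		unregister_ftrace_export(&example_export);
}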
2724
2725 void
2726 trace_function(struct trace_array *tr,
2727 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2728 int pc)
2729 {
2730 struct trace_event_call *call = &event_function;
2731 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2732 struct ring_buffer_event *event;
2733 struct ftrace_entry *entry;
2734
2735 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2736 flags, pc);
2737 if (!event)
2738 return;
2739 entry = ring_buffer_event_data(event);
2740 entry->ip = ip;
2741 entry->parent_ip = parent_ip;
2742
2743 if (!call_filter_check_discard(call, entry, buffer, event)) {
2744 if (static_branch_unlikely(&ftrace_exports_enabled))
2745 ftrace_exports(event);
2746 __buffer_unlock_commit(buffer, event);
2747 }
2748 }
2749
2750 #ifdef CONFIG_STACKTRACE
2751
2752 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2753 struct ftrace_stack {
2754 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2755 };
2756
2757 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2758 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2759
2760 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2761 unsigned long flags,
2762 int skip, int pc, struct pt_regs *regs)
2763 {
2764 struct trace_event_call *call = &event_kernel_stack;
2765 struct ring_buffer_event *event;
2766 struct stack_entry *entry;
2767 struct stack_trace trace;
2768 int use_stack;
2769 int size = FTRACE_STACK_ENTRIES;
2770
2771 trace.nr_entries = 0;
2772 trace.skip = skip;
2773
2774 /*
2775 * Add one, for this function and the call to save_stack_trace()
2776 * If regs is set, then these functions will not be in the way.
2777 */
2778 #ifndef CONFIG_UNWINDER_ORC
2779 if (!regs)
2780 trace.skip++;
2781 #endif
2782
2783 /*
2784 * Since events can happen in NMIs there's no safe way to
2785 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2786 * or NMI comes in, it will just have to save its stack directly
2787 * into the ring buffer event, limited to FTRACE_STACK_ENTRIES.
2788 */
2789 preempt_disable_notrace();
2790
2791 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2792 /*
2793 * We don't need any atomic variables, just a barrier.
2794 * If an interrupt comes in, we don't care, because it would
2795 * have exited and put the counter back to what we want.
2796 * We just need a barrier to keep gcc from moving things
2797 * around.
2798 */
2799 barrier();
2800 if (use_stack == 1) {
2801 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2802 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2803
2804 if (regs)
2805 save_stack_trace_regs(regs, &trace);
2806 else
2807 save_stack_trace(&trace);
2808
2809 if (trace.nr_entries > size)
2810 size = trace.nr_entries;
2811 } else
2812 /* From now on, use_stack is a boolean */
2813 use_stack = 0;
2814
2815 size *= sizeof(unsigned long);
2816
2817 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2818 sizeof(*entry) + size, flags, pc);
2819 if (!event)
2820 goto out;
2821 entry = ring_buffer_event_data(event);
2822
2823 memset(&entry->caller, 0, size);
2824
2825 if (use_stack)
2826 memcpy(&entry->caller, trace.entries,
2827 trace.nr_entries * sizeof(unsigned long));
2828 else {
2829 trace.max_entries = FTRACE_STACK_ENTRIES;
2830 trace.entries = entry->caller;
2831 if (regs)
2832 save_stack_trace_regs(regs, &trace);
2833 else
2834 save_stack_trace(&trace);
2835 }
2836
2837 entry->size = trace.nr_entries;
2838
2839 if (!call_filter_check_discard(call, entry, buffer, event))
2840 __buffer_unlock_commit(buffer, event);
2841
2842 out:
2843 /* Again, don't let gcc optimize things here */
2844 barrier();
2845 __this_cpu_dec(ftrace_stack_reserve);
2846 preempt_enable_notrace();
2847
2848 }
2849
2850 static inline void ftrace_trace_stack(struct trace_array *tr,
2851 struct ring_buffer *buffer,
2852 unsigned long flags,
2853 int skip, int pc, struct pt_regs *regs)
2854 {
2855 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2856 return;
2857
2858 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2859 }
2860
2861 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2862 int pc)
2863 {
2864 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2865
2866 if (rcu_is_watching()) {
2867 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2868 return;
2869 }
2870
2871 /*
2872 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2873 * but if the above rcu_is_watching() failed, then the NMI
2874 * triggered someplace critical, and rcu_irq_enter() should
2875 * not be called from NMI.
2876 */
2877 if (unlikely(in_nmi()))
2878 return;
2879
2880 rcu_irq_enter_irqson();
2881 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2882 rcu_irq_exit_irqson();
2883 }
2884
2885 /**
2886 * trace_dump_stack - record a stack back trace in the trace buffer
2887 * @skip: Number of functions to skip (helper handlers)
2888 */
2889 void trace_dump_stack(int skip)
2890 {
2891 unsigned long flags;
2892
2893 if (tracing_disabled || tracing_selftest_running)
2894 return;
2895
2896 local_save_flags(flags);
2897
2898 #ifndef CONFIG_UNWINDER_ORC
2899 /* Skip 1 to skip this function. */
2900 skip++;
2901 #endif
2902 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2903 flags, skip, preempt_count(), NULL);
2904 }
2905 EXPORT_SYMBOL_GPL(trace_dump_stack);
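
/*
 * Illustrative sketch (editorial addition): trace_dump_stack() records a
 * kernel backtrace into the ring buffer instead of the console, which keeps
 * its ordering relative to other trace events. Skipping 0 functions starts
 * the trace at the caller. The helper below is hypothetical.
 */
static void example_record_backtrace(bool suspicious)
{
	if (suspicious)
		trace_dump_stack(0);
}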
2906
2907 static DEFINE_PER_CPU(int, user_stack_count);
2908
2909 void
2910 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2911 {
2912 struct trace_event_call *call = &event_user_stack;
2913 struct ring_buffer_event *event;
2914 struct userstack_entry *entry;
2915 struct stack_trace trace;
2916
2917 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2918 return;
2919
2920 /*
2921 * NMIs can not handle page faults, even with fixups.
2922 * Saving the user stack can (and often does) fault.
2923 */
2924 if (unlikely(in_nmi()))
2925 return;
2926
2927 /*
2928 * prevent recursion, since the user stack tracing may
2929 * trigger other kernel events.
2930 */
2931 preempt_disable();
2932 if (__this_cpu_read(user_stack_count))
2933 goto out;
2934
2935 __this_cpu_inc(user_stack_count);
2936
2937 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2938 sizeof(*entry), flags, pc);
2939 if (!event)
2940 goto out_drop_count;
2941 entry = ring_buffer_event_data(event);
2942
2943 entry->tgid = current->tgid;
2944 memset(&entry->caller, 0, sizeof(entry->caller));
2945
2946 trace.nr_entries = 0;
2947 trace.max_entries = FTRACE_STACK_ENTRIES;
2948 trace.skip = 0;
2949 trace.entries = entry->caller;
2950
2951 save_stack_trace_user(&trace);
2952 if (!call_filter_check_discard(call, entry, buffer, event))
2953 __buffer_unlock_commit(buffer, event);
2954
2955 out_drop_count:
2956 __this_cpu_dec(user_stack_count);
2957 out:
2958 preempt_enable();
2959 }
2960
2961 #ifdef UNUSED
2962 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2963 {
2964 ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
2965 }
2966 #endif /* UNUSED */
2967
2968 #endif /* CONFIG_STACKTRACE */
2969
2970 /* created for use with alloc_percpu */
2971 struct trace_buffer_struct {
2972 int nesting;
2973 char buffer[4][TRACE_BUF_SIZE];
2974 };
2975
2976 static struct trace_buffer_struct *trace_percpu_buffer;
2977
2978 /*
2979 * This allows for lockless recording. If we're nested too deeply, then
2980 * this returns NULL.
2981 */
2982 static char *get_trace_buf(void)
2983 {
2984 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2985
2986 if (!buffer || buffer->nesting >= 4)
2987 return NULL;
2988
2989 buffer->nesting++;
2990
2991 /* Interrupts must see nesting incremented before we use the buffer */
2992 barrier();
2993 return &buffer->buffer[buffer->nesting][0];
2994 }
2995
2996 static void put_trace_buf(void)
2997 {
2998 /* Don't let the decrement of nesting leak before this */
2999 barrier();
3000 this_cpu_dec(trace_percpu_buffer->nesting);
3001 }
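
/*
 * Illustrative sketch (editorial addition): the get/put pair hands out a
 * small per-CPU, per-nesting-level scratch buffer. A NULL return means the
 * current context is nested more than four levels deep and the message
 * should simply be dropped. The helper is hypothetical; real callers such
 * as trace_vbprintk() below pin the CPU the same way.
 */
static void example_format_message(const char *msg)
{
	char *buf;

	preempt_disable_notrace();

	buf = get_trace_buf();
	if (buf) {
		snprintf(buf, TRACE_BUF_SIZE, "%s", msg);
		/* ... hand buf off to the ring buffer here ... */
		put_trace_buf();
	}

	preempt_enable_notrace();
}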
3002
3003 static int alloc_percpu_trace_buffer(void)
3004 {
3005 struct trace_buffer_struct *buffers;
3006
3007 buffers = alloc_percpu(struct trace_buffer_struct);
3008 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3009 return -ENOMEM;
3010
3011 trace_percpu_buffer = buffers;
3012 return 0;
3013 }
3014
3015 static int buffers_allocated;
3016
3017 void trace_printk_init_buffers(void)
3018 {
3019 if (buffers_allocated)
3020 return;
3021
3022 if (alloc_percpu_trace_buffer())
3023 return;
3024
3025 /* trace_printk() is for debug use only. Don't use it in production. */
3026
3027 pr_warn("\n");
3028 pr_warn("**********************************************************\n");
3029 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3030 pr_warn("** **\n");
3031 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3032 pr_warn("** **\n");
3033 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3034 pr_warn("** unsafe for production use. **\n");
3035 pr_warn("** **\n");
3036 pr_warn("** If you see this message and you are not debugging **\n");
3037 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3038 pr_warn("** **\n");
3039 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3040 pr_warn("**********************************************************\n");
3041
3042 /* Expand the buffers to set size */
3043 tracing_update_buffers();
3044
3045 buffers_allocated = 1;
3046
3047 /*
3048 * trace_printk_init_buffers() can be called by modules.
3049 * If that happens, then we need to start cmdline recording
3050 * directly here. If global_trace.trace_buffer.buffer is already
3051 * allocated here, then this was called by module code.
3052 */
3053 if (global_trace.trace_buffer.buffer)
3054 tracing_start_cmdline_record();
3055 }
3056
3057 void trace_printk_start_comm(void)
3058 {
3059 /* Start tracing comms if trace printk is set */
3060 if (!buffers_allocated)
3061 return;
3062 tracing_start_cmdline_record();
3063 }
3064
3065 static void trace_printk_start_stop_comm(int enabled)
3066 {
3067 if (!buffers_allocated)
3068 return;
3069
3070 if (enabled)
3071 tracing_start_cmdline_record();
3072 else
3073 tracing_stop_cmdline_record();
3074 }
3075
3076 /**
3077 * trace_vbprintk - write binary msg to tracing buffer
3078 * @ip: caller address, @fmt: binary printf format, @args: arguments for @fmt
3079 */
3080 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3081 {
3082 struct trace_event_call *call = &event_bprint;
3083 struct ring_buffer_event *event;
3084 struct ring_buffer *buffer;
3085 struct trace_array *tr = &global_trace;
3086 struct bprint_entry *entry;
3087 unsigned long flags;
3088 char *tbuffer;
3089 int len = 0, size, pc;
3090
3091 if (unlikely(tracing_selftest_running || tracing_disabled))
3092 return 0;
3093
3094 /* Don't pollute graph traces with trace_vprintk internals */
3095 pause_graph_tracing();
3096
3097 pc = preempt_count();
3098 preempt_disable_notrace();
3099
3100 tbuffer = get_trace_buf();
3101 if (!tbuffer) {
3102 len = 0;
3103 goto out_nobuffer;
3104 }
3105
3106 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3107
3108 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3109 goto out;
3110
3111 local_save_flags(flags);
3112 size = sizeof(*entry) + sizeof(u32) * len;
3113 buffer = tr->trace_buffer.buffer;
3114 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3115 flags, pc);
3116 if (!event)
3117 goto out;
3118 entry = ring_buffer_event_data(event);
3119 entry->ip = ip;
3120 entry->fmt = fmt;
3121
3122 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3123 if (!call_filter_check_discard(call, entry, buffer, event)) {
3124 __buffer_unlock_commit(buffer, event);
3125 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3126 }
3127
3128 out:
3129 put_trace_buf();
3130
3131 out_nobuffer:
3132 preempt_enable_notrace();
3133 unpause_graph_tracing();
3134
3135 return len;
3136 }
3137 EXPORT_SYMBOL_GPL(trace_vbprintk);
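
/*
 * Illustrative sketch (editorial addition): trace_vbprintk() is normally
 * reached through the trace_printk() macro, which stores only the format
 * pointer plus the binary arguments when the format is a constant string.
 * The helper below is hypothetical.
 */
static void example_debug_printk(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d saw a delta of %llu ns\n",
		     cpu, (unsigned long long)delta_ns);
}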
3138
3139 __printf(3, 0)
3140 static int
3141 __trace_array_vprintk(struct ring_buffer *buffer,
3142 unsigned long ip, const char *fmt, va_list args)
3143 {
3144 struct trace_event_call *call = &event_print;
3145 struct ring_buffer_event *event;
3146 int len = 0, size, pc;
3147 struct print_entry *entry;
3148 unsigned long flags;
3149 char *tbuffer;
3150
3151 if (tracing_disabled || tracing_selftest_running)
3152 return 0;
3153
3154 /* Don't pollute graph traces with trace_vprintk internals */
3155 pause_graph_tracing();
3156
3157 pc = preempt_count();
3158 preempt_disable_notrace();
3159
3160
3161 tbuffer = get_trace_buf();
3162 if (!tbuffer) {
3163 len = 0;
3164 goto out_nobuffer;
3165 }
3166
3167 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3168
3169 local_save_flags(flags);
3170 size = sizeof(*entry) + len + 1;
3171 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3172 flags, pc);
3173 if (!event)
3174 goto out;
3175 entry = ring_buffer_event_data(event);
3176 entry->ip = ip;
3177
3178 memcpy(&entry->buf, tbuffer, len + 1);
3179 if (!call_filter_check_discard(call, entry, buffer, event)) {
3180 __buffer_unlock_commit(buffer, event);
3181 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3182 }
3183
3184 out:
3185 put_trace_buf();
3186
3187 out_nobuffer:
3188 preempt_enable_notrace();
3189 unpause_graph_tracing();
3190
3191 return len;
3192 }
3193
3194 __printf(3, 0)
3195 int trace_array_vprintk(struct trace_array *tr,
3196 unsigned long ip, const char *fmt, va_list args)
3197 {
3198 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3199 }
3200
3201 __printf(3, 0)
3202 int trace_array_printk(struct trace_array *tr,
3203 unsigned long ip, const char *fmt, ...)
3204 {
3205 int ret;
3206 va_list ap;
3207
3208 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3209 return 0;
3210
3211 va_start(ap, fmt);
3212 ret = trace_array_vprintk(tr, ip, fmt, ap);
3213 va_end(ap);
3214 return ret;
3215 }
3216
3217 __printf(3, 4)
3218 int trace_array_printk_buf(struct ring_buffer *buffer,
3219 unsigned long ip, const char *fmt, ...)
3220 {
3221 int ret;
3222 va_list ap;
3223
3224 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3225 return 0;
3226
3227 va_start(ap, fmt);
3228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3229 va_end(ap);
3230 return ret;
3231 }
3232
3233 __printf(2, 0)
3234 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3235 {
3236 return trace_array_vprintk(&global_trace, ip, fmt, args);
3237 }
3238 EXPORT_SYMBOL_GPL(trace_vprintk);
3239
3240 static void trace_iterator_increment(struct trace_iterator *iter)
3241 {
3242 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3243
3244 iter->idx++;
3245 if (buf_iter)
3246 ring_buffer_read(buf_iter, NULL);
3247 }
3248
3249 static struct trace_entry *
3250 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3251 unsigned long *lost_events)
3252 {
3253 struct ring_buffer_event *event;
3254 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3255
3256 if (buf_iter)
3257 event = ring_buffer_iter_peek(buf_iter, ts);
3258 else
3259 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3260 lost_events);
3261
3262 if (event) {
3263 iter->ent_size = ring_buffer_event_length(event);
3264 return ring_buffer_event_data(event);
3265 }
3266 iter->ent_size = 0;
3267 return NULL;
3268 }
3269
3270 static struct trace_entry *
3271 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3272 unsigned long *missing_events, u64 *ent_ts)
3273 {
3274 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3275 struct trace_entry *ent, *next = NULL;
3276 unsigned long lost_events = 0, next_lost = 0;
3277 int cpu_file = iter->cpu_file;
3278 u64 next_ts = 0, ts;
3279 int next_cpu = -1;
3280 int next_size = 0;
3281 int cpu;
3282
3283 /*
3284 * If we are in a per_cpu trace file, don't bother iterating over
3285 * all CPUs; peek at that CPU directly.
3286 */
3287 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3288 if (ring_buffer_empty_cpu(buffer, cpu_file))
3289 return NULL;
3290 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3291 if (ent_cpu)
3292 *ent_cpu = cpu_file;
3293
3294 return ent;
3295 }
3296
3297 for_each_tracing_cpu(cpu) {
3298
3299 if (ring_buffer_empty_cpu(buffer, cpu))
3300 continue;
3301
3302 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3303
3304 /*
3305 * Pick the entry with the smallest timestamp:
3306 */
3307 if (ent && (!next || ts < next_ts)) {
3308 next = ent;
3309 next_cpu = cpu;
3310 next_ts = ts;
3311 next_lost = lost_events;
3312 next_size = iter->ent_size;
3313 }
3314 }
3315
3316 iter->ent_size = next_size;
3317
3318 if (ent_cpu)
3319 *ent_cpu = next_cpu;
3320
3321 if (ent_ts)
3322 *ent_ts = next_ts;
3323
3324 if (missing_events)
3325 *missing_events = next_lost;
3326
3327 return next;
3328 }
3329
3330 /* Find the next real entry, without updating the iterator itself */
3331 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3332 int *ent_cpu, u64 *ent_ts)
3333 {
3334 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3335 }
3336
3337 /* Find the next real entry, and increment the iterator to the next entry */
3338 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3339 {
3340 iter->ent = __find_next_entry(iter, &iter->cpu,
3341 &iter->lost_events, &iter->ts);
3342
3343 if (iter->ent)
3344 trace_iterator_increment(iter);
3345
3346 return iter->ent ? iter : NULL;
3347 }
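
/*
 * Illustrative sketch (editorial addition): draining a fully initialized
 * iterator in global timestamp order. Each successful call leaves the next
 * oldest entry across all per-CPU buffers in iter->ent, with iter->cpu and
 * iter->ts describing where and when it was recorded. The helper is
 * hypothetical.
 */
static void example_walk_entries(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter)) {
		/* consume iter->ent here, e.g. hand it to print_trace_line() */
	}
}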
3348
3349 static void trace_consume(struct trace_iterator *iter)
3350 {
3351 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3352 &iter->lost_events);
3353 }
3354
3355 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3356 {
3357 struct trace_iterator *iter = m->private;
3358 int i = (int)*pos;
3359 void *ent;
3360
3361 WARN_ON_ONCE(iter->leftover);
3362
3363 (*pos)++;
3364
3365 /* can't go backwards */
3366 if (iter->idx > i)
3367 return NULL;
3368
3369 if (iter->idx < 0)
3370 ent = trace_find_next_entry_inc(iter);
3371 else
3372 ent = iter;
3373
3374 while (ent && iter->idx < i)
3375 ent = trace_find_next_entry_inc(iter);
3376
3377 iter->pos = *pos;
3378
3379 return ent;
3380 }
3381
3382 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3383 {
3384 struct ring_buffer_event *event;
3385 struct ring_buffer_iter *buf_iter;
3386 unsigned long entries = 0;
3387 u64 ts;
3388
3389 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3390
3391 buf_iter = trace_buffer_iter(iter, cpu);
3392 if (!buf_iter)
3393 return;
3394
3395 ring_buffer_iter_reset(buf_iter);
3396
3397 /*
3398 * With the max latency tracers, it is possible that a reset
3399 * never took place on a cpu. This is evident from the
3400 * timestamp being before the start of the buffer.
3401 */
3402 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3403 if (ts >= iter->trace_buffer->time_start)
3404 break;
3405 entries++;
3406 ring_buffer_read(buf_iter, NULL);
3407 }
3408
3409 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3410 }
3411
3412 /*
3413 * The current tracer is copied to avoid taking the global
3414 * lock all around.
3415 */
3416 static void *s_start(struct seq_file *m, loff_t *pos)
3417 {
3418 struct trace_iterator *iter = m->private;
3419 struct trace_array *tr = iter->tr;
3420 int cpu_file = iter->cpu_file;
3421 void *p = NULL;
3422 loff_t l = 0;
3423 int cpu;
3424
3425 /*
3426 * Copy the tracer to avoid using a global lock all around.
3427 * iter->trace is a copy of current_trace; the pointer to the
3428 * name may be used instead of a strcmp(), as iter->trace->name
3429 * will point to the same string as current_trace->name.
3430 */
3431 mutex_lock(&trace_types_lock);
3432 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3433 *iter->trace = *tr->current_trace;
3434 mutex_unlock(&trace_types_lock);
3435
3436 #ifdef CONFIG_TRACER_MAX_TRACE
3437 if (iter->snapshot && iter->trace->use_max_tr)
3438 return ERR_PTR(-EBUSY);
3439 #endif
3440
3441 if (!iter->snapshot)
3442 atomic_inc(&trace_record_taskinfo_disabled);
3443
3444 if (*pos != iter->pos) {
3445 iter->ent = NULL;
3446 iter->cpu = 0;
3447 iter->idx = -1;
3448
3449 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3450 for_each_tracing_cpu(cpu)
3451 tracing_iter_reset(iter, cpu);
3452 } else
3453 tracing_iter_reset(iter, cpu_file);
3454
3455 iter->leftover = 0;
3456 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3457 ;
3458
3459 } else {
3460 /*
3461 * If we overflowed the seq_file before, then we want
3462 * to just reuse the trace_seq buffer again.
3463 */
3464 if (iter->leftover)
3465 p = iter;
3466 else {
3467 l = *pos - 1;
3468 p = s_next(m, p, &l);
3469 }
3470 }
3471
3472 trace_event_read_lock();
3473 trace_access_lock(cpu_file);
3474 return p;
3475 }
3476
3477 static void s_stop(struct seq_file *m, void *p)
3478 {
3479 struct trace_iterator *iter = m->private;
3480
3481 #ifdef CONFIG_TRACER_MAX_TRACE
3482 if (iter->snapshot && iter->trace->use_max_tr)
3483 return;
3484 #endif
3485
3486 if (!iter->snapshot)
3487 atomic_dec(&trace_record_taskinfo_disabled);
3488
3489 trace_access_unlock(iter->cpu_file);
3490 trace_event_read_unlock();
3491 }
3492
3493 static void
3494 get_total_entries(struct trace_buffer *buf,
3495 unsigned long *total, unsigned long *entries)
3496 {
3497 unsigned long count;
3498 int cpu;
3499
3500 *total = 0;
3501 *entries = 0;
3502
3503 for_each_tracing_cpu(cpu) {
3504 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3505 /*
3506 * If this buffer has skipped entries, then we hold all
3507 * entries for the trace and we need to ignore the
3508 * ones before the time stamp.
3509 */
3510 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3511 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3512 /* total is the same as the entries */
3513 *total += count;
3514 } else
3515 *total += count +
3516 ring_buffer_overrun_cpu(buf->buffer, cpu);
3517 *entries += count;
3518 }
3519 }
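
/*
 * Worked example (editorial addition): if a CPU wrote 1500 events into a
 * buffer that only holds 1000, ring_buffer_entries_cpu() reports the 1000
 * still readable and ring_buffer_overrun_cpu() the 500 that were lost, so
 * this CPU contributes entries += 1000 and total += 1500, matching the
 * "entries-in-buffer/entries-written" line printed below.
 */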
3520
3521 static void print_lat_help_header(struct seq_file *m)
3522 {
3523 seq_puts(m, "# _------=> CPU# \n"
3524 "# / _-----=> irqs-off \n"
3525 "# | / _----=> need-resched \n"
3526 "# || / _---=> hardirq/softirq \n"
3527 "# ||| / _--=> preempt-depth \n"
3528 "# |||| / delay \n"
3529 "# cmd pid ||||| time | caller \n"
3530 "# \\ / ||||| \\ | / \n");
3531 }
3532
3533 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3534 {
3535 unsigned long total;
3536 unsigned long entries;
3537
3538 get_total_entries(buf, &total, &entries);
3539 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3540 entries, total, num_online_cpus());
3541 seq_puts(m, "#\n");
3542 }
3543
3544 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3545 unsigned int flags)
3546 {
3547 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3548
3549 print_event_info(buf, m);
3550
3551 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3552 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3553 }
3554
3555 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3556 unsigned int flags)
3557 {
3558 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3559 const char tgid_space[] = " ";
3560 const char space[] = " ";
3561
3562 seq_printf(m, "# %s _-----=> irqs-off\n",
3563 tgid ? tgid_space : space);
3564 seq_printf(m, "# %s / _----=> need-resched\n",
3565 tgid ? tgid_space : space);
3566 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3567 tgid ? tgid_space : space);
3568 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3569 tgid ? tgid_space : space);
3570 seq_printf(m, "# %s||| / delay\n",
3571 tgid ? tgid_space : space);
3572 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3573 tgid ? " TGID " : space);
3574 seq_printf(m, "# | | %s | |||| | |\n",
3575 tgid ? " | " : space);
3576 }
3577
3578 void
3579 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3580 {
3581 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3582 struct trace_buffer *buf = iter->trace_buffer;
3583 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3584 struct tracer *type = iter->trace;
3585 unsigned long entries;
3586 unsigned long total;
3587 const char *name = "preemption";
3588
3589 name = type->name;
3590
3591 get_total_entries(buf, &total, &entries);
3592
3593 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3594 name, UTS_RELEASE);
3595 seq_puts(m, "# -----------------------------------"
3596 "---------------------------------\n");
3597 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3598 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3599 nsecs_to_usecs(data->saved_latency),
3600 entries,
3601 total,
3602 buf->cpu,
3603 #if defined(CONFIG_PREEMPT_NONE)
3604 "server",
3605 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3606 "desktop",
3607 #elif defined(CONFIG_PREEMPT)
3608 "preempt",
3609 #else
3610 "unknown",
3611 #endif
3612 /* These are reserved for later use */
3613 0, 0, 0, 0);
3614 #ifdef CONFIG_SMP
3615 seq_printf(m, " #P:%d)\n", num_online_cpus());
3616 #else
3617 seq_puts(m, ")\n");
3618 #endif
3619 seq_puts(m, "# -----------------\n");
3620 seq_printf(m, "# | task: %.16s-%d "
3621 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3622 data->comm, data->pid,
3623 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3624 data->policy, data->rt_priority);
3625 seq_puts(m, "# -----------------\n");
3626
3627 if (data->critical_start) {
3628 seq_puts(m, "# => started at: ");
3629 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3630 trace_print_seq(m, &iter->seq);
3631 seq_puts(m, "\n# => ended at: ");
3632 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3633 trace_print_seq(m, &iter->seq);
3634 seq_puts(m, "\n#\n");
3635 }
3636
3637 seq_puts(m, "#\n");
3638 }
3639
3640 static void test_cpu_buff_start(struct trace_iterator *iter)
3641 {
3642 struct trace_seq *s = &iter->seq;
3643 struct trace_array *tr = iter->tr;
3644
3645 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3646 return;
3647
3648 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3649 return;
3650
3651 if (cpumask_available(iter->started) &&
3652 cpumask_test_cpu(iter->cpu, iter->started))
3653 return;
3654
3655 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3656 return;
3657
3658 if (cpumask_available(iter->started))
3659 cpumask_set_cpu(iter->cpu, iter->started);
3660
3661 /* Don't print started cpu buffer for the first entry of the trace */
3662 if (iter->idx > 1)
3663 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3664 iter->cpu);
3665 }
3666
3667 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3668 {
3669 struct trace_array *tr = iter->tr;
3670 struct trace_seq *s = &iter->seq;
3671 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3672 struct trace_entry *entry;
3673 struct trace_event *event;
3674
3675 entry = iter->ent;
3676
3677 test_cpu_buff_start(iter);
3678
3679 event = ftrace_find_event(entry->type);
3680
3681 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3682 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3683 trace_print_lat_context(iter);
3684 else
3685 trace_print_context(iter);
3686 }
3687
3688 if (trace_seq_has_overflowed(s))
3689 return TRACE_TYPE_PARTIAL_LINE;
3690
3691 if (event)
3692 return event->funcs->trace(iter, sym_flags, event);
3693
3694 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3695
3696 return trace_handle_return(s);
3697 }
3698
3699 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3700 {
3701 struct trace_array *tr = iter->tr;
3702 struct trace_seq *s = &iter->seq;
3703 struct trace_entry *entry;
3704 struct trace_event *event;
3705
3706 entry = iter->ent;
3707
3708 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3709 trace_seq_printf(s, "%d %d %llu ",
3710 entry->pid, iter->cpu, iter->ts);
3711
3712 if (trace_seq_has_overflowed(s))
3713 return TRACE_TYPE_PARTIAL_LINE;
3714
3715 event = ftrace_find_event(entry->type);
3716 if (event)
3717 return event->funcs->raw(iter, 0, event);
3718
3719 trace_seq_printf(s, "%d ?\n", entry->type);
3720
3721 return trace_handle_return(s);
3722 }
3723
3724 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3725 {
3726 struct trace_array *tr = iter->tr;
3727 struct trace_seq *s = &iter->seq;
3728 unsigned char newline = '\n';
3729 struct trace_entry *entry;
3730 struct trace_event *event;
3731
3732 entry = iter->ent;
3733
3734 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3735 SEQ_PUT_HEX_FIELD(s, entry->pid);
3736 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3737 SEQ_PUT_HEX_FIELD(s, iter->ts);
3738 if (trace_seq_has_overflowed(s))
3739 return TRACE_TYPE_PARTIAL_LINE;
3740 }
3741
3742 event = ftrace_find_event(entry->type);
3743 if (event) {
3744 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3745 if (ret != TRACE_TYPE_HANDLED)
3746 return ret;
3747 }
3748
3749 SEQ_PUT_FIELD(s, newline);
3750
3751 return trace_handle_return(s);
3752 }
3753
3754 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3755 {
3756 struct trace_array *tr = iter->tr;
3757 struct trace_seq *s = &iter->seq;
3758 struct trace_entry *entry;
3759 struct trace_event *event;
3760
3761 entry = iter->ent;
3762
3763 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3764 SEQ_PUT_FIELD(s, entry->pid);
3765 SEQ_PUT_FIELD(s, iter->cpu);
3766 SEQ_PUT_FIELD(s, iter->ts);
3767 if (trace_seq_has_overflowed(s))
3768 return TRACE_TYPE_PARTIAL_LINE;
3769 }
3770
3771 event = ftrace_find_event(entry->type);
3772 return event ? event->funcs->binary(iter, 0, event) :
3773 TRACE_TYPE_HANDLED;
3774 }
3775
3776 int trace_empty(struct trace_iterator *iter)
3777 {
3778 struct ring_buffer_iter *buf_iter;
3779 int cpu;
3780
3781 /* If we are looking at one CPU buffer, only check that one */
3782 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3783 cpu = iter->cpu_file;
3784 buf_iter = trace_buffer_iter(iter, cpu);
3785 if (buf_iter) {
3786 if (!ring_buffer_iter_empty(buf_iter))
3787 return 0;
3788 } else {
3789 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3790 return 0;
3791 }
3792 return 1;
3793 }
3794
3795 for_each_tracing_cpu(cpu) {
3796 buf_iter = trace_buffer_iter(iter, cpu);
3797 if (buf_iter) {
3798 if (!ring_buffer_iter_empty(buf_iter))
3799 return 0;
3800 } else {
3801 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3802 return 0;
3803 }
3804 }
3805
3806 return 1;
3807 }
3808
3809 /* Called with trace_event_read_lock() held. */
3810 enum print_line_t print_trace_line(struct trace_iterator *iter)
3811 {
3812 struct trace_array *tr = iter->tr;
3813 unsigned long trace_flags = tr->trace_flags;
3814 enum print_line_t ret;
3815
3816 if (iter->lost_events) {
3817 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3818 iter->cpu, iter->lost_events);
3819 if (trace_seq_has_overflowed(&iter->seq))
3820 return TRACE_TYPE_PARTIAL_LINE;
3821 }
3822
3823 if (iter->trace && iter->trace->print_line) {
3824 ret = iter->trace->print_line(iter);
3825 if (ret != TRACE_TYPE_UNHANDLED)
3826 return ret;
3827 }
3828
3829 if (iter->ent->type == TRACE_BPUTS &&
3830 trace_flags & TRACE_ITER_PRINTK &&
3831 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3832 return trace_print_bputs_msg_only(iter);
3833
3834 if (iter->ent->type == TRACE_BPRINT &&
3835 trace_flags & TRACE_ITER_PRINTK &&
3836 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3837 return trace_print_bprintk_msg_only(iter);
3838
3839 if (iter->ent->type == TRACE_PRINT &&
3840 trace_flags & TRACE_ITER_PRINTK &&
3841 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3842 return trace_print_printk_msg_only(iter);
3843
3844 if (trace_flags & TRACE_ITER_BIN)
3845 return print_bin_fmt(iter);
3846
3847 if (trace_flags & TRACE_ITER_HEX)
3848 return print_hex_fmt(iter);
3849
3850 if (trace_flags & TRACE_ITER_RAW)
3851 return print_raw_fmt(iter);
3852
3853 return print_trace_fmt(iter);
3854 }
3855
3856 void trace_latency_header(struct seq_file *m)
3857 {
3858 struct trace_iterator *iter = m->private;
3859 struct trace_array *tr = iter->tr;
3860
3861 /* print nothing if the buffers are empty */
3862 if (trace_empty(iter))
3863 return;
3864
3865 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3866 print_trace_header(m, iter);
3867
3868 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3869 print_lat_help_header(m);
3870 }
3871
3872 void trace_default_header(struct seq_file *m)
3873 {
3874 struct trace_iterator *iter = m->private;
3875 struct trace_array *tr = iter->tr;
3876 unsigned long trace_flags = tr->trace_flags;
3877
3878 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3879 return;
3880
3881 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3882 /* print nothing if the buffers are empty */
3883 if (trace_empty(iter))
3884 return;
3885 print_trace_header(m, iter);
3886 if (!(trace_flags & TRACE_ITER_VERBOSE))
3887 print_lat_help_header(m);
3888 } else {
3889 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3890 if (trace_flags & TRACE_ITER_IRQ_INFO)
3891 print_func_help_header_irq(iter->trace_buffer,
3892 m, trace_flags);
3893 else
3894 print_func_help_header(iter->trace_buffer, m,
3895 trace_flags);
3896 }
3897 }
3898 }
3899
3900 static void test_ftrace_alive(struct seq_file *m)
3901 {
3902 if (!ftrace_is_dead())
3903 return;
3904 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3905 "# MAY BE MISSING FUNCTION EVENTS\n");
3906 }
3907
3908 #ifdef CONFIG_TRACER_MAX_TRACE
3909 static void show_snapshot_main_help(struct seq_file *m)
3910 {
3911 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3912 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3913 "# Takes a snapshot of the main buffer.\n"
3914 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3915 "# (Doesn't have to be '2' works with any number that\n"
3916 "# is not a '0' or '1')\n");
3917 }
3918
3919 static void show_snapshot_percpu_help(struct seq_file *m)
3920 {
3921 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3922 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3923 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3924 "# Takes a snapshot of the main buffer for this cpu.\n");
3925 #else
3926 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3927 "# Must use main snapshot file to allocate.\n");
3928 #endif
3929 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3930 "# (Doesn't have to be '2' works with any number that\n"
3931 "# is not a '0' or '1')\n");
3932 }
3933
3934 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3935 {
3936 if (iter->tr->allocated_snapshot)
3937 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3938 else
3939 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3940
3941 seq_puts(m, "# Snapshot commands:\n");
3942 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3943 show_snapshot_main_help(m);
3944 else
3945 show_snapshot_percpu_help(m);
3946 }
3947 #else
3948 /* Should never be called */
3949 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3950 #endif
3951
3952 static int s_show(struct seq_file *m, void *v)
3953 {
3954 struct trace_iterator *iter = v;
3955 int ret;
3956
3957 if (iter->ent == NULL) {
3958 if (iter->tr) {
3959 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3960 seq_puts(m, "#\n");
3961 test_ftrace_alive(m);
3962 }
3963 if (iter->snapshot && trace_empty(iter))
3964 print_snapshot_help(m, iter);
3965 else if (iter->trace && iter->trace->print_header)
3966 iter->trace->print_header(m);
3967 else
3968 trace_default_header(m);
3969
3970 } else if (iter->leftover) {
3971 /*
3972 * If we filled the seq_file buffer earlier, we
3973 * want to just show it now.
3974 */
3975 ret = trace_print_seq(m, &iter->seq);
3976
3977 /* ret should this time be zero, but you never know */
3978 iter->leftover = ret;
3979
3980 } else {
3981 print_trace_line(iter);
3982 ret = trace_print_seq(m, &iter->seq);
3983 /*
3984 * If we overflow the seq_file buffer, then it will
3985 * ask us for this data again at start up.
3986 * Use that instead.
3987 * ret is 0 if seq_file write succeeded.
3988 * -1 otherwise.
3989 */
3990 iter->leftover = ret;
3991 }
3992
3993 return 0;
3994 }
3995
3996 /*
3997 * Should be used after trace_array_get(), trace_types_lock
3998 * ensures that i_cdev was already initialized.
3999 */
4000 static inline int tracing_get_cpu(struct inode *inode)
4001 {
4002 if (inode->i_cdev) /* See trace_create_cpu_file() */
4003 return (long)inode->i_cdev - 1;
4004 return RING_BUFFER_ALL_CPUS;
4005 }
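
/*
 * Minimal sketch of the encoding assumed above: trace_create_cpu_file()
 * stores "cpu + 1" in i_cdev, so a NULL i_cdev (the top level files)
 * decodes to RING_BUFFER_ALL_CPUS, while e.g. a per_cpu/cpu2 file stores
 * (void *)3 and decodes back to cpu 2:
 *
 *   inode->i_cdev == NULL      -> RING_BUFFER_ALL_CPUS
 *   inode->i_cdev == (void *)3 -> (long)3 - 1 == 2
 */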
4006
4007 static const struct seq_operations tracer_seq_ops = {
4008 .start = s_start,
4009 .next = s_next,
4010 .stop = s_stop,
4011 .show = s_show,
4012 };
4013
4014 static struct trace_iterator *
4015 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4016 {
4017 struct trace_array *tr = inode->i_private;
4018 struct trace_iterator *iter;
4019 int cpu;
4020
4021 if (tracing_disabled)
4022 return ERR_PTR(-ENODEV);
4023
4024 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4025 if (!iter)
4026 return ERR_PTR(-ENOMEM);
4027
4028 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4029 GFP_KERNEL);
4030 if (!iter->buffer_iter)
4031 goto release;
4032
4033 /*
4034 * We make a copy of the current tracer to avoid concurrent
4035 * changes on it while we are reading.
4036 */
4037 mutex_lock(&trace_types_lock);
4038 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4039 if (!iter->trace)
4040 goto fail;
4041
4042 *iter->trace = *tr->current_trace;
4043
4044 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4045 goto fail;
4046
4047 iter->tr = tr;
4048
4049 #ifdef CONFIG_TRACER_MAX_TRACE
4050 /* Currently only the top directory has a snapshot */
4051 if (tr->current_trace->print_max || snapshot)
4052 iter->trace_buffer = &tr->max_buffer;
4053 else
4054 #endif
4055 iter->trace_buffer = &tr->trace_buffer;
4056 iter->snapshot = snapshot;
4057 iter->pos = -1;
4058 iter->cpu_file = tracing_get_cpu(inode);
4059 mutex_init(&iter->mutex);
4060
4061 /* Notify the tracer early; before we stop tracing. */
4062 if (iter->trace && iter->trace->open)
4063 iter->trace->open(iter);
4064
4065 /* Annotate start of buffers if we had overruns */
4066 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4067 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4068
4069 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4070 if (trace_clocks[tr->clock_id].in_ns)
4071 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4072
4073 /* stop the trace while dumping if we are not opening "snapshot" */
4074 if (!iter->snapshot)
4075 tracing_stop_tr(tr);
4076
4077 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4078 for_each_tracing_cpu(cpu) {
4079 iter->buffer_iter[cpu] =
4080 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
4081 }
4082 ring_buffer_read_prepare_sync();
4083 for_each_tracing_cpu(cpu) {
4084 ring_buffer_read_start(iter->buffer_iter[cpu]);
4085 tracing_iter_reset(iter, cpu);
4086 }
4087 } else {
4088 cpu = iter->cpu_file;
4089 iter->buffer_iter[cpu] =
4090 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
4091 ring_buffer_read_prepare_sync();
4092 ring_buffer_read_start(iter->buffer_iter[cpu]);
4093 tracing_iter_reset(iter, cpu);
4094 }
4095
4096 mutex_unlock(&trace_types_lock);
4097
4098 return iter;
4099
4100 fail:
4101 mutex_unlock(&trace_types_lock);
4102 kfree(iter->trace);
4103 kfree(iter->buffer_iter);
4104 release:
4105 seq_release_private(inode, file);
4106 return ERR_PTR(-ENOMEM);
4107 }
4108
4109 int tracing_open_generic(struct inode *inode, struct file *filp)
4110 {
4111 if (tracing_disabled)
4112 return -ENODEV;
4113
4114 filp->private_data = inode->i_private;
4115 return 0;
4116 }
4117
4118 bool tracing_is_disabled(void)
4119 {
4120 	return (tracing_disabled) ? true : false;
4121 }
4122
4123 /*
4124 * Open and update trace_array ref count.
4125 * Must have the current trace_array passed to it.
4126 */
4127 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4128 {
4129 struct trace_array *tr = inode->i_private;
4130
4131 if (tracing_disabled)
4132 return -ENODEV;
4133
4134 if (trace_array_get(tr) < 0)
4135 return -ENODEV;
4136
4137 filp->private_data = inode->i_private;
4138
4139 return 0;
4140 }
4141
4142 static int tracing_release(struct inode *inode, struct file *file)
4143 {
4144 struct trace_array *tr = inode->i_private;
4145 struct seq_file *m = file->private_data;
4146 struct trace_iterator *iter;
4147 int cpu;
4148
4149 if (!(file->f_mode & FMODE_READ)) {
4150 trace_array_put(tr);
4151 return 0;
4152 }
4153
4154 /* Writes do not use seq_file */
4155 iter = m->private;
4156 mutex_lock(&trace_types_lock);
4157
4158 for_each_tracing_cpu(cpu) {
4159 if (iter->buffer_iter[cpu])
4160 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4161 }
4162
4163 if (iter->trace && iter->trace->close)
4164 iter->trace->close(iter);
4165
4166 if (!iter->snapshot)
4167 /* reenable tracing if it was previously enabled */
4168 tracing_start_tr(tr);
4169
4170 __trace_array_put(tr);
4171
4172 mutex_unlock(&trace_types_lock);
4173
4174 mutex_destroy(&iter->mutex);
4175 free_cpumask_var(iter->started);
4176 kfree(iter->trace);
4177 kfree(iter->buffer_iter);
4178 seq_release_private(inode, file);
4179
4180 return 0;
4181 }
4182
4183 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4184 {
4185 struct trace_array *tr = inode->i_private;
4186
4187 trace_array_put(tr);
4188 return 0;
4189 }
4190
4191 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4192 {
4193 struct trace_array *tr = inode->i_private;
4194
4195 trace_array_put(tr);
4196
4197 return single_release(inode, file);
4198 }
4199
4200 static int tracing_open(struct inode *inode, struct file *file)
4201 {
4202 struct trace_array *tr = inode->i_private;
4203 struct trace_iterator *iter;
4204 int ret = 0;
4205
4206 if (trace_array_get(tr) < 0)
4207 return -ENODEV;
4208
4209 /* If this file was open for write, then erase contents */
4210 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4211 int cpu = tracing_get_cpu(inode);
4212 struct trace_buffer *trace_buf = &tr->trace_buffer;
4213
4214 #ifdef CONFIG_TRACER_MAX_TRACE
4215 if (tr->current_trace->print_max)
4216 trace_buf = &tr->max_buffer;
4217 #endif
4218
4219 if (cpu == RING_BUFFER_ALL_CPUS)
4220 tracing_reset_online_cpus(trace_buf);
4221 else
4222 tracing_reset(trace_buf, cpu);
4223 }
4224
4225 if (file->f_mode & FMODE_READ) {
4226 iter = __tracing_open(inode, file, false);
4227 if (IS_ERR(iter))
4228 ret = PTR_ERR(iter);
4229 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4230 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4231 }
4232
4233 if (ret < 0)
4234 trace_array_put(tr);
4235
4236 return ret;
4237 }
4238
4239 /*
4240 * Some tracers are not suitable for instance buffers.
4241 * A tracer is always available for the global array (toplevel)
4242 * or if it explicitly states that it is.
4243 */
4244 static bool
4245 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4246 {
4247 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4248 }
4249
4250 /* Find the next tracer that this trace array may use */
4251 static struct tracer *
4252 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4253 {
4254 while (t && !trace_ok_for_array(t, tr))
4255 t = t->next;
4256
4257 return t;
4258 }
4259
4260 static void *
4261 t_next(struct seq_file *m, void *v, loff_t *pos)
4262 {
4263 struct trace_array *tr = m->private;
4264 struct tracer *t = v;
4265
4266 (*pos)++;
4267
4268 if (t)
4269 t = get_tracer_for_array(tr, t->next);
4270
4271 return t;
4272 }
4273
4274 static void *t_start(struct seq_file *m, loff_t *pos)
4275 {
4276 struct trace_array *tr = m->private;
4277 struct tracer *t;
4278 loff_t l = 0;
4279
4280 mutex_lock(&trace_types_lock);
4281
4282 t = get_tracer_for_array(tr, trace_types);
4283 for (; t && l < *pos; t = t_next(m, t, &l))
4284 ;
4285
4286 return t;
4287 }
4288
4289 static void t_stop(struct seq_file *m, void *p)
4290 {
4291 mutex_unlock(&trace_types_lock);
4292 }
4293
4294 static int t_show(struct seq_file *m, void *v)
4295 {
4296 struct tracer *t = v;
4297
4298 if (!t)
4299 return 0;
4300
4301 seq_puts(m, t->name);
4302 if (t->next)
4303 seq_putc(m, ' ');
4304 else
4305 seq_putc(m, '\n');
4306
4307 return 0;
4308 }
4309
4310 static const struct seq_operations show_traces_seq_ops = {
4311 .start = t_start,
4312 .next = t_next,
4313 .stop = t_stop,
4314 .show = t_show,
4315 };
4316
4317 static int show_traces_open(struct inode *inode, struct file *file)
4318 {
4319 struct trace_array *tr = inode->i_private;
4320 struct seq_file *m;
4321 int ret;
4322
4323 if (tracing_disabled)
4324 return -ENODEV;
4325
4326 ret = seq_open(file, &show_traces_seq_ops);
4327 if (ret)
4328 return ret;
4329
4330 m = file->private_data;
4331 m->private = tr;
4332
4333 return 0;
4334 }
4335
4336 static ssize_t
4337 tracing_write_stub(struct file *filp, const char __user *ubuf,
4338 size_t count, loff_t *ppos)
4339 {
4340 return count;
4341 }
4342
4343 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4344 {
4345 int ret;
4346
4347 if (file->f_mode & FMODE_READ)
4348 ret = seq_lseek(file, offset, whence);
4349 else
4350 file->f_pos = ret = 0;
4351
4352 return ret;
4353 }
4354
4355 static const struct file_operations tracing_fops = {
4356 .open = tracing_open,
4357 .read = seq_read,
4358 .write = tracing_write_stub,
4359 .llseek = tracing_lseek,
4360 .release = tracing_release,
4361 };
4362
4363 static const struct file_operations show_traces_fops = {
4364 .open = show_traces_open,
4365 .read = seq_read,
4366 .release = seq_release,
4367 .llseek = seq_lseek,
4368 };
4369
4370 static ssize_t
4371 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4372 size_t count, loff_t *ppos)
4373 {
4374 struct trace_array *tr = file_inode(filp)->i_private;
4375 char *mask_str;
4376 int len;
4377
4378 len = snprintf(NULL, 0, "%*pb\n",
4379 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4380 mask_str = kmalloc(len, GFP_KERNEL);
4381 if (!mask_str)
4382 return -ENOMEM;
4383
4384 len = snprintf(mask_str, len, "%*pb\n",
4385 cpumask_pr_args(tr->tracing_cpumask));
4386 if (len >= count) {
4387 count = -EINVAL;
4388 goto out_err;
4389 }
4390 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4391
4392 out_err:
4393 kfree(mask_str);
4394
4395 return count;
4396 }
4397
4398 static ssize_t
4399 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4400 size_t count, loff_t *ppos)
4401 {
4402 struct trace_array *tr = file_inode(filp)->i_private;
4403 cpumask_var_t tracing_cpumask_new;
4404 int err, cpu;
4405
4406 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4407 return -ENOMEM;
4408
4409 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4410 if (err)
4411 goto err_unlock;
4412
4413 local_irq_disable();
4414 arch_spin_lock(&tr->max_lock);
4415 for_each_tracing_cpu(cpu) {
4416 /*
4417 * Increase/decrease the disabled counter if we are
4418 * about to flip a bit in the cpumask:
4419 */
4420 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4421 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4422 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4423 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4424 }
4425 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4426 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4427 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4428 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4429 }
4430 }
4431 arch_spin_unlock(&tr->max_lock);
4432 local_irq_enable();
4433
4434 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4435 free_cpumask_var(tracing_cpumask_new);
4436
4437 return count;
4438
4439 err_unlock:
4440 free_cpumask_var(tracing_cpumask_new);
4441
4442 return err;
4443 }
4444
4445 static const struct file_operations tracing_cpumask_fops = {
4446 .open = tracing_open_generic_tr,
4447 .read = tracing_cpumask_read,
4448 .write = tracing_cpumask_write,
4449 .release = tracing_release_generic_tr,
4450 .llseek = generic_file_llseek,
4451 };
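
/*
 * Illustrative usage sketch for tracing_cpumask, assuming tracefs is
 * mounted at /sys/kernel/tracing: reads print the mask with "%*pb" and
 * writes go through cpumask_parse_user(), so a hex CPU mask is expected.
 * For example, to limit tracing to CPUs 0 and 1:
 *
 *   echo 3 > /sys/kernel/tracing/tracing_cpumask
 *   cat /sys/kernel/tracing/tracing_cpumask
 */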
4452
4453 static int tracing_trace_options_show(struct seq_file *m, void *v)
4454 {
4455 struct tracer_opt *trace_opts;
4456 struct trace_array *tr = m->private;
4457 u32 tracer_flags;
4458 int i;
4459
4460 mutex_lock(&trace_types_lock);
4461 tracer_flags = tr->current_trace->flags->val;
4462 trace_opts = tr->current_trace->flags->opts;
4463
4464 for (i = 0; trace_options[i]; i++) {
4465 if (tr->trace_flags & (1 << i))
4466 seq_printf(m, "%s\n", trace_options[i]);
4467 else
4468 seq_printf(m, "no%s\n", trace_options[i]);
4469 }
4470
4471 for (i = 0; trace_opts[i].name; i++) {
4472 if (tracer_flags & trace_opts[i].bit)
4473 seq_printf(m, "%s\n", trace_opts[i].name);
4474 else
4475 seq_printf(m, "no%s\n", trace_opts[i].name);
4476 }
4477 mutex_unlock(&trace_types_lock);
4478
4479 return 0;
4480 }
4481
4482 static int __set_tracer_option(struct trace_array *tr,
4483 struct tracer_flags *tracer_flags,
4484 struct tracer_opt *opts, int neg)
4485 {
4486 struct tracer *trace = tracer_flags->trace;
4487 int ret;
4488
4489 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4490 if (ret)
4491 return ret;
4492
4493 if (neg)
4494 tracer_flags->val &= ~opts->bit;
4495 else
4496 tracer_flags->val |= opts->bit;
4497 return 0;
4498 }
4499
4500 /* Try to assign a tracer specific option */
4501 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4502 {
4503 struct tracer *trace = tr->current_trace;
4504 struct tracer_flags *tracer_flags = trace->flags;
4505 struct tracer_opt *opts = NULL;
4506 int i;
4507
4508 for (i = 0; tracer_flags->opts[i].name; i++) {
4509 opts = &tracer_flags->opts[i];
4510
4511 if (strcmp(cmp, opts->name) == 0)
4512 return __set_tracer_option(tr, trace->flags, opts, neg);
4513 }
4514
4515 return -EINVAL;
4516 }
4517
4518 /* Some tracers require overwrite to stay enabled */
4519 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4520 {
4521 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4522 return -1;
4523
4524 return 0;
4525 }
4526
4527 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4528 {
4529 /* do nothing if flag is already set */
4530 if (!!(tr->trace_flags & mask) == !!enabled)
4531 return 0;
4532
4533 /* Give the tracer a chance to approve the change */
4534 if (tr->current_trace->flag_changed)
4535 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4536 return -EINVAL;
4537
4538 if (enabled)
4539 tr->trace_flags |= mask;
4540 else
4541 tr->trace_flags &= ~mask;
4542
4543 if (mask == TRACE_ITER_RECORD_CMD)
4544 trace_event_enable_cmd_record(enabled);
4545
4546 if (mask == TRACE_ITER_RECORD_TGID) {
4547 if (!tgid_map)
4548 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4549 sizeof(*tgid_map),
4550 GFP_KERNEL);
4551 if (!tgid_map) {
4552 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4553 return -ENOMEM;
4554 }
4555
4556 trace_event_enable_tgid_record(enabled);
4557 }
4558
4559 if (mask == TRACE_ITER_EVENT_FORK)
4560 trace_event_follow_fork(tr, enabled);
4561
4562 if (mask == TRACE_ITER_FUNC_FORK)
4563 ftrace_pid_follow_fork(tr, enabled);
4564
4565 if (mask == TRACE_ITER_OVERWRITE) {
4566 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4567 #ifdef CONFIG_TRACER_MAX_TRACE
4568 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4569 #endif
4570 }
4571
4572 if (mask == TRACE_ITER_PRINTK) {
4573 trace_printk_start_stop_comm(enabled);
4574 trace_printk_control(enabled);
4575 }
4576
4577 return 0;
4578 }
4579
4580 static int trace_set_options(struct trace_array *tr, char *option)
4581 {
4582 char *cmp;
4583 int neg = 0;
4584 int ret;
4585 size_t orig_len = strlen(option);
4586 int len;
4587
4588 cmp = strstrip(option);
4589
4590 len = str_has_prefix(cmp, "no");
4591 if (len)
4592 neg = 1;
4593
4594 cmp += len;
4595
4596 mutex_lock(&trace_types_lock);
4597
4598 ret = match_string(trace_options, -1, cmp);
4599 /* If no option could be set, test the specific tracer options */
4600 if (ret < 0)
4601 ret = set_tracer_option(tr, cmp, neg);
4602 else
4603 ret = set_tracer_flag(tr, 1 << ret, !neg);
4604
4605 mutex_unlock(&trace_types_lock);
4606
4607 /*
4608 * If the first trailing whitespace is replaced with '\0' by strstrip,
4609 * turn it back into a space.
4610 */
4611 if (orig_len > strlen(option))
4612 option[strlen(option)] = ' ';
4613
4614 return ret;
4615 }
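
/*
 * Illustrative sketch of how this is driven from user space: option names
 * are written to the trace_options file, with a "no" prefix to clear them,
 * for example:
 *
 *   echo sym-offset > /sys/kernel/tracing/trace_options
 *   echo noprint-parent > /sys/kernel/tracing/trace_options
 */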
4616
4617 static void __init apply_trace_boot_options(void)
4618 {
4619 char *buf = trace_boot_options_buf;
4620 char *option;
4621
4622 while (true) {
4623 option = strsep(&buf, ",");
4624
4625 if (!option)
4626 break;
4627
4628 if (*option)
4629 trace_set_options(&global_trace, option);
4630
4631 /* Put back the comma to allow this to be called again */
4632 if (buf)
4633 *(buf - 1) = ',';
4634 }
4635 }
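
/*
 * Illustrative sketch: the same comma-separated option list is accepted at
 * boot through the trace_options= kernel parameter (copied into
 * trace_boot_options_buf elsewhere in this file), for example:
 *
 *   trace_options=sym-offset,noprint-parent
 */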
4636
4637 static ssize_t
4638 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4639 size_t cnt, loff_t *ppos)
4640 {
4641 struct seq_file *m = filp->private_data;
4642 struct trace_array *tr = m->private;
4643 char buf[64];
4644 int ret;
4645
4646 if (cnt >= sizeof(buf))
4647 return -EINVAL;
4648
4649 if (copy_from_user(buf, ubuf, cnt))
4650 return -EFAULT;
4651
4652 buf[cnt] = 0;
4653
4654 ret = trace_set_options(tr, buf);
4655 if (ret < 0)
4656 return ret;
4657
4658 *ppos += cnt;
4659
4660 return cnt;
4661 }
4662
4663 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4664 {
4665 struct trace_array *tr = inode->i_private;
4666 int ret;
4667
4668 if (tracing_disabled)
4669 return -ENODEV;
4670
4671 if (trace_array_get(tr) < 0)
4672 return -ENODEV;
4673
4674 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4675 if (ret < 0)
4676 trace_array_put(tr);
4677
4678 return ret;
4679 }
4680
4681 static const struct file_operations tracing_iter_fops = {
4682 .open = tracing_trace_options_open,
4683 .read = seq_read,
4684 .llseek = seq_lseek,
4685 .release = tracing_single_release_tr,
4686 .write = tracing_trace_options_write,
4687 };
4688
4689 static const char readme_msg[] =
4690 "tracing mini-HOWTO:\n\n"
4691 "# echo 0 > tracing_on : quick way to disable tracing\n"
4692 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4693 " Important files:\n"
4694 " trace\t\t\t- The static contents of the buffer\n"
4695 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4696 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4697 " current_tracer\t- function and latency tracers\n"
4698 " available_tracers\t- list of configured tracers for current_tracer\n"
4699 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4700 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4701 " trace_clock\t\t-change the clock used to order events\n"
4702 " local: Per cpu clock but may not be synced across CPUs\n"
4703 " global: Synced across CPUs but slows tracing down.\n"
4704 " counter: Not a clock, but just an increment\n"
4705 " uptime: Jiffy counter from time of boot\n"
4706 " perf: Same clock that perf events use\n"
4707 #ifdef CONFIG_X86_64
4708 " x86-tsc: TSC cycle counter\n"
4709 #endif
4710 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4711 " delta: Delta difference against a buffer-wide timestamp\n"
4712 " absolute: Absolute (standalone) timestamp\n"
4713 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4714 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4715 " tracing_cpumask\t- Limit which CPUs to trace\n"
4716 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4717 "\t\t\t Remove sub-buffer with rmdir\n"
4718 " trace_options\t\t- Set format or modify how tracing happens\n"
4719 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4720 "\t\t\t option name\n"
4721 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4722 #ifdef CONFIG_DYNAMIC_FTRACE
4723 "\n available_filter_functions - list of functions that can be filtered on\n"
4724 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4725 "\t\t\t functions\n"
4726 "\t accepts: func_full_name or glob-matching-pattern\n"
4727 "\t modules: Can select a group via module\n"
4728 "\t Format: :mod:<module-name>\n"
4729 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4730 "\t triggers: a command to perform when function is hit\n"
4731 "\t Format: <function>:<trigger>[:count]\n"
4732 "\t trigger: traceon, traceoff\n"
4733 "\t\t enable_event:<system>:<event>\n"
4734 "\t\t disable_event:<system>:<event>\n"
4735 #ifdef CONFIG_STACKTRACE
4736 "\t\t stacktrace\n"
4737 #endif
4738 #ifdef CONFIG_TRACER_SNAPSHOT
4739 "\t\t snapshot\n"
4740 #endif
4741 "\t\t dump\n"
4742 "\t\t cpudump\n"
4743 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4744 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4745 "\t The first one will disable tracing every time do_fault is hit\n"
4746 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4747 "\t The first time do trap is hit and it disables tracing, the\n"
4748 "\t counter will decrement to 2. If tracing is already disabled,\n"
4749 "\t the counter will not decrement. It only decrements when the\n"
4750 "\t trigger did work\n"
4751 "\t To remove trigger without count:\n"
4752 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4753 "\t To remove trigger with a count:\n"
4754 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4755 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4756 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4757 "\t modules: Can select a group via module command :mod:\n"
4758 "\t Does not accept triggers\n"
4759 #endif /* CONFIG_DYNAMIC_FTRACE */
4760 #ifdef CONFIG_FUNCTION_TRACER
4761 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4762 "\t\t (function)\n"
4763 #endif
4764 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4765 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4766 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4767 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4768 #endif
4769 #ifdef CONFIG_TRACER_SNAPSHOT
4770 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4771 "\t\t\t snapshot buffer. Read the contents for more\n"
4772 "\t\t\t information\n"
4773 #endif
4774 #ifdef CONFIG_STACK_TRACER
4775 " stack_trace\t\t- Shows the max stack trace when active\n"
4776 " stack_max_size\t- Shows current max stack size that was traced\n"
4777 "\t\t\t Write into this file to reset the max size (trigger a\n"
4778 "\t\t\t new trace)\n"
4779 #ifdef CONFIG_DYNAMIC_FTRACE
4780 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4781 "\t\t\t traces\n"
4782 #endif
4783 #endif /* CONFIG_STACK_TRACER */
4784 #ifdef CONFIG_DYNAMIC_EVENTS
4785 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4786 "\t\t\t Write into this file to define/undefine new trace events.\n"
4787 #endif
4788 #ifdef CONFIG_KPROBE_EVENTS
4789 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4790 "\t\t\t Write into this file to define/undefine new trace events.\n"
4791 #endif
4792 #ifdef CONFIG_UPROBE_EVENTS
4793 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4794 "\t\t\t Write into this file to define/undefine new trace events.\n"
4795 #endif
4796 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4797 "\t accepts: event-definitions (one definition per line)\n"
4798 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4799 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4800 #ifdef CONFIG_HIST_TRIGGERS
4801 "\t s:[synthetic/]<event> <field> [<field>]\n"
4802 #endif
4803 "\t -:[<group>/]<event>\n"
4804 #ifdef CONFIG_KPROBE_EVENTS
4805 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4806 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4807 #endif
4808 #ifdef CONFIG_UPROBE_EVENTS
4809 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4810 #endif
4811 "\t args: <name>=fetcharg[:type]\n"
4812 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4813 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4814 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4815 #else
4816 "\t $stack<index>, $stack, $retval, $comm\n"
4817 #endif
4818 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4819 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4820 "\t <type>\\[<array-size>\\]\n"
4821 #ifdef CONFIG_HIST_TRIGGERS
4822 "\t field: <stype> <name>;\n"
4823 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4824 "\t [unsigned] char/int/long\n"
4825 #endif
4826 #endif
4827 " events/\t\t- Directory containing all trace event subsystems:\n"
4828 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4829 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4830 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4831 "\t\t\t events\n"
4832 " filter\t\t- If set, only events passing filter are traced\n"
4833 " events/<system>/<event>/\t- Directory containing control files for\n"
4834 "\t\t\t <event>:\n"
4835 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4836 " filter\t\t- If set, only events passing filter are traced\n"
4837 " trigger\t\t- If set, a command to perform when event is hit\n"
4838 "\t Format: <trigger>[:count][if <filter>]\n"
4839 "\t trigger: traceon, traceoff\n"
4840 "\t enable_event:<system>:<event>\n"
4841 "\t disable_event:<system>:<event>\n"
4842 #ifdef CONFIG_HIST_TRIGGERS
4843 "\t enable_hist:<system>:<event>\n"
4844 "\t disable_hist:<system>:<event>\n"
4845 #endif
4846 #ifdef CONFIG_STACKTRACE
4847 "\t\t stacktrace\n"
4848 #endif
4849 #ifdef CONFIG_TRACER_SNAPSHOT
4850 "\t\t snapshot\n"
4851 #endif
4852 #ifdef CONFIG_HIST_TRIGGERS
4853 "\t\t hist (see below)\n"
4854 #endif
4855 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4856 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4857 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4858 "\t events/block/block_unplug/trigger\n"
4859 "\t The first disables tracing every time block_unplug is hit.\n"
4860 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4861 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4862 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4863 "\t Like function triggers, the counter is only decremented if it\n"
4864 "\t enabled or disabled tracing.\n"
4865 "\t To remove a trigger without a count:\n"
4866 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4867 "\t To remove a trigger with a count:\n"
4868 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4869 "\t Filters can be ignored when removing a trigger.\n"
4870 #ifdef CONFIG_HIST_TRIGGERS
4871 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4872 "\t Format: hist:keys=<field1[,field2,...]>\n"
4873 "\t [:values=<field1[,field2,...]>]\n"
4874 "\t [:sort=<field1[,field2,...]>]\n"
4875 "\t [:size=#entries]\n"
4876 "\t [:pause][:continue][:clear]\n"
4877 "\t [:name=histname1]\n"
4878 "\t [:<handler>.<action>]\n"
4879 "\t [if <filter>]\n\n"
4880 "\t When a matching event is hit, an entry is added to a hash\n"
4881 "\t table using the key(s) and value(s) named, and the value of a\n"
4882 "\t sum called 'hitcount' is incremented. Keys and values\n"
4883 "\t correspond to fields in the event's format description. Keys\n"
4884 "\t can be any field, or the special string 'stacktrace'.\n"
4885 "\t Compound keys consisting of up to two fields can be specified\n"
4886 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4887 "\t fields. Sort keys consisting of up to two fields can be\n"
4888 "\t specified using the 'sort' keyword. The sort direction can\n"
4889 "\t be modified by appending '.descending' or '.ascending' to a\n"
4890 "\t sort field. The 'size' parameter can be used to specify more\n"
4891 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4892 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4893 "\t its histogram data will be shared with other triggers of the\n"
4894 "\t same name, and trigger hits will update this common data.\n\n"
4895 "\t Reading the 'hist' file for the event will dump the hash\n"
4896 "\t table in its entirety to stdout. If there are multiple hist\n"
4897 "\t triggers attached to an event, there will be a table for each\n"
4898 "\t trigger in the output. The table displayed for a named\n"
4899 "\t trigger will be the same as any other instance having the\n"
4900 "\t same name. The default format used to display a given field\n"
4901 "\t can be modified by appending any of the following modifiers\n"
4902 "\t to the field name, as applicable:\n\n"
4903 "\t .hex display a number as a hex value\n"
4904 "\t .sym display an address as a symbol\n"
4905 "\t .sym-offset display an address as a symbol and offset\n"
4906 "\t .execname display a common_pid as a program name\n"
4907 "\t .syscall display a syscall id as a syscall name\n"
4908 "\t .log2 display log2 value rather than raw number\n"
4909 "\t .usecs display a common_timestamp in microseconds\n\n"
4910 "\t The 'pause' parameter can be used to pause an existing hist\n"
4911 "\t trigger or to start a hist trigger but not log any events\n"
4912 "\t until told to do so. 'continue' can be used to start or\n"
4913 "\t restart a paused hist trigger.\n\n"
4914 "\t The 'clear' parameter will clear the contents of a running\n"
4915 "\t hist trigger and leave its current paused/active state\n"
4916 "\t unchanged.\n\n"
4917 "\t The enable_hist and disable_hist triggers can be used to\n"
4918 "\t have one event conditionally start and stop another event's\n"
4919 "\t already-attached hist trigger. The syntax is analagous to\n"
4920 "\t the enable_event and disable_event triggers.\n\n"
4921 "\t Hist trigger handlers and actions are executed whenever a\n"
4922 "\t a histogram entry is added or updated. They take the form:\n\n"
4923 "\t <handler>.<action>\n\n"
4924 "\t The available handlers are:\n\n"
4925 "\t onmatch(matching.event) - invoke on addition or update\n"
4926 "\t onmax(var) - invoke if var exceeds current max\n"
4927 "\t onchange(var) - invoke action if var changes\n\n"
4928 "\t The available actions are:\n\n"
4929 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4930 "\t save(field,...) - save current event fields\n"
4931 #ifdef CONFIG_TRACER_SNAPSHOT
4932 "\t snapshot() - snapshot the trace buffer\n"
4933 #endif
4934 #endif
4935 ;
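
/*
 * Condensed usage sketch of the event/trigger interface documented in the
 * readme above, reusing examples that already appear in it (assumes tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *   echo 1 > events/block/enable                          # whole subsystem
 *   echo traceoff > events/block/block_unplug/trigger     # attach trigger
 *   echo '!traceoff' > events/block/block_unplug/trigger  # remove trigger
 */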
4936
4937 static ssize_t
4938 tracing_readme_read(struct file *filp, char __user *ubuf,
4939 size_t cnt, loff_t *ppos)
4940 {
4941 return simple_read_from_buffer(ubuf, cnt, ppos,
4942 readme_msg, strlen(readme_msg));
4943 }
4944
4945 static const struct file_operations tracing_readme_fops = {
4946 .open = tracing_open_generic,
4947 .read = tracing_readme_read,
4948 .llseek = generic_file_llseek,
4949 };
4950
4951 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4952 {
4953 int *ptr = v;
4954
4955 if (*pos || m->count)
4956 ptr++;
4957
4958 (*pos)++;
4959
4960 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4961 if (trace_find_tgid(*ptr))
4962 return ptr;
4963 }
4964
4965 return NULL;
4966 }
4967
4968 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4969 {
4970 void *v;
4971 loff_t l = 0;
4972
4973 if (!tgid_map)
4974 return NULL;
4975
4976 v = &tgid_map[0];
4977 while (l <= *pos) {
4978 v = saved_tgids_next(m, v, &l);
4979 if (!v)
4980 return NULL;
4981 }
4982
4983 return v;
4984 }
4985
4986 static void saved_tgids_stop(struct seq_file *m, void *v)
4987 {
4988 }
4989
4990 static int saved_tgids_show(struct seq_file *m, void *v)
4991 {
4992 int pid = (int *)v - tgid_map;
4993
4994 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4995 return 0;
4996 }
4997
4998 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4999 .start = saved_tgids_start,
5000 .stop = saved_tgids_stop,
5001 .next = saved_tgids_next,
5002 .show = saved_tgids_show,
5003 };
5004
5005 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5006 {
5007 if (tracing_disabled)
5008 return -ENODEV;
5009
5010 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5011 }
5012
5013
5014 static const struct file_operations tracing_saved_tgids_fops = {
5015 .open = tracing_saved_tgids_open,
5016 .read = seq_read,
5017 .llseek = seq_lseek,
5018 .release = seq_release,
5019 };
5020
5021 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5022 {
5023 unsigned int *ptr = v;
5024
5025 if (*pos || m->count)
5026 ptr++;
5027
5028 (*pos)++;
5029
5030 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5031 ptr++) {
5032 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5033 continue;
5034
5035 return ptr;
5036 }
5037
5038 return NULL;
5039 }
5040
5041 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5042 {
5043 void *v;
5044 loff_t l = 0;
5045
5046 preempt_disable();
5047 arch_spin_lock(&trace_cmdline_lock);
5048
5049 v = &savedcmd->map_cmdline_to_pid[0];
5050 while (l <= *pos) {
5051 v = saved_cmdlines_next(m, v, &l);
5052 if (!v)
5053 return NULL;
5054 }
5055
5056 return v;
5057 }
5058
5059 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5060 {
5061 arch_spin_unlock(&trace_cmdline_lock);
5062 preempt_enable();
5063 }
5064
5065 static int saved_cmdlines_show(struct seq_file *m, void *v)
5066 {
5067 char buf[TASK_COMM_LEN];
5068 unsigned int *pid = v;
5069
5070 __trace_find_cmdline(*pid, buf);
5071 seq_printf(m, "%d %s\n", *pid, buf);
5072 return 0;
5073 }
5074
5075 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5076 .start = saved_cmdlines_start,
5077 .next = saved_cmdlines_next,
5078 .stop = saved_cmdlines_stop,
5079 .show = saved_cmdlines_show,
5080 };
5081
5082 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5083 {
5084 if (tracing_disabled)
5085 return -ENODEV;
5086
5087 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5088 }
5089
5090 static const struct file_operations tracing_saved_cmdlines_fops = {
5091 .open = tracing_saved_cmdlines_open,
5092 .read = seq_read,
5093 .llseek = seq_lseek,
5094 .release = seq_release,
5095 };
5096
5097 static ssize_t
5098 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5099 size_t cnt, loff_t *ppos)
5100 {
5101 char buf[64];
5102 int r;
5103
5104 arch_spin_lock(&trace_cmdline_lock);
5105 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5106 arch_spin_unlock(&trace_cmdline_lock);
5107
5108 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5109 }
5110
5111 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5112 {
5113 kfree(s->saved_cmdlines);
5114 kfree(s->map_cmdline_to_pid);
5115 kfree(s);
5116 }
5117
5118 static int tracing_resize_saved_cmdlines(unsigned int val)
5119 {
5120 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5121
5122 s = kmalloc(sizeof(*s), GFP_KERNEL);
5123 if (!s)
5124 return -ENOMEM;
5125
5126 if (allocate_cmdlines_buffer(val, s) < 0) {
5127 kfree(s);
5128 return -ENOMEM;
5129 }
5130
5131 arch_spin_lock(&trace_cmdline_lock);
5132 savedcmd_temp = savedcmd;
5133 savedcmd = s;
5134 arch_spin_unlock(&trace_cmdline_lock);
5135 free_saved_cmdlines_buffer(savedcmd_temp);
5136
5137 return 0;
5138 }
5139
5140 static ssize_t
5141 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5142 size_t cnt, loff_t *ppos)
5143 {
5144 unsigned long val;
5145 int ret;
5146
5147 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5148 if (ret)
5149 return ret;
5150
5151 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5152 if (!val || val > PID_MAX_DEFAULT)
5153 return -EINVAL;
5154
5155 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5156 if (ret < 0)
5157 return ret;
5158
5159 *ppos += cnt;
5160
5161 return cnt;
5162 }
5163
5164 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5165 .open = tracing_open_generic,
5166 .read = tracing_saved_cmdlines_size_read,
5167 .write = tracing_saved_cmdlines_size_write,
5168 };
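
/*
 * Illustrative usage sketch: the number of saved comm entries can be read
 * and resized through tracefs; writes must be between 1 and PID_MAX_DEFAULT
 * as checked above:
 *
 *   cat /sys/kernel/tracing/saved_cmdlines_size
 *   echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *   cat /sys/kernel/tracing/saved_cmdlines
 */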
5169
5170 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5171 static union trace_eval_map_item *
5172 update_eval_map(union trace_eval_map_item *ptr)
5173 {
5174 if (!ptr->map.eval_string) {
5175 if (ptr->tail.next) {
5176 ptr = ptr->tail.next;
5177 /* Set ptr to the next real item (skip head) */
5178 ptr++;
5179 } else
5180 return NULL;
5181 }
5182 return ptr;
5183 }
5184
5185 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5186 {
5187 union trace_eval_map_item *ptr = v;
5188
5189 /*
5190 * Paranoid! If ptr points to end, we don't want to increment past it.
5191 * This really should never happen.
5192 */
5193 ptr = update_eval_map(ptr);
5194 if (WARN_ON_ONCE(!ptr))
5195 return NULL;
5196
5197 ptr++;
5198
5199 (*pos)++;
5200
5201 ptr = update_eval_map(ptr);
5202
5203 return ptr;
5204 }
5205
5206 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5207 {
5208 union trace_eval_map_item *v;
5209 loff_t l = 0;
5210
5211 mutex_lock(&trace_eval_mutex);
5212
5213 v = trace_eval_maps;
5214 if (v)
5215 v++;
5216
5217 while (v && l < *pos) {
5218 v = eval_map_next(m, v, &l);
5219 }
5220
5221 return v;
5222 }
5223
5224 static void eval_map_stop(struct seq_file *m, void *v)
5225 {
5226 mutex_unlock(&trace_eval_mutex);
5227 }
5228
5229 static int eval_map_show(struct seq_file *m, void *v)
5230 {
5231 union trace_eval_map_item *ptr = v;
5232
5233 seq_printf(m, "%s %ld (%s)\n",
5234 ptr->map.eval_string, ptr->map.eval_value,
5235 ptr->map.system);
5236
5237 return 0;
5238 }
5239
5240 static const struct seq_operations tracing_eval_map_seq_ops = {
5241 .start = eval_map_start,
5242 .next = eval_map_next,
5243 .stop = eval_map_stop,
5244 .show = eval_map_show,
5245 };
5246
5247 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5248 {
5249 if (tracing_disabled)
5250 return -ENODEV;
5251
5252 return seq_open(filp, &tracing_eval_map_seq_ops);
5253 }
5254
5255 static const struct file_operations tracing_eval_map_fops = {
5256 .open = tracing_eval_map_open,
5257 .read = seq_read,
5258 .llseek = seq_lseek,
5259 .release = seq_release,
5260 };
5261
5262 static inline union trace_eval_map_item *
5263 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5264 {
5265 /* Return tail of array given the head */
5266 return ptr + ptr->head.length + 1;
5267 }
5268
5269 static void
5270 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5271 int len)
5272 {
5273 struct trace_eval_map **stop;
5274 struct trace_eval_map **map;
5275 union trace_eval_map_item *map_array;
5276 union trace_eval_map_item *ptr;
5277
5278 stop = start + len;
5279
5280 /*
5281 * The trace_eval_maps contains the map plus a head and tail item,
5282 * where the head holds the module and length of array, and the
5283 * tail holds a pointer to the next list.
5284 */
5285 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5286 if (!map_array) {
5287 pr_warn("Unable to allocate trace eval mapping\n");
5288 return;
5289 }
5290
5291 mutex_lock(&trace_eval_mutex);
5292
5293 if (!trace_eval_maps)
5294 trace_eval_maps = map_array;
5295 else {
5296 ptr = trace_eval_maps;
5297 for (;;) {
5298 ptr = trace_eval_jmp_to_tail(ptr);
5299 if (!ptr->tail.next)
5300 break;
5301 ptr = ptr->tail.next;
5302
5303 }
5304 ptr->tail.next = map_array;
5305 }
5306 map_array->head.mod = mod;
5307 map_array->head.length = len;
5308 map_array++;
5309
5310 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5311 map_array->map = **map;
5312 map_array++;
5313 }
5314 memset(map_array, 0, sizeof(*map_array));
5315
5316 mutex_unlock(&trace_eval_mutex);
5317 }
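
/*
 * Rough sketch of one map_array chunk built above (shown for len == 3),
 * following the head/maps/tail convention described in the comment:
 *
 *   [0] head { mod, length = 3 }
 *   [1] map  { eval_string, eval_value, system }
 *   [2] map
 *   [3] map
 *   [4] tail { next = NULL }  (zeroed; later chunks are chained via next)
 */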
5318
5319 static void trace_create_eval_file(struct dentry *d_tracer)
5320 {
5321 trace_create_file("eval_map", 0444, d_tracer,
5322 NULL, &tracing_eval_map_fops);
5323 }
5324
5325 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5326 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5327 static inline void trace_insert_eval_map_file(struct module *mod,
5328 struct trace_eval_map **start, int len) { }
5329 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5330
5331 static void trace_insert_eval_map(struct module *mod,
5332 struct trace_eval_map **start, int len)
5333 {
5334 struct trace_eval_map **map;
5335
5336 if (len <= 0)
5337 return;
5338
5339 map = start;
5340
5341 trace_event_eval_update(map, len);
5342
5343 trace_insert_eval_map_file(mod, start, len);
5344 }
5345
5346 static ssize_t
5347 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5348 size_t cnt, loff_t *ppos)
5349 {
5350 struct trace_array *tr = filp->private_data;
5351 char buf[MAX_TRACER_SIZE+2];
5352 int r;
5353
5354 mutex_lock(&trace_types_lock);
5355 r = sprintf(buf, "%s\n", tr->current_trace->name);
5356 mutex_unlock(&trace_types_lock);
5357
5358 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5359 }
5360
5361 int tracer_init(struct tracer *t, struct trace_array *tr)
5362 {
5363 tracing_reset_online_cpus(&tr->trace_buffer);
5364 return t->init(tr);
5365 }
5366
5367 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5368 {
5369 int cpu;
5370
5371 for_each_tracing_cpu(cpu)
5372 per_cpu_ptr(buf->data, cpu)->entries = val;
5373 }
5374
5375 #ifdef CONFIG_TRACER_MAX_TRACE
5376 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5377 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5378 struct trace_buffer *size_buf, int cpu_id)
5379 {
5380 int cpu, ret = 0;
5381
5382 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5383 for_each_tracing_cpu(cpu) {
5384 ret = ring_buffer_resize(trace_buf->buffer,
5385 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5386 if (ret < 0)
5387 break;
5388 per_cpu_ptr(trace_buf->data, cpu)->entries =
5389 per_cpu_ptr(size_buf->data, cpu)->entries;
5390 }
5391 } else {
5392 ret = ring_buffer_resize(trace_buf->buffer,
5393 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5394 if (ret == 0)
5395 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5396 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5397 }
5398
5399 return ret;
5400 }
5401 #endif /* CONFIG_TRACER_MAX_TRACE */
5402
5403 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5404 unsigned long size, int cpu)
5405 {
5406 int ret;
5407
5408 /*
5409 * If kernel or user changes the size of the ring buffer
5410 * we use the size that was given, and we can forget about
5411 * expanding it later.
5412 */
5413 ring_buffer_expanded = true;
5414
5415 /* May be called before buffers are initialized */
5416 if (!tr->trace_buffer.buffer)
5417 return 0;
5418
5419 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5420 if (ret < 0)
5421 return ret;
5422
5423 #ifdef CONFIG_TRACER_MAX_TRACE
5424 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5425 !tr->current_trace->use_max_tr)
5426 goto out;
5427
5428 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5429 if (ret < 0) {
5430 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5431 &tr->trace_buffer, cpu);
5432 if (r < 0) {
5433 /*
5434 * AARGH! We are left with different
5435 * size max buffer!!!!
5436 * The max buffer is our "snapshot" buffer.
5437 * When a tracer needs a snapshot (one of the
5438 * latency tracers), it swaps the max buffer
5439 			 * with the saved snapshot. We succeeded in updating
5440 			 * the size of the main buffer, but failed to update
5441 			 * the size of the max buffer. Then, when we tried to
5442 			 * reset the main buffer to its original size, that
5443 			 * failed too. This is very unlikely to
5444 * happen, but if it does, warn and kill all
5445 * tracing.
5446 */
5447 WARN_ON(1);
5448 tracing_disabled = 1;
5449 }
5450 return ret;
5451 }
5452
5453 if (cpu == RING_BUFFER_ALL_CPUS)
5454 set_buffer_entries(&tr->max_buffer, size);
5455 else
5456 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5457
5458 out:
5459 #endif /* CONFIG_TRACER_MAX_TRACE */
5460
5461 if (cpu == RING_BUFFER_ALL_CPUS)
5462 set_buffer_entries(&tr->trace_buffer, size);
5463 else
5464 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5465
5466 return ret;
5467 }
5468
5469 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5470 unsigned long size, int cpu_id)
5471 {
5472 int ret = size;
5473
5474 mutex_lock(&trace_types_lock);
5475
5476 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5477 		/* make sure this cpu is enabled in the mask */
5478 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5479 ret = -EINVAL;
5480 goto out;
5481 }
5482 }
5483
5484 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5485 if (ret < 0)
5486 ret = -ENOMEM;
5487
5488 out:
5489 mutex_unlock(&trace_types_lock);
5490
5491 return ret;
5492 }
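
/*
 * Illustrative sketch: this resize path is what ultimately services writes
 * to the buffer_size_kb files mentioned in the readme (the file handlers
 * appear later in this file), e.g.:
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb              # all CPUs
 *   echo 4096 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb # one CPU
 */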
5493
5494
5495 /**
5496 * tracing_update_buffers - used by tracing facility to expand ring buffers
5497 *
5498  * To save memory when tracing is never used on a system that has it
5499  * configured in, the ring buffers are set to a minimum size. Once
5500  * a user starts to use the tracing facility, they need to grow
5501  * to their default size.
5502 *
5503 * This function is to be called when a tracer is about to be used.
5504 */
5505 int tracing_update_buffers(void)
5506 {
5507 int ret = 0;
5508
5509 mutex_lock(&trace_types_lock);
5510 if (!ring_buffer_expanded)
5511 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5512 RING_BUFFER_ALL_CPUS);
5513 mutex_unlock(&trace_types_lock);
5514
5515 return ret;
5516 }
5517
5518 struct trace_option_dentry;
5519
5520 static void
5521 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5522
5523 /*
5524 * Used to clear out the tracer before deletion of an instance.
5525 * Must have trace_types_lock held.
5526 */
5527 static void tracing_set_nop(struct trace_array *tr)
5528 {
5529 if (tr->current_trace == &nop_trace)
5530 return;
5531
5532 tr->current_trace->enabled--;
5533
5534 if (tr->current_trace->reset)
5535 tr->current_trace->reset(tr);
5536
5537 tr->current_trace = &nop_trace;
5538 }
5539
5540 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5541 {
5542 /* Only enable if the directory has been created already. */
5543 if (!tr->dir)
5544 return;
5545
5546 create_trace_option_files(tr, t);
5547 }
5548
5549 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5550 {
5551 struct tracer *t;
5552 #ifdef CONFIG_TRACER_MAX_TRACE
5553 bool had_max_tr;
5554 #endif
5555 int ret = 0;
5556
5557 mutex_lock(&trace_types_lock);
5558
5559 if (!ring_buffer_expanded) {
5560 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5561 RING_BUFFER_ALL_CPUS);
5562 if (ret < 0)
5563 goto out;
5564 ret = 0;
5565 }
5566
5567 for (t = trace_types; t; t = t->next) {
5568 if (strcmp(t->name, buf) == 0)
5569 break;
5570 }
5571 if (!t) {
5572 ret = -EINVAL;
5573 goto out;
5574 }
5575 if (t == tr->current_trace)
5576 goto out;
5577
5578 #ifdef CONFIG_TRACER_SNAPSHOT
5579 if (t->use_max_tr) {
5580 arch_spin_lock(&tr->max_lock);
5581 if (tr->cond_snapshot)
5582 ret = -EBUSY;
5583 arch_spin_unlock(&tr->max_lock);
5584 if (ret)
5585 goto out;
5586 }
5587 #endif
5588 /* Some tracers won't work on kernel command line */
5589 if (system_state < SYSTEM_RUNNING && t->noboot) {
5590 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5591 t->name);
5592 goto out;
5593 }
5594
5595 /* Some tracers are only allowed for the top level buffer */
5596 if (!trace_ok_for_array(t, tr)) {
5597 ret = -EINVAL;
5598 goto out;
5599 }
5600
5601 /* If trace pipe files are being read, we can't change the tracer */
5602 if (tr->current_trace->ref) {
5603 ret = -EBUSY;
5604 goto out;
5605 }
5606
5607 trace_branch_disable();
5608
5609 tr->current_trace->enabled--;
5610
5611 if (tr->current_trace->reset)
5612 tr->current_trace->reset(tr);
5613
5614 /* Current trace needs to be nop_trace before synchronize_rcu */
5615 tr->current_trace = &nop_trace;
5616
5617 #ifdef CONFIG_TRACER_MAX_TRACE
5618 had_max_tr = tr->allocated_snapshot;
5619
5620 if (had_max_tr && !t->use_max_tr) {
5621 /*
5622 * We need to make sure that the update_max_tr sees that
5623 * current_trace changed to nop_trace to keep it from
5624 * swapping the buffers after we resize it.
5625 		 * update_max_tr() is called with interrupts disabled,
5626 		 * so a synchronize_rcu() is sufficient.
5627 */
5628 synchronize_rcu();
5629 free_snapshot(tr);
5630 }
5631 #endif
5632
5633 #ifdef CONFIG_TRACER_MAX_TRACE
5634 if (t->use_max_tr && !had_max_tr) {
5635 ret = tracing_alloc_snapshot_instance(tr);
5636 if (ret < 0)
5637 goto out;
5638 }
5639 #endif
5640
5641 if (t->init) {
5642 ret = tracer_init(t, tr);
5643 if (ret)
5644 goto out;
5645 }
5646
5647 tr->current_trace = t;
5648 tr->current_trace->enabled++;
5649 trace_branch_enable(tr);
5650 out:
5651 mutex_unlock(&trace_types_lock);
5652
5653 return ret;
5654 }
5655
5656 static ssize_t
5657 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5658 size_t cnt, loff_t *ppos)
5659 {
5660 struct trace_array *tr = filp->private_data;
5661 char buf[MAX_TRACER_SIZE+1];
5662 int i;
5663 size_t ret;
5664 int err;
5665
5666 ret = cnt;
5667
5668 if (cnt > MAX_TRACER_SIZE)
5669 cnt = MAX_TRACER_SIZE;
5670
5671 if (copy_from_user(buf, ubuf, cnt))
5672 return -EFAULT;
5673
5674 buf[cnt] = 0;
5675
5676 /* strip ending whitespace. */
5677 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5678 buf[i] = 0;
5679
5680 err = tracing_set_tracer(tr, buf);
5681 if (err)
5682 return err;
5683
5684 *ppos += ret;
5685
5686 return ret;
5687 }
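
/*
 * Illustrative usage sketch: switching tracers from user space goes through
 * this write handler via the current_tracer file (assuming the requested
 * tracer, e.g. "function", is built into the kernel):
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo nop > /sys/kernel/tracing/current_tracer     # back to no tracer
 */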
5688
5689 static ssize_t
5690 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5691 size_t cnt, loff_t *ppos)
5692 {
5693 char buf[64];
5694 int r;
5695
5696 r = snprintf(buf, sizeof(buf), "%ld\n",
5697 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5698 if (r > sizeof(buf))
5699 r = sizeof(buf);
5700 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5701 }
5702
5703 static ssize_t
5704 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5705 size_t cnt, loff_t *ppos)
5706 {
5707 unsigned long val;
5708 int ret;
5709
5710 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5711 if (ret)
5712 return ret;
5713
5714 *ptr = val * 1000;
5715
5716 return cnt;
5717 }
5718
5719 static ssize_t
5720 tracing_thresh_read(struct file *filp, char __user *ubuf,
5721 size_t cnt, loff_t *ppos)
5722 {
5723 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5724 }
5725
5726 static ssize_t
5727 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5728 size_t cnt, loff_t *ppos)
5729 {
5730 struct trace_array *tr = filp->private_data;
5731 int ret;
5732
5733 mutex_lock(&trace_types_lock);
5734 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5735 if (ret < 0)
5736 goto out;
5737
5738 if (tr->current_trace->update_thresh) {
5739 ret = tr->current_trace->update_thresh(tr);
5740 if (ret < 0)
5741 goto out;
5742 }
5743
5744 ret = cnt;
5745 out:
5746 mutex_unlock(&trace_types_lock);
5747
5748 return ret;
5749 }
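
/*
 * Note on units (sketch): the tracing_nsecs helpers above store nanoseconds
 * internally, but the files speak microseconds: writes multiply by 1000 and
 * reads divide via nsecs_to_usecs(). So, illustratively:
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh   # 100 usec threshold
 *   cat /sys/kernel/tracing/tracing_thresh          # prints 100
 */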
5750
5751 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5752
5753 static ssize_t
5754 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5755 size_t cnt, loff_t *ppos)
5756 {
5757 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5758 }
5759
5760 static ssize_t
5761 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5762 size_t cnt, loff_t *ppos)
5763 {
5764 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5765 }
5766
5767 #endif
5768
5769 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5770 {
5771 struct trace_array *tr = inode->i_private;
5772 struct trace_iterator *iter;
5773 int ret = 0;
5774
5775 if (tracing_disabled)
5776 return -ENODEV;
5777
5778 if (trace_array_get(tr) < 0)
5779 return -ENODEV;
5780
5781 mutex_lock(&trace_types_lock);
5782
5783 /* create a buffer to store the information to pass to userspace */
5784 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5785 if (!iter) {
5786 ret = -ENOMEM;
5787 __trace_array_put(tr);
5788 goto out;
5789 }
5790
5791 trace_seq_init(&iter->seq);
5792 iter->trace = tr->current_trace;
5793
5794 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5795 ret = -ENOMEM;
5796 goto fail;
5797 }
5798
5799 /* trace pipe does not show start of buffer */
5800 cpumask_setall(iter->started);
5801
5802 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5803 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5804
5805 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5806 if (trace_clocks[tr->clock_id].in_ns)
5807 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5808
5809 iter->tr = tr;
5810 iter->trace_buffer = &tr->trace_buffer;
5811 iter->cpu_file = tracing_get_cpu(inode);
5812 mutex_init(&iter->mutex);
5813 filp->private_data = iter;
5814
5815 if (iter->trace->pipe_open)
5816 iter->trace->pipe_open(iter);
5817
5818 nonseekable_open(inode, filp);
5819
5820 tr->current_trace->ref++;
5821 out:
5822 mutex_unlock(&trace_types_lock);
5823 return ret;
5824
5825 fail:
5826 kfree(iter);
5827 __trace_array_put(tr);
5828 mutex_unlock(&trace_types_lock);
5829 return ret;
5830 }
5831
5832 static int tracing_release_pipe(struct inode *inode, struct file *file)
5833 {
5834 struct trace_iterator *iter = file->private_data;
5835 struct trace_array *tr = inode->i_private;
5836
5837 mutex_lock(&trace_types_lock);
5838
5839 tr->current_trace->ref--;
5840
5841 if (iter->trace->pipe_close)
5842 iter->trace->pipe_close(iter);
5843
5844 mutex_unlock(&trace_types_lock);
5845
5846 free_cpumask_var(iter->started);
5847 mutex_destroy(&iter->mutex);
5848 kfree(iter);
5849
5850 trace_array_put(tr);
5851
5852 return 0;
5853 }
5854
5855 static __poll_t
5856 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5857 {
5858 struct trace_array *tr = iter->tr;
5859
5860 /* Iterators are static, they should be filled or empty */
5861 if (trace_buffer_iter(iter, iter->cpu_file))
5862 return EPOLLIN | EPOLLRDNORM;
5863
5864 if (tr->trace_flags & TRACE_ITER_BLOCK)
5865 /*
5866 * Always select as readable when in blocking mode
5867 */
5868 return EPOLLIN | EPOLLRDNORM;
5869 else
5870 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5871 filp, poll_table);
5872 }
5873
5874 static __poll_t
5875 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5876 {
5877 struct trace_iterator *iter = filp->private_data;
5878
5879 return trace_poll(iter, filp, poll_table);
5880 }
5881
5882 /* Must be called with iter->mutex held. */
5883 static int tracing_wait_pipe(struct file *filp)
5884 {
5885 struct trace_iterator *iter = filp->private_data;
5886 int ret;
5887
5888 while (trace_empty(iter)) {
5889
5890 if ((filp->f_flags & O_NONBLOCK)) {
5891 return -EAGAIN;
5892 }
5893
5894 /*
5895 		 * We block until there is something to read, or tracing is disabled.
5896 * We still block if tracing is disabled, but we have never
5897 * read anything. This allows a user to cat this file, and
5898 * then enable tracing. But after we have read something,
5899 * we give an EOF when tracing is again disabled.
5900 *
5901 * iter->pos will be 0 if we haven't read anything.
5902 */
5903 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5904 break;
5905
5906 mutex_unlock(&iter->mutex);
5907
5908 ret = wait_on_pipe(iter, 0);
5909
5910 mutex_lock(&iter->mutex);
5911
5912 if (ret)
5913 return ret;
5914 }
5915
5916 return 1;
5917 }
5918
5919 /*
5920 * Consumer reader.
5921 */
5922 static ssize_t
5923 tracing_read_pipe(struct file *filp, char __user *ubuf,
5924 size_t cnt, loff_t *ppos)
5925 {
5926 struct trace_iterator *iter = filp->private_data;
5927 ssize_t sret;
5928
5929 /*
5930 * Avoid more than one consumer on a single file descriptor
5931 	 * This is just a matter of trace coherency; the ring buffer itself
5932 * is protected.
5933 */
5934 mutex_lock(&iter->mutex);
5935
5936 /* return any leftover data */
5937 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5938 if (sret != -EBUSY)
5939 goto out;
5940
5941 trace_seq_init(&iter->seq);
5942
5943 if (iter->trace->read) {
5944 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5945 if (sret)
5946 goto out;
5947 }
5948
5949 waitagain:
5950 sret = tracing_wait_pipe(filp);
5951 if (sret <= 0)
5952 goto out;
5953
5954 /* stop when tracing is finished */
5955 if (trace_empty(iter)) {
5956 sret = 0;
5957 goto out;
5958 }
5959
5960 if (cnt >= PAGE_SIZE)
5961 cnt = PAGE_SIZE - 1;
5962
5963 /* reset all but tr, trace, and overruns */
5964 memset(&iter->seq, 0,
5965 sizeof(struct trace_iterator) -
5966 offsetof(struct trace_iterator, seq));
5967 cpumask_clear(iter->started);
5968 iter->pos = -1;
5969
5970 trace_event_read_lock();
5971 trace_access_lock(iter->cpu_file);
5972 while (trace_find_next_entry_inc(iter) != NULL) {
5973 enum print_line_t ret;
5974 int save_len = iter->seq.seq.len;
5975
5976 ret = print_trace_line(iter);
5977 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5978 /* don't print partial lines */
5979 iter->seq.seq.len = save_len;
5980 break;
5981 }
5982 if (ret != TRACE_TYPE_NO_CONSUME)
5983 trace_consume(iter);
5984
5985 if (trace_seq_used(&iter->seq) >= cnt)
5986 break;
5987
5988 /*
5989 * The full flag being set means we reached the trace_seq buffer
5990 * size and should have left via the partial-line condition above.
5991 * If we get here, one of the trace_seq_* functions was misused.
5992 */
5993 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5994 iter->ent->type);
5995 }
5996 trace_access_unlock(iter->cpu_file);
5997 trace_event_read_unlock();
5998
5999 /* Now copy what we have to the user */
6000 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6001 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6002 trace_seq_init(&iter->seq);
6003
6004 /*
6005 * If there was nothing to send to the user, despite having consumed
6006 * trace entries, go back and wait for more entries.
6007 */
6008 if (sret == -EBUSY)
6009 goto waitagain;
6010
6011 out:
6012 mutex_unlock(&iter->mutex);
6013
6014 return sret;
6015 }
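/*
 * Illustrative user-space sketch (not part of this file): a blocking
 * consumer of trace_pipe built on the read path above. The tracefs
 * mount point is an assumption; headers and error handling are trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 *
 * Each successful read consumes the events it returns.
 */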
6016
6017 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6018 unsigned int idx)
6019 {
6020 __free_page(spd->pages[idx]);
6021 }
6022
6023 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6024 .can_merge = 0,
6025 .confirm = generic_pipe_buf_confirm,
6026 .release = generic_pipe_buf_release,
6027 .steal = generic_pipe_buf_steal,
6028 .get = generic_pipe_buf_get,
6029 };
6030
6031 static size_t
6032 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6033 {
6034 size_t count;
6035 int save_len;
6036 int ret;
6037
6038 /* Seq buffer is page-sized, exactly what we need. */
6039 for (;;) {
6040 save_len = iter->seq.seq.len;
6041 ret = print_trace_line(iter);
6042
6043 if (trace_seq_has_overflowed(&iter->seq)) {
6044 iter->seq.seq.len = save_len;
6045 break;
6046 }
6047
6048 /*
6049 * This should not be hit, because a partial line should only
6050 * be returned if iter->seq overflowed, which is handled above.
6051 * But check it anyway to be safe.
6052 */
6053 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6054 iter->seq.seq.len = save_len;
6055 break;
6056 }
6057
6058 count = trace_seq_used(&iter->seq) - save_len;
6059 if (rem < count) {
6060 rem = 0;
6061 iter->seq.seq.len = save_len;
6062 break;
6063 }
6064
6065 if (ret != TRACE_TYPE_NO_CONSUME)
6066 trace_consume(iter);
6067 rem -= count;
6068 if (!trace_find_next_entry_inc(iter)) {
6069 rem = 0;
6070 iter->ent = NULL;
6071 break;
6072 }
6073 }
6074
6075 return rem;
6076 }
6077
6078 static ssize_t tracing_splice_read_pipe(struct file *filp,
6079 loff_t *ppos,
6080 struct pipe_inode_info *pipe,
6081 size_t len,
6082 unsigned int flags)
6083 {
6084 struct page *pages_def[PIPE_DEF_BUFFERS];
6085 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6086 struct trace_iterator *iter = filp->private_data;
6087 struct splice_pipe_desc spd = {
6088 .pages = pages_def,
6089 .partial = partial_def,
6090 .nr_pages = 0, /* This gets updated below. */
6091 .nr_pages_max = PIPE_DEF_BUFFERS,
6092 .ops = &tracing_pipe_buf_ops,
6093 .spd_release = tracing_spd_release_pipe,
6094 };
6095 ssize_t ret;
6096 size_t rem;
6097 unsigned int i;
6098
6099 if (splice_grow_spd(pipe, &spd))
6100 return -ENOMEM;
6101
6102 mutex_lock(&iter->mutex);
6103
6104 if (iter->trace->splice_read) {
6105 ret = iter->trace->splice_read(iter, filp,
6106 ppos, pipe, len, flags);
6107 if (ret)
6108 goto out_err;
6109 }
6110
6111 ret = tracing_wait_pipe(filp);
6112 if (ret <= 0)
6113 goto out_err;
6114
6115 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6116 ret = -EFAULT;
6117 goto out_err;
6118 }
6119
6120 trace_event_read_lock();
6121 trace_access_lock(iter->cpu_file);
6122
6123 /* Fill as many pages as possible. */
6124 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6125 spd.pages[i] = alloc_page(GFP_KERNEL);
6126 if (!spd.pages[i])
6127 break;
6128
6129 rem = tracing_fill_pipe_page(rem, iter);
6130
6131 /* Copy the data into the page, so we can start over. */
6132 ret = trace_seq_to_buffer(&iter->seq,
6133 page_address(spd.pages[i]),
6134 trace_seq_used(&iter->seq));
6135 if (ret < 0) {
6136 __free_page(spd.pages[i]);
6137 break;
6138 }
6139 spd.partial[i].offset = 0;
6140 spd.partial[i].len = trace_seq_used(&iter->seq);
6141
6142 trace_seq_init(&iter->seq);
6143 }
6144
6145 trace_access_unlock(iter->cpu_file);
6146 trace_event_read_unlock();
6147 mutex_unlock(&iter->mutex);
6148
6149 spd.nr_pages = i;
6150
6151 if (i)
6152 ret = splice_to_pipe(pipe, &spd);
6153 else
6154 ret = 0;
6155 out:
6156 splice_shrink_spd(&spd);
6157 return ret;
6158
6159 out_err:
6160 mutex_unlock(&iter->mutex);
6161 goto out;
6162 }
6163
6164 static ssize_t
6165 tracing_entries_read(struct file *filp, char __user *ubuf,
6166 size_t cnt, loff_t *ppos)
6167 {
6168 struct inode *inode = file_inode(filp);
6169 struct trace_array *tr = inode->i_private;
6170 int cpu = tracing_get_cpu(inode);
6171 char buf[64];
6172 int r = 0;
6173 ssize_t ret;
6174
6175 mutex_lock(&trace_types_lock);
6176
6177 if (cpu == RING_BUFFER_ALL_CPUS) {
6178 int cpu, buf_size_same;
6179 unsigned long size;
6180
6181 size = 0;
6182 buf_size_same = 1;
6183 /* check if all per-CPU buffer sizes are the same */
6184 for_each_tracing_cpu(cpu) {
6185 /* fill in the size from the first enabled cpu */
6186 if (size == 0)
6187 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6188 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6189 buf_size_same = 0;
6190 break;
6191 }
6192 }
6193
6194 if (buf_size_same) {
6195 if (!ring_buffer_expanded)
6196 r = sprintf(buf, "%lu (expanded: %lu)\n",
6197 size >> 10,
6198 trace_buf_size >> 10);
6199 else
6200 r = sprintf(buf, "%lu\n", size >> 10);
6201 } else
6202 r = sprintf(buf, "X\n");
6203 } else
6204 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6205
6206 mutex_unlock(&trace_types_lock);
6207
6208 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6209 return ret;
6210 }
6211
6212 static ssize_t
6213 tracing_entries_write(struct file *filp, const char __user *ubuf,
6214 size_t cnt, loff_t *ppos)
6215 {
6216 struct inode *inode = file_inode(filp);
6217 struct trace_array *tr = inode->i_private;
6218 unsigned long val;
6219 int ret;
6220
6221 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6222 if (ret)
6223 return ret;
6224
6225 /* must have at least 1 entry */
6226 if (!val)
6227 return -EINVAL;
6228
6229 /* value is in KB */
6230 val <<= 10;
6231 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6232 if (ret < 0)
6233 return ret;
6234
6235 *ppos += cnt;
6236
6237 return cnt;
6238 }
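/*
 * Illustrative user-space sketch (not part of this file): resizing the
 * ring buffer through buffer_size_kb, which is handled above. The value
 * is in KiB; writing a per_cpu/cpuN/buffer_size_kb file resizes only
 * that CPU's buffer. The tracefs path is an assumption; error handling
 * is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *	write(fd, "4096", 4);
 *	close(fd);
 *
 * The write above asks for 4 MiB per CPU.
 */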
6239
6240 static ssize_t
6241 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6242 size_t cnt, loff_t *ppos)
6243 {
6244 struct trace_array *tr = filp->private_data;
6245 char buf[64];
6246 int r, cpu;
6247 unsigned long size = 0, expanded_size = 0;
6248
6249 mutex_lock(&trace_types_lock);
6250 for_each_tracing_cpu(cpu) {
6251 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6252 if (!ring_buffer_expanded)
6253 expanded_size += trace_buf_size >> 10;
6254 }
6255 if (ring_buffer_expanded)
6256 r = sprintf(buf, "%lu\n", size);
6257 else
6258 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6259 mutex_unlock(&trace_types_lock);
6260
6261 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6262 }
6263
6264 static ssize_t
6265 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6266 size_t cnt, loff_t *ppos)
6267 {
6268 /*
6269 * There is no need to read what the user has written; this function
6270 * only exists so that writing (e.g. with "echo") does not return an error.
6271 */
6272
6273 *ppos += cnt;
6274
6275 return cnt;
6276 }
6277
6278 static int
6279 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6280 {
6281 struct trace_array *tr = inode->i_private;
6282
6283 /* disable tracing? */
6284 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6285 tracer_tracing_off(tr);
6286 /* resize the ring buffer to 0 */
6287 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6288
6289 trace_array_put(tr);
6290
6291 return 0;
6292 }
6293
6294 static ssize_t
6295 tracing_mark_write(struct file *filp, const char __user *ubuf,
6296 size_t cnt, loff_t *fpos)
6297 {
6298 struct trace_array *tr = filp->private_data;
6299 struct ring_buffer_event *event;
6300 enum event_trigger_type tt = ETT_NONE;
6301 struct ring_buffer *buffer;
6302 struct print_entry *entry;
6303 unsigned long irq_flags;
6304 const char faulted[] = "<faulted>";
6305 ssize_t written;
6306 int size;
6307 int len;
6308
6309 /* Used in tracing_mark_raw_write() as well */
6310 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6311
6312 if (tracing_disabled)
6313 return -EINVAL;
6314
6315 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6316 return -EINVAL;
6317
6318 if (cnt > TRACE_BUF_SIZE)
6319 cnt = TRACE_BUF_SIZE;
6320
6321 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6322
6323 local_save_flags(irq_flags);
6324 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6325
6326 /* If the write is shorter than "<faulted>", make sure we can still store that string */
6327 if (cnt < FAULTED_SIZE)
6328 size += FAULTED_SIZE - cnt;
6329
6330 buffer = tr->trace_buffer.buffer;
6331 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6332 irq_flags, preempt_count());
6333 if (unlikely(!event))
6334 /* Ring buffer disabled, return as if not open for write */
6335 return -EBADF;
6336
6337 entry = ring_buffer_event_data(event);
6338 entry->ip = _THIS_IP_;
6339
6340 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6341 if (len) {
6342 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6343 cnt = FAULTED_SIZE;
6344 written = -EFAULT;
6345 } else
6346 written = cnt;
6347 len = cnt;
6348
6349 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6350 /* do not add \n before testing triggers, but add \0 */
6351 entry->buf[cnt] = '\0';
6352 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6353 }
6354
6355 if (entry->buf[cnt - 1] != '\n') {
6356 entry->buf[cnt] = '\n';
6357 entry->buf[cnt + 1] = '\0';
6358 } else
6359 entry->buf[cnt] = '\0';
6360
6361 __buffer_unlock_commit(buffer, event);
6362
6363 if (tt)
6364 event_triggers_post_call(tr->trace_marker_file, tt);
6365
6366 if (written > 0)
6367 *fpos += written;
6368
6369 return written;
6370 }
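/*
 * Illustrative user-space sketch (not part of this file): writing a
 * marker through the handler above. A trailing newline and '\0' are
 * added by the kernel if missing. The path is an assumption; error
 * handling is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "hello from user space", 21);
 *	close(fd);
 */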
6371
6372 /* Limit it for now to 3K (including tag) */
6373 #define RAW_DATA_MAX_SIZE (1024*3)
6374
6375 static ssize_t
6376 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6377 size_t cnt, loff_t *fpos)
6378 {
6379 struct trace_array *tr = filp->private_data;
6380 struct ring_buffer_event *event;
6381 struct ring_buffer *buffer;
6382 struct raw_data_entry *entry;
6383 const char faulted[] = "<faulted>";
6384 unsigned long irq_flags;
6385 ssize_t written;
6386 int size;
6387 int len;
6388
6389 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6390
6391 if (tracing_disabled)
6392 return -EINVAL;
6393
6394 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6395 return -EINVAL;
6396
6397 /* The marker must at least have a tag id */
6398 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6399 return -EINVAL;
6400
6401 if (cnt > TRACE_BUF_SIZE)
6402 cnt = TRACE_BUF_SIZE;
6403
6404 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6405
6406 local_save_flags(irq_flags);
6407 size = sizeof(*entry) + cnt;
6408 if (cnt < FAULT_SIZE_ID)
6409 size += FAULT_SIZE_ID - cnt;
6410
6411 buffer = tr->trace_buffer.buffer;
6412 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6413 irq_flags, preempt_count());
6414 if (!event)
6415 /* Ring buffer disabled, return as if not open for write */
6416 return -EBADF;
6417
6418 entry = ring_buffer_event_data(event);
6419
6420 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6421 if (len) {
6422 entry->id = -1;
6423 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6424 written = -EFAULT;
6425 } else
6426 written = cnt;
6427
6428 __buffer_unlock_commit(buffer, event);
6429
6430 if (written > 0)
6431 *fpos += written;
6432
6433 return written;
6434 }
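/*
 * Illustrative user-space sketch (not part of this file): the raw
 * marker expects a leading int tag id followed by opaque payload bytes,
 * as read by the handler above. The path is an assumption; error
 * handling is trimmed.
 *
 *	struct { int id; char payload[8]; } rec = { 42, "rawdata" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &rec, sizeof(rec));
 *	close(fd);
 */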
6435
6436 static int tracing_clock_show(struct seq_file *m, void *v)
6437 {
6438 struct trace_array *tr = m->private;
6439 int i;
6440
6441 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6442 seq_printf(m,
6443 "%s%s%s%s", i ? " " : "",
6444 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6445 i == tr->clock_id ? "]" : "");
6446 seq_putc(m, '\n');
6447
6448 return 0;
6449 }
6450
6451 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6452 {
6453 int i;
6454
6455 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6456 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6457 break;
6458 }
6459 if (i == ARRAY_SIZE(trace_clocks))
6460 return -EINVAL;
6461
6462 mutex_lock(&trace_types_lock);
6463
6464 tr->clock_id = i;
6465
6466 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6467
6468 /*
6469 * New clock may not be consistent with the previous clock.
6470 * Reset the buffer so that it doesn't have incomparable timestamps.
6471 */
6472 tracing_reset_online_cpus(&tr->trace_buffer);
6473
6474 #ifdef CONFIG_TRACER_MAX_TRACE
6475 if (tr->max_buffer.buffer)
6476 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6477 tracing_reset_online_cpus(&tr->max_buffer);
6478 #endif
6479
6480 mutex_unlock(&trace_types_lock);
6481
6482 return 0;
6483 }
6484
6485 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6486 size_t cnt, loff_t *fpos)
6487 {
6488 struct seq_file *m = filp->private_data;
6489 struct trace_array *tr = m->private;
6490 char buf[64];
6491 const char *clockstr;
6492 int ret;
6493
6494 if (cnt >= sizeof(buf))
6495 return -EINVAL;
6496
6497 if (copy_from_user(buf, ubuf, cnt))
6498 return -EFAULT;
6499
6500 buf[cnt] = 0;
6501
6502 clockstr = strstrip(buf);
6503
6504 ret = tracing_set_clock(tr, clockstr);
6505 if (ret)
6506 return ret;
6507
6508 *fpos += cnt;
6509
6510 return cnt;
6511 }
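/*
 * Illustrative user-space sketch (not part of this file): selecting a
 * trace clock through the write handler above. Valid names come from
 * trace_clocks[] (for example "local" or "global"); reading the file
 * shows the current clock in brackets. The path is an assumption; error
 * handling is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *
 *	write(fd, "global", 6);
 *	close(fd);
 */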
6512
6513 static int tracing_clock_open(struct inode *inode, struct file *file)
6514 {
6515 struct trace_array *tr = inode->i_private;
6516 int ret;
6517
6518 if (tracing_disabled)
6519 return -ENODEV;
6520
6521 if (trace_array_get(tr))
6522 return -ENODEV;
6523
6524 ret = single_open(file, tracing_clock_show, inode->i_private);
6525 if (ret < 0)
6526 trace_array_put(tr);
6527
6528 return ret;
6529 }
6530
6531 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6532 {
6533 struct trace_array *tr = m->private;
6534
6535 mutex_lock(&trace_types_lock);
6536
6537 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6538 seq_puts(m, "delta [absolute]\n");
6539 else
6540 seq_puts(m, "[delta] absolute\n");
6541
6542 mutex_unlock(&trace_types_lock);
6543
6544 return 0;
6545 }
6546
6547 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6548 {
6549 struct trace_array *tr = inode->i_private;
6550 int ret;
6551
6552 if (tracing_disabled)
6553 return -ENODEV;
6554
6555 if (trace_array_get(tr))
6556 return -ENODEV;
6557
6558 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6559 if (ret < 0)
6560 trace_array_put(tr);
6561
6562 return ret;
6563 }
6564
6565 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6566 {
6567 int ret = 0;
6568
6569 mutex_lock(&trace_types_lock);
6570
6571 if (abs && tr->time_stamp_abs_ref++)
6572 goto out;
6573
6574 if (!abs) {
6575 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6576 ret = -EINVAL;
6577 goto out;
6578 }
6579
6580 if (--tr->time_stamp_abs_ref)
6581 goto out;
6582 }
6583
6584 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6585
6586 #ifdef CONFIG_TRACER_MAX_TRACE
6587 if (tr->max_buffer.buffer)
6588 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6589 #endif
6590 out:
6591 mutex_unlock(&trace_types_lock);
6592
6593 return ret;
6594 }
6595
6596 struct ftrace_buffer_info {
6597 struct trace_iterator iter;
6598 void *spare;
6599 unsigned int spare_cpu;
6600 unsigned int read;
6601 };
6602
6603 #ifdef CONFIG_TRACER_SNAPSHOT
6604 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6605 {
6606 struct trace_array *tr = inode->i_private;
6607 struct trace_iterator *iter;
6608 struct seq_file *m;
6609 int ret = 0;
6610
6611 if (trace_array_get(tr) < 0)
6612 return -ENODEV;
6613
6614 if (file->f_mode & FMODE_READ) {
6615 iter = __tracing_open(inode, file, true);
6616 if (IS_ERR(iter))
6617 ret = PTR_ERR(iter);
6618 } else {
6619 /* Writes still need the seq_file to hold the private data */
6620 ret = -ENOMEM;
6621 m = kzalloc(sizeof(*m), GFP_KERNEL);
6622 if (!m)
6623 goto out;
6624 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6625 if (!iter) {
6626 kfree(m);
6627 goto out;
6628 }
6629 ret = 0;
6630
6631 iter->tr = tr;
6632 iter->trace_buffer = &tr->max_buffer;
6633 iter->cpu_file = tracing_get_cpu(inode);
6634 m->private = iter;
6635 file->private_data = m;
6636 }
6637 out:
6638 if (ret < 0)
6639 trace_array_put(tr);
6640
6641 return ret;
6642 }
6643
6644 static ssize_t
6645 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6646 loff_t *ppos)
6647 {
6648 struct seq_file *m = filp->private_data;
6649 struct trace_iterator *iter = m->private;
6650 struct trace_array *tr = iter->tr;
6651 unsigned long val;
6652 int ret;
6653
6654 ret = tracing_update_buffers();
6655 if (ret < 0)
6656 return ret;
6657
6658 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6659 if (ret)
6660 return ret;
6661
6662 mutex_lock(&trace_types_lock);
6663
6664 if (tr->current_trace->use_max_tr) {
6665 ret = -EBUSY;
6666 goto out;
6667 }
6668
6669 arch_spin_lock(&tr->max_lock);
6670 if (tr->cond_snapshot)
6671 ret = -EBUSY;
6672 arch_spin_unlock(&tr->max_lock);
6673 if (ret)
6674 goto out;
6675
6676 switch (val) {
6677 case 0:
6678 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6679 ret = -EINVAL;
6680 break;
6681 }
6682 if (tr->allocated_snapshot)
6683 free_snapshot(tr);
6684 break;
6685 case 1:
6686 /* Only allow per-cpu swap if the ring buffer supports it */
6687 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6688 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6689 ret = -EINVAL;
6690 break;
6691 }
6692 #endif
6693 if (!tr->allocated_snapshot) {
6694 ret = tracing_alloc_snapshot_instance(tr);
6695 if (ret < 0)
6696 break;
6697 }
6698 local_irq_disable();
6699 /* Now, we're going to swap */
6700 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6701 update_max_tr(tr, current, smp_processor_id(), NULL);
6702 else
6703 update_max_tr_single(tr, current, iter->cpu_file);
6704 local_irq_enable();
6705 break;
6706 default:
6707 if (tr->allocated_snapshot) {
6708 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6709 tracing_reset_online_cpus(&tr->max_buffer);
6710 else
6711 tracing_reset(&tr->max_buffer, iter->cpu_file);
6712 }
6713 break;
6714 }
6715
6716 if (ret >= 0) {
6717 *ppos += cnt;
6718 ret = cnt;
6719 }
6720 out:
6721 mutex_unlock(&trace_types_lock);
6722 return ret;
6723 }
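/*
 * Illustrative user-space sketch (not part of this file): driving the
 * snapshot file handled above. Writing "1" allocates the snapshot
 * buffer if needed and swaps it with the live buffer, "0" frees it, and
 * any other value clears the snapshot buffer. The path is an
 * assumption; error handling is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/snapshot", O_RDWR);
 *
 *	write(fd, "1", 1);
 *	close(fd);
 */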
6724
6725 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6726 {
6727 struct seq_file *m = file->private_data;
6728 int ret;
6729
6730 ret = tracing_release(inode, file);
6731
6732 if (file->f_mode & FMODE_READ)
6733 return ret;
6734
6735 /* If write only, the seq_file is just a stub */
6736 if (m)
6737 kfree(m->private);
6738 kfree(m);
6739
6740 return 0;
6741 }
6742
6743 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6744 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6745 size_t count, loff_t *ppos);
6746 static int tracing_buffers_release(struct inode *inode, struct file *file);
6747 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6748 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6749
6750 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6751 {
6752 struct ftrace_buffer_info *info;
6753 int ret;
6754
6755 ret = tracing_buffers_open(inode, filp);
6756 if (ret < 0)
6757 return ret;
6758
6759 info = filp->private_data;
6760
6761 if (info->iter.trace->use_max_tr) {
6762 tracing_buffers_release(inode, filp);
6763 return -EBUSY;
6764 }
6765
6766 info->iter.snapshot = true;
6767 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6768
6769 return ret;
6770 }
6771
6772 #endif /* CONFIG_TRACER_SNAPSHOT */
6773
6774
6775 static const struct file_operations tracing_thresh_fops = {
6776 .open = tracing_open_generic,
6777 .read = tracing_thresh_read,
6778 .write = tracing_thresh_write,
6779 .llseek = generic_file_llseek,
6780 };
6781
6782 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6783 static const struct file_operations tracing_max_lat_fops = {
6784 .open = tracing_open_generic,
6785 .read = tracing_max_lat_read,
6786 .write = tracing_max_lat_write,
6787 .llseek = generic_file_llseek,
6788 };
6789 #endif
6790
6791 static const struct file_operations set_tracer_fops = {
6792 .open = tracing_open_generic,
6793 .read = tracing_set_trace_read,
6794 .write = tracing_set_trace_write,
6795 .llseek = generic_file_llseek,
6796 };
6797
6798 static const struct file_operations tracing_pipe_fops = {
6799 .open = tracing_open_pipe,
6800 .poll = tracing_poll_pipe,
6801 .read = tracing_read_pipe,
6802 .splice_read = tracing_splice_read_pipe,
6803 .release = tracing_release_pipe,
6804 .llseek = no_llseek,
6805 };
6806
6807 static const struct file_operations tracing_entries_fops = {
6808 .open = tracing_open_generic_tr,
6809 .read = tracing_entries_read,
6810 .write = tracing_entries_write,
6811 .llseek = generic_file_llseek,
6812 .release = tracing_release_generic_tr,
6813 };
6814
6815 static const struct file_operations tracing_total_entries_fops = {
6816 .open = tracing_open_generic_tr,
6817 .read = tracing_total_entries_read,
6818 .llseek = generic_file_llseek,
6819 .release = tracing_release_generic_tr,
6820 };
6821
6822 static const struct file_operations tracing_free_buffer_fops = {
6823 .open = tracing_open_generic_tr,
6824 .write = tracing_free_buffer_write,
6825 .release = tracing_free_buffer_release,
6826 };
6827
6828 static const struct file_operations tracing_mark_fops = {
6829 .open = tracing_open_generic_tr,
6830 .write = tracing_mark_write,
6831 .llseek = generic_file_llseek,
6832 .release = tracing_release_generic_tr,
6833 };
6834
6835 static const struct file_operations tracing_mark_raw_fops = {
6836 .open = tracing_open_generic_tr,
6837 .write = tracing_mark_raw_write,
6838 .llseek = generic_file_llseek,
6839 .release = tracing_release_generic_tr,
6840 };
6841
6842 static const struct file_operations trace_clock_fops = {
6843 .open = tracing_clock_open,
6844 .read = seq_read,
6845 .llseek = seq_lseek,
6846 .release = tracing_single_release_tr,
6847 .write = tracing_clock_write,
6848 };
6849
6850 static const struct file_operations trace_time_stamp_mode_fops = {
6851 .open = tracing_time_stamp_mode_open,
6852 .read = seq_read,
6853 .llseek = seq_lseek,
6854 .release = tracing_single_release_tr,
6855 };
6856
6857 #ifdef CONFIG_TRACER_SNAPSHOT
6858 static const struct file_operations snapshot_fops = {
6859 .open = tracing_snapshot_open,
6860 .read = seq_read,
6861 .write = tracing_snapshot_write,
6862 .llseek = tracing_lseek,
6863 .release = tracing_snapshot_release,
6864 };
6865
6866 static const struct file_operations snapshot_raw_fops = {
6867 .open = snapshot_raw_open,
6868 .read = tracing_buffers_read,
6869 .release = tracing_buffers_release,
6870 .splice_read = tracing_buffers_splice_read,
6871 .llseek = no_llseek,
6872 };
6873
6874 #endif /* CONFIG_TRACER_SNAPSHOT */
6875
6876 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6877 {
6878 struct trace_array *tr = inode->i_private;
6879 struct ftrace_buffer_info *info;
6880 int ret;
6881
6882 if (tracing_disabled)
6883 return -ENODEV;
6884
6885 if (trace_array_get(tr) < 0)
6886 return -ENODEV;
6887
6888 info = kzalloc(sizeof(*info), GFP_KERNEL);
6889 if (!info) {
6890 trace_array_put(tr);
6891 return -ENOMEM;
6892 }
6893
6894 mutex_lock(&trace_types_lock);
6895
6896 info->iter.tr = tr;
6897 info->iter.cpu_file = tracing_get_cpu(inode);
6898 info->iter.trace = tr->current_trace;
6899 info->iter.trace_buffer = &tr->trace_buffer;
6900 info->spare = NULL;
6901 /* Force reading ring buffer for first read */
6902 info->read = (unsigned int)-1;
6903
6904 filp->private_data = info;
6905
6906 tr->current_trace->ref++;
6907
6908 mutex_unlock(&trace_types_lock);
6909
6910 ret = nonseekable_open(inode, filp);
6911 if (ret < 0)
6912 trace_array_put(tr);
6913
6914 return ret;
6915 }
6916
6917 static __poll_t
6918 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6919 {
6920 struct ftrace_buffer_info *info = filp->private_data;
6921 struct trace_iterator *iter = &info->iter;
6922
6923 return trace_poll(iter, filp, poll_table);
6924 }
6925
6926 static ssize_t
6927 tracing_buffers_read(struct file *filp, char __user *ubuf,
6928 size_t count, loff_t *ppos)
6929 {
6930 struct ftrace_buffer_info *info = filp->private_data;
6931 struct trace_iterator *iter = &info->iter;
6932 ssize_t ret = 0;
6933 ssize_t size;
6934
6935 if (!count)
6936 return 0;
6937
6938 #ifdef CONFIG_TRACER_MAX_TRACE
6939 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6940 return -EBUSY;
6941 #endif
6942
6943 if (!info->spare) {
6944 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6945 iter->cpu_file);
6946 if (IS_ERR(info->spare)) {
6947 ret = PTR_ERR(info->spare);
6948 info->spare = NULL;
6949 } else {
6950 info->spare_cpu = iter->cpu_file;
6951 }
6952 }
6953 if (!info->spare)
6954 return ret;
6955
6956 /* Is there data left over from a previous read? */
6957 if (info->read < PAGE_SIZE)
6958 goto read;
6959
6960 again:
6961 trace_access_lock(iter->cpu_file);
6962 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6963 &info->spare,
6964 count,
6965 iter->cpu_file, 0);
6966 trace_access_unlock(iter->cpu_file);
6967
6968 if (ret < 0) {
6969 if (trace_empty(iter)) {
6970 if ((filp->f_flags & O_NONBLOCK))
6971 return -EAGAIN;
6972
6973 ret = wait_on_pipe(iter, 0);
6974 if (ret)
6975 return ret;
6976
6977 goto again;
6978 }
6979 return 0;
6980 }
6981
6982 info->read = 0;
6983 read:
6984 size = PAGE_SIZE - info->read;
6985 if (size > count)
6986 size = count;
6987
6988 ret = copy_to_user(ubuf, info->spare + info->read, size);
6989 if (ret == size)
6990 return -EFAULT;
6991
6992 size -= ret;
6993
6994 *ppos += size;
6995 info->read += size;
6996
6997 return size;
6998 }
6999
7000 static int tracing_buffers_release(struct inode *inode, struct file *file)
7001 {
7002 struct ftrace_buffer_info *info = file->private_data;
7003 struct trace_iterator *iter = &info->iter;
7004
7005 mutex_lock(&trace_types_lock);
7006
7007 iter->tr->current_trace->ref--;
7008
7009 __trace_array_put(iter->tr);
7010
7011 if (info->spare)
7012 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7013 info->spare_cpu, info->spare);
7014 kfree(info);
7015
7016 mutex_unlock(&trace_types_lock);
7017
7018 return 0;
7019 }
7020
7021 struct buffer_ref {
7022 struct ring_buffer *buffer;
7023 void *page;
7024 int cpu;
7025 int ref;
7026 };
7027
7028 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7029 struct pipe_buffer *buf)
7030 {
7031 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7032
7033 if (--ref->ref)
7034 return;
7035
7036 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7037 kfree(ref);
7038 buf->private = 0;
7039 }
7040
7041 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7042 struct pipe_buffer *buf)
7043 {
7044 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7045
7046 ref->ref++;
7047 }
7048
7049 /* Pipe buffer operations for a ring buffer page. */
7050 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7051 .can_merge = 0,
7052 .confirm = generic_pipe_buf_confirm,
7053 .release = buffer_pipe_buf_release,
7054 .steal = generic_pipe_buf_steal,
7055 .get = buffer_pipe_buf_get,
7056 };
7057
7058 /*
7059 * Callback from splice_to_pipe(); releases pages left at the end of
7060 * the spd if we errored out while filling the pipe.
7061 */
7062 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7063 {
7064 struct buffer_ref *ref =
7065 (struct buffer_ref *)spd->partial[i].private;
7066
7067 if (--ref->ref)
7068 return;
7069
7070 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7071 kfree(ref);
7072 spd->partial[i].private = 0;
7073 }
7074
7075 static ssize_t
7076 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7077 struct pipe_inode_info *pipe, size_t len,
7078 unsigned int flags)
7079 {
7080 struct ftrace_buffer_info *info = file->private_data;
7081 struct trace_iterator *iter = &info->iter;
7082 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7083 struct page *pages_def[PIPE_DEF_BUFFERS];
7084 struct splice_pipe_desc spd = {
7085 .pages = pages_def,
7086 .partial = partial_def,
7087 .nr_pages_max = PIPE_DEF_BUFFERS,
7088 .ops = &buffer_pipe_buf_ops,
7089 .spd_release = buffer_spd_release,
7090 };
7091 struct buffer_ref *ref;
7092 int entries, i;
7093 ssize_t ret = 0;
7094
7095 #ifdef CONFIG_TRACER_MAX_TRACE
7096 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7097 return -EBUSY;
7098 #endif
7099
7100 if (*ppos & (PAGE_SIZE - 1))
7101 return -EINVAL;
7102
7103 if (len & (PAGE_SIZE - 1)) {
7104 if (len < PAGE_SIZE)
7105 return -EINVAL;
7106 len &= PAGE_MASK;
7107 }
7108
7109 if (splice_grow_spd(pipe, &spd))
7110 return -ENOMEM;
7111
7112 again:
7113 trace_access_lock(iter->cpu_file);
7114 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7115
7116 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7117 struct page *page;
7118 int r;
7119
7120 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7121 if (!ref) {
7122 ret = -ENOMEM;
7123 break;
7124 }
7125
7126 ref->ref = 1;
7127 ref->buffer = iter->trace_buffer->buffer;
7128 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7129 if (IS_ERR(ref->page)) {
7130 ret = PTR_ERR(ref->page);
7131 ref->page = NULL;
7132 kfree(ref);
7133 break;
7134 }
7135 ref->cpu = iter->cpu_file;
7136
7137 r = ring_buffer_read_page(ref->buffer, &ref->page,
7138 len, iter->cpu_file, 1);
7139 if (r < 0) {
7140 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7141 ref->page);
7142 kfree(ref);
7143 break;
7144 }
7145
7146 page = virt_to_page(ref->page);
7147
7148 spd.pages[i] = page;
7149 spd.partial[i].len = PAGE_SIZE;
7150 spd.partial[i].offset = 0;
7151 spd.partial[i].private = (unsigned long)ref;
7152 spd.nr_pages++;
7153 *ppos += PAGE_SIZE;
7154
7155 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7156 }
7157
7158 trace_access_unlock(iter->cpu_file);
7159 spd.nr_pages = i;
7160
7161 /* did we read anything? */
7162 if (!spd.nr_pages) {
7163 if (ret)
7164 goto out;
7165
7166 ret = -EAGAIN;
7167 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7168 goto out;
7169
7170 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7171 if (ret)
7172 goto out;
7173
7174 goto again;
7175 }
7176
7177 ret = splice_to_pipe(pipe, &spd);
7178 out:
7179 splice_shrink_spd(&spd);
7180
7181 return ret;
7182 }
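/*
 * Illustrative user-space sketch (not part of this file): draining raw
 * ring buffer pages with splice(2), which lands in the splice handler
 * above. The length must be at least one page and page aligned. Paths
 * are assumptions; headers and error handling are trimmed.
 *
 *	int p[2], in, out;
 *	ssize_t n;
 *
 *	pipe(p);
 *	in  = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	out = open("cpu0.raw", O_WRONLY | O_CREAT, 0644);
 *	while ((n = splice(in, NULL, p[1], NULL, 64 * 4096, 0)) > 0)
 *		splice(p[0], NULL, out, NULL, n, 0);
 */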
7183
7184 static const struct file_operations tracing_buffers_fops = {
7185 .open = tracing_buffers_open,
7186 .read = tracing_buffers_read,
7187 .poll = tracing_buffers_poll,
7188 .release = tracing_buffers_release,
7189 .splice_read = tracing_buffers_splice_read,
7190 .llseek = no_llseek,
7191 };
7192
7193 static ssize_t
7194 tracing_stats_read(struct file *filp, char __user *ubuf,
7195 size_t count, loff_t *ppos)
7196 {
7197 struct inode *inode = file_inode(filp);
7198 struct trace_array *tr = inode->i_private;
7199 struct trace_buffer *trace_buf = &tr->trace_buffer;
7200 int cpu = tracing_get_cpu(inode);
7201 struct trace_seq *s;
7202 unsigned long cnt;
7203 unsigned long long t;
7204 unsigned long usec_rem;
7205
7206 s = kmalloc(sizeof(*s), GFP_KERNEL);
7207 if (!s)
7208 return -ENOMEM;
7209
7210 trace_seq_init(s);
7211
7212 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7213 trace_seq_printf(s, "entries: %ld\n", cnt);
7214
7215 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7216 trace_seq_printf(s, "overrun: %ld\n", cnt);
7217
7218 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7219 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7220
7221 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7222 trace_seq_printf(s, "bytes: %ld\n", cnt);
7223
7224 if (trace_clocks[tr->clock_id].in_ns) {
7225 /* local or global for trace_clock */
7226 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7227 usec_rem = do_div(t, USEC_PER_SEC);
7228 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7229 t, usec_rem);
7230
7231 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7232 usec_rem = do_div(t, USEC_PER_SEC);
7233 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7234 } else {
7235 /* counter or tsc mode for trace_clock */
7236 trace_seq_printf(s, "oldest event ts: %llu\n",
7237 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7238
7239 trace_seq_printf(s, "now ts: %llu\n",
7240 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7241 }
7242
7243 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7244 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7245
7246 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7247 trace_seq_printf(s, "read events: %ld\n", cnt);
7248
7249 count = simple_read_from_buffer(ubuf, count, ppos,
7250 s->buffer, trace_seq_used(s));
7251
7252 kfree(s);
7253
7254 return count;
7255 }
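/*
 * Illustrative example output (values invented) of reading
 * per_cpu/cpu0/stats, as produced by the function above. The two
 * timestamp lines appear as seconds.microseconds only when the current
 * clock counts in nanoseconds.
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65536
 *	oldest event ts:  1234.000100
 *	now ts:  1234.567890
 *	dropped events: 0
 *	read events: 1024
 */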
7256
7257 static const struct file_operations tracing_stats_fops = {
7258 .open = tracing_open_generic_tr,
7259 .read = tracing_stats_read,
7260 .llseek = generic_file_llseek,
7261 .release = tracing_release_generic_tr,
7262 };
7263
7264 #ifdef CONFIG_DYNAMIC_FTRACE
7265
7266 static ssize_t
7267 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7268 size_t cnt, loff_t *ppos)
7269 {
7270 unsigned long *p = filp->private_data;
7271 char buf[64]; /* Small enough to keep stack usage shallow */
7272 int r;
7273
7274 r = scnprintf(buf, 63, "%ld", *p);
7275 buf[r++] = '\n';
7276
7277 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7278 }
7279
7280 static const struct file_operations tracing_dyn_info_fops = {
7281 .open = tracing_open_generic,
7282 .read = tracing_read_dyn_info,
7283 .llseek = generic_file_llseek,
7284 };
7285 #endif /* CONFIG_DYNAMIC_FTRACE */
7286
7287 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7288 static void
7289 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7290 struct trace_array *tr, struct ftrace_probe_ops *ops,
7291 void *data)
7292 {
7293 tracing_snapshot_instance(tr);
7294 }
7295
7296 static void
7297 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7298 struct trace_array *tr, struct ftrace_probe_ops *ops,
7299 void *data)
7300 {
7301 struct ftrace_func_mapper *mapper = data;
7302 long *count = NULL;
7303
7304 if (mapper)
7305 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7306
7307 if (count) {
7308
7309 if (*count <= 0)
7310 return;
7311
7312 (*count)--;
7313 }
7314
7315 tracing_snapshot_instance(tr);
7316 }
7317
7318 static int
7319 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7320 struct ftrace_probe_ops *ops, void *data)
7321 {
7322 struct ftrace_func_mapper *mapper = data;
7323 long *count = NULL;
7324
7325 seq_printf(m, "%ps:", (void *)ip);
7326
7327 seq_puts(m, "snapshot");
7328
7329 if (mapper)
7330 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7331
7332 if (count)
7333 seq_printf(m, ":count=%ld\n", *count);
7334 else
7335 seq_puts(m, ":unlimited\n");
7336
7337 return 0;
7338 }
7339
7340 static int
7341 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7342 unsigned long ip, void *init_data, void **data)
7343 {
7344 struct ftrace_func_mapper *mapper = *data;
7345
7346 if (!mapper) {
7347 mapper = allocate_ftrace_func_mapper();
7348 if (!mapper)
7349 return -ENOMEM;
7350 *data = mapper;
7351 }
7352
7353 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7354 }
7355
7356 static void
7357 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7358 unsigned long ip, void *data)
7359 {
7360 struct ftrace_func_mapper *mapper = data;
7361
7362 if (!ip) {
7363 if (!mapper)
7364 return;
7365 free_ftrace_func_mapper(mapper, NULL);
7366 return;
7367 }
7368
7369 ftrace_func_mapper_remove_ip(mapper, ip);
7370 }
7371
7372 static struct ftrace_probe_ops snapshot_probe_ops = {
7373 .func = ftrace_snapshot,
7374 .print = ftrace_snapshot_print,
7375 };
7376
7377 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7378 .func = ftrace_count_snapshot,
7379 .print = ftrace_snapshot_print,
7380 .init = ftrace_snapshot_init,
7381 .free = ftrace_snapshot_free,
7382 };
7383
7384 static int
7385 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7386 char *glob, char *cmd, char *param, int enable)
7387 {
7388 struct ftrace_probe_ops *ops;
7389 void *count = (void *)-1;
7390 char *number;
7391 int ret;
7392
7393 if (!tr)
7394 return -ENODEV;
7395
7396 /* hash funcs only work with set_ftrace_filter */
7397 if (!enable)
7398 return -EINVAL;
7399
7400 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7401
7402 if (glob[0] == '!')
7403 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7404
7405 if (!param)
7406 goto out_reg;
7407
7408 number = strsep(&param, ":");
7409
7410 if (!strlen(number))
7411 goto out_reg;
7412
7413 /*
7414 * We use the callback data field (which is a pointer)
7415 * as our counter.
7416 */
7417 ret = kstrtoul(number, 0, (unsigned long *)&count);
7418 if (ret)
7419 return ret;
7420
7421 out_reg:
7422 ret = tracing_alloc_snapshot_instance(tr);
7423 if (ret < 0)
7424 goto out;
7425
7426 ret = register_ftrace_function_probe(glob, tr, ops, count);
7427
7428 out:
7429 return ret < 0 ? ret : 0;
7430 }
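/*
 * Illustrative user-space sketch (not part of this file): the
 * "snapshot" command parsed above is attached through
 * set_ftrace_filter, with an optional ":count" limit; a leading '!'
 * removes the probe. The function name is a placeholder and the path
 * is an assumption; error handling is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);
 *
 *	write(fd, "do_sys_open:snapshot:5", 22);
 *	close(fd);
 */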
7431
7432 static struct ftrace_func_command ftrace_snapshot_cmd = {
7433 .name = "snapshot",
7434 .func = ftrace_trace_snapshot_callback,
7435 };
7436
7437 static __init int register_snapshot_cmd(void)
7438 {
7439 return register_ftrace_command(&ftrace_snapshot_cmd);
7440 }
7441 #else
7442 static inline __init int register_snapshot_cmd(void) { return 0; }
7443 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7444
7445 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7446 {
7447 if (WARN_ON(!tr->dir))
7448 return ERR_PTR(-ENODEV);
7449
7450 /* Top directory uses NULL as the parent */
7451 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7452 return NULL;
7453
7454 /* All sub buffers have a descriptor */
7455 return tr->dir;
7456 }
7457
7458 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7459 {
7460 struct dentry *d_tracer;
7461
7462 if (tr->percpu_dir)
7463 return tr->percpu_dir;
7464
7465 d_tracer = tracing_get_dentry(tr);
7466 if (IS_ERR(d_tracer))
7467 return NULL;
7468
7469 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7470
7471 WARN_ONCE(!tr->percpu_dir,
7472 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7473
7474 return tr->percpu_dir;
7475 }
7476
7477 static struct dentry *
7478 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7479 void *data, long cpu, const struct file_operations *fops)
7480 {
7481 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7482
7483 if (ret) /* See tracing_get_cpu() */
7484 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7485 return ret;
7486 }
7487
7488 static void
7489 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7490 {
7491 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7492 struct dentry *d_cpu;
7493 char cpu_dir[30]; /* 30 characters should be more than enough */
7494
7495 if (!d_percpu)
7496 return;
7497
7498 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7499 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7500 if (!d_cpu) {
7501 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7502 return;
7503 }
7504
7505 /* per cpu trace_pipe */
7506 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7507 tr, cpu, &tracing_pipe_fops);
7508
7509 /* per cpu trace */
7510 trace_create_cpu_file("trace", 0644, d_cpu,
7511 tr, cpu, &tracing_fops);
7512
7513 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7514 tr, cpu, &tracing_buffers_fops);
7515
7516 trace_create_cpu_file("stats", 0444, d_cpu,
7517 tr, cpu, &tracing_stats_fops);
7518
7519 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7520 tr, cpu, &tracing_entries_fops);
7521
7522 #ifdef CONFIG_TRACER_SNAPSHOT
7523 trace_create_cpu_file("snapshot", 0644, d_cpu,
7524 tr, cpu, &snapshot_fops);
7525
7526 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7527 tr, cpu, &snapshot_raw_fops);
7528 #endif
7529 }
7530
7531 #ifdef CONFIG_FTRACE_SELFTEST
7532 /* Let selftest have access to static functions in this file */
7533 #include "trace_selftest.c"
7534 #endif
7535
7536 static ssize_t
7537 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7538 loff_t *ppos)
7539 {
7540 struct trace_option_dentry *topt = filp->private_data;
7541 char *buf;
7542
7543 if (topt->flags->val & topt->opt->bit)
7544 buf = "1\n";
7545 else
7546 buf = "0\n";
7547
7548 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7549 }
7550
7551 static ssize_t
7552 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7553 loff_t *ppos)
7554 {
7555 struct trace_option_dentry *topt = filp->private_data;
7556 unsigned long val;
7557 int ret;
7558
7559 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7560 if (ret)
7561 return ret;
7562
7563 if (val != 0 && val != 1)
7564 return -EINVAL;
7565
7566 if (!!(topt->flags->val & topt->opt->bit) != val) {
7567 mutex_lock(&trace_types_lock);
7568 ret = __set_tracer_option(topt->tr, topt->flags,
7569 topt->opt, !val);
7570 mutex_unlock(&trace_types_lock);
7571 if (ret)
7572 return ret;
7573 }
7574
7575 *ppos += cnt;
7576
7577 return cnt;
7578 }
7579
7580
7581 static const struct file_operations trace_options_fops = {
7582 .open = tracing_open_generic,
7583 .read = trace_options_read,
7584 .write = trace_options_write,
7585 .llseek = generic_file_llseek,
7586 };
7587
7588 /*
7589 * In order to pass in both the trace_array descriptor as well as the index
7590 * to the flag that the trace option file represents, the trace_array
7591 * has a character array of trace_flags_index[], which holds the index
7592 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7593 * The address of this character array is passed to the flag option file
7594 * read/write callbacks.
7595 *
7596 * In order to extract both the index and the trace_array descriptor,
7597 * get_tr_index() uses the following algorithm.
7598 *
7599 * idx = *ptr;
7600 *
7601 * This works because the pointer itself holds the address of the index
7602 * entry, and that entry stores its own index (remember index[1] == 1).
7603 *
7604 * Then, to get the trace_array descriptor, we subtract that index
7605 * from the pointer, which lands us at the start of the index array.
7606 *
7607 * ptr - idx == &index[0]
7608 *
7609 * Then a simple container_of() from that pointer gets us to the
7610 * trace_array descriptor.
7611 */
7612 static void get_tr_index(void *data, struct trace_array **ptr,
7613 unsigned int *pindex)
7614 {
7615 *pindex = *(unsigned char *)data;
7616
7617 *ptr = container_of(data - *pindex, struct trace_array,
7618 trace_flags_index);
7619 }
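/*
 * Worked example (illustrative): create_trace_option_core_file() below
 * passes &tr->trace_flags_index[3] for the option at bit 3. In the
 * callbacks, data points at that entry, so *data == 3, data - 3 is
 * &tr->trace_flags_index[0], and container_of() on that address
 * recovers the enclosing trace_array.
 */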
7620
7621 static ssize_t
7622 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7623 loff_t *ppos)
7624 {
7625 void *tr_index = filp->private_data;
7626 struct trace_array *tr;
7627 unsigned int index;
7628 char *buf;
7629
7630 get_tr_index(tr_index, &tr, &index);
7631
7632 if (tr->trace_flags & (1 << index))
7633 buf = "1\n";
7634 else
7635 buf = "0\n";
7636
7637 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7638 }
7639
7640 static ssize_t
7641 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7642 loff_t *ppos)
7643 {
7644 void *tr_index = filp->private_data;
7645 struct trace_array *tr;
7646 unsigned int index;
7647 unsigned long val;
7648 int ret;
7649
7650 get_tr_index(tr_index, &tr, &index);
7651
7652 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7653 if (ret)
7654 return ret;
7655
7656 if (val != 0 && val != 1)
7657 return -EINVAL;
7658
7659 mutex_lock(&trace_types_lock);
7660 ret = set_tracer_flag(tr, 1 << index, val);
7661 mutex_unlock(&trace_types_lock);
7662
7663 if (ret < 0)
7664 return ret;
7665
7666 *ppos += cnt;
7667
7668 return cnt;
7669 }
7670
7671 static const struct file_operations trace_options_core_fops = {
7672 .open = tracing_open_generic,
7673 .read = trace_options_core_read,
7674 .write = trace_options_core_write,
7675 .llseek = generic_file_llseek,
7676 };
7677
7678 struct dentry *trace_create_file(const char *name,
7679 umode_t mode,
7680 struct dentry *parent,
7681 void *data,
7682 const struct file_operations *fops)
7683 {
7684 struct dentry *ret;
7685
7686 ret = tracefs_create_file(name, mode, parent, data, fops);
7687 if (!ret)
7688 pr_warn("Could not create tracefs '%s' entry\n", name);
7689
7690 return ret;
7691 }
7692
7693
7694 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7695 {
7696 struct dentry *d_tracer;
7697
7698 if (tr->options)
7699 return tr->options;
7700
7701 d_tracer = tracing_get_dentry(tr);
7702 if (IS_ERR(d_tracer))
7703 return NULL;
7704
7705 tr->options = tracefs_create_dir("options", d_tracer);
7706 if (!tr->options) {
7707 pr_warn("Could not create tracefs directory 'options'\n");
7708 return NULL;
7709 }
7710
7711 return tr->options;
7712 }
7713
7714 static void
7715 create_trace_option_file(struct trace_array *tr,
7716 struct trace_option_dentry *topt,
7717 struct tracer_flags *flags,
7718 struct tracer_opt *opt)
7719 {
7720 struct dentry *t_options;
7721
7722 t_options = trace_options_init_dentry(tr);
7723 if (!t_options)
7724 return;
7725
7726 topt->flags = flags;
7727 topt->opt = opt;
7728 topt->tr = tr;
7729
7730 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7731 &trace_options_fops);
7732
7733 }
7734
7735 static void
7736 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7737 {
7738 struct trace_option_dentry *topts;
7739 struct trace_options *tr_topts;
7740 struct tracer_flags *flags;
7741 struct tracer_opt *opts;
7742 int cnt;
7743 int i;
7744
7745 if (!tracer)
7746 return;
7747
7748 flags = tracer->flags;
7749
7750 if (!flags || !flags->opts)
7751 return;
7752
7753 /*
7754 * If this is an instance, only create flags for tracers
7755 * the instance may have.
7756 */
7757 if (!trace_ok_for_array(tracer, tr))
7758 return;
7759
7760 for (i = 0; i < tr->nr_topts; i++) {
7761 /* Make sure there are no duplicate flags. */
7762 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7763 return;
7764 }
7765
7766 opts = flags->opts;
7767
7768 for (cnt = 0; opts[cnt].name; cnt++)
7769 ;
7770
7771 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7772 if (!topts)
7773 return;
7774
7775 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7776 GFP_KERNEL);
7777 if (!tr_topts) {
7778 kfree(topts);
7779 return;
7780 }
7781
7782 tr->topts = tr_topts;
7783 tr->topts[tr->nr_topts].tracer = tracer;
7784 tr->topts[tr->nr_topts].topts = topts;
7785 tr->nr_topts++;
7786
7787 for (cnt = 0; opts[cnt].name; cnt++) {
7788 create_trace_option_file(tr, &topts[cnt], flags,
7789 &opts[cnt]);
7790 WARN_ONCE(topts[cnt].entry == NULL,
7791 "Failed to create trace option: %s",
7792 opts[cnt].name);
7793 }
7794 }
7795
7796 static struct dentry *
7797 create_trace_option_core_file(struct trace_array *tr,
7798 const char *option, long index)
7799 {
7800 struct dentry *t_options;
7801
7802 t_options = trace_options_init_dentry(tr);
7803 if (!t_options)
7804 return NULL;
7805
7806 return trace_create_file(option, 0644, t_options,
7807 (void *)&tr->trace_flags_index[index],
7808 &trace_options_core_fops);
7809 }
7810
7811 static void create_trace_options_dir(struct trace_array *tr)
7812 {
7813 struct dentry *t_options;
7814 bool top_level = tr == &global_trace;
7815 int i;
7816
7817 t_options = trace_options_init_dentry(tr);
7818 if (!t_options)
7819 return;
7820
7821 for (i = 0; trace_options[i]; i++) {
7822 if (top_level ||
7823 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7824 create_trace_option_core_file(tr, trace_options[i], i);
7825 }
7826 }
7827
7828 static ssize_t
7829 rb_simple_read(struct file *filp, char __user *ubuf,
7830 size_t cnt, loff_t *ppos)
7831 {
7832 struct trace_array *tr = filp->private_data;
7833 char buf[64];
7834 int r;
7835
7836 r = tracer_tracing_is_on(tr);
7837 r = sprintf(buf, "%d\n", r);
7838
7839 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7840 }
7841
7842 static ssize_t
7843 rb_simple_write(struct file *filp, const char __user *ubuf,
7844 size_t cnt, loff_t *ppos)
7845 {
7846 struct trace_array *tr = filp->private_data;
7847 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7848 unsigned long val;
7849 int ret;
7850
7851 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7852 if (ret)
7853 return ret;
7854
7855 if (buffer) {
7856 mutex_lock(&trace_types_lock);
7857 if (!!val == tracer_tracing_is_on(tr)) {
7858 val = 0; /* do nothing */
7859 } else if (val) {
7860 tracer_tracing_on(tr);
7861 if (tr->current_trace->start)
7862 tr->current_trace->start(tr);
7863 } else {
7864 tracer_tracing_off(tr);
7865 if (tr->current_trace->stop)
7866 tr->current_trace->stop(tr);
7867 }
7868 mutex_unlock(&trace_types_lock);
7869 }
7870
7871 (*ppos)++;
7872
7873 return cnt;
7874 }
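/*
 * Illustrative user-space sketch (not part of this file): toggling the
 * ring buffer through tracing_on, which also calls the current tracer's
 * start()/stop() hooks as seen above. The path is an assumption; error
 * handling is trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);
 *
 *	write(fd, "0", 1);
 *	close(fd);
 */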
7875
7876 static const struct file_operations rb_simple_fops = {
7877 .open = tracing_open_generic_tr,
7878 .read = rb_simple_read,
7879 .write = rb_simple_write,
7880 .release = tracing_release_generic_tr,
7881 .llseek = default_llseek,
7882 };
7883
7884 static ssize_t
7885 buffer_percent_read(struct file *filp, char __user *ubuf,
7886 size_t cnt, loff_t *ppos)
7887 {
7888 struct trace_array *tr = filp->private_data;
7889 char buf[64];
7890 int r;
7891
7892 r = tr->buffer_percent;
7893 r = sprintf(buf, "%d\n", r);
7894
7895 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7896 }
7897
7898 static ssize_t
7899 buffer_percent_write(struct file *filp, const char __user *ubuf,
7900 size_t cnt, loff_t *ppos)
7901 {
7902 struct trace_array *tr = filp->private_data;
7903 unsigned long val;
7904 int ret;
7905
7906 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7907 if (ret)
7908 return ret;
7909
7910 if (val > 100)
7911 return -EINVAL;
7912
7913 if (!val)
7914 val = 1;
7915
7916 tr->buffer_percent = val;
7917
7918 (*ppos)++;
7919
7920 return cnt;
7921 }
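/*
 * Illustrative user-space sketch (not part of this file): buffer_percent
 * sets how full the ring buffer should be before readers waiting in
 * wait_on_pipe() (see the splice path above) are woken; a write of "0"
 * is treated as 1 here. The path is an assumption; error handling is
 * trimmed.
 *
 *	int fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
 *
 *	write(fd, "50", 2);
 *	close(fd);
 */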
7922
7923 static const struct file_operations buffer_percent_fops = {
7924 .open = tracing_open_generic_tr,
7925 .read = buffer_percent_read,
7926 .write = buffer_percent_write,
7927 .release = tracing_release_generic_tr,
7928 .llseek = default_llseek,
7929 };
7930
7931 struct dentry *trace_instance_dir;
7932
7933 static void
7934 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7935
7936 static int
7937 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7938 {
7939 enum ring_buffer_flags rb_flags;
7940
7941 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7942
7943 buf->tr = tr;
7944
7945 buf->buffer = ring_buffer_alloc(size, rb_flags);
7946 if (!buf->buffer)
7947 return -ENOMEM;
7948
7949 buf->data = alloc_percpu(struct trace_array_cpu);
7950 if (!buf->data) {
7951 ring_buffer_free(buf->buffer);
7952 buf->buffer = NULL;
7953 return -ENOMEM;
7954 }
7955
7956 /* Allocate the first page for all buffers */
7957 set_buffer_entries(&tr->trace_buffer,
7958 ring_buffer_size(tr->trace_buffer.buffer, 0));
7959
7960 return 0;
7961 }
7962
7963 static int allocate_trace_buffers(struct trace_array *tr, int size)
7964 {
7965 int ret;
7966
7967 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7968 if (ret)
7969 return ret;
7970
7971 #ifdef CONFIG_TRACER_MAX_TRACE
7972 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7973 allocate_snapshot ? size : 1);
7974 if (WARN_ON(ret)) {
7975 ring_buffer_free(tr->trace_buffer.buffer);
7976 tr->trace_buffer.buffer = NULL;
7977 free_percpu(tr->trace_buffer.data);
7978 tr->trace_buffer.data = NULL;
7979 return -ENOMEM;
7980 }
7981 tr->allocated_snapshot = allocate_snapshot;
7982
7983 /*
7984 * Only the top level trace array gets its snapshot allocated
7985 * from the kernel command line.
7986 */
7987 allocate_snapshot = false;
7988 #endif
7989 return 0;
7990 }
7991
7992 static void free_trace_buffer(struct trace_buffer *buf)
7993 {
7994 if (buf->buffer) {
7995 ring_buffer_free(buf->buffer);
7996 buf->buffer = NULL;
7997 free_percpu(buf->data);
7998 buf->data = NULL;
7999 }
8000 }
8001
8002 static void free_trace_buffers(struct trace_array *tr)
8003 {
8004 if (!tr)
8005 return;
8006
8007 free_trace_buffer(&tr->trace_buffer);
8008
8009 #ifdef CONFIG_TRACER_MAX_TRACE
8010 free_trace_buffer(&tr->max_buffer);
8011 #endif
8012 }
8013
8014 static void init_trace_flags_index(struct trace_array *tr)
8015 {
8016 int i;
8017
8018 /* Used by the trace options files */
8019 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8020 tr->trace_flags_index[i] = i;
8021 }
8022
8023 static void __update_tracer_options(struct trace_array *tr)
8024 {
8025 struct tracer *t;
8026
8027 for (t = trace_types; t; t = t->next)
8028 add_tracer_options(tr, t);
8029 }
8030
8031 static void update_tracer_options(struct trace_array *tr)
8032 {
8033 mutex_lock(&trace_types_lock);
8034 __update_tracer_options(tr);
8035 mutex_unlock(&trace_types_lock);
8036 }
8037
8038 static int instance_mkdir(const char *name)
8039 {
8040 struct trace_array *tr;
8041 int ret;
8042
8043 mutex_lock(&event_mutex);
8044 mutex_lock(&trace_types_lock);
8045
8046 ret = -EEXIST;
8047 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8048 if (tr->name && strcmp(tr->name, name) == 0)
8049 goto out_unlock;
8050 }
8051
8052 ret = -ENOMEM;
8053 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8054 if (!tr)
8055 goto out_unlock;
8056
8057 tr->name = kstrdup(name, GFP_KERNEL);
8058 if (!tr->name)
8059 goto out_free_tr;
8060
8061 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8062 goto out_free_tr;
8063
8064 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8065
8066 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8067
8068 raw_spin_lock_init(&tr->start_lock);
8069
8070 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8071
8072 tr->current_trace = &nop_trace;
8073
8074 INIT_LIST_HEAD(&tr->systems);
8075 INIT_LIST_HEAD(&tr->events);
8076 INIT_LIST_HEAD(&tr->hist_vars);
8077
8078 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8079 goto out_free_tr;
8080
8081 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8082 if (!tr->dir)
8083 goto out_free_tr;
8084
8085 ret = event_trace_add_tracer(tr->dir, tr);
8086 if (ret) {
8087 tracefs_remove_recursive(tr->dir);
8088 goto out_free_tr;
8089 }
8090
8091 ftrace_init_trace_array(tr);
8092
8093 init_tracer_tracefs(tr, tr->dir);
8094 init_trace_flags_index(tr);
8095 __update_tracer_options(tr);
8096
8097 list_add(&tr->list, &ftrace_trace_arrays);
8098
8099 mutex_unlock(&trace_types_lock);
8100 mutex_unlock(&event_mutex);
8101
8102 return 0;
8103
8104 out_free_tr:
8105 free_trace_buffers(tr);
8106 free_cpumask_var(tr->tracing_cpumask);
8107 kfree(tr->name);
8108 kfree(tr);
8109
8110 out_unlock:
8111 mutex_unlock(&trace_types_lock);
8112 mutex_unlock(&event_mutex);
8113
8114 return ret;
8115
8116 }
8117
8118 static int instance_rmdir(const char *name)
8119 {
8120 struct trace_array *tr;
8121 int found = 0;
8122 int ret;
8123 int i;
8124
8125 mutex_lock(&event_mutex);
8126 mutex_lock(&trace_types_lock);
8127
8128 ret = -ENODEV;
8129 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8130 if (tr->name && strcmp(tr->name, name) == 0) {
8131 found = 1;
8132 break;
8133 }
8134 }
8135 if (!found)
8136 goto out_unlock;
8137
8138 ret = -EBUSY;
8139 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8140 goto out_unlock;
8141
8142 list_del(&tr->list);
8143
8144 /* Disable all the flags that were enabled coming in */
8145 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8146 if ((1 << i) & ZEROED_TRACE_FLAGS)
8147 set_tracer_flag(tr, 1 << i, 0);
8148 }
8149
8150 tracing_set_nop(tr);
8151 clear_ftrace_function_probes(tr);
8152 event_trace_del_tracer(tr);
8153 ftrace_clear_pids(tr);
8154 ftrace_destroy_function_files(tr);
8155 tracefs_remove_recursive(tr->dir);
8156 free_trace_buffers(tr);
8157
8158 for (i = 0; i < tr->nr_topts; i++) {
8159 kfree(tr->topts[i].topts);
8160 }
8161 kfree(tr->topts);
8162
8163 free_cpumask_var(tr->tracing_cpumask);
8164 kfree(tr->name);
8165 kfree(tr);
8166
8167 ret = 0;
8168
8169 out_unlock:
8170 mutex_unlock(&trace_types_lock);
8171 mutex_unlock(&event_mutex);
8172
8173 return ret;
8174 }
8175
8176 static __init void create_trace_instances(struct dentry *d_tracer)
8177 {
8178 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8179 instance_mkdir,
8180 instance_rmdir);
8181 if (WARN_ON(!trace_instance_dir))
8182 return;
8183 }
8184
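/*
 * Create the control files of a trace array under @d_tracer: the
 * current/available tracer files, buffer size knobs, trace_marker,
 * tracing_on, the per-CPU directories, and so on.
 */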
8185 static void
8186 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8187 {
8188 struct trace_event_file *file;
8189 int cpu;
8190
8191 trace_create_file("available_tracers", 0444, d_tracer,
8192 tr, &show_traces_fops);
8193
8194 trace_create_file("current_tracer", 0644, d_tracer,
8195 tr, &set_tracer_fops);
8196
8197 trace_create_file("tracing_cpumask", 0644, d_tracer,
8198 tr, &tracing_cpumask_fops);
8199
8200 trace_create_file("trace_options", 0644, d_tracer,
8201 tr, &tracing_iter_fops);
8202
8203 trace_create_file("trace", 0644, d_tracer,
8204 tr, &tracing_fops);
8205
8206 trace_create_file("trace_pipe", 0444, d_tracer,
8207 tr, &tracing_pipe_fops);
8208
8209 trace_create_file("buffer_size_kb", 0644, d_tracer,
8210 tr, &tracing_entries_fops);
8211
8212 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8213 tr, &tracing_total_entries_fops);
8214
8215 trace_create_file("free_buffer", 0200, d_tracer,
8216 tr, &tracing_free_buffer_fops);
8217
8218 trace_create_file("trace_marker", 0220, d_tracer,
8219 tr, &tracing_mark_fops);
8220
8221 file = __find_event_file(tr, "ftrace", "print");
8222 if (file && file->dir)
8223 trace_create_file("trigger", 0644, file->dir, file,
8224 &event_trigger_fops);
8225 tr->trace_marker_file = file;
8226
8227 trace_create_file("trace_marker_raw", 0220, d_tracer,
8228 tr, &tracing_mark_raw_fops);
8229
8230 trace_create_file("trace_clock", 0644, d_tracer, tr,
8231 &trace_clock_fops);
8232
8233 trace_create_file("tracing_on", 0644, d_tracer,
8234 tr, &rb_simple_fops);
8235
8236 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8237 &trace_time_stamp_mode_fops);
8238
8239 tr->buffer_percent = 50;
8240
8241 trace_create_file("buffer_percent", 0444, d_tracer,
8242 tr, &buffer_percent_fops);
8243
8244 create_trace_options_dir(tr);
8245
8246 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8247 trace_create_file("tracing_max_latency", 0644, d_tracer,
8248 &tr->max_latency, &tracing_max_lat_fops);
8249 #endif
8250
8251 if (ftrace_create_function_files(tr, d_tracer))
8252 WARN(1, "Could not allocate function filter files");
8253
8254 #ifdef CONFIG_TRACER_SNAPSHOT
8255 trace_create_file("snapshot", 0644, d_tracer,
8256 tr, &snapshot_fops);
8257 #endif
8258
8259 for_each_tracing_cpu(cpu)
8260 tracing_init_tracefs_percpu(tr, cpu);
8261
8262 ftrace_init_tracefs(tr, d_tracer);
8263 }
8264
8265 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8266 {
8267 struct vfsmount *mnt;
8268 struct file_system_type *type;
8269
8270 /*
8271 * To maintain backward compatibility for tools that mount
8272 * debugfs to get to the tracing facility, tracefs is automatically
8273 * mounted to the debugfs/tracing directory.
8274 */
8275 type = get_fs_type("tracefs");
8276 if (!type)
8277 return NULL;
8278 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8279 put_filesystem(type);
8280 if (IS_ERR(mnt))
8281 return NULL;
8282 mntget(mnt);
8283
8284 return mnt;
8285 }
8286
8287 /**
8288 * tracing_init_dentry - initialize top level trace array
8289 *
8290 * This is called when creating files or directories in the tracing
8291 * directory. It is called via fs_initcall() by any of the boot up code
8292 * and expects to return the dentry of the top level tracing directory.
8293 */
8294 struct dentry *tracing_init_dentry(void)
8295 {
8296 struct trace_array *tr = &global_trace;
8297
8298 /* The top level trace array uses NULL as parent */
8299 if (tr->dir)
8300 return NULL;
8301
8302 if (WARN_ON(!tracefs_initialized()) ||
8303 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8304 WARN_ON(!debugfs_initialized())))
8305 return ERR_PTR(-ENODEV);
8306
8307 /*
8308 * As there may still be users that expect the tracing
8309 * files to exist in debugfs/tracing, we must automount
8310 * the tracefs file system there, so older tools still
8311 * work with the newer kernel.
8312 */
8313 tr->dir = debugfs_create_automount("tracing", NULL,
8314 trace_automount, NULL);
8315 if (!tr->dir) {
8316 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8317 return ERR_PTR(-ENOMEM);
8318 }
8319
8320 return NULL;
8321 }
8322
8323 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8324 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8325
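/* Register the eval (enum/sizeof) maps built into the core kernel image. */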
8326 static void __init trace_eval_init(void)
8327 {
8328 int len;
8329
8330 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8331 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8332 }
8333
8334 #ifdef CONFIG_MODULES
8335 static void trace_module_add_evals(struct module *mod)
8336 {
8337 if (!mod->num_trace_evals)
8338 return;
8339
8340 /*
8341 * Modules with bad taint do not have events created; do
8342 * not bother with their enums either.
8343 */
8344 if (trace_module_has_bad_taint(mod))
8345 return;
8346
8347 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8348 }
8349
8350 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8351 static void trace_module_remove_evals(struct module *mod)
8352 {
8353 union trace_eval_map_item *map;
8354 union trace_eval_map_item **last = &trace_eval_maps;
8355
8356 if (!mod->num_trace_evals)
8357 return;
8358
8359 mutex_lock(&trace_eval_mutex);
8360
8361 map = trace_eval_maps;
8362
8363 while (map) {
8364 if (map->head.mod == mod)
8365 break;
8366 map = trace_eval_jmp_to_tail(map);
8367 last = &map->tail.next;
8368 map = map->tail.next;
8369 }
8370 if (!map)
8371 goto out;
8372
8373 *last = trace_eval_jmp_to_tail(map)->tail.next;
8374 kfree(map);
8375 out:
8376 mutex_unlock(&trace_eval_mutex);
8377 }
8378 #else
8379 static inline void trace_module_remove_evals(struct module *mod) { }
8380 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8381
8382 static int trace_module_notify(struct notifier_block *self,
8383 unsigned long val, void *data)
8384 {
8385 struct module *mod = data;
8386
8387 switch (val) {
8388 case MODULE_STATE_COMING:
8389 trace_module_add_evals(mod);
8390 break;
8391 case MODULE_STATE_GOING:
8392 trace_module_remove_evals(mod);
8393 break;
8394 }
8395
8396 return 0;
8397 }
8398
8399 static struct notifier_block trace_module_nb = {
8400 .notifier_call = trace_module_notify,
8401 .priority = 0,
8402 };
8403 #endif /* CONFIG_MODULES */
8404
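/*
 * fs_initcall that populates the top level tracing directory: the files
 * of the global trace array plus README, saved_cmdlines, the instances
 * directory, and friends.
 */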
8405 static __init int tracer_init_tracefs(void)
8406 {
8407 struct dentry *d_tracer;
8408
8409 trace_access_lock_init();
8410
8411 d_tracer = tracing_init_dentry();
8412 if (IS_ERR(d_tracer))
8413 return 0;
8414
8415 event_trace_init();
8416
8417 init_tracer_tracefs(&global_trace, d_tracer);
8418 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8419
8420 trace_create_file("tracing_thresh", 0644, d_tracer,
8421 &global_trace, &tracing_thresh_fops);
8422
8423 trace_create_file("README", 0444, d_tracer,
8424 NULL, &tracing_readme_fops);
8425
8426 trace_create_file("saved_cmdlines", 0444, d_tracer,
8427 NULL, &tracing_saved_cmdlines_fops);
8428
8429 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8430 NULL, &tracing_saved_cmdlines_size_fops);
8431
8432 trace_create_file("saved_tgids", 0444, d_tracer,
8433 NULL, &tracing_saved_tgids_fops);
8434
8435 trace_eval_init();
8436
8437 trace_create_eval_file(d_tracer);
8438
8439 #ifdef CONFIG_MODULES
8440 register_module_notifier(&trace_module_nb);
8441 #endif
8442
8443 #ifdef CONFIG_DYNAMIC_FTRACE
8444 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8445 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8446 #endif
8447
8448 create_trace_instances(d_tracer);
8449
8450 update_tracer_options(&global_trace);
8451
8452 return 0;
8453 }
8454
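/* On panic, dump the ftrace ring buffer if ftrace_dump_on_oops is set. */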
8455 static int trace_panic_handler(struct notifier_block *this,
8456 unsigned long event, void *unused)
8457 {
8458 if (ftrace_dump_on_oops)
8459 ftrace_dump(ftrace_dump_on_oops);
8460 return NOTIFY_OK;
8461 }
8462
8463 static struct notifier_block trace_panic_notifier = {
8464 .notifier_call = trace_panic_handler,
8465 .next = NULL,
8466 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8467 };
8468
8469 static int trace_die_handler(struct notifier_block *self,
8470 unsigned long val,
8471 void *data)
8472 {
8473 switch (val) {
8474 case DIE_OOPS:
8475 if (ftrace_dump_on_oops)
8476 ftrace_dump(ftrace_dump_on_oops);
8477 break;
8478 default:
8479 break;
8480 }
8481 return NOTIFY_OK;
8482 }
8483
8484 static struct notifier_block trace_die_notifier = {
8485 .notifier_call = trace_die_handler,
8486 .priority = 200
8487 };
8488
8489 /*
8490 * printk is limited to a max of 1024 characters; we don't need it that big.
8491 * Nothing should be printing 1000 characters anyway.
8492 */
8493 #define TRACE_MAX_PRINT 1000
8494
8495 /*
8496 * Define here KERN_TRACE so that we have one place to modify
8497 * it if we decide to change what log level the ftrace dump
8498 * should be at.
8499 */
8500 #define KERN_TRACE KERN_EMERG
8501
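/*
 * Print the contents of a trace_seq to the console at KERN_TRACE level
 * and reinitialize the sequence for reuse.
 */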
8502 void
8503 trace_printk_seq(struct trace_seq *s)
8504 {
8505 /* Probably should print a warning here. */
8506 if (s->seq.len >= TRACE_MAX_PRINT)
8507 s->seq.len = TRACE_MAX_PRINT;
8508
8509 /*
8510 * More paranoid code. Although the buffer size is set to
8511 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8512 * an extra layer of protection.
8513 */
8514 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8515 s->seq.len = s->seq.size - 1;
8516
8517 /* Should already be NUL terminated, but we are paranoid. */
8518 s->buffer[s->seq.len] = 0;
8519
8520 printk(KERN_TRACE "%s", s->buffer);
8521
8522 trace_seq_init(s);
8523 }
8524
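/*
 * Set up an iterator over the global trace buffer for dumping it outside
 * of the tracefs interfaces (e.g. by ftrace_dump()).
 */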
8525 void trace_init_global_iter(struct trace_iterator *iter)
8526 {
8527 iter->tr = &global_trace;
8528 iter->trace = iter->tr->current_trace;
8529 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8530 iter->trace_buffer = &global_trace.trace_buffer;
8531
8532 if (iter->trace && iter->trace->open)
8533 iter->trace->open(iter);
8534
8535 /* Annotate start of buffers if we had overruns */
8536 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8537 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8538
8539 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8540 if (trace_clocks[iter->tr->clock_id].in_ns)
8541 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8542 }
8543
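/**
 * ftrace_dump - dump the ring buffer contents to the console
 * @oops_dump_mode: DUMP_ALL dumps the buffers of every CPU, DUMP_ORIG
 *	dumps only the buffer of the CPU that triggered the dump.
 *
 * Turns tracing off and prints whatever is left in the ring buffer with
 * printk(). Only one dump may run at a time; concurrent callers simply
 * return.
 */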
8544 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8545 {
8546 /* use static because iter can be a bit big for the stack */
8547 static struct trace_iterator iter;
8548 static atomic_t dump_running;
8549 struct trace_array *tr = &global_trace;
8550 unsigned int old_userobj;
8551 unsigned long flags;
8552 int cnt = 0, cpu;
8553
8554 /* Only allow one dump user at a time. */
8555 if (atomic_inc_return(&dump_running) != 1) {
8556 atomic_dec(&dump_running);
8557 return;
8558 }
8559
8560 /*
8561 * Always turn off tracing when we dump.
8562 * We don't need to show trace output of what happens
8563 * between multiple crashes.
8564 *
8565 * If the user does a sysrq-z, then they can re-enable
8566 * tracing with echo 1 > tracing_on.
8567 */
8568 tracing_off();
8569
8570 local_irq_save(flags);
8571 printk_nmi_direct_enter();
8572
8573 /* Simulate the iterator */
8574 trace_init_global_iter(&iter);
8575
8576 for_each_tracing_cpu(cpu) {
8577 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8578 }
8579
8580 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8581
8582 /* don't look at user memory in panic mode */
8583 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8584
8585 switch (oops_dump_mode) {
8586 case DUMP_ALL:
8587 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8588 break;
8589 case DUMP_ORIG:
8590 iter.cpu_file = raw_smp_processor_id();
8591 break;
8592 case DUMP_NONE:
8593 goto out_enable;
8594 default:
8595 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8596 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8597 }
8598
8599 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8600
8601 /* Did function tracer already get disabled? */
8602 if (ftrace_is_dead()) {
8603 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8604 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8605 }
8606
8607 /*
8608 * We need to stop all tracing on all CPUs to read the
8609 * next buffer. This is a bit expensive, but is
8610 * not done often. We read everything we can,
8611 * and then release the locks again.
8612 */
8613
8614 while (!trace_empty(&iter)) {
8615
8616 if (!cnt)
8617 printk(KERN_TRACE "---------------------------------\n");
8618
8619 cnt++;
8620
8621 /* reset all but tr, trace, and overruns */
8622 memset(&iter.seq, 0,
8623 sizeof(struct trace_iterator) -
8624 offsetof(struct trace_iterator, seq));
8625 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8626 iter.pos = -1;
8627
8628 if (trace_find_next_entry_inc(&iter) != NULL) {
8629 int ret;
8630
8631 ret = print_trace_line(&iter);
8632 if (ret != TRACE_TYPE_NO_CONSUME)
8633 trace_consume(&iter);
8634 }
8635 touch_nmi_watchdog();
8636
8637 trace_printk_seq(&iter.seq);
8638 }
8639
8640 if (!cnt)
8641 printk(KERN_TRACE " (ftrace buffer empty)\n");
8642 else
8643 printk(KERN_TRACE "---------------------------------\n");
8644
8645 out_enable:
8646 tr->trace_flags |= old_userobj;
8647
8648 for_each_tracing_cpu(cpu) {
8649 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8650 }
8651 atomic_dec(&dump_running);
8652 printk_nmi_direct_exit();
8653 local_irq_restore(flags);
8654 }
8655 EXPORT_SYMBOL_GPL(ftrace_dump);
8656
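/*
 * Split @buf into whitespace separated words and hand them to @createfn
 * as an argc/argv pair. An empty line is silently accepted.
 */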
8657 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8658 {
8659 char **argv;
8660 int argc, ret;
8661
8662 argc = 0;
8663 ret = 0;
8664 argv = argv_split(GFP_KERNEL, buf, &argc);
8665 if (!argv)
8666 return -ENOMEM;
8667
8668 if (argc)
8669 ret = createfn(argc, argv);
8670
8671 argv_free(argv);
8672
8673 return ret;
8674 }
8675
8676 #define WRITE_BUFSIZE 4096
8677
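/*
 * Copy a command string from user space in WRITE_BUFSIZE chunks, split it
 * on newlines, strip '#' comments, and run each line through @createfn
 * via trace_run_command(). Used by the dynamic event interfaces such as
 * kprobe_events.
 */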
8678 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8679 size_t count, loff_t *ppos,
8680 int (*createfn)(int, char **))
8681 {
8682 char *kbuf, *buf, *tmp;
8683 int ret = 0;
8684 size_t done = 0;
8685 size_t size;
8686
8687 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8688 if (!kbuf)
8689 return -ENOMEM;
8690
8691 while (done < count) {
8692 size = count - done;
8693
8694 if (size >= WRITE_BUFSIZE)
8695 size = WRITE_BUFSIZE - 1;
8696
8697 if (copy_from_user(kbuf, buffer + done, size)) {
8698 ret = -EFAULT;
8699 goto out;
8700 }
8701 kbuf[size] = '\0';
8702 buf = kbuf;
8703 do {
8704 tmp = strchr(buf, '\n');
8705 if (tmp) {
8706 *tmp = '\0';
8707 size = tmp - buf + 1;
8708 } else {
8709 size = strlen(buf);
8710 if (done + size < count) {
8711 if (buf != kbuf)
8712 break;
8713 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8714 pr_warn("Line length is too long: Should be less than %d\n",
8715 WRITE_BUFSIZE - 2);
8716 ret = -EINVAL;
8717 goto out;
8718 }
8719 }
8720 done += size;
8721
8722 /* Remove comments */
8723 tmp = strchr(buf, '#');
8724
8725 if (tmp)
8726 *tmp = '\0';
8727
8728 ret = trace_run_command(buf, createfn);
8729 if (ret)
8730 goto out;
8731 buf += size;
8732
8733 } while (done < count);
8734 }
8735 ret = done;
8736
8737 out:
8738 kfree(kbuf);
8739
8740 return ret;
8741 }
8742
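/*
 * Allocate the buffers and state of the global trace array and register
 * the panic/die notifiers. Called from early_trace_init() during early
 * boot, before tracefs is available.
 */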
8743 __init static int tracer_alloc_buffers(void)
8744 {
8745 int ring_buf_size;
8746 int ret = -ENOMEM;
8747
8748 /*
8749 * Make sure we don't accidentally add more trace options
8750 * than we have bits for.
8751 */
8752 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8753
8754 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8755 goto out;
8756
8757 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8758 goto out_free_buffer_mask;
8759
8760 /* Only allocate trace_printk buffers if a trace_printk exists */
8761 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8762 /* Must be called before global_trace.buffer is allocated */
8763 trace_printk_init_buffers();
8764
8765 /* To save memory, keep the ring buffer size to its minimum */
8766 if (ring_buffer_expanded)
8767 ring_buf_size = trace_buf_size;
8768 else
8769 ring_buf_size = 1;
8770
8771 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8772 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8773
8774 raw_spin_lock_init(&global_trace.start_lock);
8775
8776 /*
8777 * The prepare callbacks allocate some memory for the ring buffer. We
8778 * don't free the buffer if the CPU goes down. If we were to free
8779 * the buffer, then the user would lose any trace that was in the
8780 * buffer. The memory will be removed once the "instance" is removed.
8781 */
8782 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8783 "trace/RB:prepare", trace_rb_cpu_prepare,
8784 NULL);
8785 if (ret < 0)
8786 goto out_free_cpumask;
8787 /* Used for event triggers */
8788 ret = -ENOMEM;
8789 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8790 if (!temp_buffer)
8791 goto out_rm_hp_state;
8792
8793 if (trace_create_savedcmd() < 0)
8794 goto out_free_temp_buffer;
8795
8796 /* TODO: make the number of buffers hot pluggable with CPUs */
8797 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8798 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8799 WARN_ON(1);
8800 goto out_free_savedcmd;
8801 }
8802
8803 if (global_trace.buffer_disabled)
8804 tracing_off();
8805
8806 if (trace_boot_clock) {
8807 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8808 if (ret < 0)
8809 pr_warn("Trace clock %s not defined, going back to default\n",
8810 trace_boot_clock);
8811 }
8812
8813 /*
8814 * register_tracer() might reference current_trace, so it
8815 * needs to be set before we register anything. This is
8816 * just a bootstrap of current_trace anyway.
8817 */
8818 global_trace.current_trace = &nop_trace;
8819
8820 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8821
8822 ftrace_init_global_array_ops(&global_trace);
8823
8824 init_trace_flags_index(&global_trace);
8825
8826 register_tracer(&nop_trace);
8827
8828 /* Function tracing may start here (via kernel command line) */
8829 init_function_trace();
8830
8831 /* All seems OK, enable tracing */
8832 tracing_disabled = 0;
8833
8834 atomic_notifier_chain_register(&panic_notifier_list,
8835 &trace_panic_notifier);
8836
8837 register_die_notifier(&trace_die_notifier);
8838
8839 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8840
8841 INIT_LIST_HEAD(&global_trace.systems);
8842 INIT_LIST_HEAD(&global_trace.events);
8843 INIT_LIST_HEAD(&global_trace.hist_vars);
8844 list_add(&global_trace.list, &ftrace_trace_arrays);
8845
8846 apply_trace_boot_options();
8847
8848 register_snapshot_cmd();
8849
8850 return 0;
8851
8852 out_free_savedcmd:
8853 free_saved_cmdlines_buffer(savedcmd);
8854 out_free_temp_buffer:
8855 ring_buffer_free(temp_buffer);
8856 out_rm_hp_state:
8857 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8858 out_free_cpumask:
8859 free_cpumask_var(global_trace.tracing_cpumask);
8860 out_free_buffer_mask:
8861 free_cpumask_var(tracing_buffer_mask);
8862 out:
8863 return ret;
8864 }
8865
8866 void __init early_trace_init(void)
8867 {
8868 if (tracepoint_printk) {
8869 tracepoint_print_iter =
8870 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8871 if (WARN_ON(!tracepoint_print_iter))
8872 tracepoint_printk = 0;
8873 else
8874 static_key_enable(&tracepoint_printk_key.key);
8875 }
8876 tracer_alloc_buffers();
8877 }
8878
8879 void __init trace_init(void)
8880 {
8881 trace_event_init();
8882 }
8883
8884 __init static int clear_boot_tracer(void)
8885 {
8886 /*
8887 * The default bootup tracer name lives in an init section that is
8888 * freed after boot. This function is called at late_initcall_sync
8889 * time; if the boot tracer was never registered by then, clear the
8890 * pointer to prevent a later registration from accessing the buffer
8891 * that is about to be freed.
8892 */
8893 if (!default_bootup_tracer)
8894 return 0;
8895
8896 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8897 default_bootup_tracer);
8898 default_bootup_tracer = NULL;
8899
8900 return 0;
8901 }
8902
8903 fs_initcall(tracer_init_tracefs);
8904 late_initcall_sync(clear_boot_tracer);
8905
8906 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8907 __init static int tracing_set_default_clock(void)
8908 {
8909 /* sched_clock_stable() is determined in late_initcall */
8910 if (!trace_boot_clock && !sched_clock_stable()) {
8911 printk(KERN_WARNING
8912 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8913 "If you want to keep using the local clock, then add:\n"
8914 " \"trace_clock=local\"\n"
8915 "on the kernel command line\n");
8916 tracing_set_clock(&global_trace, "global");
8917 }
8918
8919 return 0;
8920 }
8921 late_initcall_sync(tracing_set_default_clock);
8922 #endif