// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>
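
/*
 * Overview: the tracer_hardirqs_on/off() and tracer_preempt_on/off()
 * functions defined below are the entry points; they are called from the
 * irq/preempt enable/disable paths (via the preemptirq tracepoint glue in
 * kernel/trace/trace_preemptirq.c) with the caller's ip/parent_ip. This
 * file only implements the critical-section timing logic.
 */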

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);

	trace_function(tr, ip, parent_ip, trace_ctx);

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
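
/*
 * Note that toggling the display-graph flag restarts the tracer: the
 * per-cpu tracing state and the ring buffer are cleared, so any
 * in-progress maximum measurement is intentionally thrown away.
 */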

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	int ret;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);
	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
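
/*
 * Example: "echo 100 > tracing_thresh" (microseconds) records every
 * critical section longer than 100us, independent of the current
 * maximum; with tracing_thresh at 0, only a new maximum
 * (delta > tr->max_latency) is recorded.
 */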

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}
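
/*
 * Whether or not the latency was reported, the out: path above re-arms
 * the measurement: critical_sequence and preempt_timestamp are refreshed
 * so the next section on this CPU starts from a clean window.
 */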

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	trace_ctx = tracing_gen_ctx();
	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to pause the trace (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
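
/*
 * For example, the idle path brackets the low-power state with
 * stop_critical_timings()/start_critical_timings() so that time spent
 * idling with interrupts disabled is not reported as a latency.
 */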

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}

#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
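
/*
 * Note the inversion above: disabling irqs *starts* the measured
 * critical section, and re-enabling them *stops* it (and reports the
 * latency if it is a new maximum).
 */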

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
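
/*
 * Quick usage sketch (assumes tracefs mounted at /sys/kernel/tracing):
 *
 *	echo irqsoff        > current_tracer  # or preemptoff/preemptirqsoff
 *	echo 1              > tracing_on
 *	... run the workload ...
 *	cat tracing_max_latency               # worst case seen, in usecs
 *	cat trace                             # snapshot of the worst section
 */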