kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
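
/*
 * A minimal usage sketch (assuming tracefs is mounted at the usual
 * /sys/kernel/debug/tracing):
 *
 *   # echo 0 > tracing_max_latency
 *   # echo irqsoff > current_tracer
 *   # cat trace
 *
 * "trace" then holds a snapshot of the longest irqs-off critical
 * section seen since the reset; the "preemptoff" and "preemptirqsoff"
 * tracers registered below work the same way for preempt-off sections.
 */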
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

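/* Serializes the check-and-update of the saved max latency snapshot */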
static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, in which case data->disabled has
 * been incremented; returns 0 if the trace is to be ignored, in which
 * case data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
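/*
 * Toggle function-graph output: tear the tracer down, clear the per-cpu
 * state and the saved max, then restart with or without the graph
 * callbacks registered.
 */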
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

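/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled and, if this beats the current max (or
 * crosses tracing_thresh), record the section and snapshot the trace
 * under max_trace_lock.
 */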
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

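/*
 * Mark the start of an irqs-off/preempt-off critical section on this
 * CPU: timestamp it and flag tracing_cpu so that nested disables are
 * ignored until the matching stop.
 */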
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

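/*
 * Mark the end of the critical section: clear tracing_cpu and hand the
 * measured interval to check_critical_timing().
 */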
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/*
 * Start and stop critical timings; used to exclude sections (such as
 * the idle loop) from the latency measurement.
 */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
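/*
 * With CONFIG_PROVE_LOCKING, lockdep provides the trace_hardirqs_on/off()
 * entry points and forwards the caller addresses to us through
 * time_hardirqs_on/off(); without it, this file defines the
 * trace_hardirqs_*() hooks itself below.
 */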
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

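/*
 * Hook up the function (or function-graph) tracer callbacks, but only
 * if the "function" trace option is set or about to be set.
 */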
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

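/* React to trace option flips while this tracer is active */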
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

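/*
 * Common setup for the irqsoff/preemptoff/preemptirqsoff variants:
 * force overwrite + latency format, reset the max, and start tracing.
 * Only one variant can be active at a time (irqsoff_busy).
 */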
static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name            = "irqsoff",
        .init            = irqsoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_irqsoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name            = "preemptoff",
        .init            = preemptoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_preemptoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name            = "preemptirqsoff",
        .init            = preemptirqsoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_preemptirqsoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

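/*
 * Register whichever variants are configured; each register_* macro
 * compiles to a no-op when its config option is off.
 */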
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);