/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

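/* Set non-zero on a CPU while an irqs/preempt-off section is being timed there. */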
static DEFINE_PER_CPU(int, tracing_cpu);

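/* Serializes checks and updates of the maximum latency in check_critical_timing(). */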
static DEFINE_SPINLOCK(max_trace_lock);

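/* Bits for trace_type below: which "off" events this tracer instance reacts to. */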
enum {
        TRACER_IRQS_OFF = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

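/*
 * A critical section has just ended: compute its length and, if it is a
 * new maximum (checked again under max_trace_lock), record the snapshot
 * with update_max_tr_single().
 */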
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

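/*
 * Called when irqs and/or preemption get disabled: record the timestamp and
 * the address that opened the critical section on this CPU.
 */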
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(tr, cpu);

        local_save_flags(flags);

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

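/*
 * Called when irqs/preemption are re-enabled: close the critical section
 * opened by start_critical_timing() and check it for a new max latency.
 */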
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude sections such as idle from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

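/* Register the function-trace hook and sync tracer_enabled with the global tracing state. */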
static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        if (tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr);
}

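/* .start/.stop callbacks: toggle the tracer when tracing is switched on or off. */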
static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name = "irqsoff",
        .init = irqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name = "preemptoff",
        .init = preemptoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

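/* The combined tracer: react to both irqs-off and preempt-off events. */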
#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name = "preemptirqsoff",
        .init = preemptirqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);