/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
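
/*
 * Usage, for orientation — a sketch only: paths assume debugfs is
 * mounted at /sys/kernel/debug, and the exact file names vary across
 * kernel versions:
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	... run the workload ...
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/latency_trace
 */
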
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

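/*
 * Nonzero while this CPU is timing a critical section; it gates the
 * function tracer callback and keeps a section from being re-armed
 * while one is already open.
 */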
static DEFINE_PER_CPU(int, tracing_cpu);

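/*
 * Serializes the "is this still the maximum?" check against the update
 * of the saved max trace, so two CPUs finishing a critical section at
 * the same time cannot corrupt the snapshot.
 */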
static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

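/*
 * Which off-events the active tracer times: one or both of the
 * TRACER_*_OFF bits above, set by the selected variant's init function.
 */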
static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

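	/*
	 * data->disabled acts as a per-CPU recursion guard: only the
	 * outermost caller (disabled == 1) records the event.
	 */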
	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 *
 * If tracing_thresh is set, any latency at or above the threshold is
 * reported; otherwise only a latency that beats the current maximum is.
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

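/*
 * Called at the end of a critical section: compute how long irqs (or
 * preemption) were disabled and, if report_latency() accepts the delta,
 * save the snapshot as the new maximum trace.
 */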
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(tr, cpu);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
}

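/*
 * Mark the start of a critical section on this CPU: stamp
 * preempt_timestamp, reset the per-CPU buffer, and set tracing_cpu so
 * the function tracer callback records into it.
 */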
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(tr, cpu);

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

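/*
 * Mark the end of the critical section: clear tracing_cpu, record the
 * final event, and let check_critical_timing() decide whether this was
 * a new maximum.
 */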
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings; used to exclude sections (such as idle) from the measurement */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
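/*
 * With CONFIG_PROVE_LOCKING, lockdep provides the trace_hardirqs_*
 * entry points itself and calls back into these timing helpers with
 * the caller addresses:
 */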
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it while opening a trace output file.
 */
static int save_tracer_enabled;

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled()) {
		tracer_enabled = 1;
		save_tracer_enabled = 1;
	} else {
		tracer_enabled = 0;
		save_tracer_enabled = 0;
	}
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

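/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff variants:
 * reset the saved max, publish the trace_array and start tracing.
 */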
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
	save_tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
}

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	tracer_enabled = 0;
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
	/* restart tracing */
	tracer_enabled = save_tracer_enabled;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);