/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

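/*
 * Per-cpu flag: set while the current CPU is inside a critical
 * section being timed, so nested hooks and the function-trace
 * callback can cheaply tell whether this CPU is the one tracing.
 */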
static DEFINE_PER_CPU(int, tracing_cpu);

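/* Serializes checking and recording a new maximum latency: */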
static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

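/*
 * preempt_trace() and irq_trace() report whether the current event
 * is one this tracer variant cares about: preemption disabled for
 * the preemptoff tracers, hard irqs disabled for the irqsoff ones.
 */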
#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a new maximum and could disturb our measurement with
 * serial console printouts, etc. Truly coinciding maximum latencies
 * should be rare and what happens together happens separately as
 * well, so this doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

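/* The callback registered with ftrace while the tracer is running: */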
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 * With a threshold set, anything slower than the threshold counts;
 * otherwise only a latency that beats the current maximum does.
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

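/*
 * Called when a timed critical section ends: compute the delta from
 * preempt_timestamp and, if it is a reportable latency, record it as
 * the new maximum under max_trace_lock (re-checking report_latency()
 * after taking the lock, since another CPU may have won the race).
 */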
static void notrace
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	if (tracing_thresh) {
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " %lu us critical section violates %lu us threshold.\n",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency, nsecs_to_usecs(tracing_thresh));
	} else {
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum-latency critical section.\n",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(data);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}

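/*
 * Mark the start of a critical section on this CPU: stamp
 * preempt_timestamp, reset the per-cpu buffer, and set tracing_cpu
 * so the function-trace callback starts recording on this CPU.
 */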
static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	if (__get_cpu_var(tracing_cpu))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!head_page(data)) ||
	    atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(data);

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags);

	__get_cpu_var(tracing_cpu) = 1;

	atomic_dec(&data->disabled);
}

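/*
 * Mark the end of a critical section on this CPU: clear tracing_cpu,
 * record the final function entry, and hand off to
 * check_critical_timing() to see whether this was a new maximum.
 */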
static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(__get_cpu_var(tracing_cpu)))
		__get_cpu_var(tracing_cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!head_page(data)) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);
	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude stoppage (in idle) */
void notrace start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

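/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_on/off()
 * entry points and calls back into us through time_hardirqs_on/off();
 * without it, this file provides trace_hardirqs_*() itself below.
 */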
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline notrace void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

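/*
 * Hooks called from the preempt-count accounting code when preemption
 * is re-enabled and disabled, respectively:
 */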
#ifdef CONFIG_PREEMPT_TRACER
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
	stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
	start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

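/*
 * The three tracer flavors below differ only in which trace_type bits
 * they set before sharing __irqsoff_tracer_init():
 *
 *   irqsoff        - time with hard irqs disabled
 *   preemptoff     - time with preemption disabled
 *   preemptirqsoff - time with either disabled
 */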
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
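
/*
 * Typical usage from userspace is via the tracing debugfs files, e.g.
 * (a sketch; the path assumes debugfs is mounted at /debug):
 *
 *   echo irqsoff > /debug/tracing/current_tracer
 *   echo 0 > /debug/tracing/tracing_max_latency
 *   cat /debug/tracing/latency_trace
 */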