/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

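/*
 * wakeup_lock serializes updates to wakeup_task, wakeup_cpu and
 * wakeup_prio above; both probes take it with interrupts disabled.
 */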
static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

static int save_lat_flag;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * The wakeup tracer uses its own function-trace callback to keep the
 * overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

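/*
 * sched_switch probe: once probe_wakeup() below has armed a measurement,
 * the context switch that finally schedules in wakeup_task ends it.  The
 * time since the wakeup timestamp is computed and, if report_latency()
 * accepts it, the trace snapshot is saved via update_max_tr().
 */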
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

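/*
 * Drop the current measurement: clear the per-cpu buffers and release
 * the reference held on wakeup_task.  Callers hold wakeup_lock.
 */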
static void __wakeup_reset(struct trace_array *tr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		tracing_reset(tr, cpu);

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

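/*
 * sched_wakeup/sched_wakeup_new probe: arm a new measurement.  The woken
 * task must outrank both the currently tracked task (wakeup_prio) and the
 * waker; the wakeup_rt variant additionally requires an RT task.  The
 * wakeup timestamp is stored in the data of the CPU the task woke on.
 */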
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

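/*
 * Hook the sched_wakeup, sched_wakeup_new and sched_switch tracepoints
 * and register the function tracer; unwind the probes on failure.
 */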
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}

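/*
 * Common init for "wakeup" and "wakeup_rt": force latency-format output
 * (remembering whether the user already had it enabled so reset can
 * restore it), clear the recorded maximum and start tracing.
 */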
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

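/*
 * Rough usage sketch (assuming CONFIG_SCHED_TRACER=y and debugfs mounted
 * on /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	<run a real-time workload>
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 *
 * "wakeup" tracks the highest-priority task of any class, "wakeup_rt"
 * only real-time tasks.
 */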
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);