/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
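/*
 * Usage sketch -- the exact debugfs paths are an assumption and depend
 * on the kernel version and where debugfs is mounted:
 *
 *	echo wakeup > /debug/tracing/current_tracer
 *	echo 1 > /debug/tracing/tracing_enabled
 *	cat /debug/tracing/latency_trace
 */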
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

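/*
 * wakeup_lock serializes updates to wakeup_task, wakeup_cpu and
 * wakeup_prio between the wakeup and sched-switch hooks.
 */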
static DEFINE_SPINLOCK(wakeup_lock);

static void notrace __wakeup_reset(struct trace_array *tr);

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

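/*
 * Called on every context switch; if the task being switched in is
 * the one we are waiting for, compute the wakeup-to-schedule latency
 * and record it if it beats the current maximum (or the threshold).
 */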
void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;
	/* The task we are waiting for is waking up */
	data = tr->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(tr, wakeup_task, wakeup_cpu);

	if (tracing_thresh) {
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " %lu us wakeup latency violates %lu us threshold.\n",
		       wakeup_task->comm, wakeup_task->pid,
		       raw_smp_processor_id(),
		       latency, nsecs_to_usecs(tracing_thresh));
	} else {
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum wakeup latency.\n",
		       wakeup_task->comm, wakeup_task->pid,
		       cpu, latency);
	}

out_unlock:
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}

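/*
 * Caller must hold wakeup_lock: resets the per-cpu trace buffers and
 * drops our reference on the task being traced.
 */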
static void notrace __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	assert_spin_locked(&wakeup_lock);

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(data);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void notrace wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_lock, flags);
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
}

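/*
 * A task is waking up: if it is an RT task with a higher priority
 * than both the current wakeup target and the currently running
 * task, make it the new target and start a fresh trace.
 */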
static notrace void
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
		   struct task_struct *curr)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

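	/*
	 * Only consider RT tasks whose priority beats both the
	 * current wakeup target and the task running now (lower
	 * prio value means higher priority).
	 */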
	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= curr->prio)
		return;

	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(tr);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
	ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	spin_unlock(&wakeup_lock);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}

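/*
 * Entry points for the scheduler's wakeup paths -- going by the
 * names, presumably try_to_wake_up() and wake_up_new_task(); the
 * call sites live outside this file.
 */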
notrace void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
{
	if (likely(!tracer_enabled))
		return;

	wakeup_check_start(wakeup_trace, wakee, curr);
}

notrace void
ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
{
	if (likely(!tracer_enabled))
		return;

	wakeup_check_start(wakeup_trace, wakee, curr);
}

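/*
 * Reset the tracer state first, then publish tracer_enabled; the
 * smp_wmb() below pairs with the smp_rmb() in wakeup_sched_switch().
 */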
static notrace void start_wakeup_tracer(struct trace_array *tr)
{
	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	tracer_enabled = 1;
}

static notrace void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static notrace void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static notrace void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

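/* called when the user toggles tracing via the tracing control file */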
static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void notrace wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void notrace wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}

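/*
 * Tracer ops registered with the tracing core; print_max makes the
 * core report from the saved max-latency snapshot.
 */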
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);