1 | /* |
2 | * unlikely profiler | |
3 | * | |
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | |
5 | */ | |
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
f633cef0 | 17 | |
1f0d69a9 | 18 | #include "trace.h" |
f633cef0 | 19 | #include "trace_output.h" |
1f0d69a9 | 20 | |
e302cf3f FW |
21 | static struct tracer branch_trace; |
22 | ||
2ed84eeb | 23 | #ifdef CONFIG_BRANCH_TRACER |
52f232cb | 24 | |
9f029e83 SR |
25 | static int branch_tracing_enabled __read_mostly; |
26 | static DEFINE_MUTEX(branch_tracing_mutex); | |
e302cf3f | 27 | |
9f029e83 | 28 | static struct trace_array *branch_tracer; |
52f232cb SR |
29 | |
/*
 * probe_likely_condition - write one annotated-branch event to the ring buffer
 * @f:      static per-call-site profile record (func/file/line)
 * @val:    what the condition actually evaluated to
 * @expect: what the likely()/unlikely() annotation predicted
 *
 * Copies the func/file strings into the entry rather than saving the
 * pointer (see comment below).  Runs with local irqs off and uses the
 * per-cpu ->disabled counter so the probe cannot recurse into itself.
 */
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer.  This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	/* Non-zero ->disabled means we are already inside this probe on this cpu */
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	/* Fixed-size copies; explicit NUL since strncpy may not terminate */
	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
84 | ||
85 | static inline | |
9f029e83 | 86 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) |
52f232cb | 87 | { |
9f029e83 | 88 | if (!branch_tracing_enabled) |
52f232cb SR |
89 | return; |
90 | ||
91 | probe_likely_condition(f, val, expect); | |
92 | } | |
93 | ||
9f029e83 | 94 | int enable_branch_tracing(struct trace_array *tr) |
52f232cb SR |
95 | { |
96 | int ret = 0; | |
97 | ||
9f029e83 SR |
98 | mutex_lock(&branch_tracing_mutex); |
99 | branch_tracer = tr; | |
52f232cb SR |
100 | /* |
101 | * Must be seen before enabling. The reader is a condition | |
102 | * where we do not need a matching rmb() | |
103 | */ | |
104 | smp_wmb(); | |
9f029e83 SR |
105 | branch_tracing_enabled++; |
106 | mutex_unlock(&branch_tracing_mutex); | |
52f232cb SR |
107 | |
108 | return ret; | |
109 | } | |
110 | ||
9f029e83 | 111 | void disable_branch_tracing(void) |
52f232cb | 112 | { |
9f029e83 | 113 | mutex_lock(&branch_tracing_mutex); |
52f232cb | 114 | |
9f029e83 | 115 | if (!branch_tracing_enabled) |
52f232cb SR |
116 | goto out_unlock; |
117 | ||
9f029e83 | 118 | branch_tracing_enabled--; |
52f232cb SR |
119 | |
120 | out_unlock: | |
9f029e83 | 121 | mutex_unlock(&branch_tracing_mutex); |
52f232cb | 122 | } |
80e5ea45 SR |
123 | |
/* Tracer "start" hook: hand the trace array to the branch probe. */
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}
128 | ||
/* Tracer "stop" hook: drop our enable reference on the branch probe. */
static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}
133 | ||
/*
 * Tracer init hook: wipe stale events from every online cpu's buffer,
 * then enable the branch probe.  Always returns success.
 */
static int branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
	return 0;
}
144 | ||
/* Tracer reset hook: just turn the probe off. */
static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
149 | ||
f633cef0 SR |
150 | static int |
151 | trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags) | |
152 | { | |
153 | struct print_entry *field; | |
154 | ||
155 | trace_assign_type(field, entry); | |
156 | ||
157 | if (seq_print_ip_sym(s, field->ip, flags)) | |
158 | goto partial; | |
159 | ||
160 | if (trace_seq_printf(s, ": %s", field->buf)) | |
161 | goto partial; | |
162 | ||
163 | partial: | |
164 | return TRACE_TYPE_PARTIAL_LINE; | |
165 | } | |
166 | ||
167 | static int | |
168 | trace_branch_print(struct trace_seq *s, struct trace_entry *entry, int flags) | |
169 | { | |
170 | struct trace_branch *field; | |
171 | ||
172 | trace_assign_type(field, entry); | |
173 | ||
174 | if (trace_seq_printf(s, "[%s] %s:%s:%d\n", | |
175 | field->correct ? " ok " : " MISS ", | |
176 | field->func, | |
177 | field->file, | |
178 | field->line)) | |
179 | return TRACE_TYPE_PARTIAL_LINE; | |
180 | ||
181 | return 0; | |
182 | } | |
183 | ||
e302cf3f | 184 | |
f633cef0 SR |
/*
 * Output callbacks for TRACE_BRANCH ring-buffer entries.  Only the
 * normal and latency text formats are rendered; raw/hex/binary are
 * stubbed out with trace_nop_print.
 */
static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.trace		= trace_branch_print,
	.latency_trace	= trace_branch_print,
	.raw		= trace_nop_print,
	.hex		= trace_nop_print,
	.binary		= trace_nop_print,
};
193 | ||
52f232cb SR |
194 | #else |
/*
 * Branch tracer compiled out: ftrace_likely_update() still maintains
 * the hit/miss counters, but nothing is written to a trace buffer.
 */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
2ed84eeb | 199 | #endif /* CONFIG_BRANCH_TRACER */ |
52f232cb | 200 | |
/*
 * ftrace_likely_update - hook called by instrumented likely()/unlikely()
 * @f:      per-call-site profile record emitted by the annotation macros
 * @val:    actual truth value of the condition
 * @expect: value the annotation predicted
 *
 * Updates the per-site hit/miss counters and, when the branch tracer is
 * enabled, logs the event.  Exported because the annotation macros
 * expand inside modules too.
 */
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! (concurrent updates can drop counts) */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
218 | ||
e302cf3f FW |
219 | extern unsigned long __start_annotated_branch_profile[]; |
220 | extern unsigned long __stop_annotated_branch_profile[]; | |
1f0d69a9 | 221 | |
/*
 * Column headers for the annotated-branch statistics file.
 * NOTE(review): the spacing inside these literals looks collapsed by
 * extraction -- verify column alignment against branch_stat_show().
 */
static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
	return 0;
}
232 | ||
e302cf3f | 233 | static inline long get_incorrect_percent(struct ftrace_branch_data *p) |
1f0d69a9 | 234 | { |
e302cf3f | 235 | long percent; |
1f0d69a9 | 236 | |
e302cf3f FW |
237 | if (p->correct) { |
238 | percent = p->incorrect * 100; | |
239 | percent /= p->correct + p->incorrect; | |
240 | } else | |
241 | percent = p->incorrect ? 100 : -1; | |
1f0d69a9 | 242 | |
e302cf3f | 243 | return percent; |
1f0d69a9 SR |
244 | } |
245 | ||
e302cf3f | 246 | static int branch_stat_show(struct seq_file *m, void *v) |
1f0d69a9 | 247 | { |
9f029e83 | 248 | struct ftrace_branch_data *p = v; |
1f0d69a9 | 249 | const char *f; |
bac28bfe | 250 | long percent; |
1f0d69a9 | 251 | |
1f0d69a9 SR |
252 | /* Only print the file, not the path */ |
253 | f = p->file + strlen(p->file); | |
254 | while (f >= p->file && *f != '/') | |
255 | f--; | |
256 | f++; | |
257 | ||
2bcd521a SR |
258 | /* |
259 | * The miss is overlayed on correct, and hit on incorrect. | |
260 | */ | |
e302cf3f | 261 | percent = get_incorrect_percent(p); |
1f0d69a9 | 262 | |
bac28bfe SR |
263 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); |
264 | if (percent < 0) | |
265 | seq_printf(m, " X "); | |
266 | else | |
267 | seq_printf(m, "%3ld ", percent); | |
1f0d69a9 SR |
268 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); |
269 | return 0; | |
270 | } | |
271 | ||
e302cf3f FW |
/* First record of the linker-built annotated-branch profile table. */
static void *annotated_branch_stat_start(void)
{
	return __start_annotated_branch_profile;
}
1f0d69a9 | 276 | |
e302cf3f FW |
277 | static void * |
278 | annotated_branch_stat_next(void *v, int idx) | |
1f0d69a9 | 279 | { |
e302cf3f | 280 | struct ftrace_branch_data *p = v; |
1f0d69a9 | 281 | |
e302cf3f | 282 | ++p; |
1f0d69a9 | 283 | |
e302cf3f FW |
284 | if ((void *)p >= (void *)__stop_annotated_branch_profile) |
285 | return NULL; | |
286 | ||
287 | return p; | |
1f0d69a9 SR |
288 | } |
289 | ||
e302cf3f FW |
/*
 * Stat-file sort comparator: orders records by ascending
 * incorrect-percentage.
 */
static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	long pct1 = get_incorrect_percent(p1);
	long pct2 = get_incorrect_percent(p2);

	/* Three-way compare without subtraction (no overflow risk). */
	return (pct1 > pct2) - (pct1 < pct2);
}
1f0d69a9 | 307 | |
2bcd521a | 308 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
e302cf3f FW |
309 | enum { |
310 | TRACE_BRANCH_OPT_ALL = 0x1 | |
311 | }; | |
2bcd521a | 312 | |
e302cf3f FW |
313 | static struct tracer_opt branch_opts[] = { |
314 | { TRACER_OPT(stat_all_branch, TRACE_BRANCH_OPT_ALL) }, | |
315 | { } | |
2bcd521a SR |
316 | }; |
317 | ||
e302cf3f FW |
318 | static struct tracer_flags branch_flags = { |
319 | .val = 0, | |
320 | .opts = branch_opts | |
321 | }; | |
2bcd521a | 322 | |
e302cf3f FW |
323 | extern unsigned long __start_branch_profile[]; |
324 | extern unsigned long __stop_branch_profile[]; | |
1f0d69a9 | 325 | |
e302cf3f FW |
/*
 * Column headers for the all-branches statistics file (miss/hit view).
 * NOTE(review): literal spacing may have been collapsed by extraction;
 * verify column alignment against branch_stat_show().
 */
static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " miss hit %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
	return 0;
}
1f0d69a9 | 336 | |
e302cf3f | 337 | static void *all_branch_stat_start(void) |
1f0d69a9 | 338 | { |
e302cf3f FW |
339 | return __start_branch_profile; |
340 | } | |
341 | ||
342 | static void * | |
343 | all_branch_stat_next(void *v, int idx) | |
344 | { | |
345 | struct ftrace_branch_data *p = v; | |
1f0d69a9 | 346 | |
e302cf3f | 347 | ++p; |
1f0d69a9 | 348 | |
e302cf3f FW |
349 | if ((void *)p >= (void *)__stop_branch_profile) |
350 | return NULL; | |
1f0d69a9 | 351 | |
e302cf3f FW |
352 | return p; |
353 | } | |
2bcd521a | 354 | |
e302cf3f FW |
/*
 * branch_set_flag - tracer-option callback ("stat_all_branch")
 * @old_flags: previous option mask (unused here)
 * @bit:       option bit being toggled
 * @set:       non-zero to enable, zero to disable
 *
 * Toggling TRACE_BRANCH_OPT_ALL repoints the global branch_trace stat
 * callbacks between the all-branches table (unsorted: stat_cmp NULL)
 * and the annotated-only table, then re-registers the stat file.
 */
static int branch_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_BRANCH_OPT_ALL) {
		if (set) {
			branch_trace.stat_headers = all_branch_stat_headers;
			branch_trace.stat_start = all_branch_stat_start;
			branch_trace.stat_next = all_branch_stat_next;
			branch_trace.stat_cmp = NULL;
		} else {
			branch_trace.stat_headers =
				annotated_branch_stat_headers;
			branch_trace.stat_start = annotated_branch_stat_start;
			branch_trace.stat_next = annotated_branch_stat_next;
			branch_trace.stat_cmp = annotated_branch_stat_cmp;
		}
		init_tracer_stat(&branch_trace);
	}
	return 0;
}
374 | ||
e302cf3f FW |
375 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
376 | ||
/*
 * The "branch" tracer.  Stat callbacks default to the annotated-branch
 * table; branch_set_flag() swaps in the all-branches table when
 * CONFIG_PROFILE_ALL_BRANCHES and the stat_all_branch option are set.
 * The init/reset trace hooks exist only with CONFIG_BRANCH_TRACER.
 */
static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
#ifdef CONFIG_BRANCH_TRACER
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
#endif /* CONFIG_BRANCH_TRACER */
	.stat_start	= annotated_branch_stat_start,
	.stat_next	= annotated_branch_stat_next,
	.stat_show	= branch_stat_show,
	.stat_headers	= annotated_branch_stat_headers,
	.stat_cmp	= annotated_branch_stat_cmp,
#ifdef CONFIG_PROFILE_ALL_BRANCHES
	.flags		= &branch_flags,
	.set_flag	= branch_set_flag,
#endif
};
397 | ||
/*
 * Register the TRACE_BRANCH output callbacks (when the tracer is built
 * in), then the tracer itself.
 * NOTE(review): register_ftrace_event() returning 0 is treated as
 * failure here (hence the !ret check); the positive "return 1" on
 * failure predates the -errno convention -- confirm before changing.
 */
__init static int init_branch_trace(void)
{
#ifdef CONFIG_BRANCH_TRACER
	int ret;
	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register branch events\n");
		return 1;
	}
#endif

	return register_tracer(&branch_trace);
}
device_initcall(init_branch_trace);