/* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> */
6 #include <linux/kallsyms.h>
7 #include <linux/seq_file.h>
8 #include <linux/spinlock.h>
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/module.h>
12 #include <linux/ftrace.h>
13 #include <linux/hash.h>
15 #include <asm/local.h>
18 #ifdef CONFIG_BRANCH_TRACER
20 static int branch_tracing_enabled __read_mostly
;
21 static DEFINE_MUTEX(branch_tracing_mutex
);
22 static struct trace_array
*branch_tracer
;
25 probe_likely_condition(struct ftrace_branch_data
*f
, int val
, int expect
)
27 struct trace_array
*tr
= branch_tracer
;
28 struct ring_buffer_event
*event
;
29 struct trace_branch
*entry
;
30 unsigned long flags
, irq_flags
;
35 * I would love to save just the ftrace_likely_data pointer, but
36 * this code can also be used by modules. Ugly things can happen
37 * if the module is unloaded, and then we go and read the
38 * pointer. This is slower, but much safer.
44 local_irq_save(flags
);
45 cpu
= raw_smp_processor_id();
46 if (atomic_inc_return(&tr
->data
[cpu
]->disabled
) != 1)
49 event
= ring_buffer_lock_reserve(tr
->buffer
, sizeof(*entry
),
55 entry
= ring_buffer_event_data(event
);
56 tracing_generic_entry_update(&entry
->ent
, flags
, pc
);
57 entry
->ent
.type
= TRACE_BRANCH
;
59 /* Strip off the path, only save the file */
60 p
= f
->file
+ strlen(f
->file
);
61 while (p
>= f
->file
&& *p
!= '/')
65 strncpy(entry
->func
, f
->func
, TRACE_FUNC_SIZE
);
66 strncpy(entry
->file
, p
, TRACE_FILE_SIZE
);
67 entry
->func
[TRACE_FUNC_SIZE
] = 0;
68 entry
->file
[TRACE_FILE_SIZE
] = 0;
69 entry
->line
= f
->line
;
70 entry
->correct
= val
== expect
;
72 ring_buffer_unlock_commit(tr
->buffer
, event
, irq_flags
);
75 atomic_dec(&tr
->data
[cpu
]->disabled
);
76 local_irq_restore(flags
);
80 void trace_likely_condition(struct ftrace_branch_data
*f
, int val
, int expect
)
82 if (!branch_tracing_enabled
)
85 probe_likely_condition(f
, val
, expect
);
88 int enable_branch_tracing(struct trace_array
*tr
)
92 mutex_lock(&branch_tracing_mutex
);
95 * Must be seen before enabling. The reader is a condition
96 * where we do not need a matching rmb()
99 branch_tracing_enabled
++;
100 mutex_unlock(&branch_tracing_mutex
);
105 void disable_branch_tracing(void)
107 mutex_lock(&branch_tracing_mutex
);
109 if (!branch_tracing_enabled
)
112 branch_tracing_enabled
--;
115 mutex_unlock(&branch_tracing_mutex
);
/* Tracer start hook: just takes an enable reference for @tr. */
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}
/* Tracer stop hook: releases the enable reference (@tr is unused). */
static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}
128 static void branch_trace_init(struct trace_array
*tr
)
132 for_each_online_cpu(cpu
)
133 tracing_reset(tr
, cpu
);
135 start_branch_trace(tr
);
/* Tracer reset hook: simply stop tracing. */
static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
143 struct tracer branch_trace __read_mostly
=
146 .init
= branch_trace_init
,
147 .reset
= branch_trace_reset
,
148 #ifdef CONFIG_FTRACE_SELFTEST
149 .selftest
= trace_selftest_startup_branch
,
153 __init
static int init_branch_trace(void)
155 return register_tracer(&branch_trace
);
158 device_initcall(init_branch_trace
);
161 void trace_likely_condition(struct ftrace_branch_data
*f
, int val
, int expect
)
164 #endif /* CONFIG_BRANCH_TRACER */
166 void ftrace_likely_update(struct ftrace_branch_data
*f
, int val
, int expect
)
169 * I would love to have a trace point here instead, but the
170 * trace point code is so inundated with unlikely and likely
171 * conditions that the recursive nightmare that exists is too
172 * much to try to get working. At least for now.
174 trace_likely_condition(f
, val
, expect
);
176 /* FIXME: Make this atomic! */
182 EXPORT_SYMBOL(ftrace_likely_update
);
/* Half-open [start, stop) range of one profile section, for seq_file walks. */
struct ftrace_pointer {
	void		*start;
	void		*stop;
};
190 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
192 struct ftrace_pointer
*f
= m
->private;
193 struct ftrace_branch_data
*p
= v
;
202 if ((void *)p
>= (void *)f
->stop
)
208 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
213 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
/* seq_file stop hook: nothing to release, iteration state is positional. */
static void t_stop(struct seq_file *m, void *p)
{
}
223 static int t_show(struct seq_file
*m
, void *v
)
225 struct ftrace_branch_data
*p
= v
;
227 unsigned long percent
;
229 if (v
== (void *)1) {
230 seq_printf(m
, " correct incorrect %% "
233 " ------- --------- - "
239 /* Only print the file, not the path */
240 f
= p
->file
+ strlen(p
->file
);
241 while (f
>= p
->file
&& *f
!= '/')
246 percent
= p
->incorrect
* 100;
247 percent
/= p
->correct
+ p
->incorrect
;
249 percent
= p
->incorrect
? 100 : 0;
251 seq_printf(m
, "%8lu %8lu %3lu ", p
->correct
, p
->incorrect
, percent
);
252 seq_printf(m
, "%-30.30s %-20.20s %d\n", p
->func
, f
, p
->line
);
256 static struct seq_operations tracing_likely_seq_ops
= {
263 static int tracing_likely_open(struct inode
*inode
, struct file
*file
)
267 ret
= seq_open(file
, &tracing_likely_seq_ops
);
269 struct seq_file
*m
= file
->private_data
;
270 m
->private = (void *)inode
->i_private
;
276 static struct file_operations tracing_likely_fops
= {
277 .open
= tracing_likely_open
,
/* Section boundaries laid down by the linker script for the profile data. */
extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];
287 static struct ftrace_pointer ftrace_likely_pos
= {
288 .start
= __start_likely_profile
,
289 .stop
= __stop_likely_profile
,
292 static struct ftrace_pointer ftrace_unlikely_pos
= {
293 .start
= __start_unlikely_profile
,
294 .stop
= __stop_unlikely_profile
,
297 static __init
int ftrace_branch_init(void)
299 struct dentry
*d_tracer
;
300 struct dentry
*entry
;
302 d_tracer
= tracing_init_dentry();
304 entry
= debugfs_create_file("profile_likely", 0444, d_tracer
,
306 &tracing_likely_fops
);
308 pr_warning("Could not create debugfs 'profile_likely' entry\n");
310 entry
= debugfs_create_file("profile_unlikely", 0444, d_tracer
,
311 &ftrace_unlikely_pos
,
312 &tracing_likely_fops
);
314 pr_warning("Could not create debugfs"
315 " 'profile_unlikely' entry\n");
320 device_initcall(ftrace_branch_init
);