/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
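
/*
 * Per-cpu pt_regs scratch space.  As far as the surrounding code shows,
 * the generated tracepoint perf handlers fill this via
 * perf_fetch_caller_regs() so a sample can carry caller register state
 * even when no interrupt regs are available.
 */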
DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
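
/*
 * Two per-cpu output buffers: one for ordinary contexts and one reserved
 * for NMI context, so an NMI that interrupts an event in flight cannot
 * scribble over a buffer that is already in use.  Both pointers are
 * published and retired under RCU.
 */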
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;
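
/*
 * perf_trace_t exists only to give alloc_percpu() a type of exactly
 * PERF_MAX_TRACE_SIZE bytes, so each CPU gets one fixed-size record slot.
 */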
typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_enable(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0)
		return 0;

	/* First user overall: allocate both per-cpu buffers */
	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		total_ref_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;

	return ret;
}
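
/*
 * perf_trace_enable()/perf_trace_disable() are the entry points the perf
 * core uses when a tracepoint counter is created or destroyed; event_id
 * is the ftrace event id carried in perf_event_attr.config.
 */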
int perf_trace_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	/* Last user overall: unpublish and free both per-cpu buffers */
	if (!--total_ref_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event in flight has finished before
		 * releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
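
/*
 * Note on the teardown above: readers dereference the buffers with IRQs
 * disabled, which counts as a sched-RCU read-side section, so the
 * synchronize_sched() guarantees no reader still holds a buffer pointer
 * by the time free_percpu() runs.
 */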

void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	/* NMI context gets its own dedicated buffer */
	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
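
/*
 * Usage sketch (illustrative, not part of this file): the handlers
 * generated from include/trace/ftrace.h pair the prepare call above
 * with perf_trace_buf_submit(), roughly:
 *
 *	int rctx;
 *	unsigned long irq_flags;
 *	int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64));
 *
 *	entry = perf_trace_buf_prepare(size, event_call->id,
 *				       &rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *	... fill in the event fields ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      irq_flags, regs);
 *
 * where entry, addr, count and regs come from the surrounding handler;
 * the submit step emits the sample and undoes the recursion and IRQ
 * state set up by the prepare step.
 */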