static char *perf_trace_buf;
static char *perf_trace_buf_nmi;
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned access
+ * surprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+ perf_trace_t;
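The new typedef deserves a note: an array of PERF_MAX_TRACE_SIZE / sizeof(unsigned long) unsigned longs occupies exactly PERF_MAX_TRACE_SIZE bytes, just like the old char array, but its natural alignment becomes that of unsigned long; the BUILD_BUG_ON added further down guards the divisibility this relies on. A standalone sketch of the invariant (the PERF_MAX_TRACE_SIZE value below is assumed purely for illustration):

	/* Illustrative only: mirrors the typedef's size/alignment trade. */
	#define PERF_MAX_TRACE_SIZE 2048	/* assumed value */

	typedef unsigned long perf_trace_t[PERF_MAX_TRACE_SIZE / sizeof(unsigned long)];

	_Static_assert(sizeof(perf_trace_t) == PERF_MAX_TRACE_SIZE,
		       "same byte count as the old char array");
	_Static_assert(_Alignof(perf_trace_t) == _Alignof(unsigned long),
		       "but aligned to unsigned long rather than char");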
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
-static int perf_trace_event_enable(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
char *buf;
int ret = -ENOMEM;
- if (event->perf_refcount++ > 0)
+ if (event->perf_refcount++ > 0) {
+ event->perf_data = NULL;
return 0;
+ }
if (!total_ref_count) {
buf = (char *)alloc_percpu(perf_trace_t);
ret = event->perf_event_enable(event);
if (!ret) {
+ event->perf_data = data;
total_ref_count++;
return 0;
}
return ret;
}
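perf_trace_event_enable() implements lazy shared allocation behind two counters: the per-event perf_refcount short-circuits repeat enables of the same event, while the global total_ref_count ensures the per-CPU buffers are allocated only when the very first event is enabled. A simplified, hypothetical userspace sketch of that pattern (single counter; names, size, and error handling are illustrative, not the kernel's):

	#include <stdlib.h>

	#define BUF_SIZE 2048			/* illustrative size */

	static void *shared_buf;
	static int shared_refs;

	static int shared_enable(void)
	{
		if (shared_refs++ > 0)
			return 0;		/* buffer exists, just take a reference */

		shared_buf = malloc(BUF_SIZE);	/* first user allocates for everyone */
		if (!shared_buf) {
			shared_refs--;
			return -1;
		}
		return 0;
	}

	static void shared_disable(void)
	{
		if (--shared_refs > 0)
			return;			/* other users still hold references */

		free(shared_buf);		/* last user releases the shared buffer */
		shared_buf = NULL;
	}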
-int perf_trace_enable(int event_id)
+int perf_trace_enable(int event_id, void *data)
{
struct ftrace_event_call *event;
int ret = -EINVAL;
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id && event->perf_event_enable &&
try_module_get(event->mod)) {
- ret = perf_trace_event_enable(event);
+ ret = perf_trace_event_enable(event, data);
break;
}
}
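perf_trace_enable() resolves an event id by walking the global ftrace_events list and, before enabling the match, pins its defining module with try_module_get() so the module cannot be unloaded while perf is using the event; try_module_get() deliberately fails if the module is already being torn down. A hypothetical sketch of the look-up-and-pin idiom (the struct and its pinned counter are stand-ins, not kernel structures):

	/* Hypothetical sketch; 'pinned' stands in for the module refcount. */
	struct item {
		int id;
		int pinned;
		struct item *next;
	};

	static struct item *find_and_pin(struct item *head, int id)
	{
		struct item *it;

		for (it = head; it; it = it->next) {
			if (it->id == id) {
				it->pinned++;	/* like try_module_get(event->mod) */
				return it;
			}
		}
		return NULL;			/* caller maps this to -EINVAL */
	}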
char *trace_buf, *raw_data;
int pc, cpu;
+ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
pc = preempt_count();
/* Protect the per cpu buffer, begin the rcu read side */
raw_data = per_cpu_ptr(trace_buf, cpu);
/* zero the dead bytes from alignment, so no stack data leaks to userspace */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
entry = (struct trace_entry *)raw_data;
tracing_generic_entry_update(entry, *irq_flags, pc);
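The memset conversion closes the same alignment hole the typedef comment warns about: raw_data points into a per-CPU perf_trace_t, which is only guaranteed to be aligned to unsigned long — 4 bytes on 32-bit — so the old 8-byte store through a u64 cast could land on a misaligned address and trap on strict-alignment CPUs. memset makes no alignment assumption. A standalone sketch of the two variants (illustrative, not kernel code):

	#include <stdint.h>
	#include <string.h>

	/* Zero the 8 dead padding bytes at the end of a record. */
	static void zero_pad_safe(char *buf, size_t size)
	{
		/* byte-wise: valid for any alignment of buf + size - 8 */
		memset(&buf[size - sizeof(uint64_t)], 0, sizeof(uint64_t));
	}

	static void zero_pad_unsafe(char *buf, size_t size)
	{
		/* undefined behavior when buf + size - 8 is not 8-byte aligned */
		*(uint64_t *)(&buf[size - sizeof(uint64_t)]) = 0;
	}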