]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
perf_counter, ftrace: Fix perf_counter integration
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Tue, 21 Jul 2009 15:34:57 +0000 (17:34 +0200)
committerIngo Molnar <mingo@elte.hu>
Sun, 9 Aug 2009 10:47:25 +0000 (12:47 +0200)
Adds a possible second part to the assign argument of TRACE_EVENT().

  TP_perf_assign(
__perf_count(foo);
__perf_addr(bar);
  )

Which, when specified, makes the swcounter increment with @foo instead
of the usual 1, and reports @bar for PERF_SAMPLE_ADDR (the data address
associated with the event) when this triggers a counter overflow.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/trace/ftrace.h
kernel/perf_counter.c

index 1867553c61e595839428e2c394787e205228297d..fec71f8dbc4804ff3ec9d467fcc95bd8077a9e88 100644 (file)
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)           \
 static int                                                             \
@@ -345,6 +348,88 @@ static inline int ftrace_get_offsets_##call(                               \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *     extern void perf_tpcounter_event(int, u64, u64);
+ *     u64 __addr = 0, __count = 1;
+ *
+ *     <assign>   <-- here we expand the TP_perf_assign() macro
+ *
+ *     perf_tpcounter_event(event_<call>.id, __addr, __count);
+ * }
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *     int ret = 0;
+ *
+ *     if (!atomic_inc_return(&event_call->profile_count))
+ *             ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *     return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *     if (atomic_add_negative(-1, &event_call->profile_count))
+ *             unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...)
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)         \
+                                                                       \
+static void ftrace_profile_##call(proto)                               \
+{                                                                      \
+       extern void perf_tpcounter_event(int, u64, u64);                \
+       u64 __addr = 0, __count = 1;                                    \
+       { assign; }                                                     \
+       perf_tpcounter_event(event_##call.id, __addr, __count);         \
+}                                                                      \
+                                                                       \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{                                                                      \
+       int ret = 0;                                                    \
+                                                                       \
+       if (!atomic_inc_return(&event_call->profile_count))             \
+               ret = register_trace_##call(ftrace_profile_##call);     \
+                                                                       \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{                                                                      \
+       if (atomic_add_negative(-1, &event_call->profile_count))        \
+               unregister_trace_##call(ftrace_profile_##call);         \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +532,6 @@ static inline int ftrace_get_offsets_##call(                               \
 #define TP_FMT(fmt, args...)   fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)                              \
-static void ftrace_profile_##call(proto)                               \
-{                                                                      \
-       extern void perf_tpcounter_event(int);                          \
-       perf_tpcounter_event(event_##call.id);                          \
-}                                                                      \
-                                                                       \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{                                                                      \
-       int ret = 0;                                                    \
-                                                                       \
-       if (!atomic_inc_return(&event_call->profile_count))             \
-               ret = register_trace_##call(ftrace_profile_##call);     \
-                                                                       \
-       return ret;                                                     \
-}                                                                      \
-                                                                       \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{                                                                      \
-       if (atomic_add_negative(-1, &event_call->profile_count))        \
-               unregister_trace_##call(ftrace_profile_##call);         \
-}
 
 #define _TRACE_PROFILE_INIT(call)                                      \
        .profile_count = ATOMIC_INIT(-1),                               \
@@ -476,7 +539,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
        .profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +564,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)         \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))                      \
                                                                        \
 static struct ftrace_event_call event_##call;                          \
                                                                        \
@@ -586,6 +647,5 @@ __attribute__((section("_ftrace_events"))) event_##call = {         \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
 #undef _TRACE_PROFILE_INIT
 
index 673c1aaf7332dc50809f558af52780e73bec5807..52eb4b68d34f9c1fbaa45dbb1df060ef21e89407 100644 (file)
@@ -3703,17 +3703,17 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count)
 {
        struct perf_sample_data data = {
                .regs = get_irq_regs(),
-               .addr = 0,
+               .addr = addr,
        };
 
        if (!data.regs)
                data.regs = task_pt_regs(current);
 
-       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);