tracing: Update stack trace skipping for ORC unwinder
author     Steven Rostedt (VMware) <rostedt@goodmis.org>
           Tue, 23 Jan 2018 18:25:04 +0000 (13:25 -0500)
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>
           Tue, 23 Jan 2018 20:57:00 +0000 (15:57 -0500)
With the addition of the ORC unwinder and the FRAME POINTER unwinder, the
stack trace skipping requirements have changed.

I went through the tracing stack trace dumps with ORC and with frame
pointers and recalculated the proper values.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/trace/trace.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions.c
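
Every hunk in the files listed above applies the same pattern: the number of
tracing-internal stack frames to skip is now chosen at compile time, because
the ORC unwinder records fewer of the tracing helpers in the dump than the
frame-pointer unwinder does, so it needs a smaller skip value to land on the
real caller. Below is a minimal standalone sketch of that pattern;
CONFIG_UNWINDER_ORC is the real kernel Kconfig symbol, but EXAMPLE_STACK_SKIP,
record_stack() and the skip values are purely illustrative and not taken from
the patch.

#include <stdio.h>

/*
 * Illustrative sketch only (not part of the patch): pick the skip depth
 * at compile time, mirroring the #ifdef CONFIG_UNWINDER_ORC blocks added
 * below.  With ORC, fewer tracing-internal frames show up in the dump,
 * so a smaller skip value reaches the real caller.
 */
#ifdef CONFIG_UNWINDER_ORC
# define EXAMPLE_STACK_SKIP 2	/* hypothetical ORC skip depth */
#else
# define EXAMPLE_STACK_SKIP 3	/* hypothetical frame-pointer skip depth */
#endif

/* Stand-in for __trace_stack(): report how many frames would be hidden. */
static void record_stack(int skip)
{
	printf("would skip %d internal frame(s) before the caller\n", skip);
}

int main(void)
{
	record_stack(EXAMPLE_STACK_SKIP);
	return 0;
}

Compiling the sketch with and without -DCONFIG_UNWINDER_ORC prints the two
different skip depths; that is the same decision each STACK_SKIP and
FTRACE_STACK_SKIP definition below makes for the kernel's configured unwinder.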

kernel/trace/trace.c
index 2a8d8a294345a258baca50b8a6b272c1ac0fc658..8e3f20a18a06dae09a240f54df3e60bebf9e6382 100644
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
+/*
+ * Skip 3:
+ *
+ *   trace_buffer_unlock_commit_regs()
+ *   trace_event_buffer_commit()
+ *   trace_event_raw_event_xxx()
+*/
+# define STACK_SKIP 3
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
        __buffer_unlock_commit(buffer, event);
 
        /*
-        * If regs is not set, then skip the following callers:
-        *   trace_buffer_unlock_commit_regs
-        *   event_trigger_unlock_commit
-        *   trace_event_buffer_commit
-        *   trace_event_raw_event_sched_switch
+        * If regs is not set, then skip the necessary functions.
         * Note, we can still get here via blktrace, wakeup tracer
         * and mmiotrace, but that's ok if they lose a function or
-        * two. They are that meaningful.
+        * two. They are not that meaningful.
         */
-       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
 
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
        trace.skip              = skip;
 
        /*
-        * Add two, for this function and the call to save_stack_trace()
+        * Add one, for this function and the call to save_stack_trace()
         * If regs is set, then these functions will not be in the way.
         */
+#ifndef CONFIG_UNWINDER_ORC
        if (!regs)
-               trace.skip += 2;
+               trace.skip++;
+#endif
 
        /*
         * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
 
        local_save_flags(flags);
 
-       /*
-        * Skip 3 more, seems to get us at the caller of
-        * this function.
-        */
-       skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+       /* Skip 1 to skip this function. */
+       skip++;
+#endif
        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                             flags, skip, preempt_count(), NULL);
 }
kernel/trace/trace_events_trigger.c
index f2ac9d44f6c4b1f0ea4128836d9977b138c840ca..87411482a46f2753685c9eab3d9e91830d7ae47d 100644
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 #ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_UNWINDER_ORC
+/* Skip 2:
+ *   event_triggers_post_call()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 2
+#else
 /*
- * Skip 3:
+ * Skip 4:
  *   stacktrace_trigger()
  *   event_triggers_post_call()
+ *   trace_event_buffer_commit()
  *   trace_event_raw_event_xxx()
  */
-#define STACK_SKIP 3
+#define STACK_SKIP 4
+#endif
 
 static void
 stacktrace_trigger(struct event_trigger_data *data, void *rec)
kernel/trace/trace_functions.c
index 27f7ad12c4b1b11da86dd161468c1c11944dd8fb..b611cd36e22db8ad79208e2ccb76bc643c6c5763 100644
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNWINDER_ORC
+/*
+ * Skip 2:
+ *
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 2
+#else
+/*
+ * Skip 3:
+ *   __trace_stack()
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 3
+#endif
+
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
-               /*
-                * skip over 5 funcs:
-                *    __ftrace_trace_stack,
-                *    __trace_stack,
-                *    function_stack_trace_call
-                *    ftrace_list_func
-                *    ftrace_call
-                */
-               __trace_stack(tr, flags, 5, pc);
+               __trace_stack(tr, flags, STACK_SKIP, pc);
        }
 
        atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
        tracer_tracing_off(tr);
 }
 
+#ifdef CONFIG_UNWINDER_ORC
 /*
- * Skip 4:
+ * Skip 3:
+ *
+ *   function_trace_probe_call()
+ *   ftrace_ops_assist_func()
+ *   ftrace_call()
+ */
+#define FTRACE_STACK_SKIP 3
+#else
+/*
+ * Skip 5:
+ *
+ *   __trace_stack()
  *   ftrace_stacktrace()
  *   function_trace_probe_call()
- *   ftrace_ops_list_func()
+ *   ftrace_ops_assist_func()
  *   ftrace_call()
  */
-#define STACK_SKIP 4
+#define FTRACE_STACK_SKIP 5
+#endif
 
 static __always_inline void trace_stack(struct trace_array *tr)
 {
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
        local_save_flags(flags);
        pc = preempt_count();
 
-       __trace_stack(tr, flags, STACK_SKIP, pc);
+       __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
 }
 
 static void