git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
ftrace: Protect ftrace_graph_hash with ftrace_sync
authorSteven Rostedt (VMware) <rostedt@goodmis.org>
Wed, 5 Feb 2020 14:20:32 +0000 (09:20 -0500)
committerKhalid Elmously <khalid.elmously@canonical.com>
Fri, 13 Mar 2020 04:31:00 +0000 (00:31 -0400)
BugLink: https://bugs.launchpad.net/bugs/1866678
[ Upstream commit 54a16ff6f2e50775145b210bcd94d62c3c2af117 ]

As the function_graph tracer can run when RCU is not "watching", it can not be
protected by synchronize_rcu(); it requires running a task on each CPU before
it can be freed. Calling schedule_on_each_cpu(ftrace_sync) needs to be used.

Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72
Cc: stable@vger.kernel.org
Fixes: b9b0c831bed26 ("ftrace: Convert graph filter to use hash tables")
Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
kernel/trace/ftrace.c
kernel/trace/trace.h

index 8f5aa69b58446b84874864af7a2a927d59f80a45..14f176dcaa45deb3a1fb0d36f5640f5753078ad9 100644 (file)
@@ -5387,8 +5387,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_sched();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               schedule_on_each_cpu(ftrace_sync);
 
                free_ftrace_hash(old_hash);
        }
index 6c4e84f451ce3794ecff7a44a1a3a08beac0cd21..6d5e5970f72a94a1ff3d8c0821cf872d46a2d703 100644 (file)
@@ -881,6 +881,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
 
@@ -933,6 +934,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
                                                 !preemptible());