perf/core: Fix RCU problem with cgroup context switching code
author    Stephane Eranian <eranian@google.com>
          Thu, 12 Nov 2015 10:00:03 +0000 (11:00 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 23 Nov 2015 08:21:03 +0000 (09:21 +0100)
The RCU checker detected an RCU violation in the cgroup switching routines
perf_cgroup_sched_in() and perf_cgroup_sched_out(): the cgroup was being
dereferenced from the task without holding the RCU read lock.

Fix this by taking the RCU read lock in the callers, and move the locking
out of perf_cgroup_switch() itself to avoid double locking.
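
For context, the caller-side pattern after this change looks roughly like the
sketch below. It is simplified from the patched kernel/events/core.c, not the
verbatim kernel code; the point is that rcu_read_lock() is taken before the
task's cgroup pointer is dereferenced and is held across the call into
perf_cgroup_switch(), which no longer locks internally.

/*
 * Simplified sketch of the pattern applied by this patch: the caller
 * enters an RCU read-side critical section before dereferencing each
 * task's cgroup pointer and keeps it held across perf_cgroup_switch(),
 * which no longer takes the RCU lock itself (avoiding double locking).
 */
static inline void perf_cgroup_sched_out(struct task_struct *task,
                                         struct task_struct *next)
{
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;

        rcu_read_lock();                        /* protects the dereferences below */
        cgrp1 = perf_cgroup_from_task(task);    /* RCU-protected lookup */
        if (next)                               /* next can be NULL (enable-on-exec path) */
                cgrp2 = perf_cgroup_from_task(next);

        /* only reschedule cgroup events when the cgroup actually changes */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

        rcu_read_unlock();
}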

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: edumazet@google.com
Link: http://lkml.kernel.org/r/1447322404-10920-2-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/events/core.c

index 36babfd2064842c28d4951a656ea1e84500bbf8f..60e71ca42c222e72d31e7fbfed3791c71f8dd3ad 100644
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
         * we reschedule only in the presence of cgroup
         * constrained events.
         */
-       rcu_read_lock();
 
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +530,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                }
        }
 
-       rcu_read_unlock();
-
        local_irq_restore(flags);
 }
 
@@ -542,6 +539,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;
 
+       rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
         */
@@ -561,6 +559,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+       rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +569,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;
 
+       rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
         */
@@ -584,6 +585,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+       rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -9452,7 +9455,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
        struct task_struct *task = info;
+       rcu_read_lock();
        perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+       rcu_read_unlock();
        return 0;
 }