From 66339c31bc3978d5fff9c4b4cb590a861def4db2 Mon Sep 17 00:00:00 2001
From: Kirill Tkhai <ktkhai@parallels.com>
Date: Mon, 22 Sep 2014 22:36:24 +0400
Subject: [PATCH] sched: Use dl_bw_of() under RCU read lock

dl_bw_of() dereferences rq->rd, which has to have the RCU read lock held.
The probability of a use-after-free is not zero here.

Also add a lockdep assert into dl_bw_cpus().

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org> # v3.14+
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140922183624.11015.71558.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/core.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5b0eac9f4e78..f0adb038170b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2021,6 +2021,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -2029,6 +2031,8 @@ static inline int dl_bw_cpus(int i)
 	struct root_domain *rd = cpu_rq(i)->rd;
 	int cpus = 0;
 
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	for_each_cpu_and(i, rd->span, cpu_active_mask)
 		cpus++;
 
@@ -7645,6 +7649,8 @@ static int sched_dl_global_constraints(void)
 	int cpu, ret = 0;
 	unsigned long flags;
 
+	rcu_read_lock();
+
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7666,6 +7672,8 @@ static int sched_dl_global_constraints(void)
 			break;
 	}
 
+	rcu_read_unlock();
+
 	return ret;
 }
 
@@ -7681,6 +7689,7 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
+	rcu_read_lock();
 	/*
 	 * FIXME: As above...
 	 */
@@ -7691,6 +7700,7 @@ static void sched_dl_do_global(void)
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
+	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)
-- 
2.39.2
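
For context, the caller-side discipline the patch enforces looks as follows.
This is an illustrative sketch only, not part of the patch; it mirrors the
sched_dl_do_global() hunks above, and assumes new_bw, cpu and flags are locals
declared as in that function:

	/*
	 * Any dereference of rq->rd (e.g. via dl_bw_of()) must sit inside an
	 * RCU read-side critical section, otherwise the root_domain it points
	 * to may be freed concurrently (use-after-free).
	 */
	rcu_read_lock();			/* pin the current root_domain */
	for_each_possible_cpu(cpu) {
		struct dl_bw *dl_b = dl_bw_of(cpu);	/* safe under RCU */

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	}
	rcu_read_unlock();		/* root_domain may be freed from here on */

With the rcu_lockdep_assert() added to dl_bw_of() and dl_bw_cpus(), a caller
that skips the rcu_read_lock()/rcu_read_unlock() pair produces a lockdep
splat instead of a silent, hard-to-reproduce race.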