sched/pelt: Fix update of blocked PELT ordering
author    Vincent Guittot <vincent.guittot@linaro.org>
          Wed, 30 Oct 2019 11:18:29 +0000 (12:18 +0100)
committer Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
          Fri, 17 Jan 2020 17:23:19 +0000 (14:23 -0300)
BugLink: https://bugs.launchpad.net/bugs/1856334
[ Upstream commit b90f7c9d2198d789709390280a43e0a46345682b ]

update_cfs_rq_load_avg() can call cpufreq_update_util() to trigger an
update of the frequency. Make sure that RT, DL and IRQ PELT signals have
been updated before calling cpufreq.
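
For illustration, a standalone userspace sketch of the ordering problem
(a hypothetical model, not kernel code: struct rq_model, governor_target()
and decay() are made-up stand-ins for the per-class rq signals, the
schedutil callback reached via cpufreq_update_util(), and PELT decay):

    #include <stdio.h>

    /* Toy model of the per-class utilization signals on a runqueue. */
    struct rq_model {
            unsigned long util_cfs;
            unsigned long util_rt;
            unsigned long util_dl;
            unsigned long util_irq;
    };

    /* Stand-in for the governor callback: it sums all class signals. */
    static unsigned long governor_target(const struct rq_model *rq)
    {
            return rq->util_cfs + rq->util_rt + rq->util_dl + rq->util_irq;
    }

    /* Placeholder for PELT decay of a blocked signal. */
    static unsigned long decay(unsigned long util)
    {
            return util / 2;
    }

    int main(void)
    {
            /* Blocked utilization left behind by recently-run tasks. */
            struct rq_model rq = {
                    .util_cfs = 200, .util_rt = 300,
                    .util_dl = 0, .util_irq = 50,
            };

            /* Old ordering: the CFS update fires the callback first... */
            rq.util_cfs = decay(rq.util_cfs);
            unsigned long stale = governor_target(&rq);

            /* ...and RT, DL and IRQ are only decayed afterwards. */
            rq.util_rt  = decay(rq.util_rt);
            rq.util_dl  = decay(rq.util_dl);
            rq.util_irq = decay(rq.util_irq);
            unsigned long fresh = governor_target(&rq);

            printf("governor saw %lu, up-to-date sum is %lu\n", stale, fresh);
            return 0;
    }

With the patch below, the RT, DL and IRQ signals are decayed before any
CFS update can invoke the callback, so the governor always sums fresh
values.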

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: dsmythies@telus.net
Cc: juri.lelli@redhat.com
Cc: mgorman@suse.de
Cc: rostedt@goodmis.org
Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking")
Fixes: 91c27493e78d ("sched/irq: Add IRQ utilization tracking")
Link: https://lkml.kernel.org/r/1572434309-32512-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Connor Kuehl <connor.kuehl@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 649c6b60929e2b544e0cdac66f58182f087baee9..ba7cc68a3993536ee9e036b415547f5280cb2faf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7530,6 +7530,19 @@ static void update_blocked_averages(int cpu)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+       update_irq_load_avg(rq, 0);
+
+       /* Don't need periodic decay once load/util_avg are null */
+       if (others_have_blocked(rq))
+               done = false;
+
        /*
         * Iterates the task_group tree in a bottom up fashion, see
         * list_add_leaf_cfs_rq() for details.
@@ -7557,14 +7570,6 @@ static void update_blocked_averages(int cpu)
                        done = false;
        }
 
-       curr_class = rq->curr->sched_class;
-       update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-       update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-       update_irq_load_avg(rq, 0);
-       /* Don't need periodic decay once load/util_avg are null */
-       if (others_have_blocked(rq))
-               done = false;
-
        update_blocked_load_status(rq, !done);
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -7625,12 +7630,18 @@ static inline void update_blocked_averages(int cpu)
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
-       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+       /*
+        * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+        * that RT, DL and IRQ signals have been updated before updating CFS.
+        */
        curr_class = rq->curr->sched_class;
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
+
+       update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
        update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
        rq_unlock_irqrestore(rq, &rf);
 }