git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
sched/fair: Implement more accurate async detach
Author: Peter Zijlstra <peterz@infradead.org>
Fri, 12 May 2017 12:18:10 +0000 (14:18 +0200)
Committer: Ingo Molnar <mingo@kernel.org>
Fri, 29 Sep 2017 17:35:17 +0000 (19:35 +0200)
The problem with the overestimate is that it will subtract too big a
value from the load_sum, thereby pushing it down further than it ought
to go. Since runnable_load_avg is not subject to a similar 'force',
this results in the occasional 'runnable_load > load' situation.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

index 954b332cd899c0263268b3723160ae74a56c79e2..67c39642a5124f39412583319d4d373e03e55161 100644 (file)
@@ -3574,6 +3574,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
        if (cfs_rq->removed.nr) {
                unsigned long r;
+               u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
 
                raw_spin_lock(&cfs_rq->removed.lock);
                swap(cfs_rq->removed.util_avg, removed_util);
@@ -3582,17 +3583,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
                cfs_rq->removed.nr = 0;
                raw_spin_unlock(&cfs_rq->removed.lock);
 
-               /*
-                * The LOAD_AVG_MAX for _sum is a slight over-estimate,
-                * which is safe due to sub_positive() clipping at 0.
-                */
                r = removed_load;
                sub_positive(&sa->load_avg, r);
-               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+               sub_positive(&sa->load_sum, r * divider);
 
                r = removed_util;
                sub_positive(&sa->util_avg, r);
-               sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+               sub_positive(&sa->util_sum, r * divider);
 
                add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);