sched/dl: Add dl_rq utilization tracking
author Vincent Guittot <vincent.guittot@linaro.org>
Thu, 28 Jun 2018 15:45:07 +0000 (17:45 +0200)
committer Ingo Molnar <mingo@kernel.org>
Sun, 15 Jul 2018 21:51:20 +0000 (23:51 +0200)
Similarly to what happens with RT tasks, CFS tasks can be preempted by DL
tasks and the CFS utilization might no longer describe the real
utilization level.

The current DL bandwidth reflects the requirements to meet deadlines when tasks
are enqueued, but not the current utilization of the DL sched class. We track
the DL class utilization to estimate the system utilization.
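
As a rough illustration of how the new signal could feed a system-wide
utilization estimate, the userspace sketch below sums the per-class PELT
utilization signals and clamps the result at full capacity. It is only a
model: the structs are simplified stand-ins for struct rq, and the
cpu_util_total() helper is a hypothetical name, not a kernel API; only
avg_rt and avg_dl mirror fields that actually exist after this patch.

	/*
	 * Toy model (plain C, not kernel code): combine the CFS, RT and DL
	 * utilization signals into one estimate of how busy the CPU is.
	 */
	#include <stdio.h>

	#define SCHED_CAPACITY_SCALE 1024UL

	struct sched_avg {
		unsigned long util_avg;		/* PELT utilization, 0..1024 */
	};

	struct rq {
		struct sched_avg cfs_avg;	/* stand-in for rq->cfs.avg */
		struct sched_avg avg_rt;	/* RT class tracking */
		struct sched_avg avg_dl;	/* added by this patch */
	};

	/* Sum the per-class utilization and clamp at full capacity. */
	static unsigned long cpu_util_total(const struct rq *rq)
	{
		unsigned long util = rq->cfs_avg.util_avg +
				     rq->avg_rt.util_avg +
				     rq->avg_dl.util_avg;

		return util < SCHED_CAPACITY_SCALE ? util : SCHED_CAPACITY_SCALE;
	}

	int main(void)
	{
		struct rq rq = {
			.cfs_avg = { .util_avg = 300 },
			.avg_rt  = { .util_avg = 100 },
			.avg_dl  = { .util_avg = 200 },
		};

		printf("estimated CPU utilization: %lu/%lu\n",
		       cpu_util_total(&rq), SCHED_CAPACITY_SCALE);
		return 0;
	}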

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-5-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/pelt.c
kernel/sched/pelt.h
kernel/sched/sched.h

index fbfc3f1d368a08dd9ebd7c510caf67f52d377334..f4de26982d80a00d1f40a08bff326d2b6b91c3c7 100644 (file)
@@ -16,6 +16,7 @@
  *                    Fabio Checconi <fchecconi@gmail.com>
  */
 #include "sched.h"
+#include "pelt.h"
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1761,6 +1762,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        deadline_queue_push_tasks(rq);
 
+       if (rq->curr->sched_class != &dl_sched_class)
+               update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
        return p;
 }
 
@@ -1768,6 +1772,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
        update_curr_dl(rq);
 
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
@@ -1784,6 +1789,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
        update_curr_dl(rq);
 
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
        /*
         * Even when we have runtime, update_curr_dl() might have resulted in us
         * not being the leftmost task anymore. In that case NEED_RESCHED will
index 5b453213cd18b4f488ede2fea6433268c52e0de7..f096275c7df2e9424217a495ff77ff63d508cf77 100644 (file)
@@ -7290,11 +7290,14 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
        return false;
 }
 
-static inline bool rt_rq_has_blocked(struct rq *rq)
+static inline bool others_rqs_have_blocked(struct rq *rq)
 {
        if (READ_ONCE(rq->avg_rt.util_avg))
                return true;
 
+       if (READ_ONCE(rq->avg_dl.util_avg))
+               return true;
+
        return false;
 }
 
@@ -7358,8 +7361,9 @@ static void update_blocked_averages(int cpu)
                        done = false;
        }
        update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
        /* Don't need periodic decay once load/util_avg are null */
-       if (rt_rq_has_blocked(rq))
+       if (others_rqs_have_blocked(rq))
                done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -7427,9 +7431,10 @@ static inline void update_blocked_averages(int cpu)
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
        update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
-       if (!cfs_rq_has_blocked(cfs_rq) && !rt_rq_has_blocked(rq))
+       if (!cfs_rq_has_blocked(cfs_rq) && !others_rqs_have_blocked(rq))
                rq->has_blocked_load = 0;
 #endif
        rq_unlock_irqrestore(rq, &rf);
index a00b1ba3dd5b617ebb8578e720d6c6df99c2e35a..8b78b6320cdafd2f506799e42525a2ccb99455ae 100644 (file)
@@ -334,3 +334,26 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 
        return 0;
 }
+
+/*
+ * dl_rq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+                               running,
+                               running,
+                               running)) {
+
+               ___update_load_avg(&rq->avg_dl, 1, 1);
+               return 1;
+       }
+
+       return 0;
+}
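
The comment above update_dl_rq_load_avg() reflects that the dl_rq signal feeds
the same "running" input into the load, runnable and running sums, so util_avg
converges towards the fraction of time the DL class keeps the CPU busy, scaled
to SCHED_CAPACITY_SCALE. The floating-point toy below sketches that convergence
under the standard 32-period half-life; it is not the kernel's fixed-point PELT
implementation.

	/* Toy PELT convergence demo (plain C, compile with -lm). */
	#include <math.h>
	#include <stdio.h>

	#define PELT_HALFLIFE	32	/* contributions halve every 32 periods */

	int main(void)
	{
		double y = pow(0.5, 1.0 / PELT_HALFLIFE);
		double max_sum = 1.0 / (1.0 - y);	/* geometric series limit */
		double running_fraction = 0.5;		/* DL class runs 50% of each period */
		double sum = 0.0;
		int period;

		/* Accrue a smoothed per-period contribution with geometric decay. */
		for (period = 0; period < 1000; period++)
			sum = sum * y + running_fraction;

		/* util_avg settles near running_fraction * SCHED_CAPACITY_SCALE. */
		printf("util_avg ~= %.0f / 1024\n", 1024.0 * sum / max_sum);
		return 0;
	}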
index b2983b741d577c833166cceed5569238e36fbb07..0e4f912461ade1c3300bebb77f2ca322a1c278f9 100644 (file)
@@ -4,6 +4,7 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
 int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
 int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
 /*
  * When a task is dequeued, its estimated utilization should not be update if
@@ -45,6 +46,11 @@ update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
        return 0;
 }
 
+static inline int
+update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+       return 0;
+}
 #endif
 
 
index 405dd9ba6b3918aaf17a2c146d91461d2b8a9c84..ab8b5296b5f604eb8f98a11d71f7f6b9482cc0b7 100644 (file)
@@ -856,6 +856,7 @@ struct rq {
        u64                     rt_avg;
        u64                     age_stamp;
        struct sched_avg        avg_rt;
+       struct sched_avg        avg_dl;
        u64                     idle_stamp;
        u64                     avg_idle;