git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
sched/fair: Provide update_sg_lb_stats() with sched domain statistics
author: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Sat, 11 Sep 2021 01:18:17 +0000 (18:18 -0700)
committer: Andrea Righi <andrea.righi@canonical.com>
Tue, 22 Feb 2022 18:10:39 +0000 (19:10 +0100)
Before deciding to pull tasks when using asymmetric packing of tasks,
on some architectures (e.g., x86) it is necessary to know not only the
state of dst_cpu but also of its SMT siblings. The decision to classify
a candidate busiest group as group_asym_packing is done in
update_sg_lb_stats(). Give this function access to the scheduling domain
statistics, which contain the statistics of the local group.

Originally-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Len Brown <len.brown@intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210911011819.12184-5-ricardo.neri-calderon@linux.intel.com
(cherry picked from commit c0d14b57fe0c11b65ce8a1a4a58a48f3f324ca0f)
Signed-off-by: Brad Figg <brad.figg@canonical.com>
kernel/sched/fair.c

index e5424278af035e5a6ac8b7625004a32428c047df..cfe6cba7f8ffcb8e07bda23580d36e83431d082e 100644 (file)
@@ -8621,6 +8621,7 @@ group_type group_classify(unsigned int imbalance_pct,
  * @sg_status: Holds flag indicating the status of the sched_group
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
+                                     struct sd_lb_stats *sds,
                                      struct sched_group *group,
                                      struct sg_lb_stats *sgs,
                                      int *sg_status)
@@ -8629,7 +8630,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
        memset(sgs, 0, sizeof(*sgs));
 
-       local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
+       local_group = group == sds->local;
 
        for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
@@ -9192,7 +9193,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                                update_group_capacity(env->sd, env->dst_cpu);
                }
 
-               update_sg_lb_stats(env, sg, sgs, &sg_status);
+               update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
 
                if (local_group)
                        goto next_group;