sched/fair: Improve consistency of allowed NUMA balance calculations
author Mel Gorman <mgorman@techsingularity.net>
Tue, 8 Feb 2022 09:43:33 +0000 (09:43 +0000)
committer Stefan Bader <stefan.bader@canonical.com>
Fri, 20 May 2022 12:38:06 +0000 (14:38 +0200)
BugLink: https://bugs.launchpad.net/bugs/1969110
[ Upstream commit 2cfb7a1b031b0e816af7a6ee0c6ab83b0acdf05a ]

There are inconsistencies when determining if a NUMA imbalance is allowed
that should be corrected.

o allow_numa_imbalance changes types and is not always examining
  the destination group, so both the type and the naming should be
  corrected.
o find_idlest_group uses the sched_domain's weight instead of the
  group weight, which is different to find_busiest_group.
o find_busiest_group uses the source group instead of the destination,
  which is different to task_numa_find_cpu.
o Both find_idlest_group and find_busiest_group should account
  for the number of running tasks that would result if a move were
  allowed, to be consistent with task_numa_find_cpu (see the sketch
  after this list).
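
To make the threshold concrete, here is a minimal userspace sketch of the
check the patch converges on (not kernel code; the 16-CPU group size and the
demo loop are assumptions for illustration): the destination group is asked
whether it would still have fewer than a quarter of its CPUs busy once the
waking task itself is counted.

  #include <stdbool.h>
  #include <stdio.h>

  /* Same threshold as the patched helper: allow an imbalance only while
   * fewer than a quarter of the group's CPUs would be running tasks. */
  static inline bool allow_numa_imbalance(unsigned int running,
                                          unsigned int weight)
  {
          return running < (weight >> 2);
  }

  int main(void)
  {
          unsigned int group_weight = 16; /* assumed destination group size */
          unsigned int nr;

          for (nr = 0; nr <= 4; nr++)
                  printf("sum_nr_running=%u -> imbalance %s\n", nr,
                         allow_numa_imbalance(nr + 1, group_weight) ?
                                 "allowed" : "not allowed");
          return 0;
  }

With a 16-CPU group the cut-off is four, so the task stays local while at
most two other tasks are already running there; beyond that the normal
balancing rules apply.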

Fixes: 7d2b5dd0bcc4 ("sched/numa: Allow a floating imbalance between NUMA nodes")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://lore.kernel.org/r/20220208094334.16379-2-mgorman@techsingularity.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
(cherry picked from commit ec5884cbbfd34ef7e4f380e5a56d32f73e7e762e)
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
kernel/sched/fair.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f85c0e9635c3fe7b432effaaa8dd818293e9bf82..5b151c9776dfcc6436f1aae3c9c98c4f757274a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9098,9 +9098,10 @@ static bool update_pick_idlest(struct sched_group *idlest,
  * This is an approximation as the number of running tasks may not be
  * related to the number of busy CPUs due to sched_setaffinity.
  */
-static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
+static inline bool
+allow_numa_imbalance(unsigned int running, unsigned int weight)
 {
-       return (dst_running < (dst_weight >> 2));
+       return (running < (weight >> 2));
 }
 
 /*
@@ -9234,12 +9235,13 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                                return idlest;
 #endif
                        /*
-                        * Otherwise, keep the task on this node to stay close
-                        * its wakeup source and improve locality. If there is
-                        * a real need of migration, periodic load balance will
-                        * take care of it.
+                        * Otherwise, keep the task close to the wakeup source
+                        * and improve locality if the number of running tasks
+                        * would remain below threshold where an imbalance is
+                        * allowed. If there is a real need of migration,
+                        * periodic load balance will take care of it.
                         */
-                       if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
+                       if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, local_sgs.group_weight))
                                return NULL;
                }
 
@@ -9445,7 +9447,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                /* Consider allowing a small imbalance between NUMA groups */
                if (env->sd->flags & SD_NUMA) {
                        env->imbalance = adjust_numa_imbalance(env->imbalance,
-                               busiest->sum_nr_running, busiest->group_weight);
+                               local->sum_nr_running + 1, local->group_weight);
                }
 
                return;
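
For context on how the pair passed here is consumed: adjust_numa_imbalance()
itself is untouched by this diff. The following self-contained sketch shows
the end-to-end effect; the body of adjust_numa_imbalance(), the value of
NUMA_IMBALANCE_MIN and the example numbers are assumptions about the
surrounding code in this kernel series, not taken from this patch.

  #include <stdbool.h>
  #include <stdio.h>

  #define NUMA_IMBALANCE_MIN 2    /* assumed value in this kernel series */

  static bool allow_numa_imbalance(unsigned int running, unsigned int weight)
  {
          return running < (weight >> 2);
  }

  /* assumed shape of adjust_numa_imbalance(); not part of this diff */
  static long adjust_numa_imbalance(int imbalance, int dst_running,
                                    int dst_weight)
  {
          if (!allow_numa_imbalance(dst_running, dst_weight))
                  return imbalance;

          /* tolerate a small imbalance while the destination is lightly
           * loaded so communicating tasks can stay on one node */
          if (imbalance <= NUMA_IMBALANCE_MIN)
                  return 0;

          return imbalance;
  }

  int main(void)
  {
          /* destination (local) group: 16 CPUs; "+ 1" counts the moved task */
          printf("adjusted imbalance = %ld\n",
                 adjust_numa_imbalance(2, 2 + 1, 16));  /* lightly loaded -> 0 */
          printf("adjusted imbalance = %ld\n",
                 adjust_numa_imbalance(2, 5 + 1, 16));  /* busier -> kept at 2 */
          return 0;
  }

With the change above, the running/weight pair now describes the destination
(local) group including the task that would move, matching what
task_numa_find_cpu already does.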