sched/numa: Find the preferred nid with complex NUMA topology
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eb87229ed4af5e3d78c0a6754d60ab4aed5b58a9..7760c2ad316276ed115cf35444eabc21f4c5ebfc 100644
@@ -23,6 +23,7 @@
 #include <linux/latencytop.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpuidle.h>
 #include <linux/slab.h>
 #include <linux/profile.h>
 #include <linux/interrupt.h>
@@ -827,11 +828,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
-       if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-               windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+       if (scan_size < MAX_SCAN_WINDOW)
+               windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;
 
        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
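Snapshotting the sysctl once matters because sysctl_numa_balancing_scan_size can be rewritten from userspace at any time: with two separate reads, the value compared against MAX_SCAN_WINDOW and the value divided by may differ. A minimal userspace sketch of the pattern, assuming fair.c's MAX_SCAN_WINDOW of 2560MB and using a C11 atomic in place of ACCESS_ONCE():

#include <stdatomic.h>
#include <stdio.h>

#define MAX_SCAN_WINDOW 2560            /* MB, as in fair.c */

static _Atomic unsigned int scan_size_mb = 256; /* tunable: may change at any time */

static unsigned int scan_windows(void)
{
        /*
         * Load the tunable exactly once; the comparison and the division
         * then both see the same value, so a concurrent update cannot
         * slip a different (or zero) divisor in between.
         */
        unsigned int scan_size = atomic_load(&scan_size_mb);
        unsigned int windows = 1;

        if (scan_size < MAX_SCAN_WINDOW)
                windows = MAX_SCAN_WINDOW / scan_size;
        return windows;
}

int main(void)
{
        printf("windows: %u\n", scan_windows());        /* 2560 / 256 = 10 */
        return 0;
}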
@@ -923,15 +925,81 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
                group->faults_cpu[task_faults_idx(nid, 1)];
 }
 
+/* Handle placement on systems where not all nodes are directly connected. */
+static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
+                                       int maxdist, bool task)
+{
+       unsigned long score = 0;
+       int node;
+
+       /*
+        * All nodes are directly connected, and the same distance
+        * from each other. No need for fancy placement algorithms.
+        */
+       if (sched_numa_topology_type == NUMA_DIRECT)
+               return 0;
+
+       /*
+        * This code is called for each node, introducing N^2 complexity,
+        * which should be ok given the number of nodes rarely exceeds 8.
+        */
+       for_each_online_node(node) {
+               unsigned long faults;
+               int dist = node_distance(nid, node);
+
+               /*
+                * The furthest away nodes in the system are not interesting
+                * for placement; nid was already counted.
+                */
+               if (dist == sched_max_numa_distance || node == nid)
+                       continue;
+
+               /*
+                * On systems with a backplane NUMA topology, compare groups
+                * of nodes, and move tasks towards the group with the most
+                * memory accesses. When comparing two nodes at distance
+                * "hoplimit", only nodes closer by than "hoplimit" are part
+                * of each group. Skip other nodes.
+                */
+               if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                       dist > maxdist)
+                       continue;
+
+               /* Add up the faults from nearby nodes. */
+               if (task)
+                       faults = task_faults(p, node);
+               else
+                       faults = group_faults(p, node);
+
+               /*
+                * On systems with a glueless mesh NUMA topology, there are
+                * no fixed "groups of nodes". Instead, nodes that are not
+                * directly connected bounce traffic through intermediate
+                * nodes; a numa_group can occupy any set of nodes.
+                * The further away a node is, the less the faults count.
+                * This seems to result in good task placement.
+                */
+               if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
+                       faults *= (sched_max_numa_distance - dist);
+                       faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
+               }
+
+               score += faults;
+       }
+
+       return score;
+}
+
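The glueless-mesh scaling above is a linear falloff between LOCAL_DISTANCE and sched_max_numa_distance. A standalone sketch with hypothetical SLIT values (LOCAL_DISTANCE is 10 per ACPI; 22 and 16 are invented distances):

#include <stdio.h>

#define LOCAL_DISTANCE 10       /* ACPI SLIT distance of a node to itself */

int main(void)
{
        unsigned long faults = 1200;    /* hypothetical faults on one node */
        int max_dist = 22;              /* stand-in for sched_max_numa_distance */
        int dist = 16;                  /* one hop away in this mesh */

        /* Linear falloff: a node at LOCAL_DISTANCE would keep all of its
         * faults, one at max_dist contributes nothing (and is skipped by
         * the loop anyway). Here: 1200 * (22 - 16) / (22 - 10) = 600. */
        faults = faults * (max_dist - dist) / (max_dist - LOCAL_DISTANCE);
        printf("scaled contribution: %lu\n", faults);   /* 600 */
        return 0;
}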
 /*
  * These return the fraction of accesses done by a particular task, or
  * task group, on a particular numa node.  The group weight is given a
  * larger multiplier, in order to group tasks together that are almost
  * evenly spread out between numa nodes.
  */
-static inline unsigned long task_weight(struct task_struct *p, int nid)
+static inline unsigned long task_weight(struct task_struct *p, int nid,
+                                       int dist)
 {
-       unsigned long total_faults;
+       unsigned long faults, total_faults;
 
        if (!p->numa_faults_memory)
                return 0;
@@ -941,15 +1009,29 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
        if (!total_faults)
                return 0;
 
-       return 1000 * task_faults(p, nid) / total_faults;
+       faults = task_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, true);
+
+       return 1000 * faults / total_faults;
 }
 
-static inline unsigned long group_weight(struct task_struct *p, int nid)
+static inline unsigned long group_weight(struct task_struct *p, int nid,
+                                        int dist)
 {
-       if (!p->numa_group || !p->numa_group->total_faults)
+       unsigned long faults, total_faults;
+
+       if (!p->numa_group)
+               return 0;
+
+       total_faults = p->numa_group->total_faults;
+
+       if (!total_faults)
                return 0;
 
-       return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
+       faults = group_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, false);
+
+       return 1000 * faults / total_faults;
 }
 
 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
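Both weight helpers above now share the same shape: the node's own faults plus the score_nearby_nodes() contribution, expressed per mille of the task's (or group's) total faults. As a hypothetical worked example, a task with 300 of its 1000 recorded faults on nid and a nearby-node score of 150 gets task_weight = 1000 * (300 + 150) / 1000 = 450.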
@@ -1082,6 +1164,7 @@ struct task_numa_env {
        struct numa_stats src_stats, dst_stats;
 
        int imbalance_pct;
+       int dist;
 
        struct task_struct *best_task;
        long best_imp;
@@ -1161,11 +1244,22 @@ static void task_numa_compare(struct task_numa_env *env,
        long load;
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
+       int dist = env->dist;
 
        rcu_read_lock();
-       cur = ACCESS_ONCE(dst_rq->curr);
-       if (cur->pid == 0) /* idle */
+
+       raw_spin_lock_irq(&dst_rq->lock);
+       cur = dst_rq->curr;
+       /*
+        * No need to move the exiting task, and this ensures that ->curr
+        * wasn't reaped and thus get_task_struct() in task_numa_assign()
+        * is safe under RCU read lock.
+        * Note that rcu_read_lock() itself can't protect from the final
+        * put_task_struct() after the last schedule().
+        */
+       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       raw_spin_unlock_irq(&dst_rq->lock);
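The locking above is what makes the later get_task_struct() in task_numa_assign() safe: the final put_task_struct() of an exiting task happens after its last schedule(), outside any RCU read-side critical section, so rcu_read_lock() alone cannot keep ->curr alive. Holding dst_rq->lock prevents that CPU from completing a context switch, so ->curr is still running and cannot be freed. A kernel-style sketch of the same pattern (grab_dst_curr() is an invented helper, not part of the patch, and takes the reference immediately rather than in task_numa_assign()):

static struct task_struct *grab_dst_curr(struct rq *dst_rq)
{
        struct task_struct *cur;

        raw_spin_lock_irq(&dst_rq->lock);
        cur = dst_rq->curr;
        if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;             /* idle or exiting: not a swap candidate */
        else
                get_task_struct(cur);   /* cannot race with the final put: the
                                           CPU cannot schedule() past us while
                                           we hold its rq->lock */
        raw_spin_unlock_irq(&dst_rq->lock);
        return cur;
}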
 
        /*
         * "imp" is the fault differential for the source task between the
@@ -1184,8 +1278,8 @@ static void task_numa_compare(struct task_numa_env *env,
                 * in any group then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
-                       imp = taskimp + task_weight(cur, env->src_nid) -
-                             task_weight(cur, env->dst_nid);
+                       imp = taskimp + task_weight(cur, env->src_nid, dist) -
+                             task_weight(cur, env->dst_nid, dist);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
@@ -1199,11 +1293,11 @@ static void task_numa_compare(struct task_numa_env *env,
                         * instead.
                         */
                        if (cur->numa_group)
-                               imp += group_weight(cur, env->src_nid) -
-                                      group_weight(cur, env->dst_nid);
+                               imp += group_weight(cur, env->src_nid, dist) -
+                                      group_weight(cur, env->dst_nid, dist);
                        else
-                               imp += task_weight(cur, env->src_nid) -
-                                      task_weight(cur, env->dst_nid);
+                               imp += task_weight(cur, env->src_nid, dist) -
+                                      task_weight(cur, env->dst_nid, dist);
                }
        }
 
@@ -1302,7 +1396,7 @@ static int task_numa_migrate(struct task_struct *p)
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
-       int nid, ret;
+       int nid, ret, dist;
        long taskimp, groupimp;
 
        /*
@@ -1330,12 +1424,13 @@ static int task_numa_migrate(struct task_struct *p)
                return -EINVAL;
        }
 
-       taskweight = task_weight(p, env.src_nid);
-       groupweight = group_weight(p, env.src_nid);
-       update_numa_stats(&env.src_stats, env.src_nid);
        env.dst_nid = p->numa_preferred_nid;
-       taskimp = task_weight(p, env.dst_nid) - taskweight;
-       groupimp = group_weight(p, env.dst_nid) - groupweight;
+       dist = env.dist = node_distance(env.src_nid, env.dst_nid);
+       taskweight = task_weight(p, env.src_nid, dist);
+       groupweight = group_weight(p, env.src_nid, dist);
+       update_numa_stats(&env.src_stats, env.src_nid);
+       taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
+       groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
        /* Try to find a spot on the preferred nid. */
@@ -1347,12 +1442,20 @@ static int task_numa_migrate(struct task_struct *p)
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;
 
+                       dist = node_distance(env.src_nid, env.dst_nid);
+                       if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                               dist != env.dist) {
+                               taskweight = task_weight(p, env.src_nid, dist);
+                               groupweight = group_weight(p, env.src_nid, dist);
+                       }
+
                        /* Only consider nodes where both task and groups benefit */
-                       taskimp = task_weight(p, nid) - taskweight;
-                       groupimp = group_weight(p, nid) - groupweight;
+                       taskimp = task_weight(p, nid, dist) - taskweight;
+                       groupimp = group_weight(p, nid, dist) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;
 
+                       env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        task_numa_find_cpu(&env, taskimp, groupimp);
@@ -1519,7 +1622,7 @@ static void update_task_scan_period(struct task_struct *p,
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly
                 */
-               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }
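The extra "+ 1" in the denominator guards the case where both counters are zero: DIV_ROUND_UP(n, d) expands to (n + d - 1) / d, so private + shared == 0 would otherwise divide by zero. With the fix, private = shared = 0 yields ratio = DIV_ROUND_UP(0, 1) = 0, and for realistic counts the skew is negligible: private = 700, shared = 300 gives DIV_ROUND_UP(7000, 1001) = 7, the same slot count as before (NUMA_PERIOD_SLOTS is 10 here).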
 
@@ -1556,6 +1659,92 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
        return delta;
 }
 
+/*
+ * Determine the preferred nid for a task in a numa_group. This needs to
+ * be done in a way that produces consistent results with group_weight,
+ * otherwise workloads might not converge.
+ */
+static int preferred_group_nid(struct task_struct *p, int nid)
+{
+       nodemask_t nodes;
+       int dist;
+
+       /* Direct connections between all NUMA nodes. */
+       if (sched_numa_topology_type == NUMA_DIRECT)
+               return nid;
+
+       /*
+        * On a system with glueless mesh NUMA topology, group_weight
+        * scores nodes according to the number of NUMA hinting faults on
+        * both the node itself, and on nearby nodes.
+        */
+       if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
+               unsigned long score, max_score = 0;
+               int node, max_node = nid;
+
+               dist = sched_max_numa_distance;
+
+               for_each_online_node(node) {
+                       score = group_weight(p, node, dist);
+                       if (score > max_score) {
+                               max_score = score;
+                               max_node = node;
+                       }
+               }
+               return max_node;
+       }
+
+       /*
+        * Finding the preferred nid in a system with NUMA backplane
+        * interconnect topology is more involved. The goal is to locate
+        * tasks from numa_groups near each other in the system, and
+        * untangle workloads from different sides of the system. This requires
+        * searching down the hierarchy of node groups, recursively searching
+        * inside the highest scoring group of nodes. The nodemask tricks
+        * keep the complexity of the search down.
+        */
+       nodes = node_online_map;
+       for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
+               unsigned long max_faults = 0;
+               nodemask_t max_group = NODE_MASK_NONE;
+               int a, b;
+
+               /* Are there nodes at this distance from each other? */
+               if (!find_numa_distance(dist))
+                       continue;
+
+               for_each_node_mask(a, nodes) {
+                       unsigned long faults = 0;
+                       nodemask_t this_group;
+                       nodes_clear(this_group);
+
+                       /* Sum group's NUMA faults; includes a==b case. */
+                       for_each_node_mask(b, nodes) {
+                               if (node_distance(a, b) < dist) {
+                                       faults += group_faults(p, b);
+                                       node_set(b, this_group);
+                                       node_clear(b, nodes);
+                               }
+                       }
+
+                       /* Remember the top group. */
+                       if (faults > max_faults) {
+                               max_faults = faults;
+                               max_group = this_group;
+                               /*
+                                * subtle: at the smallest distance there is
+                                * just one node left in each "group", the
+                                * winner is the preferred nid.
+                                */
+                               nid = a;
+                       }
+               }
+               /* Next round, evaluate the nodes within max_group. */
+               if (!max_faults)
+                       break;
+               nodes = max_group;
+       }
+       return nid;
+}
+
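The nodemask search above is easier to follow with concrete numbers. This standalone sketch mirrors the narrowing with a plain bitmask on an invented 4-node, two-backplane topology (distances and fault counts are made up):

#include <stdio.h>

#define NR_NODES        4
#define LOCAL_DISTANCE  10      /* ACPI SLIT distance of a node to itself */

/* Invented SLIT: nodes {0,1} share one backplane, {2,3} the other. */
static const int distance[NR_NODES][NR_NODES] = {
        { 10, 16, 22, 22 },
        { 16, 10, 22, 22 },
        { 22, 22, 10, 16 },
        { 22, 22, 16, 10 },
};
static const unsigned long faults[NR_NODES] = { 100, 300, 400, 50 };

int main(void)
{
        unsigned int nodes = (1u << NR_NODES) - 1;      /* all nodes online */
        int nid = 0;

        /* The kernel also skips distances absent from the SLIT via
         * find_numa_distance(); iterating them all only wastes cycles. */
        for (int dist = 22; dist > LOCAL_DISTANCE; dist--) {
                unsigned long max_faults = 0;
                unsigned int max_group = 0;

                for (int a = 0; a < NR_NODES; a++) {
                        unsigned long f = 0;
                        unsigned int group = 0;

                        if (!(nodes & (1u << a)))
                                continue;
                        /* Pull every surviving node closer than dist to a
                         * (a itself included) into one candidate group. */
                        for (int b = 0; b < NR_NODES; b++) {
                                if ((nodes & (1u << b)) &&
                                    distance[a][b] < dist) {
                                        f += faults[b];
                                        group |= 1u << b;
                                        nodes &= ~(1u << b);
                                }
                        }
                        if (f > max_faults) {
                                max_faults = f;
                                max_group = group;
                                /* At dist == LOCAL_DISTANCE + 1 every group
                                 * is a single node: the last winner is the
                                 * preferred nid. */
                                nid = a;
                        }
                }
                if (!max_faults)        /* no faults anywhere: keep nid */
                        break;
                nodes = max_group;      /* recurse into the winning group */
        }
        printf("preferred nid: %d\n", nid);     /* 2: side {2,3} wins, then 2 */
        return 0;
}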
 static void task_numa_placement(struct task_struct *p)
 {
        int seq, nid, max_nid = -1, max_group_nid = -1;
@@ -1638,7 +1827,7 @@ static void task_numa_placement(struct task_struct *p)
        if (p->numa_group) {
                update_numa_active_node_mask(p->numa_group);
                spin_unlock_irq(group_lock);
-               max_nid = max_group_nid;
+               max_nid = preferred_group_nid(p, max_group_nid);
        }
 
        if (max_faults) {
@@ -1817,10 +2006,6 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        if (!p->mm)
                return;
 
-       /* Do not worry about placement if exiting */
-       if (p->state == TASK_DEAD)
-               return;
-
        /* Allocate buffer to track faults on a per-node basis */
        if (unlikely(!p->numa_faults_memory)) {
                int size = sizeof(*p->numa_faults_memory) *
@@ -1959,7 +2144,7 @@ void task_numa_work(struct callback_head *work)
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
-               if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
+               if (!vma_migratable(vma) || !vma_policy_mof(vma))
                        continue;
 
                /*
@@ -2224,8 +2409,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 
        /*
         * As y^PERIOD = 1/2, we can combine
-        *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
-        * With a look-up table which covers k^n (n<PERIOD)
+        *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+        * With a look-up table which covers y^n (n<PERIOD)
         *
         * To achieve constant time decay_load.
         */
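The corrected identity is easy to sanity-check numerically: with y defined by y^PERIOD = 1/2 (LOAD_AVG_PERIOD is 32 here), y^n = (1/2)^(n/PERIOD) * y^(n%PERIOD), so whole periods become a shift and only the remainder needs the look-up table. A quick standalone verification, with pow() standing in for the kernel's inverse-multiply table:

#include <math.h>
#include <stdio.h>

#define PERIOD 32       /* LOAD_AVG_PERIOD in the scheduler */

int main(void)
{
        double y = pow(0.5, 1.0 / PERIOD);      /* so y^PERIOD == 1/2 */
        int n = 40;

        /* y^40 == (1/2)^(40/32) * y^(40%32) == (1/2)^1 * y^8 */
        printf("%f == %f\n", pow(y, n),
               pow(0.5, n / PERIOD) * pow(y, n % PERIOD));
        return 0;       /* both sides print 0.420448 */
}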
@@ -4284,6 +4469,7 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
        s64 this_load, load;
+       s64 this_eff_load, prev_eff_load;
        int idx, this_cpu, prev_cpu;
        struct task_group *tg;
        unsigned long weight;
@@ -4327,21 +4513,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       if (this_load > 0) {
-               s64 this_eff_load, prev_eff_load;
+       this_eff_load = 100;
+       this_eff_load *= capacity_of(prev_cpu);
+
+       prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+       prev_eff_load *= capacity_of(this_cpu);
 
-               this_eff_load = 100;
-               this_eff_load *= capacity_of(prev_cpu);
+       if (this_load > 0) {
                this_eff_load *= this_load +
                        effective_load(tg, this_cpu, weight, weight);
 
-               prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-               prev_eff_load *= capacity_of(this_cpu);
                prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+       }
+
+       balanced = this_eff_load <= prev_eff_load;
 
-               balanced = this_eff_load <= prev_eff_load;
-       } else
-               balanced = true;
        schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 
        if (!balanced)
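The restructure also changes behaviour for an idle waker: the capacity products are computed unconditionally, so when this_load <= 0 the test degenerates to 100 * capacity_of(prev_cpu) <= (100 + (imbalance_pct - 100) / 2) * capacity_of(this_cpu) instead of short-circuiting to balanced = true, letting a low-capacity idle this_cpu lose to prev_cpu. A hypothetical worked case with equal capacities of 1024 and imbalance_pct = 125: this_eff_load = 100 * 1024 = 102400, prev_eff_load = 112 * 1024 = 114688, so the wakeup still counts as balanced.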
@@ -4418,20 +4604,46 @@ static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
        unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
+       unsigned int min_exit_latency = UINT_MAX;
+       u64 latest_idle_timestamp = 0;
+       int least_loaded_cpu = this_cpu;
+       int shallowest_idle_cpu = -1;
        int i;
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
+               if (idle_cpu(i)) {
+                       struct rq *rq = cpu_rq(i);
+                       struct cpuidle_state *idle = idle_get_state(rq);
+                       if (idle && idle->exit_latency < min_exit_latency) {
+                               /*
+                                * We give priority to a CPU whose idle state
+                                * has the smallest exit latency irrespective
+                                * of any idle timestamp.
+                                */
+                               min_exit_latency = idle->exit_latency;
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+                                  rq->idle_stamp > latest_idle_timestamp) {
+                               /*
+                                * If equal or no active idle state, then
+                                * the most recently idled CPU might have
+                                * a warmer cache.
+                                */
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       }
+               } else {
+                       load = weighted_cpuload(i);
+                       if (load < min_load || (load == min_load && i == this_cpu)) {
+                               min_load = load;
+                               least_loaded_cpu = i;
+                       }
                }
        }
 
-       return idlest;
+       return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
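A standalone sketch of that selection order, with invented per-CPU values: the shallowest idle state wins, the most recent idle_stamp breaks ties, and the least-loaded CPU is only a fallback when nothing is idle (the NULL idle-state case from the kernel code is elided here):

#include <stdio.h>

struct cpu {
        int idle;                       /* is the CPU idle? */
        unsigned int exit_latency;      /* us needed to leave its idle state */
        unsigned long long idle_stamp;  /* when it went idle */
        unsigned long load;             /* weighted load if busy */
};

int main(void)
{
        struct cpu cpus[] = {
                { 1, 10, 1000, 0 },     /* deep idle */
                { 1,  2,  900, 0 },     /* shallow idle */
                { 1,  2,  950, 0 },     /* same state, idled later: wins tie */
                { 0,  0,    0, 512 },   /* busy: only a fallback */
        };
        unsigned int min_exit = ~0u;
        unsigned long long latest = 0;
        unsigned long min_load = ~0ul;
        int shallowest = -1, least_loaded = 0;

        for (int i = 0; i < 4; i++) {
                if (cpus[i].idle) {
                        if (cpus[i].exit_latency < min_exit) {
                                min_exit = cpus[i].exit_latency;
                                latest = cpus[i].idle_stamp;
                                shallowest = i;
                        } else if (cpus[i].exit_latency == min_exit &&
                                   cpus[i].idle_stamp > latest) {
                                latest = cpus[i].idle_stamp;
                                shallowest = i;
                        }
                } else if (cpus[i].load < min_load) {
                        min_load = cpus[i].load;
                        least_loaded = i;
                }
        }
        printf("picked cpu %d\n",
               shallowest != -1 ? shallowest : least_loaded);
        return 0;       /* prints "picked cpu 2" */
}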
 
 /*
@@ -5291,24 +5503,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
        if (!tsk_cache_hot)
                tsk_cache_hot = migrate_degrades_locality(p, env);
 
-       if (migrate_improves_locality(p, env)) {
-#ifdef CONFIG_SCHEDSTATS
+       if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
+           env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
                if (tsk_cache_hot) {
                        schedstat_inc(env->sd, lb_hot_gained[env->idle]);
                        schedstat_inc(p, se.statistics.nr_forced_migrations);
                }
-#endif
-               return 1;
-       }
-
-       if (!tsk_cache_hot ||
-               env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-
-               if (tsk_cache_hot) {
-                       schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-                       schedstat_inc(p, se.statistics.nr_forced_migrations);
-               }
-
                return 1;
        }
 
@@ -5705,19 +5905,17 @@ unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
        return default_scale_capacity(sd, cpu);
 }
 
-static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
-       unsigned long smt_gain = sd->smt_gain;
-
-       smt_gain /= weight;
+       if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+               return sd->smt_gain / sd->span_weight;
 
-       return smt_gain;
+       return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       return default_scale_smt_capacity(sd, cpu);
+       return default_scale_cpu_capacity(sd, cpu);
 }
 
 static unsigned long scale_rt_capacity(int cpu)
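The net effect: only an SMT domain (SD_SHARE_CPUCAPACITY with more than one thread) divides its smt_gain across the hardware threads, e.g. the default smt_gain of 1178 used at the time split over two siblings gives 589 per thread, while every other domain level now reports the full SCHED_CAPACITY_SCALE of 1024. That is what lets update_cpu_capacity() in the next hunk drop its own SMT special case.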
@@ -5756,18 +5954,15 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
        unsigned long capacity = SCHED_CAPACITY_SCALE;
        struct sched_group *sdg = sd->groups;
 
-       if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
-               if (sched_feat(ARCH_CAPACITY))
-                       capacity *= arch_scale_smt_capacity(sd, cpu);
-               else
-                       capacity *= default_scale_smt_capacity(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_cpu_capacity(sd, cpu);
+       else
+               capacity *= default_scale_cpu_capacity(sd, cpu);
 
-               capacity >>= SCHED_CAPACITY_SHIFT;
-       }
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
        sdg->sgc->capacity_orig = capacity;
 
@@ -6414,7 +6609,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
                goto force_balance;
 
        /*
-        * If the local group is more busy than the selected busiest group
+        * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
         */
        if (local->avg_load >= busiest->avg_load)
@@ -6429,13 +6624,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
        if (env->idle == CPU_IDLE) {
                /*
-                * This cpu is idle. If the busiest group load doesn't
-                * have more tasks than the number of available cpu's and
-                * there is no imbalance between this and busiest group
-                * wrt to idle cpu's, it is balanced.
+                * This cpu is idle. If the busiest group is not overloaded
+                * and there is no imbalance between this and busiest group
+                * wrt idle cpus, it is balanced. The imbalance becomes
+                * significant if the diff is greater than 1; otherwise we
+                * might end up just moving the imbalance to another group.
                 */
-               if ((local->idle_cpus < busiest->idle_cpus) &&
-                   busiest->sum_nr_running <= busiest->group_weight)
+               if ((busiest->group_type != group_overloaded) &&
+                               (local->idle_cpus <= (busiest->idle_cpus + 1)))
                        goto out_balanced;
        } else {
                /*
@@ -6607,7 +6803,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        struct sched_group *group;
        struct rq *busiest;
        unsigned long flags;
-       struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
        struct lb_env env = {
                .sd             = sd,
@@ -6693,12 +6889,6 @@ more_balance:
 
                local_irq_restore(flags);
 
-               /*
-                * some other cpu did the load balance for us.
-                */
-               if (cur_ld_moved && env.dst_cpu != smp_processor_id())
-                       resched_cpu(env.dst_cpu);
-
                if (env.flags & LBF_NEED_BREAK) {
                        env.flags &= ~LBF_NEED_BREAK;
                        goto more_balance;