Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 3 Aug 2012 17:58:13 +0000 (10:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 3 Aug 2012 17:58:13 +0000 (10:58 -0700)
Pull scheduler fixes from Ingo Molnar:
 "Fixes and two late cleanups"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
  sched: Fix comment about PREEMPT_ACTIVE bit location
  sched: Fix minor code style issues
  sched: Use task_rq_unlock() in __sched_setscheduler()
  sched/numa: Add SD_PREFER_SIBLING to CPU domain

include/linux/hardirq.h
include/linux/topology.h
kernel/sched/core.c
kernel/sched/cpupri.c
kernel/sched/fair.c

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f30971858ca5127de592cdaa0cec227935384..305f23cd7cff53f3ef97794db7d6aa7dde6035c6 100644
@@ -22,7 +22,7 @@
  *
  * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
  * - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
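
The comment fix above matches the generic layout in this header: PREEMPT_ACTIVE sits one bit above the NMI bit, so with the hardirq count in bits 16-25 and NMI in bit 26 it lands in bit 27, not 28. A minimal standalone sketch of that shift arithmetic (the constants mirror this header; the program itself is illustrative, not kernel code):

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	10
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)	/* 8  */
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)	/* 26 */
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)	/* 27, hence the fix */

int main(void)
{
	printf("NMI bit:            %d\n", NMI_SHIFT);
	printf("PREEMPT_ACTIVE bit: %d\n", PREEMPT_ACTIVE_SHIFT);
	return 0;
}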
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43394dfa82f3926c356abb4543b2e14bf36..fec12d667211dd398ba07ed5127b9e3485bcdd49 100644
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
                                | 0*SD_SHARE_CPUPOWER                   \
                                | 0*SD_SHARE_PKG_RESOURCES              \
                                | 0*SD_SERIALIZE                        \
+                               | 1*SD_PREFER_SIBLING                   \
                                ,                                       \
        .last_balance           = jiffies,                              \
        .balance_interval       = 1,                                    \
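
The one-line change above turns SD_PREFER_SIBLING on in the CPU-domain initializer. The surrounding 0*FLAG / 1*FLAG idiom keeps every domain flag spelled out in the initializer, so a patch like this only has to flip a multiplier. A self-contained sketch of the idiom (the flag values here are invented for illustration):

#include <stdio.h>

#define SD_SERIALIZE		0x01	/* made-up values, illustration only */
#define SD_PREFER_SIBLING	0x02

int main(void)
{
	unsigned int flags = 0*SD_SERIALIZE		/* disabled, but still listed */
			   | 1*SD_PREFER_SIBLING	/* enabled, as in this patch  */
			   ;

	printf("prefer sibling: %s\n",
	       (flags & SD_PREFER_SIBLING) ? "on" : "off");
	return 0;
}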
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d325c4b2dcbb0c5995a903d76e3035129d1590c3..82ad284f823b6a60e81b1ebef44bae94ca73db6e 100644
@@ -4340,9 +4340,7 @@ recheck:
         */
        if (unlikely(policy == p->policy && (!rt_policy(policy) ||
                        param->sched_priority == p->rt_priority))) {
-
-               __task_rq_unlock(rq);
-               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+               task_rq_unlock(rq, p, &flags);
                return 0;
        }
 
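The hunk above swaps an open-coded unlock pair for the existing task_rq_unlock() helper, which releases the two locks taken by task_rq_lock() in the right order. Roughly what the helper does in this era's kernel/sched/core.c (a simplified sketch; the lockdep __releases() annotations are omitted):

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
{
	raw_spin_unlock(&rq->lock);				/* what __task_rq_unlock() did     */
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);	/* restores the saved irq state    */
}
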
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586fdf6607db63c5f43a2e1fbbb32ff0ac2c7..23aa789c53ee5c6e061c6bf2b0f8181292695044 100644
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
 {
-       int                  idx      = 0;
-       int                  task_pri = convert_prio(p->prio);
+       int idx = 0;
+       int task_pri = convert_prio(p->prio);
 
        if (task_pri >= MAX_RT_PRIO)
                return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-       int                 *currpri = &cp->cpu_to_pri[cpu];
-       int                  oldpri  = *currpri;
-       int                  do_mb = 0;
+       int *currpri = &cp->cpu_to_pri[cpu];
+       int oldpri = *currpri;
+       int do_mb = 0;
 
        newpri = convert_prio(newpri);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952f9461b9e03a3e90bd99217478f8f..d0cc03b3e70b9d2d2bfbbd418e6dfc858a7f6387 100644
@@ -3069,6 +3069,9 @@ struct lb_env {
        int                     new_dst_cpu;
        enum cpu_idle_type      idle;
        long                    imbalance;
+       /* The set of CPUs under consideration for load-balancing */
+       struct cpumask          *cpus;
+
        unsigned int            flags;
 
        unsigned int            loop;
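
The new field is the whole point of this cleanup: the candidate cpumask used to be threaded through update_sd_lb_stats(), update_sg_lb_stats(), find_busiest_group() and find_busiest_queue() as an extra parameter, and stashing it in struct lb_env once lets every helper reach it as env->cpus instead, as the hunks below show. A standalone miniature of the same refactor pattern (illustrative names, with a plain bitmask standing in for struct cpumask):

#include <stdio.h>

struct env_sketch {
	unsigned long cpus;	/* stands in for struct cpumask *cpus */
	int dst_cpu;
};

/* was: count_candidates(int dst_cpu, unsigned long cpus) */
static int count_candidates(struct env_sketch *env)
{
	return __builtin_popcountl(env->cpus);
}

int main(void)
{
	struct env_sketch env = { .cpus = 0xf0UL, .dst_cpu = 4 };

	printf("candidate cpus: %d\n", count_candidates(&env));
	return 0;
}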
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
-                       int local_group, const struct cpumask *cpus,
-                       int *balance, struct sg_lb_stats *sgs)
+                       int local_group, int *balance, struct sg_lb_stats *sgs)
 {
        unsigned long nr_running, max_nr_running, min_nr_running;
        unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        max_nr_running = 0;
        min_nr_running = ~0UL;
 
-       for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+       for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
 
                nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-                                     const struct cpumask *cpus,
-                                     int *balance, struct sd_lb_stats *sds)
+                                       int *balance, struct sd_lb_stats *sds)
 {
        struct sched_domain *child = env->sd->child;
        struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
                local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
                memset(&sgs, 0, sizeof(sgs));
-               update_sg_lb_stats(env, sg, load_idx, local_group,
-                                  cpus, balance, &sgs);
+               update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
                if (local_group && !(*balance))
                        return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  *                put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
        struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
         * Compute the various statistics relavent for load balancing at
         * this level.
         */
-       update_sd_lb_stats(env, cpus, balance, &sds);
+       update_sd_lb_stats(env, balance, &sds);
 
        /*
         * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-                                    struct sched_group *group,
-                                    const struct cpumask *cpus)
+                                    struct sched_group *group)
 {
        struct rq *busiest = NULL, *rq;
        unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                if (!capacity)
                        capacity = fix_small_capacity(env->sd, group);
 
-               if (!cpumask_test_cpu(i, cpus))
+               if (!cpumask_test_cpu(i, env->cpus))
                        continue;
 
                rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                .dst_grpmask    = sched_group_cpus(sd->groups),
                .idle           = idle,
                .loop_break     = sched_nr_migrate_break,
+               .cpus           = cpus,
        };
 
        cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        schedstat_inc(sd, lb_count[idle]);
 
 redo:
-       group = find_busiest_group(&env, cpus, balance);
+       group = find_busiest_group(&env, balance);
 
        if (*balance == 0)
                goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(&env, group, cpus);
+       busiest = find_busiest_queue(&env, group);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[idle]);
                goto out_balanced;
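
A design note on the last two hunks: env.cpus points at the same scratch mask that load_balance() fills with cpumask_copy(cpus, cpu_active_mask), and elsewhere in this function (not shown in this diff) the all-pinned path clears the busiest CPU from that mask before jumping back to redo:. Because the helpers now read the mask through the env pointer, they automatically see the narrowed candidate set on the retry, with no further plumbing.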