sched: Exclude cond_resched() from nested sleep test
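
This delta splits the sleep-debugging check in two: __might_sleep() now warns when it is entered with current->state != TASK_RUNNING (a blocking call at that point would destroy the sleep state set by e.g. prepare_to_wait()) and then falls through to ___might_sleep(), which keeps the original atomic-context checks. cond_resched() never actually blocks, so its wrapper is expected to call ___might_sleep() directly and thereby skip the new task-state warning. The caller side lives outside this file and is not part of this blob; a rough sketch of that wrapper:

	/* Sketch only -- the wrapper itself is not contained in this diff. */
	#define cond_resched() ({				\
		___might_sleep(__FILE__, __LINE__, 0);		\
		_cond_resched();				\
	})
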
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1b69603c1d3e8079596d3190272f203aef75e7d2..b9f78f12ac22ec706878d9965eff6d132c778f02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1054,7 +1054,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_preempt_count(p) & PREEMPT_ACTIVE));
+                       !p->on_rq);
 
 #ifdef CONFIG_LOCKDEP
        /*
@@ -2034,25 +2034,6 @@ static inline int dl_bw_cpus(int i)
 }
 #endif
 
-static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw -= tsk_bw;
-}
-
-static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw += tsk_bw;
-}
-
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
-{
-       return dl_b->bw != -1 &&
-              dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
-}
-
 /*
  * We must be sure that accepting a new task (or allowing changing the
  * parameters of an existing one) is consistent with the bandwidth
@@ -4669,6 +4650,76 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
+{
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
+       unsigned long flags;
+
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
+
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+
+       return ret;
+}
+
+int task_can_attach(struct task_struct *p,
+                   const struct cpumask *cs_cpus_allowed)
+{
+       int ret = 0;
+
+       /*
+        * Kthreads which disallow setaffinity shouldn't be moved
+        * to a new cpuset; we don't want to change their cpu
+        * affinity and isolating such threads by their set of
+        * allowed nodes is unnecessary.  Thus, cpusets are not
+        * applicable for such threads.  This prevents checking for
+        * success of set_cpus_allowed_ptr() on all attached tasks
+        * before cpus_allowed may be changed.
+        */
+       if (p->flags & PF_NO_SETAFFINITY) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+#ifdef CONFIG_SMP
+       if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+                                             cs_cpus_allowed)) {
+               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                                       cs_cpus_allowed);
+               struct dl_bw *dl_b = dl_bw_of(dest_cpu);
+               bool overflow;
+               int cpus;
+               unsigned long flags;
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(dest_cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+               if (overflow)
+                       ret = -EBUSY;
+               else {
+                       /*
+                        * We reserve space for this task in the destination
+                        * root_domain, as we can't fail after this point.
+                        * We will free resources in the source root_domain
+                        * later on (see set_cpus_allowed_dl()).
+                        */
+                       __dl_add(dl_b, p->dl.dl_bw);
+               }
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+       }
+#endif
+out:
+       return ret;
+}
+
 #ifdef CONFIG_SMP
 /*
  * move_queued_task - move a queued task to new rq.
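
task_can_attach() pre-reserves deadline bandwidth in the destination root_domain and relies on __dl_add()/__dl_overflow(), whose definitions are removed from this file in the hunk above (after this change they presumably live in a shared scheduler header). A standalone sketch of the admission test it depends on, using a hypothetical name and the same conventions as the removed helper (bandwidth as a fixed-point fraction of one CPU, bw == -1 meaning "unconstrained"):

	/*
	 * True when admitting new_bw (while handing back old_bw) would
	 * exceed the domain's budget of bw per CPU across cpus CPUs.
	 */
	static inline bool dl_overflow_sketch(u64 bw, int cpus, u64 total_bw,
					      u64 old_bw, u64 new_bw)
	{
		return bw != (u64)-1 && bw * cpus < total_bw - old_bw + new_bw;
	}
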
@@ -7244,6 +7295,25 @@ static inline int preempt_count_equals(int preempt_offset)
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       /*
+        * Blocking primitives will set (and therefore destroy) current->state,
+        * since we will exit with TASK_RUNNING make sure we enter with it,
+        * otherwise we will destroy state.
+        */
+       if (WARN(current->state != TASK_RUNNING,
+                       "do not call blocking ops when !TASK_RUNNING; "
+                       "state=%lx set at [<%p>] %pS\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change))
+               __set_current_state(TASK_RUNNING);
+
+       ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
@@ -7276,7 +7346,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 #endif
        dump_stack();
 }
-EXPORT_SYMBOL(__might_sleep);
+EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
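
The warning added to __might_sleep() in the hunks above targets callers that invoke a blocking primitive after having already set a sleep state. A hypothetical illustration of the pattern it catches (wq, wait, m and condition are made-up names):

	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!condition) {
		mutex_lock(&m);		/* __might_sleep() now warns: state != TASK_RUNNING */
		mutex_unlock(&m);	/* sleeping in the lock resets us to TASK_RUNNING... */
		schedule();		/* ...so this schedule() may not sleep at all */
	}
	finish_wait(&wq, &wait);

cond_resched() reschedules like a preemption and does not consume the task state, which is why it can bypass this check via ___might_sleep().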