sched: Exclude cond_resched() from nested sleep test
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44999505e1bf6894bdcdb48531a49e5a07b88ac5..b9f78f12ac22ec706878d9965eff6d132c778f02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1054,7 +1054,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_preempt_count(p) & PREEMPT_ACTIVE));
+                       !p->on_rq);
 
 #ifdef CONFIG_LOCKDEP
        /*
@@ -2034,25 +2034,6 @@ static inline int dl_bw_cpus(int i)
 }
 #endif
 
-static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw -= tsk_bw;
-}
-
-static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw += tsk_bw;
-}
-
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
-{
-       return dl_b->bw != -1 &&
-              dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
-}
-
 /*
  * We must be sure that accepting a new task (or allowing changing the
  * parameters of an existing one) is consistent with the bandwidth
@@ -2220,7 +2201,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 
 /**
  * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
  * @prev: the thread we just switched away from.
  *
  * finish_task_switch must be called after the context switch, paired
@@ -2232,10 +2212,16 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct, but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static struct rq *finish_task_switch(struct task_struct *prev)
        __releases(rq->lock)
 {
+       struct rq *rq = this_rq();
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
 
@@ -2275,6 +2261,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        }
 
        tick_nohz_task_switch(current);
+       return rq;
 }
 
 #ifdef CONFIG_SMP
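
To see why finish_task_switch() now computes this_rq() itself and returns it,
consider a hypothetical timeline (an illustration only, not part of the patch):

        /*
         * CPU0: task A calls schedule() -> context_switch(rq = cpu_rq(0), A, B);
         *       A's stack, including any local 'rq', is frozen inside switch_to().
         * ...   A is later woken up and selected to run on CPU1.
         * CPU1: switch_to(..., A) restores A's old stack; the frozen locals still
         *       refer to cpu_rq(0) while A is now executing on CPU1.  Any 'rq'
         *       saved across the switch is therefore stale, which is why
         *       finish_task_switch() re-evaluates this_rq() and hands it back.
         */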
@@ -2309,25 +2296,22 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
-       struct rq *rq = this_rq();
-
-       finish_task_switch(rq, prev);
+       struct rq *rq;
 
-       /*
-        * FIXME: do we need to worry about rq being invalidated by the
-        * task_switch?
-        */
+       /* finish_task_switch() drops rq->lock and enables preemption */
+       preempt_disable();
+       rq = finish_task_switch(prev);
        post_schedule(rq);
+       preempt_enable();
 
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
+ * context_switch - switch to the new MM and the new thread's register state.
  */
-static inline void
+static inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
@@ -2366,14 +2350,9 @@ context_switch(struct rq *rq, struct task_struct *prev,
        context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
-
        barrier();
-       /*
-        * this_rq must be evaluated again because prev may have moved
-        * CPUs since it called schedule(), thus the 'rq' on its stack
-        * frame will be invalid.
-        */
-       finish_task_switch(this_rq(), prev);
+
+       return finish_task_switch(prev);
 }
 
 /*
@@ -2855,15 +2834,8 @@ need_resched:
                rq->curr = next;
                ++*switch_count;
 
-               context_switch(rq, prev, next); /* unlocks the rq */
-               /*
-                * The context switch have flipped the stack from under us
-                * and restored the local variables which were saved when
-                * this task called schedule() in the past. prev == current
-                * is still correct, but it can be moved to another cpu/rq.
-                */
-               cpu = smp_processor_id();
-               rq = cpu_rq(cpu);
+               rq = context_switch(rq, prev, next); /* unlocks the rq */
+               cpu = cpu_of(rq);
        } else
                raw_spin_unlock_irq(&rq->lock);
 
@@ -2951,6 +2923,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+       enum ctx_state prev_ctx;
+
+       if (likely(!preemptible()))
+               return;
+
+       do {
+               __preempt_count_add(PREEMPT_ACTIVE);
+               /*
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
+                */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
+
+               __preempt_count_sub(PREEMPT_ACTIVE);
+               barrier();
+       } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
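
For context, a simplified sketch of how the notrace preempt-enable path would
reach this entry point when context tracking is enabled; this is an assumption
modelled on include/linux/preempt.h of this era, and the exact macro names vary
by architecture and configuration:

        #define preempt_enable_notrace()                                \
        do {                                                            \
                barrier();                                              \
                if (unlikely(__preempt_count_dec_and_test()))           \
                        __preempt_schedule_context();                   \
        } while (0)

The final call expands, directly or via an arch helper, to the
preempt_schedule_context() function added above.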
@@ -4637,6 +4650,76 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
+{
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
+       unsigned long flags;
+
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
+
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+
+       return ret;
+}
+
+int task_can_attach(struct task_struct *p,
+                   const struct cpumask *cs_cpus_allowed)
+{
+       int ret = 0;
+
+       /*
+        * Kthreads which disallow setaffinity shouldn't be moved
+        * to a new cpuset; we don't want to change their cpu
+        * affinity and isolating such threads by their set of
+        * allowed nodes is unnecessary.  Thus, cpusets are not
+        * applicable for such threads.  This prevents checking for
+        * success of set_cpus_allowed_ptr() on all attached tasks
+        * before cpus_allowed may be changed.
+        */
+       if (p->flags & PF_NO_SETAFFINITY) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+#ifdef CONFIG_SMP
+       if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+                                             cs_cpus_allowed)) {
+               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                                       cs_cpus_allowed);
+               struct dl_bw *dl_b = dl_bw_of(dest_cpu);
+               bool overflow;
+               int cpus;
+               unsigned long flags;
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(dest_cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+               if (overflow)
+                       ret = -EBUSY;
+               else {
+                       /*
+                        * We reserve space for this task in the destination
+                        * root_domain, as we can't fail after this point.
+                        * We will free resources in the source root_domain
+                        * later on (see set_cpus_allowed_dl()).
+                        */
+                       __dl_add(dl_b, p->dl.dl_bw);
+               }
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+       }
+#endif
+out:
+       return ret;
+}
+
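
For reference, a rough sketch of the intended caller, the cpuset controller's
can_attach path; the names cs, css_cs() and cpuset_can_attach() are assumptions
here rather than part of this diff:

        static int cpuset_can_attach(struct cgroup_subsys_state *css,
                                     struct cgroup_taskset *tset)
        {
                struct cpuset *cs = css_cs(css);
                struct task_struct *task;
                int ret;

                cgroup_taskset_for_each(task, tset) {
                        /*
                         * Rejects PF_NO_SETAFFINITY kthreads (-EINVAL) and
                         * deadline tasks whose bandwidth does not fit in the
                         * destination root_domain (-EBUSY).
                         */
                        ret = task_can_attach(task, cs->cpus_allowed);
                        if (ret)
                                return ret;
                }
                return 0;
        }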
 #ifdef CONFIG_SMP
 /*
  * move_queued_task - move a queued task to new rq.
@@ -6087,7 +6170,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 
 #ifdef CONFIG_NUMA
 static int sched_domains_numa_levels;
+enum numa_topology_type sched_numa_topology_type;
 static int *sched_domains_numa_distance;
+int sched_max_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 #endif
@@ -6259,7 +6344,7 @@ static void sched_numa_warn(const char *str)
        printk(KERN_WARNING "\n");
 }
 
-static bool find_numa_distance(int distance)
+bool find_numa_distance(int distance)
 {
        int i;
 
@@ -6274,6 +6359,56 @@ static bool find_numa_distance(int distance)
        return false;
 }
 
+/*
+ * A system can have three types of NUMA topology:
+ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
+ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
+ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
+ *
+ * The difference between a glueless mesh topology and a backplane
+ * topology lies in whether communication between nodes that are not
+ * directly connected goes through intermediary nodes (where programs
+ * could run), or through backplane controllers. This affects
+ * placement of programs.
+ *
+ * The type of topology can be discerned with the following tests:
+ * - If the maximum distance between any nodes is 1 hop, the system
+ *   is directly connected.
+ * - If for two nodes A and B, located N > 1 hops away from each other,
+ *   there is an intermediary node C, which is < N hops away from both
+ *   nodes A and B, the system is a glueless mesh.
+ * - Otherwise, the system uses a backplane topology.
+ */
+static void init_numa_topology_type(void)
+{
+       int a, b, c, n;
+
+       n = sched_max_numa_distance;
+
+       if (n <= 1) {
+               sched_numa_topology_type = NUMA_DIRECT;
+               return;
+       }
+
+       for_each_online_node(a) {
+               for_each_online_node(b) {
+                       /* Find two nodes furthest removed from each other. */
+                       if (node_distance(a, b) < n)
+                               continue;
+
+                       /* Is there an intermediary node between a and b? */
+                       for_each_online_node(c) {
+                               if (node_distance(a, c) < n &&
+                                   node_distance(b, c) < n) {
+                                       sched_numa_topology_type =
+                                                       NUMA_GLUELESS_MESH;
+                                       return;
+                               }
+                       }
+
+                       sched_numa_topology_type = NUMA_BACKPLANE;
+                       return;
+               }
+       }
+}
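
A worked example may help; the distance table below is hypothetical (SLIT
convention, 10 means local) and not taken from the patch:

        static const int dist[4][4] = {
                { 10, 16, 16, 22 },
                { 16, 10, 22, 16 },
                { 16, 22, 10, 16 },
                { 22, 16, 16, 10 },
        };

Here sched_max_numa_distance is 22 (nodes 0/3 and nodes 1/2). Node 1 lies at
distance 16 < 22 from both node 0 and node 3, so the loop above classifies the
system as NUMA_GLUELESS_MESH; if no such intermediary node existed for a
most-distant pair, the fall-through would pick NUMA_BACKPLANE instead.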
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6406,6 +6541,9 @@ static void sched_init_numa(void)
        sched_domain_topology = tl;
 
        sched_domains_numa_levels = level;
+       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+
+       init_numa_topology_type();
 }
 
 static void sched_domains_numa_masks_set(int cpu)
@@ -7157,6 +7295,25 @@ static inline int preempt_count_equals(int preempt_offset)
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       /*
+        * Blocking primitives will set (and therefore destroy) current->state.
+        * Since we will exit with TASK_RUNNING, make sure we enter with it;
+        * otherwise we would destroy state.
+        */
+       if (WARN(current->state != TASK_RUNNING,
+                       "do not call blocking ops when !TASK_RUNNING; "
+                       "state=%lx set at [<%p>] %pS\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change))
+               __set_current_state(TASK_RUNNING);
+
+       ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
@@ -7189,7 +7346,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 #endif
        dump_stack();
 }
-EXPORT_SYMBOL(__might_sleep);
+EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
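
To illustrate what the new TASK_RUNNING check catches, consider a hypothetical
wait loop; the names wq, condition and m are placeholders, not from this patch:

        DEFINE_WAIT(wait);

        prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
        if (!condition)
                mutex_lock(&m);         /*
                                         * May sleep: __might_sleep() now warns,
                                         * since sleeping here resets the task
                                         * state to TASK_RUNNING and a later
                                         * schedule() would no longer block.
                                         */
        finish_wait(&wq, &wait);

Per the subject line, cond_resched() is excluded from this test, presumably by
calling the ___might_sleep() variant directly, so legitimate preemption points
inside such sections keep working without triggering the nested-sleep warning.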
@@ -7833,6 +7990,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
        sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+       sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
@@ -8205,6 +8367,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,