*
* Scaled math optimizations by Thomas Gleixner
* Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
/*
- * Preemption granularity:
- * (default: 10 msec, units: nanoseconds)
+ * Targeted preemption latency for CPU-bound tasks:
+ * (default: 20ms, units: nanoseconds)
*
- * NOTE: this granularity value is not the same as the concept of
- * 'timeslice length' - timeslices in CFS will typically be somewhat
- * larger than this value. (to see the precise effective timeslice
- * length of your workload, run vmstat and monitor the context-switches
- * field)
+ * NOTE: this latency value is not the same as the concept of
+ * 'timeslice length' - timeslices in CFS are of variable length.
+ * (to see the precise effective timeslice length of your workload,
+ * run vmstat and monitor the context-switches field)
*
* On SMP systems the value of this is multiplied by the log2 of the
* number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
* systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
+ */
+const_debug unsigned int sysctl_sched_latency = 20000000ULL;
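The SMP note above amounts to multiplying the latency target by 1 + log2(nr_cpus). A rough user-space sketch of that scaling rule (purely illustrative, not part of the patch; ilog2() is open-coded here):

    #include <stdio.h>

    /* open-coded integer log2, standing in for the kernel's ilog2() */
    static unsigned int ilog2_u(unsigned int n)
    {
            unsigned int r = 0;

            while (n >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long long latency = 20000000ULL;       /* 20ms in ns */
            unsigned int cpus;

            for (cpus = 1; cpus <= 16; cpus *= 2)
                    printf("%2u CPUs: factor %u -> %llu ns\n", cpus,
                           1 + ilog2_u(cpus), (1 + ilog2_u(cpus)) * latency);
            return 0;
    }

which reproduces the 2x/3x/4x/5x factors listed in the comment.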
+
+/*
+ * After fork, child runs first (this is the default). If set to 0
+ * then the parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
+
+/*
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 2 msec, units: nanoseconds)
+ */
+unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+
+/*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the aggressive yield implementation of the
+ * old scheduler back on.
*/
-unsigned int sysctl_sched_granularity __read_mostly = 10000000UL;
+unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_BATCH wake-up granularity.
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
/*
* SCHED_OTHER wake-up granularity.
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
-unsigned int sysctl_sched_stat_granularity __read_mostly;
-
-/*
- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
- */
unsigned int sysctl_sched_runtime_limit __read_mostly;
-/*
- * Debugging: various feature bits
- */
-enum {
- SCHED_FEAT_FAIR_SLEEPERS = 1,
- SCHED_FEAT_SLEEPER_AVG = 2,
- SCHED_FEAT_SLEEPER_LOAD_AVG = 4,
- SCHED_FEAT_PRECISE_CPU_LOAD = 8,
- SCHED_FEAT_START_DEBIT = 16,
- SCHED_FEAT_SKIP_INITIAL = 32,
-};
-
-unsigned int sysctl_sched_features __read_mostly =
- SCHED_FEAT_FAIR_SLEEPERS *1 |
- SCHED_FEAT_SLEEPER_AVG *0 |
- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
- SCHED_FEAT_PRECISE_CPU_LOAD *1 |
- SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_SKIP_INITIAL *0;
-
extern struct sched_class fair_sched_class;
/**************************************************************
return cfs_rq->rq;
}
-/* currently running entity (if any) on this cfs_rq */
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
- return cfs_rq->curr;
-}
-
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->curr = se;
-}
-
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return container_of(cfs_rq, struct rq, cfs);
}
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
- struct rq *rq = rq_of(cfs_rq);
-
- if (unlikely(rq->curr->sched_class != &fair_sched_class))
- return NULL;
-
- return &rq->curr->se;
-}
-
#define entity_is_task(se) 1
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline struct task_struct *task_of(struct sched_entity *se)
* Scheduling class tree data structure manipulation methods:
*/
+static inline void
+set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
+{
+ struct sched_entity *se;
+
+ cfs_rq->rb_leftmost = leftmost;
+ if (leftmost) {
+ se = rb_entry(leftmost, struct sched_entity, run_node);
+ cfs_rq->min_vruntime = max(se->vruntime,
+ cfs_rq->min_vruntime);
+ }
+}
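Because the helper above takes the max() of the old min_vruntime and the new leftmost entity's vruntime, min_vruntime can only move forward. That gives place_entity() further down a monotonically non-decreasing per-queue baseline to place newly forked and woken entities against, even as entities come and go.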
+
/*
* Enqueue an entity into the rb-tree:
*/
-static inline void
+static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
* used):
*/
if (leftmost)
- cfs_rq->rb_leftmost = &se->run_node;
+ set_leftmost(cfs_rq, &se->run_node);
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
update_load_add(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
+
+ schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
-static inline void
+static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node)
- cfs_rq->rb_leftmost = rb_next(&se->run_node);
+ set_leftmost(cfs_rq, rb_next(&se->run_node));
+
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
+
+ schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}
+static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+{
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct sched_entity *se = NULL;
+ struct rb_node *parent;
+
+ while (*link) {
+ parent = *link;
+ se = rb_entry(parent, struct sched_entity, run_node);
+ link = &parent->rb_right;
+ }
+
+ return se;
+}
+
/**************************************************************
* Scheduling class statistics methods:
*/
-/*
- * We rescale the rescheduling granularity of tasks according to their
- * nice level, but only linearly, not exponentially:
- */
-static long
-niced_granularity(struct sched_entity *curr, unsigned long granularity)
+static u64 __sched_period(unsigned long nr_running)
{
- u64 tmp;
+ u64 period = sysctl_sched_latency;
+ unsigned long nr_latency =
+ sysctl_sched_latency / sysctl_sched_min_granularity;
- if (likely(curr->load.weight == NICE_0_LOAD))
- return granularity;
- /*
- * Positive nice levels get the same granularity as nice-0:
- */
- if (likely(curr->load.weight < NICE_0_LOAD)) {
- tmp = curr->load.weight * (u64)granularity;
- return (long) (tmp >> NICE_0_SHIFT);
+ if (unlikely(nr_running > nr_latency)) {
+ period *= nr_running;
+ do_div(period, nr_latency);
}
- /*
- * Negative nice level tasks get linearly finer
- * granularity:
- */
- tmp = curr->load.inv_weight * (u64)granularity;
- /*
- * It will always fit into 'long':
- */
- return (long) (tmp >> WMULT_SHIFT);
+ return period;
+}
+
+static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ u64 period = __sched_period(cfs_rq->nr_running);
+
+ period *= se->load.weight;
+ do_div(period, cfs_rq->load.weight);
+
+ return period;
}
static inline void
* are not in our scheduling class.
*/
static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned long delta_exec)
{
- unsigned long delta, delta_exec, delta_fair, delta_mine;
+ unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
struct load_weight *lw = &cfs_rq->load;
unsigned long load = lw->weight;
- delta_exec = curr->delta_exec;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
curr->sum_exec_runtime += delta_exec;
cfs_rq->exec_clock += delta_exec;
+ delta_exec_weighted = delta_exec;
+ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+ &curr->load);
+ }
+ curr->vruntime += delta_exec_weighted;
+
+ if (!sched_feat(FAIR_SLEEPERS))
+ return;
if (unlikely(!load))
return;
delta_fair = calc_delta_fair(delta_exec, lw);
delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
- if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
+ if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
delta = min(delta, (unsigned long)(
(long)sysctl_sched_runtime_limit - curr->wait_runtime));
static void update_curr(struct cfs_rq *cfs_rq)
{
- struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+ u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
* since the last time we changed load (this cannot
* overflow on 32 bits):
*/
- delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
- curr->delta_exec += delta_exec;
+ delta_exec = (unsigned long)(now - curr->exec_start);
- if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
- __update_curr(cfs_rq, curr);
- curr->delta_exec = 0;
- }
- curr->exec_start = rq_of(cfs_rq)->clock;
+ __update_curr(cfs_rq, curr, delta_exec);
+ curr->exec_start = now;
}
static inline void
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
{
- u64 tmp = (u64)delta * weight >> shift;
+ unsigned long weight = se->load.weight;
- if (unlikely(tmp > sysctl_sched_runtime_limit*2))
- return sysctl_sched_runtime_limit*2;
- return tmp;
+ if (unlikely(weight != NICE_0_LOAD))
+ return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+ else
+ return delta;
}
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
- return delta * weight >> shift;
-}
-#endif
/*
* Task is being enqueued - update stats:
*/
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 key;
-
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
- if (se != cfs_rq_curr(cfs_rq))
+ if (se != cfs_rq->curr)
update_stats_wait_start(cfs_rq, se);
/*
* Update the key:
*/
- key = cfs_rq->fair_clock;
-
- /*
- * Optimize the common nice 0 case:
- */
- if (likely(se->load.weight == NICE_0_LOAD)) {
- key -= se->wait_runtime;
- } else {
- u64 tmp;
-
- if (se->wait_runtime < 0) {
- tmp = -se->wait_runtime;
- key += (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- } else {
- tmp = se->wait_runtime;
- key -= (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- }
- }
-
- se->fair_key = key;
+ se->fair_key = se->vruntime;
}
/*
* Note: must be called with a freshly updated rq->fair_clock.
*/
static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long delta_fair)
{
- unsigned long delta_fair = se->delta_fair_run;
-
schedstat_set(se->wait_max, max(se->wait_max,
rq_of(cfs_rq)->clock - se->wait_start));
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
+ delta_fair = calc_weighted(delta_fair, se);
add_wait_runtime(cfs_rq, se, delta_fair);
}
{
unsigned long delta_fair;
+ if (unlikely(!se->wait_start_fair))
+ return;
+
delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
(u64)(cfs_rq->fair_clock - se->wait_start_fair));
- se->delta_fair_run += delta_fair;
- if (unlikely(abs(se->delta_fair_run) >=
- sysctl_sched_stat_granularity)) {
- __update_stats_wait_end(cfs_rq, se);
- se->delta_fair_run = 0;
- }
+ __update_stats_wait_end(cfs_rq, se, delta_fair);
se->wait_start_fair = 0;
schedstat_set(se->wait_start, 0);
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
- if (se != cfs_rq_curr(cfs_rq))
+ if (se != cfs_rq->curr)
update_stats_wait_end(cfs_rq, se);
}
* Scheduling class queueing methods:
*/
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long delta_fair)
{
- unsigned long load = cfs_rq->load.weight, delta_fair;
+ unsigned long load = cfs_rq->load.weight;
long prev_runtime;
/*
if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
return;
- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+ if (sched_feat(SLEEPER_LOAD_AVG))
load = rq_of(cfs_rq)->cpu_load[2];
- delta_fair = se->delta_fair_sleep;
-
/*
* Fix up delta_fair with the effect of us running
* during the whole sleep period:
*/
- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+ if (sched_feat(SLEEPER_AVG))
delta_fair = div64_likely32((u64)delta_fair * load,
load + se->load.weight);
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
+ delta_fair = calc_weighted(delta_fair, se);
prev_runtime = se->wait_runtime;
__add_wait_runtime(cfs_rq, se, delta_fair);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
delta_fair = se->wait_runtime - prev_runtime;
/*
unsigned long delta_fair;
if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
- !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+ !sched_feat(FAIR_SLEEPERS))
return;
delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
- se->delta_fair_sleep += delta_fair;
- if (unlikely(abs(se->delta_fair_sleep) >=
- sysctl_sched_stat_granularity)) {
- __enqueue_sleeper(cfs_rq, se);
- se->delta_fair_sleep = 0;
- }
+ __enqueue_sleeper(cfs_rq, se, delta_fair);
se->sleep_start_fair = 0;
se->block_start = 0;
se->sum_sleep_runtime += delta;
+
+ /*
+ * Blocking time is in units of nanosecs, so shift by 20 to
+ * get a milliseconds-range estimation of the amount of
+ * time that the task spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
+ delta >> 20);
+ }
}
#endif
}
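The shift-by-20 above works because 2^20 = 1,048,576 ≈ 10^6: a nanosecond delta shifted right by 20 bits is divided by roughly one million, so e.g. a 5,000,000ns (5ms) sleep is recorded as 5000000 >> 20 = 4 profile hits, i.e. a milliseconds-range estimate rather than an exact conversion.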
+static void
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+{
+ struct sched_entity *last = __pick_last_entity(cfs_rq);
+ u64 min_runtime, latency;
+
+ min_runtime = cfs_rq->min_vruntime;
+ if (last) {
+ min_runtime += last->vruntime;
+ min_runtime >>= 1;
+ if (initial && sched_feat(START_DEBIT))
+ min_runtime += sysctl_sched_latency/2;
+ }
+
+ if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
+ latency = sysctl_sched_latency;
+ if (min_runtime > latency)
+ min_runtime -= latency;
+ else
+ min_runtime = 0;
+ }
+
+ se->vruntime = max(se->vruntime, min_runtime);
+}
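Walking place_entity() through with made-up numbers (purely illustrative): say min_vruntime is 100ms, the rightmost entity's vruntime is 140ms and sysctl_sched_latency is the default 20ms. A freshly forked task (initial=1, START_DEBIT set) gets min_runtime = (100 + 140)/2 + 20/2 = 130ms, while a waking task (initial=0, NEW_FAIR_SLEEPERS set) gets (100 + 140)/2 - 20 = 100ms. The final max() ensures an entity whose own vruntime is already past that floor keeps it instead of being handed extra credit.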
+
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
*/
update_curr(cfs_rq);
- if (wakeup)
+ if (wakeup) {
+ place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
+ }
update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
- cfs_rq->wait_runtime -= se->wait_runtime;
#endif
}
__dequeue_entity(cfs_rq, se);
* Preempt the current task with a newly woken task if needed:
*/
static void
-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
- struct sched_entity *curr, unsigned long granularity)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- s64 __delta = curr->fair_key - se->fair_key;
+ unsigned long ideal_runtime, delta_exec;
- /*
- * Take scheduling granularity into account - do not
- * preempt the current task unless the best task has
- * a larger than sched_granularity fairness advantage:
- */
- if (__delta > niced_granularity(curr, granularity))
+ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
}
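With the defaults above this means, for example, that five runnable tasks of equal weight each get an ideal_runtime of 20ms / 5 = 4ms: once the current task has run more than 4ms since it was last picked (prev_sum_exec_runtime is snapshotted when the entity is set running, further down), it is marked for rescheduling.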
*/
update_stats_wait_end(cfs_rq, se);
update_stats_curr_start(cfs_rq, se);
- set_cfs_rq_curr(cfs_rq, se);
+ cfs_rq->curr = se;
+#ifdef CONFIG_SCHEDSTATS
+ /*
+ * Track our maximum slice length, if the CPU's load is at
+ * least twice that of our own weight (i.e. don't track it
+ * when there are only lesser-weight tasks around):
+ */
+ if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
+ se->slice_max = max(se->slice_max,
+ se->sum_exec_runtime - se->prev_sum_exec_runtime);
+ }
+#endif
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
if (prev->on_rq)
update_stats_wait_start(cfs_rq, prev);
- set_cfs_rq_curr(cfs_rq, NULL);
+ cfs_rq->curr = NULL;
}
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- struct sched_entity *next;
-
/*
* Dequeue and enqueue the task to update its
* position within the tree:
dequeue_entity(cfs_rq, curr, 0);
enqueue_entity(cfs_rq, curr, 0);
- /*
- * Reschedule if another task tops the current one.
- */
- next = __pick_next_entity(cfs_rq);
- if (next == curr)
- return;
-
- __check_preempt_curr_fair(cfs_rq, next, curr, sysctl_sched_granularity);
+ if (cfs_rq->nr_running > 1)
+ check_preempt_tick(cfs_rq, curr);
}
/**************************************************
}
/*
- * sched_yield() support is very simple - we dequeue and enqueue
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
*/
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct sched_entity *rightmost, *se = &p->se;
+ struct rb_node *parent;
- __update_rq_clock(rq);
/*
- * Dequeue and enqueue the task to update its
- * position within the tree:
+ * Are we the only task in the tree?
+ */
+ if (unlikely(cfs_rq->nr_running == 1))
+ return;
+
+ if (likely(!sysctl_sched_compat_yield)) {
+ __update_rq_clock(rq);
+ /*
+ * Dequeue and enqueue the task to update its
+ * position within the tree:
+ */
+ dequeue_entity(cfs_rq, &p->se, 0);
+ enqueue_entity(cfs_rq, &p->se, 0);
+
+ return;
+ }
+ /*
+ * Find the rightmost entry in the rbtree:
*/
- dequeue_entity(cfs_rq, &p->se, 0);
- enqueue_entity(cfs_rq, &p->se, 0);
+ do {
+ parent = *link;
+ link = &parent->rb_right;
+ } while (*link);
+
+ rightmost = rb_entry(parent, struct sched_entity, run_node);
+ /*
+ * Already in the rightmost position?
+ */
+ if (unlikely(rightmost == se))
+ return;
+
+ /*
+ * Minimally necessary key value to be last in the tree:
+ */
+ se->fair_key = rightmost->fair_key + 1;
+
+ if (cfs_rq->rb_leftmost == &se->run_node)
+ cfs_rq->rb_leftmost = rb_next(&se->run_node);
+ /*
+ * Relink the task to the rightmost position:
+ */
+ rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+ rb_link_node(&se->run_node, parent, link);
+ rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
/*
* Preempt the current task with a newly woken task if needed:
*/
-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- unsigned long gran;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
resched_task(curr);
return;
}
+ if (is_same_group(curr, p)) {
+ s64 delta = curr->se.vruntime - p->se.vruntime;
- gran = sysctl_sched_wakeup_granularity;
- /*
- * Batch tasks prefer throughput over latency:
- */
- if (unlikely(p->policy == SCHED_BATCH))
- gran = sysctl_sched_batch_wakeup_granularity;
-
- if (is_same_group(curr, p))
- __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
+ if (delta > (s64)sysctl_sched_wakeup_granularity)
+ resched_task(curr);
+ }
}
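The wakeup preemption decision is thus a straight vruntime comparison. As a purely illustrative example: if curr's vruntime is 105ms and the woken task's is 102.5ms, the 2.5ms lead exceeds the 2ms default sysctl_sched_wakeup_granularity and curr is rescheduled; a lead of only 1ms would not be enough.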
static struct task_struct *pick_next_task_fair(struct rq *rq)
}
}
+#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+
/*
* Share the fairness runtime between parent and child, thus the
* total amount of pressure for CPU stays equal - new tasks
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- struct sched_entity *se = &p->se;
+ struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
sched_info_queued(p);
- update_stats_enqueue(cfs_rq, se);
- /*
- * Child runs first: we let it run before the parent
- * until it reschedules once. We set up the key so that
- * it will preempt the parent:
- */
- p->se.fair_key = current->se.fair_key -
- niced_granularity(&rq->curr->se, sysctl_sched_granularity) - 1;
+ update_curr(cfs_rq);
+ place_entity(cfs_rq, se, 1);
+
/*
* The first wait is dominated by the child-runs-first logic,
* so do not credit it with that waiting time yet:
*/
- if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
- p->se.wait_start_fair = 0;
+ if (sched_feat(SKIP_INITIAL))
+ se->wait_start_fair = 0;
/*
* The statistical average of wait_runtime is about
* -granularity/2, so initialize the task with that:
*/
- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
- p->se.wait_runtime = -((long)sysctl_sched_granularity / 2);
+ if (sched_feat(START_DEBIT))
+ se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);
+
+ if (sysctl_sched_child_runs_first &&
+ curr->vruntime < se->vruntime) {
+ dequeue_entity(cfs_rq, curr, 0);
+ swap(curr->vruntime, se->vruntime);
+ enqueue_entity(cfs_rq, curr, 0);
+ }
+
+ update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
+ resched_task(rq->curr);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
- .check_preempt_curr = check_preempt_curr_fair,
+ .check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair,