* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
-/*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
- */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug static const
-#endif
-
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 20ms, units: nanoseconds)
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
unsigned int sysctl_sched_runtime_limit __read_mostly;
-/*
- * Debugging: various feature bits
- */
-enum {
- SCHED_FEAT_FAIR_SLEEPERS = 1,
- SCHED_FEAT_SLEEPER_AVG = 2,
- SCHED_FEAT_SLEEPER_LOAD_AVG = 4,
- SCHED_FEAT_START_DEBIT = 8,
- SCHED_FEAT_SKIP_INITIAL = 16,
-};
-
-const_debug unsigned int sysctl_sched_features =
- SCHED_FEAT_FAIR_SLEEPERS *1 |
- SCHED_FEAT_SLEEPER_AVG *0 |
- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
- SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_SKIP_INITIAL *0;
-
-#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
-
extern struct sched_class fair_sched_class;
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
+static inline void
+set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
+{
+ struct sched_entity *se;
+
+ cfs_rq->rb_leftmost = leftmost;
+ if (leftmost) {
+ se = rb_entry(leftmost, struct sched_entity, run_node);
+ cfs_rq->min_vruntime = max(se->vruntime,
+ cfs_rq->min_vruntime);
+ }
+}
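
As a side note on the new helper: min_vruntime is deliberately monotonic. A minimal user-space sketch of that invariant, with types and values invented for illustration rather than taken from the kernel:

#include <stdio.h>
#include <stdint.h>

/* Simplified model of what set_leftmost() maintains: min_vruntime
 * follows the leftmost (smallest) vruntime in the tree, but only ever
 * moves forward, so it remains a stable baseline for placing newly
 * woken tasks even after the leftmost task has been dequeued. */
static uint64_t min_vruntime;

static void track_leftmost(uint64_t leftmost_vruntime)
{
	if (leftmost_vruntime > min_vruntime)
		min_vruntime = leftmost_vruntime;
}

int main(void)
{
	track_leftmost(100);	/* advances to 100 */
	track_leftmost(80);	/* ignored: never goes backwards */
	track_leftmost(150);	/* advances to 150 */
	printf("min_vruntime = %llu\n", (unsigned long long)min_vruntime);
	return 0;
}
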
+
/*
* Enqueue an entity into the rb-tree:
*/
* used):
*/
if (leftmost)
- cfs_rq->rb_leftmost = &se->run_node;
+ set_leftmost(cfs_rq, &se->run_node);
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node)
- cfs_rq->rb_leftmost = rb_next(&se->run_node);
+ set_leftmost(cfs_rq, rb_next(&se->run_node));
+
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
update_load_sub(&cfs_rq->load, se->load.weight);
cfs_rq->nr_running--;
return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}
+static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+{
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct sched_entity *se = NULL;
+ struct rb_node *parent;
+
+ while (*link) {
+ parent = *link;
+ se = rb_entry(parent, struct sched_entity, run_node);
+ link = &parent->rb_right;
+ }
+
+ return se;
+}
+
/**************************************************************
* Scheduling class statistics methods:
*/
-/*
- * Calculate the preemption granularity needed to schedule every
- * runnable task once per sysctl_sched_latency amount of time.
- * (down to a sensible low limit on granularity)
- *
- * For example, if there are 2 tasks running and latency is 10 msecs,
- * we switch tasks every 5 msecs. If we have 3 tasks running, we have
- * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
- * for each task. We do finer and finer scheduling up to until we
- * reach the minimum granularity value.
- *
- * To achieve this we use the following dynamic-granularity rule:
- *
- * gran = lat/nr - lat/nr/nr
- *
- * This comes out of the following equations:
- *
- * kA1 + gran = kB1
- * kB2 + gran = kA2
- * kA2 = kA1
- * kB2 = kB1 - d + d/nr
- * lat = d * nr
- *
- * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
- * '1' is start of time, '2' is end of time, 'd' is delay between
- * 1 and 2 (during which task B was running), 'nr' is number of tasks
- * running, 'lat' is the period of each task. ('lat' is the
- * sched_latency that we aim for.)
- */
-static long
-sched_granularity(struct cfs_rq *cfs_rq)
+static u64 __sched_period(unsigned long nr_running)
{
- unsigned int gran = sysctl_sched_latency;
- unsigned int nr = cfs_rq->nr_running;
+ u64 period = sysctl_sched_latency;
+ unsigned long nr_latency =
+ sysctl_sched_latency / sysctl_sched_min_granularity;
- if (nr > 1) {
- gran = gran/nr - gran/nr/nr;
- gran = max(gran, sysctl_sched_min_granularity);
+ if (unlikely(nr_running > nr_latency)) {
+ period *= nr_running;
+ do_div(period, nr_latency);
}
- return gran;
+ return period;
}
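
A rough stand-alone rendering of the new period computation, assuming a 20 ms latency target and a 2 ms minimum granularity (those defaults are an assumption here; the real values come from the sysctls referenced above):

#include <stdio.h>
#include <stdint.h>

#define LATENCY_NS		20000000ULL	/* assumed sched_latency target  */
#define MIN_GRANULARITY_NS	 2000000ULL	/* assumed sched_min_granularity */

/* Mirror of the new __sched_period(): keep the latency target while
 * every runnable task can still get at least the minimum granularity,
 * otherwise stretch the period so no slice shrinks below it. */
static uint64_t sched_period(unsigned long nr_running)
{
	uint64_t period = LATENCY_NS;
	unsigned long nr_latency = LATENCY_NS / MIN_GRANULARITY_NS;

	if (nr_running > nr_latency) {
		period *= nr_running;
		period /= nr_latency;
	}
	return period;
}

int main(void)
{
	printf(" 2 tasks -> %llu ns period\n", (unsigned long long)sched_period(2));
	printf("10 tasks -> %llu ns period\n", (unsigned long long)sched_period(10));
	printf("40 tasks -> %llu ns period\n", (unsigned long long)sched_period(40));
	return 0;
}
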
-/*
- * We rescale the rescheduling granularity of tasks according to their
- * nice level, but only linearly, not exponentially:
- */
-static long
-niced_granularity(struct sched_entity *curr, unsigned long granularity)
+static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- u64 tmp;
+ u64 period = __sched_period(cfs_rq->nr_running);
- if (likely(curr->load.weight == NICE_0_LOAD))
- return granularity;
- /*
- * Positive nice levels get the same granularity as nice-0:
- */
- if (likely(curr->load.weight < NICE_0_LOAD)) {
- tmp = curr->load.weight * (u64)granularity;
- return (long) (tmp >> NICE_0_SHIFT);
- }
- /*
- * Negative nice level tasks get linearly finer
- * granularity:
- */
- tmp = curr->load.inv_weight * (u64)granularity;
+ period *= se->load.weight;
+ do_div(period, cfs_rq->load.weight);
- /*
- * It will always fit into 'long':
- */
- return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
+ return period;
}
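
The per-entity slice is then just a weight-proportional share of that period. A toy two-task example, with weights picked for illustration rather than taken from the kernel's nice-level table:

#include <stdio.h>
#include <stdint.h>

/* Simplified sched_slice(): slice = period * weight / total_weight. */
static uint64_t sched_slice(uint64_t period, unsigned long weight,
			    unsigned long total_weight)
{
	return period * weight / total_weight;
}

int main(void)
{
	uint64_t period = 20000000;		/* 20 ms, from __sched_period()  */
	unsigned long w_a = 1024, w_b = 2048;	/* task B is twice as heavy      */

	printf("A: %llu ns\n", (unsigned long long)sched_slice(period, w_a, w_a + w_b));
	printf("B: %llu ns\n", (unsigned long long)sched_slice(period, w_b, w_a + w_b));
	return 0;
}
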
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
unsigned long delta_exec)
{
- unsigned long delta, delta_fair, delta_mine;
+ unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
struct load_weight *lw = &cfs_rq->load;
unsigned long load = lw->weight;
curr->sum_exec_runtime += delta_exec;
cfs_rq->exec_clock += delta_exec;
+ delta_exec_weighted = delta_exec;
+ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+ &curr->load);
+ }
+ curr->vruntime += delta_exec_weighted;
+
+ if (!sched_feat(FAIR_SLEEPERS))
+ return;
if (unlikely(!load))
return;
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
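
For reference, calc_delta_fair() (defined elsewhere in this file) scales the wall-clock delta by roughly NICE_0_LOAD/weight, so heavier tasks accumulate vruntime more slowly. A simplified model of that scaling, ignoring the rounding of the real helper:

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024ULL	/* weight of a nice-0 task */

/* Simplified stand-in for calc_delta_fair(): vruntime advances at
 * NICE_0_LOAD/weight times wall-clock speed, so a task with twice the
 * weight ages half as fast and therefore earns twice the CPU share. */
static uint64_t vruntime_delta(uint64_t delta_exec_ns, uint64_t weight)
{
	return delta_exec_ns * NICE_0_LOAD / weight;
}

int main(void)
{
	/* 10 ms of wall-clock execution: */
	printf("nice 0  (w=1024): +%llu ns vruntime\n",
	       (unsigned long long)vruntime_delta(10000000, 1024));
	printf("heavier (w=2048): +%llu ns vruntime\n",
	       (unsigned long long)vruntime_delta(10000000, 2048));
	printf("lighter (w= 512): +%llu ns vruntime\n",
	       (unsigned long long)vruntime_delta(10000000, 512));
	return 0;
}
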
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
{
- u64 tmp = (u64)delta * weight >> shift;
+ unsigned long weight = se->load.weight;
- if (unlikely(tmp > sysctl_sched_runtime_limit*2))
- return sysctl_sched_runtime_limit*2;
- return tmp;
-}
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
- return delta * weight >> shift;
+ if (unlikely(weight != NICE_0_LOAD))
+ return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+ else
+ return delta;
}
-#endif
/*
* Task is being enqueued - update stats:
*/
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 key;
-
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
/*
* Update the key:
*/
- key = cfs_rq->fair_clock;
-
- /*
- * Optimize the common nice 0 case:
- */
- if (likely(se->load.weight == NICE_0_LOAD)) {
- key -= se->wait_runtime;
- } else {
- u64 tmp;
-
- if (se->wait_runtime < 0) {
- tmp = -se->wait_runtime;
- key += (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- } else {
- tmp = se->wait_runtime;
- key -= (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- }
- }
-
- se->fair_key = key;
+ se->fair_key = se->vruntime;
}
/*
schedstat_set(se->wait_max, max(se->wait_max,
rq_of(cfs_rq)->clock - se->wait_start));
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
+ delta_fair = calc_weighted(delta_fair, se);
add_wait_runtime(cfs_rq, se, delta_fair);
}
delta_fair = div64_likely32((u64)delta_fair * load,
load + se->load.weight);
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
+ delta_fair = calc_weighted(delta_fair, se);
prev_runtime = se->wait_runtime;
__add_wait_runtime(cfs_rq, se, delta_fair);
#endif
}
+static void
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+{
+ struct sched_entity *last = __pick_last_entity(cfs_rq);
+ u64 min_runtime, latency;
+
+ min_runtime = cfs_rq->min_vruntime;
+ if (last) {
+ min_runtime += last->vruntime;
+ min_runtime >>= 1;
+ if (initial && sched_feat(START_DEBIT))
+ min_runtime += sysctl_sched_latency/2;
+ }
+
+ if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
+ latency = sysctl_sched_latency;
+ if (min_runtime > latency)
+ min_runtime -= latency;
+ else
+ min_runtime = 0;
+ }
+
+ se->vruntime = max(se->vruntime, min_runtime);
+}
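
A condensed user-space model of the placement rule implemented by place_entity() above, with the feature flags reduced to plain booleans and the latency target hard-coded for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define LATENCY_NS 20000000ULL		/* illustrative latency target */

/* min_vr is cfs_rq->min_vruntime, last_vr the rightmost queued
 * entity's vruntime (only meaningful when have_last is set). */
static uint64_t place(uint64_t se_vruntime, uint64_t min_vr,
		      bool have_last, uint64_t last_vr,
		      bool initial, bool start_debit, bool sleeper_bonus)
{
	uint64_t min_runtime = min_vr;

	if (have_last) {
		/* start halfway between the leftmost and rightmost tasks */
		min_runtime = (min_runtime + last_vr) / 2;
		/* new tasks are debited half a latency period up front */
		if (initial && start_debit)
			min_runtime += LATENCY_NS / 2;
	}

	/* sleepers are credited up to one full latency period */
	if (!initial && sleeper_bonus)
		min_runtime = min_runtime > LATENCY_NS ?
			      min_runtime - LATENCY_NS : 0;

	/* but an entity's own vruntime is never moved backwards */
	return se_vruntime > min_runtime ? se_vruntime : min_runtime;
}

int main(void)
{
	/* fork(): the child starts half a period behind the midpoint */
	printf("fork:   %llu\n", (unsigned long long)
	       place(0, 100000000, true, 140000000, true, true, false));
	/* wakeup: a long sleeper is pulled back to one period at most */
	printf("wakeup: %llu\n", (unsigned long long)
	       place(30000000, 100000000, true, 140000000, false, false, true));
	return 0;
}
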
+
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
*/
update_curr(cfs_rq);
- if (wakeup)
+ if (wakeup) {
+ place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
+ }
update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
* Preempt the current task with a newly woken task if needed:
*/
static void
-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
- struct sched_entity *curr, unsigned long granularity)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- s64 __delta = curr->fair_key - se->fair_key;
unsigned long ideal_runtime, delta_exec;
- /*
- * ideal_runtime is compared against sum_exec_runtime, which is
- * walltime, hence do not scale.
- */
- ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
- (unsigned long)sysctl_sched_min_granularity);
-
- /*
- * If we executed more than what the latency constraint suggests,
- * reduce the rescheduling granularity. This way the total latency
- * of how much a task is not scheduled converges to
- * sysctl_sched_latency:
- */
+ ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime)
- granularity = 0;
-
- /*
- * Take scheduling granularity into account - do not
- * preempt the current task unless the best task has
- * a larger than sched_granularity fairness advantage:
- *
- * scale granularity as key space is in fair_clock.
- */
- if (__delta > niced_granularity(curr, granularity))
resched_task(rq_of(cfs_rq)->curr);
}
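
With the slice expressed in wall-clock time, tick preemption reduces to one comparison. A worked example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Illustration of the new check_preempt_tick() rule: the slice is
 * compared against plain wall-clock runtime accumulated since the task
 * was last picked, not against any fair-clock key. */
int main(void)
{
	uint64_t ideal_runtime = 10000000;	/* sched_slice(): 10 ms     */
	uint64_t sum_exec      = 250500000;	/* total runtime so far     */
	uint64_t prev_sum_exec = 240000000;	/* runtime when last picked */
	uint64_t delta_exec    = sum_exec - prev_sum_exec;

	if (delta_exec > ideal_runtime)
		printf("resched: ran %llu ns of a %llu ns slice\n",
		       (unsigned long long)delta_exec,
		       (unsigned long long)ideal_runtime);
	return 0;
}
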
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- struct sched_entity *next;
-
/*
* Dequeue and enqueue the task to update its
* position within the tree:
dequeue_entity(cfs_rq, curr, 0);
enqueue_entity(cfs_rq, curr, 0);
- /*
- * Reschedule if another task tops the current one.
- */
- next = __pick_next_entity(cfs_rq);
- if (next == curr)
- return;
-
- __check_preempt_curr_fair(cfs_rq, next, curr,
- sched_granularity(cfs_rq));
+ if (cfs_rq->nr_running > 1)
+ check_preempt_tick(cfs_rq, curr);
}
/**************************************************
/*
* Preempt the current task with a newly woken task if needed:
*/
-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- unsigned long gran;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
resched_task(curr);
return;
}
+ if (is_same_group(curr, p)) {
+ s64 delta = curr->se.vruntime - p->se.vruntime;
- gran = sysctl_sched_wakeup_granularity;
- /*
- * Batch tasks prefer throughput over latency:
- */
- if (unlikely(p->policy == SCHED_BATCH))
- gran = sysctl_sched_batch_wakeup_granularity;
-
- if (is_same_group(curr, p))
- __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
+ if (delta > (s64)sysctl_sched_wakeup_granularity)
+ resched_task(curr);
+ }
}
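
Wakeup preemption likewise becomes a direct vruntime comparison against the (unscaled) wakeup granularity. A toy example using the patched 2 ms default:

#include <stdio.h>
#include <stdint.h>

#define WAKEUP_GRANULARITY_NS 2000000LL	/* new sysctl_sched_wakeup_granularity */

int main(void)
{
	int64_t curr_vruntime  = 105000000;	/* currently running task */
	int64_t woken_vruntime = 101000000;	/* newly woken task       */
	int64_t delta = curr_vruntime - woken_vruntime;

	/* Preempt only if the woken task trails far enough in vruntime
	 * to be worth the context switch: */
	if (delta > WAKEUP_GRANULARITY_NS)
		printf("preempt: woken task trails by %lld ns\n",
		       (long long)delta);
	return 0;
}
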
static struct task_struct *pick_next_task_fair(struct rq *rq)
}
}
+#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+
/*
* Share the fairness runtime between parent and child, thus the
* total amount of pressure for CPU stays equal - new tasks
sched_info_queued(p);
update_curr(cfs_rq);
- update_stats_enqueue(cfs_rq, se);
- /*
- * Child runs first: we let it run before the parent
- * until it reschedules once. We set up the key so that
- * it will preempt the parent:
- */
- se->fair_key = curr->fair_key -
- niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
+ place_entity(cfs_rq, se, 1);
+
/*
* The first wait is dominated by the child-runs-first logic,
* so do not credit it with that waiting time yet:
* -granularity/2, so initialize the task with that:
*/
if (sched_feat(START_DEBIT))
- se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
+ se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);
+
+ if (sysctl_sched_child_runs_first &&
+ curr->vruntime < se->vruntime) {
+ dequeue_entity(cfs_rq, curr, 0);
+ swap(curr->vruntime, se->vruntime);
+ enqueue_entity(cfs_rq, curr, 0);
+ }
+
+ update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
resched_task(rq->curr);
}
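
When child_runs_first applies and the parent would otherwise sort leftmost, the patch simply swaps the two vruntimes. A minimal stand-alone illustration of that swap, with invented values:

#include <stdio.h>

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
	/* Hypothetical placement: the parent happens to sit further left. */
	unsigned long long parent_vruntime = 100000000;
	unsigned long long child_vruntime  = 110000000;

	/* child_runs_first: exchange the keys so the child is picked next */
	if (parent_vruntime < child_vruntime)
		swap(parent_vruntime, child_vruntime);

	printf("parent %llu, child %llu\n", parent_vruntime, child_vruntime);
	return 0;
}
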
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
- .check_preempt_curr = check_preempt_curr_fair,
+ .check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair,