/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

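/*
 * For example (illustrative, assuming ilog is the base-2 integer log):
 * with the default SCHED_TUNABLESCALING_LOG on an 8-cpu machine the
 * factor is 1 + ilog(8) = 4, so the normalized 6ms latency above scales
 * to an effective 24ms; SCHED_TUNABLESCALING_LINEAR would give 48ms.
 */
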
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

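/*
 * With the defaults above this works out to 6000000 / 750000 = 8, i.e.
 * the latency period holds at most eight tasks before __sched_period()
 * starts stretching it.
 */
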
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

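/*
 * With CONFIG_FAIR_GROUP_SCHED each task_group owns one sched_entity
 * and one cfs_rq per cpu (tg->se[cpu], tg->cfs_rq[cpu]), so a task's
 * se->parent chain walks up through its enclosing groups.
 */
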
/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find their
	 * ancestors that are siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

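/*
 * Note that the (s64) casts above make the comparisons safe against
 * vruntime wrap-around: e.g. in 8-bit arithmetic (u8)5 - (u8)250 = 11,
 * and the positive signed difference correctly orders 5 "after" 250.
 */
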
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
	WRT_SYSCTL(sched_shares_ratelimit);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

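/*
 * E.g. a nice-0 entity (load.weight == NICE_0_LOAD == 1024) passes
 * delta through unchanged, while an entity of weight 2048 sees
 * delta * 1024/2048, i.e. its vruntime advances at half wall speed.
 */
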
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (> sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}

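/*
 * E.g. with the (unscaled) defaults: 8 runnable tasks or fewer share a
 * 6ms period; 16 runnable tasks stretch it to 16 * 0.75ms = 12ms.
 */
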
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

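/*
 * E.g. two runnable nice-0 tasks in a 6ms period each get
 * 6ms * 1024/2048 = 3ms of wall time.
 */
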
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->statistics.sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.sleep_max))
			se->statistics.sleep_max = delta;

		se->statistics.sleep_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->statistics.block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.block_max))
			se->statistics.block_max = delta;

		se->statistics.block_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->statistics.iowait_sum += delta;
				se->statistics.iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}

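/*
 * E.g. with GENTLE_FAIR_SLEEPERS and an (unscaled) 6ms sched_latency,
 * a waking sleeper is placed at min_vruntime - 3ms, so it preempts soon
 * but can only "owe" half a latency period; the max_vruntime() clamp
 * above keeps short sleeps from earning any extra credit.
 */
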
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
	 */
	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
		se->vruntime += cfs_rq->min_vruntime;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->statistics.block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);

	/*
	 * Normalize the entity after updating the min_vruntime because the
	 * update can refer to the ->curr item and we need to reflect this
	 * movement in our normalized position.
	 */
	if (!(flags & DEQUEUE_SLEEP))
		se->vruntime -= cfs_rq->min_vruntime;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}

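/*
 * E.g. with two nice-0 tasks each ideal_runtime is 3ms, so the running
 * task is preempted once it has consumed its 3ms slice, or earlier if
 * it has built up more than 3ms of vruntime lead over the leftmost
 * task - but never within the first 0.75ms (min granularity) of the
 * slice.
 */
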
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->statistics.slice_max = max(se->statistics.slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	clear_buddies(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, flags);
		flags = ENQUEUE_WAKEUP;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		flags |= DEQUEUE_SLEEP;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

static void task_waking_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	se->vruntime -= cfs_rq->min_vruntime;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate this by not only taking the current delta into account, but
 * also considering the delta between when the shares were last adjusted and
 * now.
 *
 * We still saw a performance dip, and some tracing showed that between
 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
 * significantly. Therefore try to bias the error in the direction of failing
 * the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs, does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}

#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	unsigned long this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	rcu_read_lock();
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpus are near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	if (this_load) {
		unsigned long this_eff_load, prev_eff_load;

		this_eff_load = 100;
		this_eff_load *= power_of(prev_cpu);
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
		prev_eff_load *= power_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

		balanced = this_eff_load <= prev_eff_load;
	} else
		balanced = true;
	rcu_read_unlock();

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}

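/*
 * E.g. assuming the common imbalance_pct of 125, the prev_eff_load side
 * gets a 100 + 25/2 = 112 multiplier against this side's 100, so the
 * wakeup is only pulled to this_cpu if its (power-scaled) load is at
 * most ~12% above prev_cpu's.
 */
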
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int select_idle_sibling(struct task_struct *p, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	struct sched_domain *sd;
	int i;

	/*
	 * If the task is going to be woken-up on this cpu and if it is
	 * already idle, then it is the right target.
	 */
	if (target == cpu && idle_cpu(cpu))
		return cpu;

	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
	if (target == prev_cpu && idle_cpu(prev_cpu))
		return prev_cpu;

	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
	for_each_domain(target, sd) {
		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
			break;

		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
			if (idle_cpu(i)) {
				target = i;
				break;
			}
		}

		/*
		 * Let's stop looking for an idle sibling when we reached
		 * the domain that spans the current cpu and prev_cpu.
		 */
		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
			break;
	}

	return target;
}

/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
			want_affine = 0;
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (sched_feat(LB_SHARES_UPDATE)) {
		/*
		 * Pick the largest domain to update shares over
		 */
		tmp = sd;
		if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
			tmp = affine_sd;

		if (tmp) {
			raw_spin_unlock(&rq->lock);
			update_shares(tmp);
			raw_spin_lock(&rq->lock);
		}
	}
#endif

	if (affine_sd) {
		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
			return select_idle_sibling(p, cpu);
		else
			return select_idle_sibling(p, prev_cpu);
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = sd->span_weight;
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return new_cpu;
}
#endif /* CONFIG_SMP */

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it's curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
	 * be smaller, again penalizing the lighter task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	if (unlikely(se->load.weight != NICE_0_LOAD))
		gran = calc_delta_fair(gran, se);

	return gran;
}

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

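/*
 * E.g. between two nice-0 tasks with the (unscaled) 1ms
 * sysctl_sched_wakeup_granularity, the waking task must lag curr's
 * vruntime by more than 1ms to force preemption, which keeps pairs of
 * tasks from ping-ponging on every wakeup.
 */
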
static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
		set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE))
		goto preempt;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	update_curr(cfs_rq);
	find_matching_se(&se, &pse);
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1)
		goto preempt;

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	check_preempt_curr(this_rq, p, 0);
}

1762
1763/*
1764 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1765 */
1766static
1767int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1768 struct sched_domain *sd, enum cpu_idle_type idle,
1769 int *all_pinned)
1770{
1771 int tsk_cache_hot = 0;
1772 /*
1773 * We do not migrate tasks that are:
1774 * 1) running (obviously), or
1775 * 2) cannot be migrated to this CPU due to cpus_allowed, or
1776 * 3) are cache-hot on their current CPU.
1777 */
1778 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
41acab88 1779 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
1e3c88bd
PZ
1780 return 0;
1781 }
1782 *all_pinned = 0;
1783
1784 if (task_running(rq, p)) {
41acab88 1785 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
1786 return 0;
1787 }
1788
1789 /*
1790 * Aggressive migration if:
1791 * 1) task is cache cold, or
1792 * 2) too many balance attempts have failed.
1793 */
1794
305e6835 1795 tsk_cache_hot = task_hot(p, rq->clock_task, sd);
1e3c88bd
PZ
1796 if (!tsk_cache_hot ||
1797 sd->nr_balance_failed > sd->cache_nice_tries) {
1798#ifdef CONFIG_SCHEDSTATS
1799 if (tsk_cache_hot) {
1800 schedstat_inc(sd, lb_hot_gained[idle]);
41acab88 1801 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd
PZ
1802 }
1803#endif
1804 return 1;
1805 }
1806
1807 if (tsk_cache_hot) {
41acab88 1808 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1e3c88bd
PZ
1809 return 0;
1810 }
1811 return 1;
1812}
1813
/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct task_struct *p, *n;
	struct cfs_rq *cfs_rq;
	int pinned = 0;

	for_each_leaf_cfs_rq(busiest, cfs_rq) {
		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {

			if (!can_migrate_task(p, busiest, this_cpu,
						sd, idle, &pinned))
				continue;

			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);
			return 1;
		}
	}

	return 0;
}

1e3c88bd
PZ
1850static unsigned long
1851balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1852 unsigned long max_load_move, struct sched_domain *sd,
1853 enum cpu_idle_type idle, int *all_pinned,
ee00e66f 1854 int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
1e3c88bd
PZ
1855{
1856 int loops = 0, pulled = 0, pinned = 0;
1e3c88bd 1857 long rem_load_move = max_load_move;
ee00e66f 1858 struct task_struct *p, *n;
1e3c88bd
PZ
1859
1860 if (max_load_move == 0)
1861 goto out;
1862
1863 pinned = 1;
1864
ee00e66f
PZ
1865 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
1866 if (loops++ > sysctl_sched_nr_migrate)
1867 break;
1e3c88bd 1868
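/*
* Skip a task if its weight is more than twice the load we still
* want to move, or if it may not be migrated to this cpu at all.
*/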
ee00e66f
PZ
1869 if ((p->se.load.weight >> 1) > rem_load_move ||
1870 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
1871 continue;
1e3c88bd 1872
ee00e66f
PZ
1873 pull_task(busiest, p, this_rq, this_cpu);
1874 pulled++;
1875 rem_load_move -= p->se.load.weight;
1e3c88bd
PZ
1876
1877#ifdef CONFIG_PREEMPT
ee00e66f
PZ
1878 /*
1879 * NEWIDLE balancing is a source of latency, so preemptible
1880 * kernels will stop after the first task is pulled to minimize
1881 * the critical section.
1882 */
1883 if (idle == CPU_NEWLY_IDLE)
1884 break;
1e3c88bd
PZ
1885#endif
1886
ee00e66f
PZ
1887 /*
1888 * We only want to steal up to the prescribed amount of
1889 * weighted load.
1890 */
1891 if (rem_load_move <= 0)
1892 break;
1893
1e3c88bd
PZ
1894 if (p->prio < *this_best_prio)
1895 *this_best_prio = p->prio;
1e3c88bd
PZ
1896 }
1897out:
1898 /*
1899 * Right now, this is one of only two places pull_task() is called,
1900 * so we can safely collect pull_task() stats here rather than
1901 * inside pull_task().
1902 */
1903 schedstat_add(sd, lb_gained[idle], pulled);
1904
1905 if (all_pinned)
1906 *all_pinned = pinned;
1907
1908 return max_load_move - rem_load_move;
1909}
1910
230059de
PZ
1911#ifdef CONFIG_FAIR_GROUP_SCHED
1912static unsigned long
1913load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1914 unsigned long max_load_move,
1915 struct sched_domain *sd, enum cpu_idle_type idle,
1916 int *all_pinned, int *this_best_prio)
1917{
1918 long rem_load_move = max_load_move;
1919 int busiest_cpu = cpu_of(busiest);
1920 struct task_group *tg;
1921
1922 rcu_read_lock();
1923 update_h_load(busiest_cpu);
1924
1925 list_for_each_entry_rcu(tg, &task_groups, list) {
1926 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1927 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1928 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1929 u64 rem_load, moved_load;
1930
1931 /*
1932 * empty group
1933 */
1934 if (!busiest_cfs_rq->task_weight)
1935 continue;
1936
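/*
* Convert the remaining sd-level load target into this group's
* local weight units (rem_load ~= rem_load_move * load.weight / h_load);
* the amount actually moved is converted back below before it is
* subtracted from rem_load_move.
*/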
1937 rem_load = (u64)rem_load_move * busiest_weight;
1938 rem_load = div_u64(rem_load, busiest_h_load + 1);
1939
1940 moved_load = balance_tasks(this_rq, this_cpu, busiest,
1941 rem_load, sd, idle, all_pinned, this_best_prio,
1942 busiest_cfs_rq);
1943
1944 if (!moved_load)
1945 continue;
1946
1947 moved_load *= busiest_h_load;
1948 moved_load = div_u64(moved_load, busiest_weight + 1);
1949
1950 rem_load_move -= moved_load;
1951 if (rem_load_move < 0)
1952 break;
1953 }
1954 rcu_read_unlock();
1955
1956 return max_load_move - rem_load_move;
1957}
1958#else
1959static unsigned long
1960load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1961 unsigned long max_load_move,
1962 struct sched_domain *sd, enum cpu_idle_type idle,
1963 int *all_pinned, int *this_best_prio)
1964{
1965 return balance_tasks(this_rq, this_cpu, busiest,
1966 max_load_move, sd, idle, all_pinned,
1967 this_best_prio, &busiest->cfs);
1968}
1969#endif
1970
1e3c88bd
PZ
1971/*
1972 * move_tasks tries to move up to max_load_move weighted load from busiest to
1973 * this_rq, as part of a balancing operation within domain "sd".
1974 * Returns 1 if successful and 0 otherwise.
1975 *
1976 * Called with both runqueues locked.
1977 */
1978static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1979 unsigned long max_load_move,
1980 struct sched_domain *sd, enum cpu_idle_type idle,
1981 int *all_pinned)
1982{
3d45fd80 1983 unsigned long total_load_moved = 0, load_moved;
1e3c88bd
PZ
1984 int this_best_prio = this_rq->curr->prio;
1985
1986 do {
3d45fd80 1987 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
1e3c88bd
PZ
1988 max_load_move - total_load_moved,
1989 sd, idle, all_pinned, &this_best_prio);
3d45fd80
PZ
1990
1991 total_load_moved += load_moved;
1e3c88bd
PZ
1992
1993#ifdef CONFIG_PREEMPT
1994 /*
1995 * NEWIDLE balancing is a source of latency, so preemptible
1996 * kernels will stop after the first task is pulled to minimize
1997 * the critical section.
1998 */
1999 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2000 break;
baa8c110
PZ
2001
2002 if (raw_spin_is_contended(&this_rq->lock) ||
2003 raw_spin_is_contended(&busiest->lock))
2004 break;
1e3c88bd 2005#endif
3d45fd80 2006 } while (load_moved && max_load_move > total_load_moved);
1e3c88bd
PZ
2007
2008 return total_load_moved > 0;
2009}
2010
1e3c88bd
PZ
2011/********** Helpers for find_busiest_group ************************/
2012/*
2013 * sd_lb_stats - Structure to store the statistics of a sched_domain
2014 * during load balancing.
2015 */
2016struct sd_lb_stats {
2017 struct sched_group *busiest; /* Busiest group in this sd */
2018 struct sched_group *this; /* Local group in this sd */
2019 unsigned long total_load; /* Total load of all groups in sd */
2020 unsigned long total_pwr; /* Total power of all groups in sd */
2021 unsigned long avg_load; /* Average load across all groups in sd */
2022
2023 /** Statistics of this group */
2024 unsigned long this_load;
2025 unsigned long this_load_per_task;
2026 unsigned long this_nr_running;
fab47622 2027 unsigned long this_has_capacity;
aae6d3dd 2028 unsigned int this_idle_cpus;
1e3c88bd
PZ
2029
2030 /* Statistics of the busiest group */
aae6d3dd 2031 unsigned int busiest_idle_cpus;
1e3c88bd
PZ
2032 unsigned long max_load;
2033 unsigned long busiest_load_per_task;
2034 unsigned long busiest_nr_running;
dd5feea1 2035 unsigned long busiest_group_capacity;
fab47622 2036 unsigned long busiest_has_capacity;
aae6d3dd 2037 unsigned int busiest_group_weight;
1e3c88bd
PZ
2038
2039 int group_imb; /* Is there imbalance in this sd */
2040#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2041 int power_savings_balance; /* Is powersave balance needed for this sd */
2042 struct sched_group *group_min; /* Least loaded group in sd */
2043 struct sched_group *group_leader; /* Group which relieves group_min */
2044 unsigned long min_load_per_task; /* load_per_task in group_min */
2045 unsigned long leader_nr_running; /* Nr running of group_leader */
2046 unsigned long min_nr_running; /* Nr running of group_min */
2047#endif
2048};
2049
2050/*
2051 * sg_lb_stats - stats of a sched_group required for load_balancing
2052 */
2053struct sg_lb_stats {
2054 unsigned long avg_load; /*Avg load across the CPUs of the group */
2055 unsigned long group_load; /* Total load over the CPUs of the group */
2056 unsigned long sum_nr_running; /* Nr tasks running in the group */
2057 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2058 unsigned long group_capacity;
aae6d3dd
SS
2059 unsigned long idle_cpus;
2060 unsigned long group_weight;
1e3c88bd 2061 int group_imb; /* Is there an imbalance in the group ? */
fab47622 2062 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
2063};
2064
2065/**
2066 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2067 * @group: The group whose first cpu is to be returned.
2068 */
2069static inline unsigned int group_first_cpu(struct sched_group *group)
2070{
2071 return cpumask_first(sched_group_cpus(group));
2072}
2073
2074/**
2075 * get_sd_load_idx - Obtain the load index for a given sched domain.
2076 * @sd: The sched_domain whose load_idx is to be obtained.
2077 * @idle: The idle status of the CPU for whose sd the load_idx is obtained.
2078 */
2079static inline int get_sd_load_idx(struct sched_domain *sd,
2080 enum cpu_idle_type idle)
2081{
2082 int load_idx;
2083
2084 switch (idle) {
2085 case CPU_NOT_IDLE:
2086 load_idx = sd->busy_idx;
2087 break;
2088
2089 case CPU_NEWLY_IDLE:
2090 load_idx = sd->newidle_idx;
2091 break;
2092 default:
2093 load_idx = sd->idle_idx;
2094 break;
2095 }
2096
2097 return load_idx;
2098}
2099
2100
2101#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2102/**
2103 * init_sd_power_savings_stats - Initialize power savings statistics for
2104 * the given sched_domain, during load balancing.
2105 *
2106 * @sd: Sched domain whose power-savings statistics are to be initialized.
2107 * @sds: Variable containing the statistics for sd.
2108 * @idle: Idle status of the CPU at which we're performing load-balancing.
2109 */
2110static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2111 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2112{
2113 /*
2114 * Busy processors will not participate in power savings
2115 * balance.
2116 */
2117 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2118 sds->power_savings_balance = 0;
2119 else {
2120 sds->power_savings_balance = 1;
2121 sds->min_nr_running = ULONG_MAX;
2122 sds->leader_nr_running = 0;
2123 }
2124}
2125
2126/**
2127 * update_sd_power_savings_stats - Update the power saving stats for a
2128 * sched_domain while performing load balancing.
2129 *
2130 * @group: sched_group belonging to the sched_domain under consideration.
2131 * @sds: Variable containing the statistics of the sched_domain
2132 * @local_group: Does group contain the CPU for which we're performing
2133 * load balancing?
2134 * @sgs: Variable containing the statistics of the group.
2135 */
2136static inline void update_sd_power_savings_stats(struct sched_group *group,
2137 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2138{
2139
2140 if (!sds->power_savings_balance)
2141 return;
2142
2143 /*
2144 * If the local group is idle or completely loaded
2145 * no need to do power savings balance at this domain
2146 */
2147 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2148 !sds->this_nr_running))
2149 sds->power_savings_balance = 0;
2150
2151 /*
2152 * If a group is already running at full capacity or idle,
2153 * don't include that group in power savings calculations
2154 */
2155 if (!sds->power_savings_balance ||
2156 sgs->sum_nr_running >= sgs->group_capacity ||
2157 !sgs->sum_nr_running)
2158 return;
2159
2160 /*
2161 * Calculate the group which has the least non-idle load.
2162 * This is the group from which we need to pick up the load
2163 * for saving power.
2164 */
2165 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2166 (sgs->sum_nr_running == sds->min_nr_running &&
2167 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2168 sds->group_min = group;
2169 sds->min_nr_running = sgs->sum_nr_running;
2170 sds->min_load_per_task = sgs->sum_weighted_load /
2171 sgs->sum_nr_running;
2172 }
2173
2174 /*
2175 * Calculate the group which is nearly at its
2176 * capacity but still has some space to pick up some load
2177 * from other groups and save more power.
2178 */
2179 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2180 return;
2181
2182 if (sgs->sum_nr_running > sds->leader_nr_running ||
2183 (sgs->sum_nr_running == sds->leader_nr_running &&
2184 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2185 sds->group_leader = group;
2186 sds->leader_nr_running = sgs->sum_nr_running;
2187 }
2188}
2189
2190/**
2191 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2192 * @sds: Variable containing the statistics of the sched_domain
2193 * under consideration.
2194 * @this_cpu: Cpu at which we're currently performing load-balancing.
2195 * @imbalance: Variable to store the imbalance.
2196 *
2197 * Description:
2198 * Check if we have potential to perform some power-savings balance.
2199 * If yes, set the busiest group to be the least loaded group in the
2200 * sched_domain, so that its CPUs can be put to idle.
2201 *
2202 * Returns 1 if there is potential to perform power-savings balance.
2203 * Else returns 0.
2204 */
2205static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2206 int this_cpu, unsigned long *imbalance)
2207{
2208 if (!sds->power_savings_balance)
2209 return 0;
2210
2211 if (sds->this != sds->group_leader ||
2212 sds->group_leader == sds->group_min)
2213 return 0;
2214
2215 *imbalance = sds->min_load_per_task;
2216 sds->busiest = sds->group_min;
2217
2218 return 1;
2219
2220}
2221#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2222static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2223 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2224{
2225 return;
2226}
2227
2228static inline void update_sd_power_savings_stats(struct sched_group *group,
2229 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2230{
2231 return;
2232}
2233
2234static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2235 int this_cpu, unsigned long *imbalance)
2236{
2237 return 0;
2238}
2239#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2240
2241
2242unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2243{
2244 return SCHED_LOAD_SCALE;
2245}
2246
2247unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2248{
2249 return default_scale_freq_power(sd, cpu);
2250}
2251
2252unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2253{
669c55e9 2254 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
2255 unsigned long smt_gain = sd->smt_gain;
2256
2257 smt_gain /= weight;
2258
2259 return smt_gain;
2260}
2261
2262unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2263{
2264 return default_scale_smt_power(sd, cpu);
2265}
2266
2267unsigned long scale_rt_power(int cpu)
2268{
2269 struct rq *rq = cpu_rq(cpu);
2270 u64 total, available;
2271
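/*
* Compute the fraction of the averaging window left over after
* real-time activity; cpu_power is scaled down by this fraction.
*/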
1e3c88bd 2272 total = sched_avg_period() + (rq->clock - rq->age_stamp);
aa483808
VP
2273
2274 if (unlikely(total < rq->rt_avg)) {
2275 /* Ensures that power won't end up being negative */
2276 available = 0;
2277 } else {
2278 available = total - rq->rt_avg;
2279 }
1e3c88bd
PZ
2280
2281 if (unlikely((s64)total < SCHED_LOAD_SCALE))
2282 total = SCHED_LOAD_SCALE;
2283
2284 total >>= SCHED_LOAD_SHIFT;
2285
2286 return div_u64(available, total);
2287}
2288
2289static void update_cpu_power(struct sched_domain *sd, int cpu)
2290{
669c55e9 2291 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
2292 unsigned long power = SCHED_LOAD_SCALE;
2293 struct sched_group *sdg = sd->groups;
2294
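/*
* Start from SCHED_LOAD_SCALE and scale it down in turn by the SMT
* factor, the (arch) frequency factor and the time left over from
* real-time activity; each step shifts the running product back by
* SCHED_LOAD_SHIFT.
*/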
1e3c88bd
PZ
2295 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2296 if (sched_feat(ARCH_POWER))
2297 power *= arch_scale_smt_power(sd, cpu);
2298 else
2299 power *= default_scale_smt_power(sd, cpu);
2300
2301 power >>= SCHED_LOAD_SHIFT;
2302 }
2303
9d5efe05
SV
2304 sdg->cpu_power_orig = power;
2305
2306 if (sched_feat(ARCH_POWER))
2307 power *= arch_scale_freq_power(sd, cpu);
2308 else
2309 power *= default_scale_freq_power(sd, cpu);
2310
2311 power >>= SCHED_LOAD_SHIFT;
2312
1e3c88bd
PZ
2313 power *= scale_rt_power(cpu);
2314 power >>= SCHED_LOAD_SHIFT;
2315
2316 if (!power)
2317 power = 1;
2318
e51fd5e2 2319 cpu_rq(cpu)->cpu_power = power;
1e3c88bd
PZ
2320 sdg->cpu_power = power;
2321}
2322
2323static void update_group_power(struct sched_domain *sd, int cpu)
2324{
2325 struct sched_domain *child = sd->child;
2326 struct sched_group *group, *sdg = sd->groups;
2327 unsigned long power;
2328
2329 if (!child) {
2330 update_cpu_power(sd, cpu);
2331 return;
2332 }
2333
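/* This group's power is the sum of the child domain's group powers. */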
2334 power = 0;
2335
2336 group = child->groups;
2337 do {
2338 power += group->cpu_power;
2339 group = group->next;
2340 } while (group != child->groups);
2341
2342 sdg->cpu_power = power;
2343}
2344
9d5efe05
SV
2345/*
2346 * Try and fix up capacity for tiny siblings; this is needed when
2347 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2348 * which on its own isn't powerful enough.
2349 *
2350 * See update_sd_pick_busiest() and check_asym_packing().
2351 */
2352static inline int
2353fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2354{
2355 /*
2356 * Only siblings can have significantly less than SCHED_LOAD_SCALE
2357 */
2358 if (sd->level != SD_LV_SIBLING)
2359 return 0;
2360
2361 /*
2362 * If at least ~90% (29/32) of the cpu_power is still there, we're good.
2363 */
694f5a11 2364 if (group->cpu_power * 32 > group->cpu_power_orig * 29)
9d5efe05
SV
2365 return 1;
2366
2367 return 0;
2368}
2369
1e3c88bd
PZ
2370/**
2371 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2372 * @sd: The sched_domain whose statistics are to be updated.
2373 * @group: sched_group whose statistics are to be updated.
2374 * @this_cpu: Cpu for which load balance is currently performed.
2375 * @idle: Idle status of this_cpu
2376 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2377 * @sd_idle: Idle status of the sched_domain containing group.
2378 * @local_group: Does group contain this_cpu.
2379 * @cpus: Set of cpus considered for load balancing.
2380 * @balance: Should we balance.
2381 * @sgs: variable to hold the statistics for this group.
2382 */
2383static inline void update_sg_lb_stats(struct sched_domain *sd,
2384 struct sched_group *group, int this_cpu,
2385 enum cpu_idle_type idle, int load_idx, int *sd_idle,
2386 int local_group, const struct cpumask *cpus,
2387 int *balance, struct sg_lb_stats *sgs)
2388{
2582f0eb 2389 unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
1e3c88bd
PZ
2390 int i;
2391 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 2392 unsigned long avg_load_per_task = 0;
1e3c88bd 2393
871e35bc 2394 if (local_group)
1e3c88bd 2395 balance_cpu = group_first_cpu(group);
1e3c88bd
PZ
2396
2397 /* Tally up the load of all CPUs in the group */
1e3c88bd
PZ
2398 max_cpu_load = 0;
2399 min_cpu_load = ~0UL;
2582f0eb 2400 max_nr_running = 0;
1e3c88bd
PZ
2401
2402 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2403 struct rq *rq = cpu_rq(i);
2404
2405 if (*sd_idle && rq->nr_running)
2406 *sd_idle = 0;
2407
2408 /* Bias balancing toward cpus of our domain */
2409 if (local_group) {
2410 if (idle_cpu(i) && !first_idle_cpu) {
2411 first_idle_cpu = 1;
2412 balance_cpu = i;
2413 }
2414
2415 load = target_load(i, load_idx);
2416 } else {
2417 load = source_load(i, load_idx);
2582f0eb 2418 if (load > max_cpu_load) {
1e3c88bd 2419 max_cpu_load = load;
2582f0eb
NR
2420 max_nr_running = rq->nr_running;
2421 }
1e3c88bd
PZ
2422 if (min_cpu_load > load)
2423 min_cpu_load = load;
2424 }
2425
2426 sgs->group_load += load;
2427 sgs->sum_nr_running += rq->nr_running;
2428 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
2429 if (idle_cpu(i))
2430 sgs->idle_cpus++;
1e3c88bd
PZ
2431 }
2432
2433 /*
2434 * The first idle cpu, or the first cpu (busiest) in this sched group,
2435 * is eligible for doing load balancing at this domain and above.
2436 * In the newly idle case, we allow all the cpus
2437 * to do the newly idle load balance.
2438 */
bbc8cb5b
PZ
2439 if (idle != CPU_NEWLY_IDLE && local_group) {
2440 if (balance_cpu != this_cpu) {
2441 *balance = 0;
2442 return;
2443 }
2444 update_group_power(sd, this_cpu);
1e3c88bd
PZ
2445 }
2446
2447 /* Adjust by relative CPU power of the group */
2448 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
2449
1e3c88bd
PZ
2450 /*
2451 * Consider the group unbalanced when the imbalance is larger
2452 * than the average weight of two tasks.
2453 *
2454 * APZ: with cgroup the avg task weight can vary wildly and
2455 * might not be a suitable number - should we keep a
2456 * normalized nr_running number somewhere that negates
2457 * the hierarchy?
2458 */
dd5feea1
SS
2459 if (sgs->sum_nr_running)
2460 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 2461
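/*
* Illustration (ignoring load-average smoothing): three nice-0 tasks
* (weight 1024 each) stacked on one cpu while another cpu in the
* group sits idle gives max_cpu_load - min_cpu_load = 3072, which
* exceeds 2 * avg_load_per_task (2048), so group_imb is set.
*/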
2582f0eb 2462 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
1e3c88bd
PZ
2463 sgs->group_imb = 1;
2464
2582f0eb 2465 sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
9d5efe05
SV
2466 if (!sgs->group_capacity)
2467 sgs->group_capacity = fix_small_capacity(sd, group);
aae6d3dd 2468 sgs->group_weight = group->group_weight;
fab47622
NR
2469
2470 if (sgs->group_capacity > sgs->sum_nr_running)
2471 sgs->group_has_capacity = 1;
1e3c88bd
PZ
2472}
2473
532cb4c4
MN
2474/**
2475 * update_sd_pick_busiest - return 1 on busiest group
2476 * @sd: sched_domain whose statistics are to be checked
2477 * @sds: sched_domain statistics
2478 * @sg: sched_group candidate to be checked for being the busiest
b6b12294
MN
2479 * @sgs: sched_group statistics
2480 * @this_cpu: the current cpu
532cb4c4
MN
2481 *
2482 * Determine if @sg is a busier group than the previously selected
2483 * busiest group.
2484 */
2485static bool update_sd_pick_busiest(struct sched_domain *sd,
2486 struct sd_lb_stats *sds,
2487 struct sched_group *sg,
2488 struct sg_lb_stats *sgs,
2489 int this_cpu)
2490{
2491 if (sgs->avg_load <= sds->max_load)
2492 return false;
2493
2494 if (sgs->sum_nr_running > sgs->group_capacity)
2495 return true;
2496
2497 if (sgs->group_imb)
2498 return true;
2499
2500 /*
2501 * ASYM_PACKING needs to move all the work to the lowest
2502 * numbered CPUs in the group, so mark all groups
2503 * higher than ourselves as busy.
2504 */
2505 if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2506 this_cpu < group_first_cpu(sg)) {
2507 if (!sds->busiest)
2508 return true;
2509
2510 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2511 return true;
2512 }
2513
2514 return false;
2515}
2516
1e3c88bd
PZ
2517/**
2518 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
2519 * @sd: sched_domain whose statistics are to be updated.
2520 * @this_cpu: Cpu for which load balance is currently performed.
2521 * @idle: Idle status of this_cpu
532cb4c4 2522 * @sd_idle: Idle status of the sched_domain containing sg.
1e3c88bd
PZ
2523 * @cpus: Set of cpus considered for load balancing.
2524 * @balance: Should we balance.
2525 * @sds: variable to hold the statistics for this sched_domain.
2526 */
2527static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2528 enum cpu_idle_type idle, int *sd_idle,
2529 const struct cpumask *cpus, int *balance,
2530 struct sd_lb_stats *sds)
2531{
2532 struct sched_domain *child = sd->child;
532cb4c4 2533 struct sched_group *sg = sd->groups;
1e3c88bd
PZ
2534 struct sg_lb_stats sgs;
2535 int load_idx, prefer_sibling = 0;
2536
2537 if (child && child->flags & SD_PREFER_SIBLING)
2538 prefer_sibling = 1;
2539
2540 init_sd_power_savings_stats(sd, sds, idle);
2541 load_idx = get_sd_load_idx(sd, idle);
2542
2543 do {
2544 int local_group;
2545
532cb4c4 2546 local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
1e3c88bd 2547 memset(&sgs, 0, sizeof(sgs));
532cb4c4 2548 update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
1e3c88bd
PZ
2549 local_group, cpus, balance, &sgs);
2550
8f190fb3 2551 if (local_group && !(*balance))
1e3c88bd
PZ
2552 return;
2553
2554 sds->total_load += sgs.group_load;
532cb4c4 2555 sds->total_pwr += sg->cpu_power;
1e3c88bd
PZ
2556
2557 /*
2558 * In case the child domain prefers tasks go to siblings
532cb4c4 2559 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
2560 * and move all the excess tasks away. We lower the capacity
2561 * of a group only if the local group has the capacity to fit
2562 * these excess tasks, i.e. nr_running < group_capacity. The
2563 * extra check prevents the case where you always pull from the
2564 * heaviest group when it is already under-utilized (possible
2565 * when a large weight task outweighs the other tasks on the system).
1e3c88bd 2566 */
75dd321d 2567 if (prefer_sibling && !local_group && sds->this_has_capacity)
1e3c88bd
PZ
2568 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2569
2570 if (local_group) {
2571 sds->this_load = sgs.avg_load;
532cb4c4 2572 sds->this = sg;
1e3c88bd
PZ
2573 sds->this_nr_running = sgs.sum_nr_running;
2574 sds->this_load_per_task = sgs.sum_weighted_load;
fab47622 2575 sds->this_has_capacity = sgs.group_has_capacity;
aae6d3dd 2576 sds->this_idle_cpus = sgs.idle_cpus;
532cb4c4 2577 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
1e3c88bd 2578 sds->max_load = sgs.avg_load;
532cb4c4 2579 sds->busiest = sg;
1e3c88bd 2580 sds->busiest_nr_running = sgs.sum_nr_running;
aae6d3dd 2581 sds->busiest_idle_cpus = sgs.idle_cpus;
dd5feea1 2582 sds->busiest_group_capacity = sgs.group_capacity;
1e3c88bd 2583 sds->busiest_load_per_task = sgs.sum_weighted_load;
fab47622 2584 sds->busiest_has_capacity = sgs.group_has_capacity;
aae6d3dd 2585 sds->busiest_group_weight = sgs.group_weight;
1e3c88bd
PZ
2586 sds->group_imb = sgs.group_imb;
2587 }
2588
532cb4c4
MN
2589 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2590 sg = sg->next;
2591 } while (sg != sd->groups);
2592}
2593
2ec57d44 2594int __weak arch_sd_sibling_asym_packing(void)
532cb4c4
MN
2595{
2596 return 0*SD_ASYM_PACKING;
2597}
2598
2599/**
2600 * check_asym_packing - Check to see if the group is packed into the
2601 * sched domain.
2602 *
2603 * This is primarily intended to be used at the sibling level. Some
2604 * cores like POWER7 prefer to use lower numbered SMT threads. In the
2605 * case of POWER7, it can move to lower SMT modes only when higher
2606 * threads are idle. When in lower SMT modes, the threads will
2607 * perform better since they share less core resources. Hence when we
2608 * have idle threads, we want them to be the higher ones.
2609 *
2610 * This packing function is run on idle threads. It checks to see if
2611 * the busiest CPU in this domain (core in the P7 case) has a higher
2612 * CPU number than the packing function is being run on. Here we are
2613 * assuming a lower CPU number will be equivalent to a lower SMT thread
2614 * number.
2615 *
b6b12294
MN
2616 * Returns 1 when packing is required and a task should be moved to
2617 * this CPU. The amount of the imbalance is returned in *imbalance.
2618 *
532cb4c4
MN
2619 * @sd: The sched_domain whose packing is to be checked.
2620 * @sds: Statistics of the sched_domain which is to be packed
2621 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2622 * @imbalance: returns the amount of imbalance due to packing.
532cb4c4
MN
2623 */
2624static int check_asym_packing(struct sched_domain *sd,
2625 struct sd_lb_stats *sds,
2626 int this_cpu, unsigned long *imbalance)
2627{
2628 int busiest_cpu;
2629
2630 if (!(sd->flags & SD_ASYM_PACKING))
2631 return 0;
2632
2633 if (!sds->busiest)
2634 return 0;
2635
2636 busiest_cpu = group_first_cpu(sds->busiest);
2637 if (this_cpu > busiest_cpu)
2638 return 0;
2639
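/*
* Ask for the busiest group's entire load to be moved; max_load is
* the group's avg_load (already scaled by SCHED_LOAD_SCALE / cpu_power),
* so scale it back to recover the raw group load.
*/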
2640 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
2641 SCHED_LOAD_SCALE);
2642 return 1;
1e3c88bd
PZ
2643}
2644
2645/**
2646 * fix_small_imbalance - Calculate the minor imbalance that exists
2647 * amongst the groups of a sched_domain, during
2648 * load balancing.
2649 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2650 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2651 * @imbalance: Variable to store the imbalance.
2652 */
2653static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2654 int this_cpu, unsigned long *imbalance)
2655{
2656 unsigned long tmp, pwr_now = 0, pwr_move = 0;
2657 unsigned int imbn = 2;
dd5feea1 2658 unsigned long scaled_busy_load_per_task;
1e3c88bd
PZ
2659
2660 if (sds->this_nr_running) {
2661 sds->this_load_per_task /= sds->this_nr_running;
2662 if (sds->busiest_load_per_task >
2663 sds->this_load_per_task)
2664 imbn = 1;
2665 } else
2666 sds->this_load_per_task =
2667 cpu_avg_load_per_task(this_cpu);
2668
dd5feea1
SS
2669 scaled_busy_load_per_task = sds->busiest_load_per_task
2670 * SCHED_LOAD_SCALE;
2671 scaled_busy_load_per_task /= sds->busiest->cpu_power;
2672
2673 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2674 (scaled_busy_load_per_task * imbn)) {
1e3c88bd
PZ
2675 *imbalance = sds->busiest_load_per_task;
2676 return;
2677 }
2678
2679 /*
2680 * OK, we don't have enough imbalance to justify moving tasks;
2681 * however, we may be able to increase the total CPU power used by
2682 * moving them.
2683 */
2684
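/*
* Estimate the throughput obtained now (pwr_now) and after moving
* one busiest_load_per_task worth of load (pwr_move), both in
* cpu_power units; the move is only requested if it gains us
* throughput (pwr_move > pwr_now below).
*/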
2685 pwr_now += sds->busiest->cpu_power *
2686 min(sds->busiest_load_per_task, sds->max_load);
2687 pwr_now += sds->this->cpu_power *
2688 min(sds->this_load_per_task, sds->this_load);
2689 pwr_now /= SCHED_LOAD_SCALE;
2690
2691 /* Amount of load we'd subtract */
2692 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2693 sds->busiest->cpu_power;
2694 if (sds->max_load > tmp)
2695 pwr_move += sds->busiest->cpu_power *
2696 min(sds->busiest_load_per_task, sds->max_load - tmp);
2697
2698 /* Amount of load we'd add */
2699 if (sds->max_load * sds->busiest->cpu_power <
2700 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2701 tmp = (sds->max_load * sds->busiest->cpu_power) /
2702 sds->this->cpu_power;
2703 else
2704 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2705 sds->this->cpu_power;
2706 pwr_move += sds->this->cpu_power *
2707 min(sds->this_load_per_task, sds->this_load + tmp);
2708 pwr_move /= SCHED_LOAD_SCALE;
2709
2710 /* Move if we gain throughput */
2711 if (pwr_move > pwr_now)
2712 *imbalance = sds->busiest_load_per_task;
2713}
2714
2715/**
2716 * calculate_imbalance - Calculate the amount of imbalance present within the
2717 * groups of a given sched_domain during load balance.
2718 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2719 * @this_cpu: Cpu for which currently load balance is being performed.
2720 * @imbalance: The variable to store the imbalance.
2721 */
2722static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2723 unsigned long *imbalance)
2724{
dd5feea1
SS
2725 unsigned long max_pull, load_above_capacity = ~0UL;
2726
2727 sds->busiest_load_per_task /= sds->busiest_nr_running;
2728 if (sds->group_imb) {
2729 sds->busiest_load_per_task =
2730 min(sds->busiest_load_per_task, sds->avg_load);
2731 }
2732
1e3c88bd
PZ
2733 /*
2734 * In the presence of smp nice balancing, certain scenarios can have
2735 * max load less than avg load (as we skip the groups at or below
2736 * their cpu_power while calculating max_load).
2737 */
2738 if (sds->max_load < sds->avg_load) {
2739 *imbalance = 0;
2740 return fix_small_imbalance(sds, this_cpu, imbalance);
2741 }
2742
dd5feea1
SS
2743 if (!sds->group_imb) {
2744 /*
2745 * Don't want to pull so many tasks that a group would go idle.
2746 */
2747 load_above_capacity = (sds->busiest_nr_running -
2748 sds->busiest_group_capacity);
2749
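/*
* Count each excess task at full weight (SCHED_LOAD_SCALE) and put
* the result on the same scale as max_load/avg_load, i.e. multiply
* by SCHED_LOAD_SCALE and divide by the group's cpu_power.
*/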
2750 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
2751
2752 load_above_capacity /= sds->busiest->cpu_power;
2753 }
2754
2755 /*
2756 * We're trying to get all the cpus to the average_load, so we don't
2757 * want to push ourselves above the average load, nor do we wish to
2758 * reduce the max loaded cpu below the average load. At the same time,
2759 * we also don't want to reduce the group load below the group capacity
2760 * (so that we can implement power-savings policies etc). Thus we look
2761 * for the minimum possible imbalance.
2762 * Be careful of negative numbers as they'll appear as very large values
2763 * with unsigned longs.
2764 */
2765 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
2766
2767 /* How much load to actually move to equalise the imbalance */
2768 *imbalance = min(max_pull * sds->busiest->cpu_power,
2769 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2770 / SCHED_LOAD_SCALE;
2771
2772 /*
2773 * If *imbalance is less than the average load per runnable task
2774 * there is no guarantee that any tasks will be moved, so we have
2775 * a think about bumping its value to force at least one task to be
2776 * moved.
2777 */
2778 if (*imbalance < sds->busiest_load_per_task)
2779 return fix_small_imbalance(sds, this_cpu, imbalance);
2780
2781}
fab47622 2782
1e3c88bd
PZ
2783/******* find_busiest_group() helpers end here *********************/
2784
2785/**
2786 * find_busiest_group - Returns the busiest group within the sched_domain
2787 * if there is an imbalance. If there isn't an imbalance, and
2788 * the user has opted for power-savings, it returns a group whose
2789 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
2790 * such a group exists.
2791 *
2792 * Also calculates the amount of weighted load which should be moved
2793 * to restore balance.
2794 *
2795 * @sd: The sched_domain whose busiest group is to be returned.
2796 * @this_cpu: The cpu for which load balancing is currently being performed.
2797 * @imbalance: Variable which stores amount of weighted load which should
2798 * be moved to restore balance/put a group to idle.
2799 * @idle: The idle status of this_cpu.
2800 * @sd_idle: The idleness of sd
2801 * @cpus: The set of CPUs under consideration for load-balancing.
2802 * @balance: Pointer to a variable indicating if this_cpu
2803 * is the appropriate cpu to perform load balancing at this level.
2804 *
2805 * Returns: - the busiest group if imbalance exists.
2806 * - If no imbalance and user has opted for power-savings balance,
2807 * return the least loaded group whose CPUs can be
2808 * put to idle by rebalancing its tasks onto our group.
2809 */
2810static struct sched_group *
2811find_busiest_group(struct sched_domain *sd, int this_cpu,
2812 unsigned long *imbalance, enum cpu_idle_type idle,
2813 int *sd_idle, const struct cpumask *cpus, int *balance)
2814{
2815 struct sd_lb_stats sds;
2816
2817 memset(&sds, 0, sizeof(sds));
2818
2819 /*
2820 * Compute the various statistics relevant for load balancing at
2821 * this level.
2822 */
2823 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
2824 balance, &sds);
2825
2826 /* Cases where imbalance does not exist from POV of this_cpu */
2827 /* 1) this_cpu is not the appropriate cpu to perform load balancing
2828 * at this level.
2829 * 2) There is no busy sibling group to pull from.
2830 * 3) This group is the busiest group.
2831 * 4) This group is busier than the average busyness at this
2832 * sched_domain.
2833 * 5) The imbalance is within the specified limit.
fab47622
NR
2834 *
2835 * Note: when doing newidle balance, if the local group has excess
2836 * capacity (i.e. nr_running < group_capacity) and the busiest group
2837 * does not have any capacity, we force a load balance to pull tasks
2838 * to the local group. In this case, we skip past checks 3, 4 and 5.
1e3c88bd 2839 */
8f190fb3 2840 if (!(*balance))
1e3c88bd
PZ
2841 goto ret;
2842
532cb4c4
MN
2843 if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
2844 check_asym_packing(sd, &sds, this_cpu, imbalance))
2845 return sds.busiest;
2846
1e3c88bd
PZ
2847 if (!sds.busiest || sds.busiest_nr_running == 0)
2848 goto out_balanced;
2849
fab47622
NR
2850 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
2851 if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
2852 !sds.busiest_has_capacity)
2853 goto force_balance;
2854
1e3c88bd
PZ
2855 if (sds.this_load >= sds.max_load)
2856 goto out_balanced;
2857
2858 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
2859
2860 if (sds.this_load >= sds.avg_load)
2861 goto out_balanced;
2862
aae6d3dd
SS
2863 /*
2864 * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
2865 * And to check for busy balancing, use !idle_cpu instead of
2866 * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
2867 * even when they are idle.
2868 */
2869 if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
2870 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
2871 goto out_balanced;
2872 } else {
2873 /*
2874 * This cpu is idle. If the busiest group doesn't
2875 * have more tasks than the number of available cpus and
2876 * there is no imbalance between this and the busiest group
2877 * with respect to idle cpus, it is balanced.
2878 */
2879 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
2880 sds.busiest_nr_running <= sds.busiest_group_weight)
2881 goto out_balanced;
2882 }
1e3c88bd 2883
fab47622 2884force_balance:
1e3c88bd
PZ
2885 /* Looks like there is an imbalance. Compute it */
2886 calculate_imbalance(&sds, this_cpu, imbalance);
2887 return sds.busiest;
2888
2889out_balanced:
2890 /*
2891 * There is no obvious imbalance. But check if we can do some balancing
2892 * to save power.
2893 */
2894 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
2895 return sds.busiest;
2896ret:
2897 *imbalance = 0;
2898 return NULL;
2899}
2900
2901/*
2902 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2903 */
2904static struct rq *
9d5efe05
SV
2905find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
2906 enum cpu_idle_type idle, unsigned long imbalance,
2907 const struct cpumask *cpus)
1e3c88bd
PZ
2908{
2909 struct rq *busiest = NULL, *rq;
2910 unsigned long max_load = 0;
2911 int i;
2912
2913 for_each_cpu(i, sched_group_cpus(group)) {
2914 unsigned long power = power_of(i);
2915 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
2916 unsigned long wl;
2917
9d5efe05
SV
2918 if (!capacity)
2919 capacity = fix_small_capacity(sd, group);
2920
1e3c88bd
PZ
2921 if (!cpumask_test_cpu(i, cpus))
2922 continue;
2923
2924 rq = cpu_rq(i);
6e40f5bb 2925 wl = weighted_cpuload(i);
1e3c88bd 2926
6e40f5bb
TG
2927 /*
2928 * When comparing with imbalance, use weighted_cpuload()
2929 * which is not scaled with the cpu power.
2930 */
1e3c88bd
PZ
2931 if (capacity && rq->nr_running == 1 && wl > imbalance)
2932 continue;
2933
6e40f5bb
TG
2934 /*
2935 * For the load comparisons with the other cpu's, consider
2936 * the weighted_cpuload() scaled with the cpu power, so that
2937 * the load can be moved away from the cpu that is potentially
2938 * running at a lower capacity.
2939 */
2940 wl = (wl * SCHED_LOAD_SCALE) / power;
2941
1e3c88bd
PZ
2942 if (wl > max_load) {
2943 max_load = wl;
2944 busiest = rq;
2945 }
2946 }
2947
2948 return busiest;
2949}
2950
2951/*
2952 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
2953 * it doesn't matter so long as it is large enough.
2954 */
2955#define MAX_PINNED_INTERVAL 512
2956
2957/* Working cpumask for load_balance and load_balance_newidle. */
2958static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
2959
532cb4c4
MN
2960static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
2961 int busiest_cpu, int this_cpu)
1af3ed3d
PZ
2962{
2963 if (idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
2964
2965 /*
2966 * ASYM_PACKING needs to force migrate tasks from busy but
2967 * higher numbered CPUs in order to pack all tasks in the
2968 * lowest numbered CPUs.
2969 */
2970 if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
2971 return 1;
2972
1af3ed3d
PZ
2973 /*
2974 * The only task running on a non-idle cpu can be moved to this
2975 * cpu in an attempt to completely free up the other CPU
2976 * package.
2977 *
2978 * The package power saving logic comes from
2979 * find_busiest_group(). If there is no imbalance, then
2980 * f_b_g() will return NULL. However, when sched_mc={1,2},
2981 * f_b_g() will select a group from which a running task may be
2982 * pulled to this cpu in order to make the other package idle.
2983 * If there is no opportunity to make a package idle and if
2984 * there is no imbalance, then f_b_g() will return NULL and no
2985 * action will be taken in load_balance_newidle().
2986 *
2987 * Under normal task pull operation due to imbalance, there
2988 * will be more than one task in the source run queue and
2989 * move_tasks() will succeed. ld_moved will be true and this
2990 * active balance code will not be triggered.
2991 */
2992 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2993 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2994 return 0;
2995
2996 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
2997 return 0;
2998 }
2999
3000 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
3001}
3002
969c7921
TH
3003static int active_load_balance_cpu_stop(void *data);
3004
1e3c88bd
PZ
3005/*
3006 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3007 * tasks if there is an imbalance.
3008 */
3009static int load_balance(int this_cpu, struct rq *this_rq,
3010 struct sched_domain *sd, enum cpu_idle_type idle,
3011 int *balance)
3012{
3013 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
3014 struct sched_group *group;
3015 unsigned long imbalance;
3016 struct rq *busiest;
3017 unsigned long flags;
3018 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
3019
3020 cpumask_copy(cpus, cpu_active_mask);
3021
3022 /*
3023 * When the power savings policy is enabled for the parent domain, an idle
3024 * sibling can pick up load irrespective of busy siblings. In this case,
3025 * let the state of the idle sibling percolate up as CPU_IDLE, instead of
3026 * portraying it as CPU_NOT_IDLE.
3027 */
3028 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
3029 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3030 sd_idle = 1;
3031
3032 schedstat_inc(sd, lb_count[idle]);
3033
3034redo:
3035 update_shares(sd);
3036 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
3037 cpus, balance);
3038
3039 if (*balance == 0)
3040 goto out_balanced;
3041
3042 if (!group) {
3043 schedstat_inc(sd, lb_nobusyg[idle]);
3044 goto out_balanced;
3045 }
3046
9d5efe05 3047 busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
1e3c88bd
PZ
3048 if (!busiest) {
3049 schedstat_inc(sd, lb_nobusyq[idle]);
3050 goto out_balanced;
3051 }
3052
3053 BUG_ON(busiest == this_rq);
3054
3055 schedstat_add(sd, lb_imbalance[idle], imbalance);
3056
3057 ld_moved = 0;
3058 if (busiest->nr_running > 1) {
3059 /*
3060 * Attempt to move tasks. If find_busiest_group has found
3061 * an imbalance but busiest->nr_running <= 1, the group is
3062 * still unbalanced. ld_moved simply stays zero, so it is
3063 * correctly treated as an imbalance.
3064 */
3065 local_irq_save(flags);
3066 double_rq_lock(this_rq, busiest);
3067 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3068 imbalance, sd, idle, &all_pinned);
3069 double_rq_unlock(this_rq, busiest);
3070 local_irq_restore(flags);
3071
3072 /*
3073 * some other cpu did the load balance for us.
3074 */
3075 if (ld_moved && this_cpu != smp_processor_id())
3076 resched_cpu(this_cpu);
3077
3078 /* All tasks on this runqueue were pinned by CPU affinity */
3079 if (unlikely(all_pinned)) {
3080 cpumask_clear_cpu(cpu_of(busiest), cpus);
3081 if (!cpumask_empty(cpus))
3082 goto redo;
3083 goto out_balanced;
3084 }
3085 }
3086
3087 if (!ld_moved) {
3088 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
3089 /*
3090 * Increment the failure counter only on periodic balance.
3091 * We do not want newidle balance, which can be very
3092 * frequent, to pollute the failure counter, causing
3093 * excessive cache_hot migrations and active balances.
3094 */
3095 if (idle != CPU_NEWLY_IDLE)
3096 sd->nr_balance_failed++;
1e3c88bd 3097
532cb4c4
MN
3098 if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
3099 this_cpu)) {
1e3c88bd
PZ
3100 raw_spin_lock_irqsave(&busiest->lock, flags);
3101
969c7921
TH
3102 /* don't kick the active_load_balance_cpu_stop,
3103 * if the curr task on busiest cpu can't be
3104 * moved to this_cpu
1e3c88bd
PZ
3105 */
3106 if (!cpumask_test_cpu(this_cpu,
3107 &busiest->curr->cpus_allowed)) {
3108 raw_spin_unlock_irqrestore(&busiest->lock,
3109 flags);
3110 all_pinned = 1;
3111 goto out_one_pinned;
3112 }
3113
969c7921
TH
3114 /*
3115 * ->active_balance synchronizes accesses to
3116 * ->active_balance_work. Once set, it's cleared
3117 * only after active load balance is finished.
3118 */
1e3c88bd
PZ
3119 if (!busiest->active_balance) {
3120 busiest->active_balance = 1;
3121 busiest->push_cpu = this_cpu;
3122 active_balance = 1;
3123 }
3124 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 3125
1e3c88bd 3126 if (active_balance)
969c7921
TH
3127 stop_one_cpu_nowait(cpu_of(busiest),
3128 active_load_balance_cpu_stop, busiest,
3129 &busiest->active_balance_work);
1e3c88bd
PZ
3130
3131 /*
3132 * We've kicked active balancing, reset the failure
3133 * counter.
3134 */
3135 sd->nr_balance_failed = sd->cache_nice_tries+1;
3136 }
3137 } else
3138 sd->nr_balance_failed = 0;
3139
3140 if (likely(!active_balance)) {
3141 /* We were unbalanced, so reset the balancing interval */
3142 sd->balance_interval = sd->min_interval;
3143 } else {
3144 /*
3145 * If we've begun active balancing, start to back off. This
3146 * case may not be covered by the all_pinned logic if there
3147 * is only 1 task on the busy runqueue (because we don't call
3148 * move_tasks).
3149 */
3150 if (sd->balance_interval < sd->max_interval)
3151 sd->balance_interval *= 2;
3152 }
3153
3154 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3155 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3156 ld_moved = -1;
3157
3158 goto out;
3159
3160out_balanced:
3161 schedstat_inc(sd, lb_balanced[idle]);
3162
3163 sd->nr_balance_failed = 0;
3164
3165out_one_pinned:
3166 /* tune up the balancing interval */
3167 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3168 (sd->balance_interval < sd->max_interval))
3169 sd->balance_interval *= 2;
3170
3171 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3172 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3173 ld_moved = -1;
3174 else
3175 ld_moved = 0;
3176out:
3177 if (ld_moved)
3178 update_shares(sd);
3179 return ld_moved;
3180}
3181
1e3c88bd
PZ
3182/*
3183 * idle_balance is called by schedule() if this_cpu is about to become
3184 * idle. Attempts to pull tasks from other CPUs.
3185 */
3186static void idle_balance(int this_cpu, struct rq *this_rq)
3187{
3188 struct sched_domain *sd;
3189 int pulled_task = 0;
3190 unsigned long next_balance = jiffies + HZ;
3191
3192 this_rq->idle_stamp = this_rq->clock;
3193
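/*
* Don't bother if this cpu's average idle period is shorter than
* the cost of migrating a task over.
*/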
3194 if (this_rq->avg_idle < sysctl_sched_migration_cost)
3195 return;
3196
f492e12e
PZ
3197 /*
3198 * Drop the rq->lock, but keep IRQ/preempt disabled.
3199 */
3200 raw_spin_unlock(&this_rq->lock);
3201
1e3c88bd
PZ
3202 for_each_domain(this_cpu, sd) {
3203 unsigned long interval;
f492e12e 3204 int balance = 1;
1e3c88bd
PZ
3205
3206 if (!(sd->flags & SD_LOAD_BALANCE))
3207 continue;
3208
f492e12e 3209 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 3210 /* If we've pulled tasks over stop searching: */
f492e12e
PZ
3211 pulled_task = load_balance(this_cpu, this_rq,
3212 sd, CPU_NEWLY_IDLE, &balance);
3213 }
1e3c88bd
PZ
3214
3215 interval = msecs_to_jiffies(sd->balance_interval);
3216 if (time_after(next_balance, sd->last_balance + interval))
3217 next_balance = sd->last_balance + interval;
fab47622 3218 if (pulled_task)
1e3c88bd 3219 break;
1e3c88bd 3220 }
f492e12e
PZ
3221
3222 raw_spin_lock(&this_rq->lock);
3223
1e3c88bd
PZ
3224 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3225 /*
3226 * We are going idle. next_balance may be set based on
3227 * a busy processor. So reset next_balance.
3228 */
3229 this_rq->next_balance = next_balance;
3230 }
3231}
3232
3233/*
969c7921
TH
3234 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
3235 * running tasks off the busiest CPU onto idle CPUs. It requires at
3236 * least 1 task to be running on each physical CPU where possible, and
3237 * avoids physical / logical imbalances.
1e3c88bd 3238 */
969c7921 3239static int active_load_balance_cpu_stop(void *data)
1e3c88bd 3240{
969c7921
TH
3241 struct rq *busiest_rq = data;
3242 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 3243 int target_cpu = busiest_rq->push_cpu;
969c7921 3244 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 3245 struct sched_domain *sd;
969c7921
TH
3246
3247 raw_spin_lock_irq(&busiest_rq->lock);
3248
3249 /* make sure the requested cpu hasn't gone down in the meantime */
3250 if (unlikely(busiest_cpu != smp_processor_id() ||
3251 !busiest_rq->active_balance))
3252 goto out_unlock;
1e3c88bd
PZ
3253
3254 /* Is there any task to move? */
3255 if (busiest_rq->nr_running <= 1)
969c7921 3256 goto out_unlock;
1e3c88bd
PZ
3257
3258 /*
3259 * This condition is "impossible"; if it occurs
3260 * we need to fix it. Originally reported by
3261 * Bjorn Helgaas on a 128-cpu setup.
3262 */
3263 BUG_ON(busiest_rq == target_rq);
3264
3265 /* move a task from busiest_rq to target_rq */
3266 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
3267
3268 /* Search for an sd spanning us and the target CPU. */
3269 for_each_domain(target_cpu, sd) {
3270 if ((sd->flags & SD_LOAD_BALANCE) &&
3271 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3272 break;
3273 }
3274
3275 if (likely(sd)) {
3276 schedstat_inc(sd, alb_count);
3277
3278 if (move_one_task(target_rq, target_cpu, busiest_rq,
3279 sd, CPU_IDLE))
3280 schedstat_inc(sd, alb_pushed);
3281 else
3282 schedstat_inc(sd, alb_failed);
3283 }
3284 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
3285out_unlock:
3286 busiest_rq->active_balance = 0;
3287 raw_spin_unlock_irq(&busiest_rq->lock);
3288 return 0;
1e3c88bd
PZ
3289}
3290
3291#ifdef CONFIG_NO_HZ
83cd4fe2
VP
3292
3293static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3294
3295static void trigger_sched_softirq(void *data)
3296{
3297 raise_softirq_irqoff(SCHED_SOFTIRQ);
3298}
3299
3300static inline void init_sched_softirq_csd(struct call_single_data *csd)
3301{
3302 csd->func = trigger_sched_softirq;
3303 csd->info = NULL;
3304 csd->flags = 0;
3305 csd->priv = 0;
3306}
3307
3308/*
3309 * idle load balancing details
3310 * - One of the idle CPUs nominates itself as idle load_balancer, while
3311 * entering idle.
3312 * - This idle load balancer CPU will also go into tickless mode when
3313 * it is idle, just like all other idle CPUs.
3314 * - When one of the busy CPUs notices that idle rebalancing may be
3315 * needed, it will kick the idle load balancer, which then does idle
3316 * load balancing for all the idle CPUs.
3317 */
1e3c88bd
PZ
3318static struct {
3319 atomic_t load_balancer;
83cd4fe2
VP
3320 atomic_t first_pick_cpu;
3321 atomic_t second_pick_cpu;
3322 cpumask_var_t idle_cpus_mask;
3323 cpumask_var_t grp_idle_mask;
3324 unsigned long next_balance; /* in jiffy units */
3325} nohz ____cacheline_aligned;
1e3c88bd
PZ
3326
3327int get_nohz_load_balancer(void)
3328{
3329 return atomic_read(&nohz.load_balancer);
3330}
3331
3332#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3333/**
3334 * lowest_flag_domain - Return lowest sched_domain containing flag.
3335 * @cpu: The cpu whose lowest level of sched domain is to
3336 * be returned.
3337 * @flag: The flag to check for the lowest sched_domain
3338 * for the given cpu.
3339 *
3340 * Returns the lowest sched_domain of a cpu which contains the given flag.
3341 */
3342static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3343{
3344 struct sched_domain *sd;
3345
3346 for_each_domain(cpu, sd)
3347 if (sd && (sd->flags & flag))
3348 break;
3349
3350 return sd;
3351}
3352
3353/**
3354 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3355 * @cpu: The cpu whose domains we're iterating over.
3356 * @sd: variable holding the value of the power_savings_sd
3357 * for cpu.
3358 * @flag: The flag to filter the sched_domains to be iterated.
3359 *
3360 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3361 * set, starting from the lowest sched_domain to the highest.
3362 */
3363#define for_each_flag_domain(cpu, sd, flag) \
3364 for (sd = lowest_flag_domain(cpu, flag); \
3365 (sd && (sd->flags & flag)); sd = sd->parent)
3366
3367/**
3368 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3369 * @ilb_group: group to be checked for semi-idleness
3370 *
3371 * Returns: 1 if the group is semi-idle. 0 otherwise.
3372 *
3373 * We define a sched_group to be semi-idle if it has at least one idle CPU
3374 * and at least one non-idle CPU. This helper function checks if the given
3375 * sched_group is semi-idle or not.
3376 */
3377static inline int is_semi_idle_group(struct sched_group *ilb_group)
3378{
83cd4fe2 3379 cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
1e3c88bd
PZ
3380 sched_group_cpus(ilb_group));
3381
3382 /*
3383 * A sched_group is semi-idle when it has at least one busy cpu
3384 * and at least one idle cpu.
3385 */
83cd4fe2 3386 if (cpumask_empty(nohz.grp_idle_mask))
1e3c88bd
PZ
3387 return 0;
3388
83cd4fe2 3389 if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
1e3c88bd
PZ
3390 return 0;
3391
3392 return 1;
3393}
3394/**
3395 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3396 * @cpu: The cpu which is nominating a new idle_load_balancer.
3397 *
3398 * Returns: the id of the idle load balancer if it exists,
3399 * else a value >= nr_cpu_ids.
3400 *
3401 * This algorithm picks the idle load balancer such that it belongs to a
3402 * semi-idle powersavings sched_domain. The idea is to avoid waking up
3403 * completely idle packages/cores just for the purpose of idle load balancing
3404 * when there are other idle cpus which are better suited for that job.
3405 */
3406static int find_new_ilb(int cpu)
3407{
3408 struct sched_domain *sd;
3409 struct sched_group *ilb_group;
3410
3411 /*
3412 * Select the idle load balancer from semi-idle packages only
3413 * when power-aware load balancing is enabled.
3414 */
3415 if (!(sched_smt_power_savings || sched_mc_power_savings))
3416 goto out_done;
3417
3418 /*
3419 * Optimize for the case when we have no idle CPUs or only one
3420 * idle CPU. Don't walk the sched_domain hierarchy in such cases.
3421 */
83cd4fe2 3422 if (cpumask_weight(nohz.idle_cpus_mask) < 2)
1e3c88bd
PZ
3423 goto out_done;
3424
3425 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3426 ilb_group = sd->groups;
3427
3428 do {
3429 if (is_semi_idle_group(ilb_group))
83cd4fe2 3430 return cpumask_first(nohz.grp_idle_mask);
1e3c88bd
PZ
3431
3432 ilb_group = ilb_group->next;
3433
3434 } while (ilb_group != sd->groups);
3435 }
3436
3437out_done:
83cd4fe2 3438 return nr_cpu_ids;
1e3c88bd
PZ
3439}
3440#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3441static inline int find_new_ilb(int call_cpu)
3442{
83cd4fe2 3443 return nr_cpu_ids;
1e3c88bd
PZ
3444}
3445#endif
3446
83cd4fe2
VP
3447/*
3448 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3449 * nohz_load_balancer CPU (if there is one); otherwise we fall back to any idle
3450 * CPU (if there is one).
3451 */
3452static void nohz_balancer_kick(int cpu)
3453{
3454 int ilb_cpu;
3455
3456 nohz.next_balance++;
3457
3458 ilb_cpu = get_nohz_load_balancer();
3459
3460 if (ilb_cpu >= nr_cpu_ids) {
3461 ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3462 if (ilb_cpu >= nr_cpu_ids)
3463 return;
3464 }
3465
3466 if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3467 struct call_single_data *cp;
3468
3469 cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3470 cp = &per_cpu(remote_sched_softirq_cb, cpu);
3471 __smp_call_function_single(ilb_cpu, cp, 0);
3472 }
3473 return;
3474}
3475
1e3c88bd
PZ
3476/*
3477 * This routine will try to nominate the ilb (idle load balancing)
3478 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
83cd4fe2 3479 * load balancing on behalf of all those cpus.
1e3c88bd 3480 *
83cd4fe2
VP
3481 * When the ilb owner becomes busy, we will not have a new ilb owner until some
3482 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
3483 * idle load balancing by kicking one of the idle CPUs.
1e3c88bd 3484 *
83cd4fe2
VP
3485 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
3486 * ilb owner CPU in the future (when there is a need for idle load balancing on
3487 * behalf of all idle CPUs).
1e3c88bd 3488 */
83cd4fe2 3489void select_nohz_load_balancer(int stop_tick)
1e3c88bd
PZ
3490{
3491 int cpu = smp_processor_id();
3492
3493 if (stop_tick) {
1e3c88bd
PZ
3494 if (!cpu_active(cpu)) {
3495 if (atomic_read(&nohz.load_balancer) != cpu)
83cd4fe2 3496 return;
1e3c88bd
PZ
3497
3498 /*
3499 * If we are going offline and still the leader,
3500 * give up!
3501 */
83cd4fe2
VP
3502 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3503 nr_cpu_ids) != cpu)
1e3c88bd
PZ
3504 BUG();
3505
83cd4fe2 3506 return;
1e3c88bd
PZ
3507 }
3508
83cd4fe2 3509 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
1e3c88bd 3510
83cd4fe2
VP
3511 if (atomic_read(&nohz.first_pick_cpu) == cpu)
3512 atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3513 if (atomic_read(&nohz.second_pick_cpu) == cpu)
3514 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
1e3c88bd 3515
83cd4fe2 3516 if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
1e3c88bd
PZ
3517 int new_ilb;
3518
83cd4fe2
VP
3519 /* make me the ilb owner */
3520 if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3521 cpu) != nr_cpu_ids)
3522 return;
3523
1e3c88bd
PZ
3524 /*
3525 * Check to see if there is a more power-efficient
3526 * ilb.
3527 */
3528 new_ilb = find_new_ilb(cpu);
3529 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
83cd4fe2 3530 atomic_set(&nohz.load_balancer, nr_cpu_ids);
1e3c88bd 3531 resched_cpu(new_ilb);
83cd4fe2 3532 return;
1e3c88bd 3533 }
83cd4fe2 3534 return;
1e3c88bd
PZ
3535 }
3536 } else {
83cd4fe2
VP
3537 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3538 return;
1e3c88bd 3539
83cd4fe2 3540 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
1e3c88bd
PZ
3541
3542 if (atomic_read(&nohz.load_balancer) == cpu)
83cd4fe2
VP
3543 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3544 nr_cpu_ids) != cpu)
1e3c88bd
PZ
3545 BUG();
3546 }
83cd4fe2 3547 return;
1e3c88bd
PZ
3548}
3549#endif
3550
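The nomination protocol above is lock-free: ownership only changes hands through compare-and-exchange on nohz.load_balancer, with nr_cpu_ids serving as the "no owner" value, and the BUG() calls assert that nobody else could have stolen our slot. A minimal userspace rendering with C11 atomics; claim_ilb() and resign_ilb() are invented names for the two transitions, which the kernel performs inline with atomic_cmpxchg().

/* Userspace sketch of the cmpxchg-based ilb ownership handoff. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPU_IDS 8

static atomic_int load_balancer = NR_CPU_IDS;	/* NR_CPU_IDS == "none" */

/* An idle CPU volunteers; of several racing CPUs only one can win. */
static int claim_ilb(int cpu)
{
	int none = NR_CPU_IDS;

	return atomic_compare_exchange_strong(&load_balancer, &none, cpu);
}

/* The owner resigns on going busy; the kernel BUG()s if the slot no
 * longer holds our id, i.e. the protocol was violated. */
static void resign_ilb(int cpu)
{
	int me = cpu;

	if (!atomic_compare_exchange_strong(&load_balancer, &me, NR_CPU_IDS))
		fprintf(stderr, "protocol violation\n");
}

int main(void)
{
	printf("cpu2 claims: %d\n", claim_ilb(2));	/* 1: won */
	printf("cpu5 claims: %d\n", claim_ilb(5));	/* 0: lost */
	resign_ilb(2);
	printf("cpu5 claims: %d\n", claim_ilb(5));	/* 1: now wins */
	return 0;
}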
3551static DEFINE_SPINLOCK(balancing);
3552
3553/*
3554 * This function checks each scheduling domain to see if it is due to be
3555 * and initiates a balancing operation if so.
3556 *
3557 * Balancing parameters are set up in arch_init_sched_domains.
3558 */
3559static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3560{
3561 int balance = 1;
3562 struct rq *rq = cpu_rq(cpu);
3563 unsigned long interval;
3564 struct sched_domain *sd;
3565 /* Earliest time when we have to do rebalance again */
3566 unsigned long next_balance = jiffies + 60*HZ;
3567 int update_next_balance = 0;
3568 int need_serialize;
3569
3570 for_each_domain(cpu, sd) {
3571 if (!(sd->flags & SD_LOAD_BALANCE))
3572 continue;
3573
3574 interval = sd->balance_interval;
3575 if (idle != CPU_IDLE)
3576 interval *= sd->busy_factor;
3577
3578 /* scale ms to jiffies */
3579 interval = msecs_to_jiffies(interval);
3580 if (unlikely(!interval))
3581 interval = 1;
3582 if (interval > HZ*NR_CPUS/10)
3583 interval = HZ*NR_CPUS/10;
3584
3585 need_serialize = sd->flags & SD_SERIALIZE;
3586
3587 if (need_serialize) {
3588 if (!spin_trylock(&balancing))
3589 goto out;
3590 }
3591
3592 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3593 if (load_balance(cpu, rq, sd, idle, &balance)) {
3594 /*
3595 * We've pulled tasks over so either we're no
3596 * longer idle, or one of our SMT siblings is
3597 * not idle.
3598 */
3599 idle = CPU_NOT_IDLE;
3600 }
3601 sd->last_balance = jiffies;
3602 }
3603 if (need_serialize)
3604 spin_unlock(&balancing);
3605out:
3606 if (time_after(next_balance, sd->last_balance + interval)) {
3607 next_balance = sd->last_balance + interval;
3608 update_next_balance = 1;
3609 }
3610
3611 /*
3612 * Stop the load balance at this level. There is another
3613 * CPU in our sched group which is doing load balancing more
3614 * actively.
3615 */
3616 if (!balance)
3617 break;
3618 }
3619
3620 /*
3621 * next_balance will be updated only when there is a need.
3622	 * When the cpu is attached to a null domain, for example, it will not be
3623 * updated.
3624 */
3625 if (likely(update_next_balance))
3626 rq->next_balance = next_balance;
3627}
3628
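The interval handling above rewards a worked example: a busy CPU stretches its domain's balance_interval by busy_factor, the result is converted to jiffies, then clamped to [1, HZ*NR_CPUS/10]. The sketch below uses assumed HZ, NR_CPUS, base interval, and busy_factor values, and a simplified (truncating) ms-to-jiffies conversion.

/* Standalone sketch of the per-domain balance interval math. */
#include <stdio.h>

#define HZ	250
#define NR_CPUS	4

/* truncating stand-in for the kernel's msecs_to_jiffies() */
static unsigned long ms_to_jiffies(unsigned long ms)
{
	return ms * HZ / 1000;
}

static unsigned long effective_interval(unsigned long interval_ms,
					unsigned int busy_factor, int idle)
{
	unsigned long interval = interval_ms;

	if (!idle)
		interval *= busy_factor;	/* busy CPUs balance less often */
	interval = ms_to_jiffies(interval);
	if (!interval)
		interval = 1;
	if (interval > HZ * NR_CPUS / 10)
		interval = HZ * NR_CPUS / 10;
	return interval;
}

int main(void)
{
	/* assumed 8ms base interval with busy_factor 64 */
	printf("idle: %lu jiffies\n", effective_interval(8, 64, 1)); /* 2 */
	printf("busy: %lu jiffies\n", effective_interval(8, 64, 0)); /* 100, clamped */
	return 0;
}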
83cd4fe2 3629#ifdef CONFIG_NO_HZ
1e3c88bd 3630/*
83cd4fe2 3631 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
1e3c88bd
PZ
3632 * rebalancing for all the cpus for which the scheduler tick is stopped.
3633 */
83cd4fe2
VP
3634static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
3635{
3636 struct rq *this_rq = cpu_rq(this_cpu);
3637 struct rq *rq;
3638 int balance_cpu;
3639
3640 if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
3641 return;
3642
3643 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
3644 if (balance_cpu == this_cpu)
3645 continue;
3646
3647 /*
3648 * If this cpu gets work to do, stop the load balancing
3649		 * work being done for other cpus. The next load
3650 * balancing owner will pick it up.
3651 */
3652 if (need_resched()) {
3653 this_rq->nohz_balance_kick = 0;
3654 break;
3655 }
3656
3657 raw_spin_lock_irq(&this_rq->lock);
5343bdb8 3658 update_rq_clock(this_rq);
83cd4fe2
VP
3659 update_cpu_load(this_rq);
3660 raw_spin_unlock_irq(&this_rq->lock);
3661
3662 rebalance_domains(balance_cpu, CPU_IDLE);
3663
3664 rq = cpu_rq(balance_cpu);
3665 if (time_after(this_rq->next_balance, rq->next_balance))
3666 this_rq->next_balance = rq->next_balance;
3667 }
3668 nohz.next_balance = this_rq->next_balance;
3669 this_rq->nohz_balance_kick = 0;
3670}
3671
3672/*
3673 * Current heuristic for kicking the idle load balancer:
3674 * - first_pick_cpu is one of the busy CPUs. It will kick the
3675 * idle load balancer when it has more than one process active. This
3676 * eliminates the need for idle load balancing altogether when we have
3677 * only one running process in the system (the common case).
3678 * - If there is more than one busy CPU, the idle load balancer may have
3679 * to run for active_load_balance to happen (i.e., two busy CPUs are
3680 * SMT or core siblings and can run better if they move to different
3681 * physical CPUs). So, second_pick_cpu is the second of the busy CPUs,
3682 * which will kick the idle load balancer as soon as it has any load.
3683 */
3684static inline int nohz_kick_needed(struct rq *rq, int cpu)
3685{
3686 unsigned long now = jiffies;
3687 int ret;
3688 int first_pick_cpu, second_pick_cpu;
3689
3690 if (time_before(now, nohz.next_balance))
3691 return 0;
3692
f6c3f168 3693 if (rq->idle_at_tick)
83cd4fe2
VP
3694 return 0;
3695
3696 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
3697 second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
3698
3699 if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
3700 second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
3701 return 0;
3702
3703 ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
3704 if (ret == nr_cpu_ids || ret == cpu) {
3705 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
3706 if (rq->nr_running > 1)
3707 return 1;
3708 } else {
3709 ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
3710 if (ret == nr_cpu_ids || ret == cpu) {
3711 if (rq->nr_running)
3712 return 1;
3713 }
3714 }
3715 return 0;
3716}
3717#else
3718static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
3719#endif
3720
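The two-slot heuristic of nohz_kick_needed() can be exercised in isolation. The single-threaded sketch below replaces the kernel's atomic_cmpxchg() with a plain helper (safe here only because nothing runs concurrently), and the call sequence in main() is an assumed scenario.

/* Standalone sketch of the first_pick/second_pick kick decision. */
#include <stdio.h>

#define NR_CPU_IDS 4

static int first_pick = NR_CPU_IDS;
static int second_pick = NR_CPU_IDS;

/* non-atomic stand-in: store new only if *p == old, return old value */
static int cmpxchg(int *p, int old, int new)
{
	int cur = *p;

	if (cur == old)
		*p = new;
	return cur;
}

static int kick_needed(int cpu, int nr_running)
{
	int ret = cmpxchg(&first_pick, NR_CPU_IDS, cpu);

	if (ret == NR_CPU_IDS || ret == cpu) {
		/* we are the first pick: release second_pick if it was us */
		cmpxchg(&second_pick, cpu, NR_CPU_IDS);
		return nr_running > 1;
	}
	ret = cmpxchg(&second_pick, NR_CPU_IDS, cpu);
	if (ret == NR_CPU_IDS || ret == cpu)
		return nr_running > 0;
	return 0;
}

int main(void)
{
	printf("cpu0, 1 task:  %d\n", kick_needed(0, 1)); /* 0: lone task, no ilb */
	printf("cpu0, 2 tasks: %d\n", kick_needed(0, 2)); /* 1: overloaded first pick */
	printf("cpu1, 1 task:  %d\n", kick_needed(1, 1)); /* 1: second busy CPU */
	return 0;
}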
3721/*
3722 * run_rebalance_domains is triggered when needed from the scheduler tick.
3723 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
3724 */
1e3c88bd
PZ
3725static void run_rebalance_domains(struct softirq_action *h)
3726{
3727 int this_cpu = smp_processor_id();
3728 struct rq *this_rq = cpu_rq(this_cpu);
3729 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3730 CPU_IDLE : CPU_NOT_IDLE;
3731
3732 rebalance_domains(this_cpu, idle);
3733
1e3c88bd 3734 /*
83cd4fe2 3735 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
3736 * balancing on behalf of the other idle cpus whose ticks are
3737 * stopped.
3738 */
83cd4fe2 3739 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
3740}
3741
3742static inline int on_null_domain(int cpu)
3743{
90a6501f 3744 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
3745}
3746
3747/*
3748 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd
PZ
3749 */
3750static inline void trigger_load_balance(struct rq *rq, int cpu)
3751{
1e3c88bd
PZ
3752 /* Don't need to rebalance while attached to NULL domain */
3753 if (time_after_eq(jiffies, rq->next_balance) &&
3754 likely(!on_null_domain(cpu)))
3755 raise_softirq(SCHED_SOFTIRQ);
83cd4fe2
VP
3756#ifdef CONFIG_NO_HZ
3757 else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
3758 nohz_balancer_kick(cpu);
3759#endif
1e3c88bd
PZ
3760}
3761
0bcdcf28
CE
3762static void rq_online_fair(struct rq *rq)
3763{
3764 update_sysctl();
3765}
3766
3767static void rq_offline_fair(struct rq *rq)
3768{
3769 update_sysctl();
3770}
3771
1e3c88bd
PZ
3772#else /* CONFIG_SMP */
3773
3774/*
3775 * on UP we do not need to balance between CPUs:
3776 */
3777static inline void idle_balance(int cpu, struct rq *rq)
3778{
3779}
3780
55e12e5e 3781#endif /* CONFIG_SMP */
e1d1484f 3782
bf0f6f24
IM
3783/*
3784 * scheduler tick hitting a task of our scheduling class:
3785 */
8f4d37ec 3786static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
3787{
3788 struct cfs_rq *cfs_rq;
3789 struct sched_entity *se = &curr->se;
3790
3791 for_each_sched_entity(se) {
3792 cfs_rq = cfs_rq_of(se);
8f4d37ec 3793 entity_tick(cfs_rq, se, queued);
bf0f6f24
IM
3794 }
3795}
3796
3797/*
cd29fe6f
PZ
3798 * called on fork with the child task as argument from the parent's context
3799 * - child not yet on the tasklist
3800 * - preemption disabled
bf0f6f24 3801 */
cd29fe6f 3802static void task_fork_fair(struct task_struct *p)
bf0f6f24 3803{
cd29fe6f 3804 struct cfs_rq *cfs_rq = task_cfs_rq(current);
429d43bc 3805 struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
00bf7bfc 3806 int this_cpu = smp_processor_id();
cd29fe6f
PZ
3807 struct rq *rq = this_rq();
3808 unsigned long flags;
3809
05fa785c 3810 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 3811
861d034e
PZ
3812 update_rq_clock(rq);
3813
b0a0f667
PM
3814 if (unlikely(task_cpu(p) != this_cpu)) {
3815 rcu_read_lock();
cd29fe6f 3816 __set_task_cpu(p, this_cpu);
b0a0f667
PM
3817 rcu_read_unlock();
3818 }
bf0f6f24 3819
7109c442 3820 update_curr(cfs_rq);
cd29fe6f 3821
b5d9d734
MG
3822 if (curr)
3823 se->vruntime = curr->vruntime;
aeb73b04 3824 place_entity(cfs_rq, se, 1);
4d78e7b6 3825
cd29fe6f 3826 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 3827 /*
edcb60a3
IM
3828 * Upon rescheduling, sched_class::put_prev_task() will place
3829 * 'current' within the tree based on its new key value.
3830 */
4d78e7b6 3831 swap(curr->vruntime, se->vruntime);
aec0a514 3832 resched_task(rq->curr);
4d78e7b6 3833 }
bf0f6f24 3834
88ec22d3
PZ
3835 se->vruntime -= cfs_rq->min_vruntime;
3836
05fa785c 3837 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
3838}
3839
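The sysctl_sched_child_runs_first branch above comes down to one swap: if the parent would still precede the child on the timeline (smaller vruntime), parent and child exchange keys so the child is picked next. A miniature rendering with invented vruntime values; entity_before() reduces to a plain comparison here.

/* Sketch of the child_runs_first vruntime swap in task_fork_fair(). */
#include <stdio.h>

static void swap_u64(unsigned long long *a, unsigned long long *b)
{
	unsigned long long t = *a;

	*a = *b;
	*b = t;
}

int main(void)
{
	unsigned long long curr_vruntime = 1000;	/* parent */
	unsigned long long se_vruntime = 1200;		/* child, after place_entity() */
	int child_runs_first = 1;

	/* simplified entity_before(curr, se): smaller vruntime runs first */
	if (child_runs_first && curr_vruntime < se_vruntime)
		swap_u64(&curr_vruntime, &se_vruntime);

	/* parent 1200, child 1000: the child now holds the smaller key */
	printf("parent %llu, child %llu\n", curr_vruntime, se_vruntime);
	return 0;
}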
cb469845
SR
3840/*
3841 * Priority of the task has changed. Check to see if we preempt
3842 * the current task.
3843 */
3844static void prio_changed_fair(struct rq *rq, struct task_struct *p,
3845 int oldprio, int running)
3846{
3847 /*
3848 * Reschedule if we are currently running on this runqueue and
3849 * our priority decreased, or if we are not currently running on
3850	 * this runqueue and our priority is higher than the current task's.
3851 */
3852 if (running) {
3853 if (p->prio > oldprio)
3854 resched_task(rq->curr);
3855 } else
15afe09b 3856 check_preempt_curr(rq, p, 0);
cb469845
SR
3857}
3858
3859/*
3860 * We switched to the sched_fair class.
3861 */
3862static void switched_to_fair(struct rq *rq, struct task_struct *p,
3863 int running)
3864{
3865 /*
3866 * We were most likely switched from sched_rt, so
3867 * kick off the schedule if running, otherwise just see
3868 * if we can still preempt the current task.
3869 */
3870 if (running)
3871 resched_task(rq->curr);
3872 else
15afe09b 3873 check_preempt_curr(rq, p, 0);
cb469845
SR
3874}
3875
83b699ed
SV
3876/* Account for a task changing its policy or group.
3877 *
3878 * This routine is mostly called to set the cfs_rq->curr field when a task
3879 * migrates between groups/classes.
3880 */
3881static void set_curr_task_fair(struct rq *rq)
3882{
3883 struct sched_entity *se = &rq->curr->se;
3884
3885 for_each_sched_entity(se)
3886 set_next_entity(cfs_rq_of(se), se);
3887}
3888
810b3817 3889#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 3890static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 3891{
b2b5ce02
PZ
3892 /*
3893 * If the task was not on the rq at the time of this cgroup movement
3894	 * it must have been asleep; sleeping tasks keep their ->vruntime
3895 * absolute on their old rq until wakeup (needed for the fair sleeper
3896 * bonus in place_entity()).
3897 *
3898 * If it was on the rq, we've just 'preempted' it, which does convert
3899 * ->vruntime to a relative base.
3900 *
3901 * Make sure both cases convert their relative position when migrating
3902 * to another cgroup's rq. This does somewhat interfere with the
3903 * fair sleeper stuff for the first placement, but who cares.
3904 */
3905 if (!on_rq)
3906 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
3907 set_task_rq(p, task_cpu(p));
88ec22d3 3908 if (!on_rq)
b2b5ce02 3909 p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
810b3817
PZ
3910}
3911#endif
3912
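The re-basing that task_move_group_fair() performs for a sleeping task is plain arithmetic: vruntime is turned into a lag relative to the old queue's min_vruntime, the task is re-parented, and the lag is re-anchored on the new queue. The concrete numbers below are invented.

/* Sketch of the sleeping-task vruntime re-base across cfs_rq's. */
#include <stdio.h>

int main(void)
{
	unsigned long long old_min = 5000, new_min = 9000;
	unsigned long long vruntime = 5300;	/* asleep: absolute on old rq */

	vruntime -= old_min;	/* relative: 300 of lag is preserved */
	/* ... set_task_rq(): task now accounted to the new group ... */
	vruntime += new_min;	/* absolute on the new rq: 9300 */

	printf("vruntime after move: %llu\n", vruntime);
	return 0;
}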
6d686f45 3913static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
3914{
3915 struct sched_entity *se = &task->se;
0d721cea
PW
3916 unsigned int rr_interval = 0;
3917
3918 /*
3919 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
3920 * idle runqueue:
3921 */
0d721cea
PW
3922 if (rq->cfs.load.weight)
3923 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
0d721cea
PW
3924
3925 return rr_interval;
3926}
3927
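What get_rr_interval_fair() reports, as a worked example: sched_slice() yields this entity's share of the latency window in nanoseconds, and NS_TO_JIFFIES() scales that to ticks. The HZ value, the 6 ms window, and the two-task equal-weight split below are assumptions.

/* Sketch of the reported SCHED_OTHER "timeslice" conversion. */
#include <stdio.h>

#define HZ		1000
#define NSEC_PER_SEC	1000000000ULL

/* truncating stand-in for the kernel's NS_TO_JIFFIES() */
static unsigned long ns_to_jiffies(unsigned long long ns)
{
	return (unsigned long)(ns * HZ / NSEC_PER_SEC);
}

int main(void)
{
	/* two equal-weight tasks sharing a 6 ms latency window: 3 ms each */
	unsigned long long slice_ns = 6000000ULL / 2;

	printf("rr interval: %lu jiffies\n", ns_to_jiffies(slice_ns)); /* 3 */
	return 0;
}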
bf0f6f24
IM
3928/*
3929 * All the scheduling class methods:
3930 */
5522d5d5
IM
3931static const struct sched_class fair_sched_class = {
3932 .next = &idle_sched_class,
bf0f6f24
IM
3933 .enqueue_task = enqueue_task_fair,
3934 .dequeue_task = dequeue_task_fair,
3935 .yield_task = yield_task_fair,
3936
2e09bf55 3937 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
3938
3939 .pick_next_task = pick_next_task_fair,
3940 .put_prev_task = put_prev_task_fair,
3941
681f3e68 3942#ifdef CONFIG_SMP
4ce72a2c
LZ
3943 .select_task_rq = select_task_rq_fair,
3944
0bcdcf28
CE
3945 .rq_online = rq_online_fair,
3946 .rq_offline = rq_offline_fair,
88ec22d3
PZ
3947
3948 .task_waking = task_waking_fair,
681f3e68 3949#endif
bf0f6f24 3950
83b699ed 3951 .set_curr_task = set_curr_task_fair,
bf0f6f24 3952 .task_tick = task_tick_fair,
cd29fe6f 3953 .task_fork = task_fork_fair,
cb469845
SR
3954
3955 .prio_changed = prio_changed_fair,
3956 .switched_to = switched_to_fair,
810b3817 3957
0d721cea
PW
3958 .get_rr_interval = get_rr_interval_fair,
3959
810b3817 3960#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 3961 .task_move_group = task_move_group_fair,
810b3817 3962#endif
bf0f6f24
IM
3963};
3964
3965#ifdef CONFIG_SCHED_DEBUG
5cef9eca 3966static void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 3967{
bf0f6f24
IM
3968 struct cfs_rq *cfs_rq;
3969
5973e5b9 3970 rcu_read_lock();
c3b64f1e 3971 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 3972 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 3973 rcu_read_unlock();
bf0f6f24
IM
3974}
3975#endif