/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

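/*
 * Worked example (illustrative, assuming the defaults above): with
 * SCHED_TUNABLESCALING_LOG on an 8-CPU box the scaling factor is
 * 1 + ilog2(8) = 4, so the effective targeted latency becomes
 * 6ms * 4 = 24ms and the minimum granularity below becomes
 * 2ms * 4 = 8ms. With SCHED_TUNABLESCALING_NONE both keep their
 * base values regardless of CPU count.
 */
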
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 2000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 3;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find ancestors that are siblings
	 * under a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

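/*
 * Editorial note (illustrative, not from the original source): the
 * subtract-then-cast idiom above keeps the comparison correct even when
 * the u64 vruntime clocks wrap. For example, if vruntime has wrapped
 * around to 5 while min_vruntime is ULLONG_MAX - 2, then
 * (s64)(5 - (ULLONG_MAX - 2)) == 8 > 0, so vruntime is still correctly
 * seen as the later value; a plain 'vruntime > min_vruntime' would not be.
 */
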
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
	WRT_SYSCTL(sched_shares_ratelimit);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

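/*
 * Illustrative example (not part of the original source): NICE_0_LOAD is
 * 1024, so for a nice-0 task the wall-clock delta passes through
 * unchanged. A task at twice the nice-0 weight (load.weight == 2048)
 * gets delta scaled by 1024/2048, i.e. its vruntime advances at half
 * the wall-clock rate, which is what lets it receive twice the CPU time.
 */
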
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (nr_running > sched_nr_latency) we have
 * to stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}

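/*
 * Worked example (illustrative numbers): with the defaults above
 * (l = 6ms, nl = 3, min granularity 2ms), 3 runnable tasks all fit in
 * the 6ms period, but with 5 tasks the period is stretched to
 * 2ms * 5 = 10ms so that no slice drops below the minimum granularity.
 */
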
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

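/*
 * Worked example (illustrative numbers): two runnable nice-0 tasks on a
 * plain (non-grouped) runqueue share a 6ms period, so each gets
 * s = 6ms * 1024/2048 = 3ms. If one task instead has twice the nice-0
 * weight, its slice is 6ms * 2048/3072 = 4ms and the other's is 2ms.
 */
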
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->statistics.sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.sleep_max))
			se->statistics.sleep_max = delta;

		se->statistics.sleep_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->statistics.block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.block_max))
			se->statistics.block_max = delta;

		se->statistics.block_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->statistics.iowait_sum += delta;
				se->statistics.iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little; place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}

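/*
 * Worked example (illustrative numbers): with a 6ms latency target and
 * GENTLE_FAIR_SLEEPERS set, a task waking from sleep is placed at
 * min_vruntime - 3ms, giving it a bounded head start over the runners.
 * The max_vruntime() clamp means a task that slept only briefly keeps
 * its own (larger) vruntime instead of gaining time from the placement.
 */
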
#define ENQUEUE_WAKEUP	1
#define ENQUEUE_MIGRATE 2

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
	 */
	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
		se->vruntime += cfs_rq->min_vruntime;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->statistics.block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);

	/*
	 * Normalize the entity after updating the min_vruntime because the
	 * update can refer to the ->curr item and we need to reflect this
	 * movement in our normalized position.
	 */
	if (!sleep)
		se->vruntime -= cfs_rq->min_vruntime;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}

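/*
 * Worked example (illustrative numbers): with an ideal slice of 3ms, a
 * task that has run 4ms since it was last scheduled is rescheduled
 * immediately. A task that has run only 2.5ms can still be preempted
 * early, but only if it has run at least the 2ms minimum granularity
 * and its vruntime exceeds the leftmost waiter's by more than a slice.
 */
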
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->statistics.slice_max = max(se->statistics.slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	clear_buddies(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int flags = 0;

	if (wakeup)
		flags |= ENQUEUE_WAKEUP;
	if (p->state == TASK_WAKING)
		flags |= ENQUEUE_MIGRATE;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, flags);
		flags = ENQUEUE_WAKEUP;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

static void task_waking_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	se->vruntime -= cfs_rq->min_vruntime;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate this by not only taking the current delta into account, but
 * also considering the delta between when the shares were last adjusted and
 * now.
 *
 * We still saw a performance dip, and some tracing showed that between
 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
 * significantly. Therefore try to bias the error in direction of failing
 * the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs, does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}

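/*
 * Note (illustrative reading, not from the original source): each loop
 * iteration treats the new effective share as roughly s*a/b, with
 * a = S*(rw + wl) and b = S*rw + s*wg as defined above, so the load
 * delta propagated to the next level is s*(a - b)/b = s*a/b - s. The
 * division is skipped when b happens to be 0.
 */
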
#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	unsigned long this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	unsigned int imbalance;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	imbalance = 100 + (sd->imbalance_pct - 100) / 2;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpu is near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	balanced = !this_load ||
		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}

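/*
 * Worked example (illustrative numbers): with imbalance_pct == 125 the
 * threshold above becomes 100 + (125 - 100)/2 = 112, i.e. the wakeup is
 * considered balanced when the prospective load on this_cpu is at most
 * about 12% above the load that would remain on prev_cpu.
 */
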
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int
select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int i;

	/*
	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
	 * always a better target than the current cpu.
	 */
	if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
		return prev_cpu;

	/*
	 * Otherwise, iterate the domain and find an eligible idle cpu.
	 */
	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
		if (!cpu_rq(i)->cfs.nr_running) {
			target = i;
			break;
		}
	}

	return target;
}

/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0, cpu_idle = !current->pid;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (sched_feat(AFFINE_WAKEUPS) &&
		    cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * While iterating the domains looking for a spanning
		 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
		 * in cache sharing domains along the way.
		 */
		if (want_affine) {
			int target = -1;

			/*
			 * If both cpu and prev_cpu are part of this domain,
			 * cpu is a valid SD_WAKE_AFFINE target.
			 */
			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
				target = cpu;

			/*
			 * If there's an idle sibling in this domain, make that
			 * the wake_affine target instead of the current cpu.
			 */
			if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
				target = select_idle_sibling(p, tmp, target);

			if (target >= 0) {
				if (tmp->flags & SD_WAKE_AFFINE) {
					affine_sd = tmp;
					want_affine = 0;
					if (target != cpu)
						cpu_idle = 1;
				}
				cpu = target;
			}
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (sched_feat(LB_SHARES_UPDATE)) {
		/*
		 * Pick the largest domain to update shares over
		 */
		tmp = sd;
		if (affine_sd && (!tmp ||
				  cpumask_weight(sched_domain_span(affine_sd)) >
				  cpumask_weight(sched_domain_span(sd))))
			tmp = affine_sd;

		if (tmp)
			update_shares(tmp);
	}
#endif

	if (affine_sd) {
		if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
			return cpu;
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = cpumask_weight(sched_domain_span(sd));
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return new_cpu;
}
#endif /* CONFIG_SMP */

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it is curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted easier. That is, if 'se' < 'curr' then
		 * the resulting gran will be larger, therefore penalizing the
		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
		 * be smaller, again penalizing the lighter task.
		 *
		 * This is especially important for buddies when the leftmost
		 * task is higher priority than the buddy.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}

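/*
 * Illustrative example (not from the original source): with the default
 * 1ms wakeup granularity and ASYM_GRAN, a waking task at twice the
 * nice-0 weight yields a virtual granularity of 1ms * 1024/2048 = 0.5ms,
 * so heavier wakers preempt more readily, while a half-weight task sees
 * a 2ms granularity and preempts less readily.
 */
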
/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

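/*
 * Reading the diagram above (illustrative): s3's vruntime trails curr
 * by more than the granularity g, so it preempts (returns 1); s2 trails
 * by less than g, so it does not (returns 0); s1 is already ahead of
 * curr, so preempting would be unfair (returns -1).
 */
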
static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;

	if (unlikely(rt_prio(p->prio)))
		goto preempt;

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
		set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE))
		goto preempt;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	update_curr(cfs_rq);
	find_matching_se(&se, &pse);
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1)
		goto preempt;

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	check_preempt_curr(this_rq, p, 0);
}

/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		     struct sched_domain *sd, enum cpu_idle_type idle,
		     int *all_pinned)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
		return 0;
	}
	*all_pinned = 0;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */

	tsk_cache_hot = task_hot(p, rq->clock, sd);
	if (!tsk_cache_hot ||
		sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}

/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct task_struct *p, *n;
	struct cfs_rq *cfs_rq;
	int pinned = 0;

	for_each_leaf_cfs_rq(busiest, cfs_rq) {
		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {

			if (!can_migrate_task(p, busiest, this_cpu,
						sd, idle, &pinned))
				continue;

			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);
			return 1;
		}
	}

	return 0;
}

1862static unsigned long
1863balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1864 unsigned long max_load_move, struct sched_domain *sd,
1865 enum cpu_idle_type idle, int *all_pinned,
ee00e66f 1866 int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
1867{
1868 int loops = 0, pulled = 0, pinned = 0;
1e3c88bd 1869 long rem_load_move = max_load_move;
ee00e66f 1870 struct task_struct *p, *n;
1871
1872 if (max_load_move == 0)
1873 goto out;
1874
1875 pinned = 1;
1876
1877 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
1878 if (loops++ > sysctl_sched_nr_migrate)
1879 break;
1e3c88bd 1880
1881 if ((p->se.load.weight >> 1) > rem_load_move ||
1882 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
1883 continue;
1e3c88bd 1884
1885 pull_task(busiest, p, this_rq, this_cpu);
1886 pulled++;
1887 rem_load_move -= p->se.load.weight;
1888
1889#ifdef CONFIG_PREEMPT
1890 /*
1891 * NEWIDLE balancing is a source of latency, so preemptible
1892 * kernels will stop after the first task is pulled to minimize
1893 * the critical section.
1894 */
1895 if (idle == CPU_NEWLY_IDLE)
1896 break;
1897#endif
1898
1899 /*
1900 * We only want to steal up to the prescribed amount of
1901 * weighted load.
1902 */
1903 if (rem_load_move <= 0)
1904 break;
1905
1906 if (p->prio < *this_best_prio)
1907 *this_best_prio = p->prio;
1908 }
1909out:
1910 /*
1911 * Right now, this is one of only two places pull_task() is called,
1912 * so we can safely collect pull_task() stats here rather than
1913 * inside pull_task().
1914 */
1915 schedstat_add(sd, lb_gained[idle], pulled);
1916
1917 if (all_pinned)
1918 *all_pinned = pinned;
1919
1920 return max_load_move - rem_load_move;
1921}
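
/*
 * Worked example (a hedged sketch, hypothetical numbers) for the
 * half-weight filter above: with rem_load_move == 1024, a nice-0 task
 * of weight 1024 is eligible, since (1024 >> 1) == 512 <= 1024; once it
 * is pulled, rem_load_move drops to 0 and the loop terminates. A single
 * task of weight 3072 would be skipped ((3072 >> 1) == 1536 > 1024),
 * as moving it would overshoot the prescribed load by far.
 */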
1922
1923#ifdef CONFIG_FAIR_GROUP_SCHED
1924static unsigned long
1925load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1926 unsigned long max_load_move,
1927 struct sched_domain *sd, enum cpu_idle_type idle,
1928 int *all_pinned, int *this_best_prio)
1929{
1930 long rem_load_move = max_load_move;
1931 int busiest_cpu = cpu_of(busiest);
1932 struct task_group *tg;
1933
1934 rcu_read_lock();
1935 update_h_load(busiest_cpu);
1936
1937 list_for_each_entry_rcu(tg, &task_groups, list) {
1938 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1939 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1940 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1941 u64 rem_load, moved_load;
1942
1943 /*
1944 * empty group
1945 */
1946 if (!busiest_cfs_rq->task_weight)
1947 continue;
1948
1949 rem_load = (u64)rem_load_move * busiest_weight;
1950 rem_load = div_u64(rem_load, busiest_h_load + 1);
1951
1952 moved_load = balance_tasks(this_rq, this_cpu, busiest,
1953 rem_load, sd, idle, all_pinned, this_best_prio,
1954 busiest_cfs_rq);
1955
1956 if (!moved_load)
1957 continue;
1958
1959 moved_load *= busiest_h_load;
1960 moved_load = div_u64(moved_load, busiest_weight + 1);
1961
1962 rem_load_move -= moved_load;
1963 if (rem_load_move < 0)
1964 break;
1965 }
1966 rcu_read_unlock();
1967
1968 return max_load_move - rem_load_move;
1969}
1970#else
1971static unsigned long
1972load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1973 unsigned long max_load_move,
1974 struct sched_domain *sd, enum cpu_idle_type idle,
1975 int *all_pinned, int *this_best_prio)
1976{
1977 return balance_tasks(this_rq, this_cpu, busiest,
1978 max_load_move, sd, idle, all_pinned,
1979 this_best_prio, &busiest->cfs);
1980}
1981#endif
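
/*
 * Worked example (a hedged sketch, hypothetical numbers) for the h_load
 * scaling in load_balance_fair() above. Suppose rem_load_move == 2048
 * in cpu-load units, and the busiest cpu's cfs_rq of some group has
 * load.weight == 4096 but h_load == 1024, i.e. the group contributes
 * only a quarter of its internal weight to the cpu's load. Then:
 *
 *	rem_load   = 2048 * 4096 / (1024 + 1) ~= 8184	(group-local units)
 *	moved_load = moved * 1024 / (4096 + 1)		(back to cpu units)
 *
 * Task weights inside the group over-count cpu load by a factor of
 * weight/h_load, so the budget is scaled up before pulling tasks and
 * the moved amount scaled back down before updating rem_load_move.
 */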
1982
1983/*
1984 * move_tasks tries to move up to max_load_move weighted load from busiest to
1985 * this_rq, as part of a balancing operation within domain "sd".
1986 * Returns 1 if successful and 0 otherwise.
1987 *
1988 * Called with both runqueues locked.
1989 */
1990static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1991 unsigned long max_load_move,
1992 struct sched_domain *sd, enum cpu_idle_type idle,
1993 int *all_pinned)
1994{
3d45fd80 1995 unsigned long total_load_moved = 0, load_moved;
1996 int this_best_prio = this_rq->curr->prio;
1997
1998 do {
3d45fd80 1999 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
2000 max_load_move - total_load_moved,
2001 sd, idle, all_pinned, &this_best_prio);
2002
2003 total_load_moved += load_moved;
2004
2005#ifdef CONFIG_PREEMPT
2006 /*
2007 * NEWIDLE balancing is a source of latency, so preemptible
2008 * kernels will stop after the first task is pulled to minimize
2009 * the critical section.
2010 */
2011 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2012 break;
2013
2014 if (raw_spin_is_contended(&this_rq->lock) ||
2015 raw_spin_is_contended(&busiest->lock))
2016 break;
1e3c88bd 2017#endif
3d45fd80 2018 } while (load_moved && max_load_move > total_load_moved);
2019
2020 return total_load_moved > 0;
2021}
2022
2023/********** Helpers for find_busiest_group ************************/
2024/*
2025 * sd_lb_stats - Structure to store the statistics of a sched_domain
2026 * during load balancing.
2027 */
2028struct sd_lb_stats {
2029 struct sched_group *busiest; /* Busiest group in this sd */
2030 struct sched_group *this; /* Local group in this sd */
2031 unsigned long total_load; /* Total load of all groups in sd */
2032 unsigned long total_pwr; /* Total power of all groups in sd */
2033 unsigned long avg_load; /* Average load across all groups in sd */
2034
2035 /** Statistics of this group */
2036 unsigned long this_load;
2037 unsigned long this_load_per_task;
2038 unsigned long this_nr_running;
2039
2040 /* Statistics of the busiest group */
2041 unsigned long max_load;
2042 unsigned long busiest_load_per_task;
2043 unsigned long busiest_nr_running;
dd5feea1 2044 unsigned long busiest_group_capacity;
2045
2046 int group_imb; /* Is there imbalance in this sd */
2047#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2048 int power_savings_balance; /* Is powersave balance needed for this sd */
2049 struct sched_group *group_min; /* Least loaded group in sd */
2050 struct sched_group *group_leader; /* Group which relieves group_min */
2051 unsigned long min_load_per_task; /* load_per_task in group_min */
2052 unsigned long leader_nr_running; /* Nr running of group_leader */
2053 unsigned long min_nr_running; /* Nr running of group_min */
2054#endif
2055};
2056
2057/*
2058 * sg_lb_stats - stats of a sched_group required for load_balancing
2059 */
2060struct sg_lb_stats {
2061 unsigned long avg_load; /* Avg load across the CPUs of the group */
2062 unsigned long group_load; /* Total load over the CPUs of the group */
2063 unsigned long sum_nr_running; /* Nr tasks running in the group */
2064 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2065 unsigned long group_capacity;
2066 int group_imb; /* Is there an imbalance in the group? */
2067};
2068
2069/**
2070 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2071 * @group: The group whose first cpu is to be returned.
2072 */
2073static inline unsigned int group_first_cpu(struct sched_group *group)
2074{
2075 return cpumask_first(sched_group_cpus(group));
2076}
2077
2078/**
2079 * get_sd_load_idx - Obtain the load index for a given sched domain.
2080 * @sd: The sched_domain whose load_idx is to be obtained.
2081 * @idle: The idle status of the CPU for which the sd load_idx is obtained.
2082 */
2083static inline int get_sd_load_idx(struct sched_domain *sd,
2084 enum cpu_idle_type idle)
2085{
2086 int load_idx;
2087
2088 switch (idle) {
2089 case CPU_NOT_IDLE:
2090 load_idx = sd->busy_idx;
2091 break;
2092
2093 case CPU_NEWLY_IDLE:
2094 load_idx = sd->newidle_idx;
2095 break;
2096 default:
2097 load_idx = sd->idle_idx;
2098 break;
2099 }
2100
2101 return load_idx;
2102}
2103
2104
2105#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2106/**
2107 * init_sd_power_savings_stats - Initialize power savings statistics for
2108 * the given sched_domain, during load balancing.
2109 *
2110 * @sd: Sched domain whose power-savings statistics are to be initialized.
2111 * @sds: Variable containing the statistics for sd.
2112 * @idle: Idle status of the CPU at which we're performing load-balancing.
2113 */
2114static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2115 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2116{
2117 /*
2118 * Busy processors will not participate in power savings
2119 * balance.
2120 */
2121 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2122 sds->power_savings_balance = 0;
2123 else {
2124 sds->power_savings_balance = 1;
2125 sds->min_nr_running = ULONG_MAX;
2126 sds->leader_nr_running = 0;
2127 }
2128}
2129
2130/**
2131 * update_sd_power_savings_stats - Update the power saving stats for a
2132 * sched_domain while performing load balancing.
2133 *
2134 * @group: sched_group belonging to the sched_domain under consideration.
2135 * @sds: Variable containing the statistics of the sched_domain
2136 * @local_group: Does group contain the CPU for which we're performing
2137 * load balancing?
2138 * @sgs: Variable containing the statistics of the group.
2139 */
2140static inline void update_sd_power_savings_stats(struct sched_group *group,
2141 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2142{
2143
2144 if (!sds->power_savings_balance)
2145 return;
2146
2147 /*
2148 * If the local group is idle or completely loaded,
2149 * there is no need to do power savings balance at this domain.
2150 */
2151 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2152 !sds->this_nr_running))
2153 sds->power_savings_balance = 0;
2154
2155 /*
2156 * If a group is already running at full capacity or idle,
2157 * don't include that group in power savings calculations
2158 */
2159 if (!sds->power_savings_balance ||
2160 sgs->sum_nr_running >= sgs->group_capacity ||
2161 !sgs->sum_nr_running)
2162 return;
2163
2164 /*
2165 * Calculate the group which has the least non-idle load.
2166 * This is the group from which we need to pick up load
2167 * for saving power.
2168 */
2169 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2170 (sgs->sum_nr_running == sds->min_nr_running &&
2171 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2172 sds->group_min = group;
2173 sds->min_nr_running = sgs->sum_nr_running;
2174 sds->min_load_per_task = sgs->sum_weighted_load /
2175 sgs->sum_nr_running;
2176 }
2177
2178 /*
2179 * Calculate the group which is running near its
2180 * capacity but still has some space to pick up load
2181 * from another group and save more power.
2182 */
2183 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2184 return;
2185
2186 if (sgs->sum_nr_running > sds->leader_nr_running ||
2187 (sgs->sum_nr_running == sds->leader_nr_running &&
2188 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2189 sds->group_leader = group;
2190 sds->leader_nr_running = sgs->sum_nr_running;
2191 }
2192}
2193
2194/**
2195 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2196 * @sds: Variable containing the statistics of the sched_domain
2197 * under consideration.
2198 * @this_cpu: Cpu at which we're currently performing load-balancing.
2199 * @imbalance: Variable to store the imbalance.
2200 *
2201 * Description:
2202 * Check if we have potential to perform some power-savings balance.
2203 * If yes, set the busiest group to be the least loaded group in the
2204 * sched_domain, so that its CPUs can be put to idle.
2205 *
2206 * Returns 1 if there is potential to perform power-savings balance.
2207 * Else returns 0.
2208 */
2209static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2210 int this_cpu, unsigned long *imbalance)
2211{
2212 if (!sds->power_savings_balance)
2213 return 0;
2214
2215 if (sds->this != sds->group_leader ||
2216 sds->group_leader == sds->group_min)
2217 return 0;
2218
2219 *imbalance = sds->min_load_per_task;
2220 sds->busiest = sds->group_min;
2221
2222 return 1;
2223
2224}
2225#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2226static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2227 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2228{
2229 return;
2230}
2231
2232static inline void update_sd_power_savings_stats(struct sched_group *group,
2233 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2234{
2235 return;
2236}
2237
2238static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2239 int this_cpu, unsigned long *imbalance)
2240{
2241 return 0;
2242}
2243#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2244
2245
2246unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2247{
2248 return SCHED_LOAD_SCALE;
2249}
2250
2251unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2252{
2253 return default_scale_freq_power(sd, cpu);
2254}
2255
2256unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2257{
2258 unsigned long weight = cpumask_weight(sched_domain_span(sd));
2259 unsigned long smt_gain = sd->smt_gain;
2260
2261 smt_gain /= weight;
2262
2263 return smt_gain;
2264}
2265
2266unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2267{
2268 return default_scale_smt_power(sd, cpu);
2269}
2270
2271unsigned long scale_rt_power(int cpu)
2272{
2273 struct rq *rq = cpu_rq(cpu);
2274 u64 total, available;
2275
2276 sched_avg_update(rq);
2277
2278 total = sched_avg_period() + (rq->clock - rq->age_stamp);
2279 available = total - rq->rt_avg;
2280
2281 if (unlikely((s64)total < SCHED_LOAD_SCALE))
2282 total = SCHED_LOAD_SCALE;
2283
2284 total >>= SCHED_LOAD_SHIFT;
2285
2286 return div_u64(available, total);
2287}
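
/*
 * Worked example (a hedged sketch, hypothetical numbers) for
 * scale_rt_power(): over an averaging window of total ~= 10^9 ns,
 * suppose rt_avg == 2.5 * 10^8 ns went to real-time activity. Then
 * available ~= 7.5 * 10^8, total >> SCHED_LOAD_SHIFT ~= 976562, and the
 * function returns ~768, i.e. about 0.75 * SCHED_LOAD_SCALE: roughly
 * 75% of this cpu's capacity is left over for CFS tasks.
 */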
2288
2289static void update_cpu_power(struct sched_domain *sd, int cpu)
2290{
2291 unsigned long weight = cpumask_weight(sched_domain_span(sd));
2292 unsigned long power = SCHED_LOAD_SCALE;
2293 struct sched_group *sdg = sd->groups;
2294
2295 if (sched_feat(ARCH_POWER))
2296 power *= arch_scale_freq_power(sd, cpu);
2297 else
2298 power *= default_scale_freq_power(sd, cpu);
2299
2300 power >>= SCHED_LOAD_SHIFT;
2301
2302 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2303 if (sched_feat(ARCH_POWER))
2304 power *= arch_scale_smt_power(sd, cpu);
2305 else
2306 power *= default_scale_smt_power(sd, cpu);
2307
2308 power >>= SCHED_LOAD_SHIFT;
2309 }
2310
2311 power *= scale_rt_power(cpu);
2312 power >>= SCHED_LOAD_SHIFT;
2313
2314 if (!power)
2315 power = 1;
2316
2317 sdg->cpu_power = power;
2318}
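
/*
 * Worked example (a hedged sketch, hypothetical numbers) for
 * update_cpu_power(): the scale factors compose multiplicatively, each
 * normalized to SCHED_LOAD_SCALE (1024). Assume a freq scale of 1024
 * (no scaling), an SMT scale of smt_gain/weight == 1178/2 == 589 for a
 * 2-thread sibling domain, and an rt scale of 768 (25% of the cpu
 * consumed by RT, as in the example above):
 *
 *	power = (1024 * 1024) >> 10	== 1024
 *	power = (1024 *  589) >> 10	==  589
 *	power = ( 589 *  768) >> 10	==  441
 *
 * so the group advertises cpu_power == 441 instead of the nominal 1024.
 */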
2319
2320static void update_group_power(struct sched_domain *sd, int cpu)
2321{
2322 struct sched_domain *child = sd->child;
2323 struct sched_group *group, *sdg = sd->groups;
2324 unsigned long power;
2325
2326 if (!child) {
2327 update_cpu_power(sd, cpu);
2328 return;
2329 }
2330
2331 power = 0;
2332
2333 group = child->groups;
2334 do {
2335 power += group->cpu_power;
2336 group = group->next;
2337 } while (group != child->groups);
2338
2339 sdg->cpu_power = power;
2340}
2341
2342/**
2343 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2344 * @sd: The sched_domain whose statistics are to be updated.
2345 * @group: sched_group whose statistics are to be updated.
2346 * @this_cpu: Cpu for which load balance is currently performed.
2347 * @idle: Idle status of this_cpu
2348 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2349 * @sd_idle: Idle status of the sched_domain containing group.
2350 * @local_group: Does group contain this_cpu.
2351 * @cpus: Set of cpus considered for load balancing.
2352 * @balance: Should we balance.
2353 * @sgs: variable to hold the statistics for this group.
2354 */
2355static inline void update_sg_lb_stats(struct sched_domain *sd,
2356 struct sched_group *group, int this_cpu,
2357 enum cpu_idle_type idle, int load_idx, int *sd_idle,
2358 int local_group, const struct cpumask *cpus,
2359 int *balance, struct sg_lb_stats *sgs)
2360{
2361 unsigned long load, max_cpu_load, min_cpu_load;
2362 int i;
2363 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 2364 unsigned long avg_load_per_task = 0;
1e3c88bd 2365
871e35bc 2366 if (local_group)
1e3c88bd 2367 balance_cpu = group_first_cpu(group);
2368
2369 /* Tally up the load of all CPUs in the group */
2370 max_cpu_load = 0;
2371 min_cpu_load = ~0UL;
2372
2373 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2374 struct rq *rq = cpu_rq(i);
2375
2376 if (*sd_idle && rq->nr_running)
2377 *sd_idle = 0;
2378
2379 /* Bias balancing toward cpus of our domain */
2380 if (local_group) {
2381 if (idle_cpu(i) && !first_idle_cpu) {
2382 first_idle_cpu = 1;
2383 balance_cpu = i;
2384 }
2385
2386 load = target_load(i, load_idx);
2387 } else {
2388 load = source_load(i, load_idx);
2389 if (load > max_cpu_load)
2390 max_cpu_load = load;
2391 if (min_cpu_load > load)
2392 min_cpu_load = load;
2393 }
2394
2395 sgs->group_load += load;
2396 sgs->sum_nr_running += rq->nr_running;
2397 sgs->sum_weighted_load += weighted_cpuload(i);
2398
2399 }
2400
2401 /*
2402 * The first idle cpu or the first cpu (busiest) in this sched group
2403 * is eligible for doing load balancing at this and above
2404 * domains. In the newly idle case, we allow all the cpus
2405 * to do the newly idle load balance.
2406 */
2407 if (idle != CPU_NEWLY_IDLE && local_group &&
8f190fb3 2408 balance_cpu != this_cpu) {
2409 *balance = 0;
2410 return;
2411 }
2412
2413 update_group_power(sd, this_cpu);
2414
2415 /* Adjust by relative CPU power of the group */
2416 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
2417
2418 /*
2419 * Consider the group unbalanced when the imbalance is larger
2420 * than the average weight of two tasks.
2421 *
2422 * APZ: with cgroup the avg task weight can vary wildly and
2423 * might not be a suitable number - should we keep a
2424 * normalized nr_running number somewhere that negates
2425 * the hierarchy?
2426 */
2427 if (sgs->sum_nr_running)
2428 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
2429
2430 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
2431 sgs->group_imb = 1;
2432
2433 sgs->group_capacity =
2434 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
2435}
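
/*
 * Worked example (a hedged sketch, hypothetical numbers) for the
 * group_imb test above. A remote two-cpu group runs three nice-0 tasks
 * (weight 1024 each), all stacked on one cpu: max_cpu_load == 3072,
 * min_cpu_load == 0 and avg_load_per_task == 3072 / 3 == 1024. Since
 * 3072 - 0 > 2 * 1024, the group is flagged imbalanced, so the balancer
 * may pull from it even when its average load alone looks acceptable.
 */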
2436
2437/**
2438 * update_sd_lb_stats - Update the sched_domain's statistics for load balancing.
2439 * @sd: sched_domain whose statistics are to be updated.
2440 * @this_cpu: Cpu for which load balance is currently performed.
2441 * @idle: Idle status of this_cpu
2442 * @sd_idle: Idle status of the sched_domain containing group.
2443 * @cpus: Set of cpus considered for load balancing.
2444 * @balance: Should we balance.
2445 * @sds: variable to hold the statistics for this sched_domain.
2446 */
2447static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2448 enum cpu_idle_type idle, int *sd_idle,
2449 const struct cpumask *cpus, int *balance,
2450 struct sd_lb_stats *sds)
2451{
2452 struct sched_domain *child = sd->child;
2453 struct sched_group *group = sd->groups;
2454 struct sg_lb_stats sgs;
2455 int load_idx, prefer_sibling = 0;
2456
2457 if (child && child->flags & SD_PREFER_SIBLING)
2458 prefer_sibling = 1;
2459
2460 init_sd_power_savings_stats(sd, sds, idle);
2461 load_idx = get_sd_load_idx(sd, idle);
2462
2463 do {
2464 int local_group;
2465
2466 local_group = cpumask_test_cpu(this_cpu,
2467 sched_group_cpus(group));
2468 memset(&sgs, 0, sizeof(sgs));
2469 update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
2470 local_group, cpus, balance, &sgs);
2471
8f190fb3 2472 if (local_group && !(*balance))
2473 return;
2474
2475 sds->total_load += sgs.group_load;
2476 sds->total_pwr += group->cpu_power;
2477
2478 /*
2479 * In case the child domain prefers tasks go to siblings
2480 * first, lower the group capacity to one so that we'll try
2481 * and move all the excess tasks away.
2482 */
2483 if (prefer_sibling)
2484 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2485
2486 if (local_group) {
2487 sds->this_load = sgs.avg_load;
2488 sds->this = group;
2489 sds->this_nr_running = sgs.sum_nr_running;
2490 sds->this_load_per_task = sgs.sum_weighted_load;
2491 } else if (sgs.avg_load > sds->max_load &&
2492 (sgs.sum_nr_running > sgs.group_capacity ||
2493 sgs.group_imb)) {
2494 sds->max_load = sgs.avg_load;
2495 sds->busiest = group;
2496 sds->busiest_nr_running = sgs.sum_nr_running;
dd5feea1 2497 sds->busiest_group_capacity = sgs.group_capacity;
2498 sds->busiest_load_per_task = sgs.sum_weighted_load;
2499 sds->group_imb = sgs.group_imb;
2500 }
2501
2502 update_sd_power_savings_stats(group, sds, local_group, &sgs);
2503 group = group->next;
2504 } while (group != sd->groups);
2505}
2506
2507/**
2508 * fix_small_imbalance - Calculate the minor imbalance that exists
2509 * amongst the groups of a sched_domain, during
2510 * load balancing.
2511 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2512 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2513 * @imbalance: Variable to store the imbalance.
2514 */
2515static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2516 int this_cpu, unsigned long *imbalance)
2517{
2518 unsigned long tmp, pwr_now = 0, pwr_move = 0;
2519 unsigned int imbn = 2;
dd5feea1 2520 unsigned long scaled_busy_load_per_task;
2521
2522 if (sds->this_nr_running) {
2523 sds->this_load_per_task /= sds->this_nr_running;
2524 if (sds->busiest_load_per_task >
2525 sds->this_load_per_task)
2526 imbn = 1;
2527 } else
2528 sds->this_load_per_task =
2529 cpu_avg_load_per_task(this_cpu);
2530
2531 scaled_busy_load_per_task = sds->busiest_load_per_task
2532 * SCHED_LOAD_SCALE;
2533 scaled_busy_load_per_task /= sds->busiest->cpu_power;
2534
2535 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2536 (scaled_busy_load_per_task * imbn)) {
2537 *imbalance = sds->busiest_load_per_task;
2538 return;
2539 }
2540
2541 /*
2542 * OK, we don't have enough imbalance to justify moving tasks,
2543 * however we may be able to increase total CPU power used by
2544 * moving them.
2545 */
2546
2547 pwr_now += sds->busiest->cpu_power *
2548 min(sds->busiest_load_per_task, sds->max_load);
2549 pwr_now += sds->this->cpu_power *
2550 min(sds->this_load_per_task, sds->this_load);
2551 pwr_now /= SCHED_LOAD_SCALE;
2552
2553 /* Amount of load we'd subtract */
2554 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2555 sds->busiest->cpu_power;
2556 if (sds->max_load > tmp)
2557 pwr_move += sds->busiest->cpu_power *
2558 min(sds->busiest_load_per_task, sds->max_load - tmp);
2559
2560 /* Amount of load we'd add */
2561 if (sds->max_load * sds->busiest->cpu_power <
2562 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2563 tmp = (sds->max_load * sds->busiest->cpu_power) /
2564 sds->this->cpu_power;
2565 else
2566 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2567 sds->this->cpu_power;
2568 pwr_move += sds->this->cpu_power *
2569 min(sds->this_load_per_task, sds->this_load + tmp);
2570 pwr_move /= SCHED_LOAD_SCALE;
2571
2572 /* Move if we gain throughput */
2573 if (pwr_move > pwr_now)
2574 *imbalance = sds->busiest_load_per_task;
2575}
2576
2577/**
2578 * calculate_imbalance - Calculate the amount of imbalance present within the
2579 * groups of a given sched_domain during load balance.
2580 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2581 * @this_cpu: Cpu for which currently load balance is being performed.
2582 * @imbalance: The variable to store the imbalance.
2583 */
2584static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2585 unsigned long *imbalance)
2586{
2587 unsigned long max_pull, load_above_capacity = ~0UL;
2588
2589 sds->busiest_load_per_task /= sds->busiest_nr_running;
2590 if (sds->group_imb) {
2591 sds->busiest_load_per_task =
2592 min(sds->busiest_load_per_task, sds->avg_load);
2593 }
2594
2595 /*
2596 * In the presence of smp nice balancing, certain scenarios can have
2597 * max load less than avg load (as we skip the groups at or below
2598 * their cpu_power while calculating max_load).
2599 */
2600 if (sds->max_load < sds->avg_load) {
2601 *imbalance = 0;
2602 return fix_small_imbalance(sds, this_cpu, imbalance);
2603 }
2604
2605 if (!sds->group_imb) {
2606 /*
2607 * Don't want to pull so many tasks that a group would go idle.
2608 */
2609 load_above_capacity = (sds->busiest_nr_running -
2610 sds->busiest_group_capacity);
2611
2612 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
2613
2614 load_above_capacity /= sds->busiest->cpu_power;
2615 }
2616
2617 /*
2618 * We're trying to get all the cpus to the average_load, so we don't
2619 * want to push ourselves above the average load, nor do we wish to
2620 * reduce the max loaded cpu below the average load. At the same time,
2621 * we also don't want to reduce the group load below the group capacity
2622 * (so that we can implement power-savings policies etc). Thus we look
2623 * for the minimum possible imbalance.
2624 * Be careful of negative numbers as they'll appear as very large values
2625 * with unsigned longs.
2626 */
2627 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
2628
2629 /* How much load to actually move to equalise the imbalance */
2630 *imbalance = min(max_pull * sds->busiest->cpu_power,
2631 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2632 / SCHED_LOAD_SCALE;
2633
2634 /*
2635 * If *imbalance is less than the average load per runnable task
2636 * there is no guarantee that any tasks will be moved, so we
2637 * think about bumping its value to force at least one task to be
2638 * moved.
2639 */
2640 if (*imbalance < sds->busiest_load_per_task)
2641 return fix_small_imbalance(sds, this_cpu, imbalance);
2642
2643}
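
/*
 * Worked example (a hedged sketch, hypothetical numbers) for
 * calculate_imbalance(), with every cpu_power == SCHED_LOAD_SCALE ==
 * 1024. Suppose avg_load == 1536, max_load == 2048 on the busiest
 * group, this_load == 1024, and load_above_capacity large enough not
 * to clip. Then max_pull == 2048 - 1536 == 512 and
 *
 *	*imbalance = min(512 * 1024, (1536 - 1024) * 1024) / 1024 == 512
 *
 * just enough weighted load to pull both sides toward the average
 * without pushing either one past it.
 */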
2644/******* find_busiest_group() helpers end here *********************/
2645
2646/**
2647 * find_busiest_group - Returns the busiest group within the sched_domain
2648 * if there is an imbalance. If there isn't an imbalance, and
2649 * the user has opted for power-savings, it returns a group whose
2650 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
2651 * such a group exists.
2652 *
2653 * Also calculates the amount of weighted load which should be moved
2654 * to restore balance.
2655 *
2656 * @sd: The sched_domain whose busiest group is to be returned.
2657 * @this_cpu: The cpu for which load balancing is currently being performed.
2658 * @imbalance: Variable which stores amount of weighted load which should
2659 * be moved to restore balance/put a group to idle.
2660 * @idle: The idle status of this_cpu.
2661 * @sd_idle: The idleness of sd
2662 * @cpus: The set of CPUs under consideration for load-balancing.
2663 * @balance: Pointer to a variable indicating if this_cpu
2664 * is the appropriate cpu to perform load balancing at this_level.
2665 *
2666 * Returns: - the busiest group if imbalance exists.
2667 * - If no imbalance and user has opted for power-savings balance,
2668 * return the least loaded group whose CPUs can be
2669 * put to idle by rebalancing its tasks onto our group.
2670 */
2671static struct sched_group *
2672find_busiest_group(struct sched_domain *sd, int this_cpu,
2673 unsigned long *imbalance, enum cpu_idle_type idle,
2674 int *sd_idle, const struct cpumask *cpus, int *balance)
2675{
2676 struct sd_lb_stats sds;
2677
2678 memset(&sds, 0, sizeof(sds));
2679
2680 /*
2681 * Compute the various statistics relevant for load balancing at
2682 * this level.
2683 */
2684 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
2685 balance, &sds);
2686
2687 /* Cases where imbalance does not exist from POV of this_cpu */
2688 /* 1) this_cpu is not the appropriate cpu to perform load balancing
2689 * at this level.
2690 * 2) There is no busy sibling group to pull from.
2691 * 3) This group is the busiest group.
2692 * 4) This group is busier than the average busyness at this
2693 * sched_domain.
2694 * 5) The imbalance is within the specified limit.
1e3c88bd 2695 */
8f190fb3 2696 if (!(*balance))
2697 goto ret;
2698
2699 if (!sds.busiest || sds.busiest_nr_running == 0)
2700 goto out_balanced;
2701
2702 if (sds.this_load >= sds.max_load)
2703 goto out_balanced;
2704
2705 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
2706
2707 if (sds.this_load >= sds.avg_load)
2708 goto out_balanced;
2709
2710 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
2711 goto out_balanced;
2712
2713 /* Looks like there is an imbalance. Compute it */
2714 calculate_imbalance(&sds, this_cpu, imbalance);
2715 return sds.busiest;
2716
2717out_balanced:
2718 /*
2719 * There is no obvious imbalance. But check if we can do some balancing
2720 * to save power.
2721 */
2722 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
2723 return sds.busiest;
2724ret:
2725 *imbalance = 0;
2726 return NULL;
2727}
2728
2729/*
2730 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2731 */
2732static struct rq *
2733find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
2734 unsigned long imbalance, const struct cpumask *cpus)
2735{
2736 struct rq *busiest = NULL, *rq;
2737 unsigned long max_load = 0;
2738 int i;
2739
2740 for_each_cpu(i, sched_group_cpus(group)) {
2741 unsigned long power = power_of(i);
2742 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
2743 unsigned long wl;
2744
2745 if (!cpumask_test_cpu(i, cpus))
2746 continue;
2747
2748 rq = cpu_rq(i);
6e40f5bb 2749 wl = weighted_cpuload(i);
1e3c88bd 2750
2751 /*
2752 * When comparing with imbalance, use weighted_cpuload()
2753 * which is not scaled with the cpu power.
2754 */
2755 if (capacity && rq->nr_running == 1 && wl > imbalance)
2756 continue;
2757
2758 /*
2759 * For the load comparisons with the other cpu's, consider
2760 * the weighted_cpuload() scaled with the cpu power, so that
2761 * the load can be moved away from the cpu that is potentially
2762 * running at a lower capacity.
2763 */
2764 wl = (wl * SCHED_LOAD_SCALE) / power;
2765
2766 if (wl > max_load) {
2767 max_load = wl;
2768 busiest = rq;
2769 }
2770 }
2771
2772 return busiest;
2773}
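
/*
 * Worked example (a hedged sketch, hypothetical numbers) for the
 * scaling in find_busiest_queue(): two candidate cpus both report
 * weighted_cpuload() == 1024, but cpu A has power == 1024 while cpu B,
 * say an SMT sibling, has power == 512. After wl = (wl * 1024) / power,
 * A scores 1024 and B scores 2048, so B is chosen as busiest: the same
 * absolute load weighs heavier on the weaker cpu.
 */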
2774
2775/*
2776 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
2777 * it works so long as it is large enough.
2778 */
2779#define MAX_PINNED_INTERVAL 512
2780
2781/* Working cpumask for load_balance and load_balance_newidle. */
2782static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
2783
2784static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
2785{
2786 if (idle == CPU_NEWLY_IDLE) {
2787 /*
2788 * The only task running in a non-idle cpu can be moved to this
2789 * cpu in an attempt to completely free up the other CPU
2790 * package.
2791 *
2792 * The package power saving logic comes from
2793 * find_busiest_group(). If there is no imbalance, then
2794 * f_b_g() will return NULL. However when sched_mc={1,2} then
2795 * f_b_g() will select a group from which a running task may be
2796 * pulled to this cpu in order to make the other package idle.
2797 * If there is no opportunity to make a package idle and if
2798 * there is no imbalance, then f_b_g() will return NULL and no
2799 * action will be taken in load_balance_newidle().
2800 *
2801 * Under normal task pull operation due to imbalance, there
2802 * will be more than one task in the source run queue and
2803 * move_tasks() will succeed. ld_moved will be true and this
2804 * active balance code will not be triggered.
2805 */
2806 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2807 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2808 return 0;
2809
2810 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
2811 return 0;
2812 }
2813
2814 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
2815}
2816
2817/*
2818 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2819 * tasks if there is an imbalance.
2820 */
2821static int load_balance(int this_cpu, struct rq *this_rq,
2822 struct sched_domain *sd, enum cpu_idle_type idle,
2823 int *balance)
2824{
2825 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2826 struct sched_group *group;
2827 unsigned long imbalance;
2828 struct rq *busiest;
2829 unsigned long flags;
2830 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
2831
2832 cpumask_copy(cpus, cpu_active_mask);
2833
2834 /*
2835 * When power savings policy is enabled for the parent domain, idle
2836 * sibling can pick up load irrespective of busy siblings. In this case,
2837 * let the state of idle sibling percolate up as CPU_IDLE, instead of
2838 * portraying it as CPU_NOT_IDLE.
2839 */
2840 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2841 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2842 sd_idle = 1;
2843
2844 schedstat_inc(sd, lb_count[idle]);
2845
2846redo:
2847 update_shares(sd);
2848 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2849 cpus, balance);
2850
2851 if (*balance == 0)
2852 goto out_balanced;
2853
2854 if (!group) {
2855 schedstat_inc(sd, lb_nobusyg[idle]);
2856 goto out_balanced;
2857 }
2858
2859 busiest = find_busiest_queue(group, idle, imbalance, cpus);
2860 if (!busiest) {
2861 schedstat_inc(sd, lb_nobusyq[idle]);
2862 goto out_balanced;
2863 }
2864
2865 BUG_ON(busiest == this_rq);
2866
2867 schedstat_add(sd, lb_imbalance[idle], imbalance);
2868
2869 ld_moved = 0;
2870 if (busiest->nr_running > 1) {
2871 /*
2872 * Attempt to move tasks. If find_busiest_group has found
2873 * an imbalance but busiest->nr_running <= 1, the group is
2874 * still unbalanced. ld_moved simply stays zero, so it is
2875 * correctly treated as an imbalance.
2876 */
2877 local_irq_save(flags);
2878 double_rq_lock(this_rq, busiest);
2879 ld_moved = move_tasks(this_rq, this_cpu, busiest,
2880 imbalance, sd, idle, &all_pinned);
2881 double_rq_unlock(this_rq, busiest);
2882 local_irq_restore(flags);
2883
2884 /*
2885 * some other cpu did the load balance for us.
2886 */
2887 if (ld_moved && this_cpu != smp_processor_id())
2888 resched_cpu(this_cpu);
2889
2890 /* All tasks on this runqueue were pinned by CPU affinity */
2891 if (unlikely(all_pinned)) {
2892 cpumask_clear_cpu(cpu_of(busiest), cpus);
2893 if (!cpumask_empty(cpus))
2894 goto redo;
2895 goto out_balanced;
2896 }
2897 }
2898
2899 if (!ld_moved) {
2900 schedstat_inc(sd, lb_failed[idle]);
2901 sd->nr_balance_failed++;
2902
1af3ed3d 2903 if (need_active_balance(sd, sd_idle, idle)) {
2904 raw_spin_lock_irqsave(&busiest->lock, flags);
2905
2906 /* don't kick the migration_thread if the curr
2907 * task on busiest cpu can't be moved to this_cpu
2908 */
2909 if (!cpumask_test_cpu(this_cpu,
2910 &busiest->curr->cpus_allowed)) {
2911 raw_spin_unlock_irqrestore(&busiest->lock,
2912 flags);
2913 all_pinned = 1;
2914 goto out_one_pinned;
2915 }
2916
2917 if (!busiest->active_balance) {
2918 busiest->active_balance = 1;
2919 busiest->push_cpu = this_cpu;
2920 active_balance = 1;
2921 }
2922 raw_spin_unlock_irqrestore(&busiest->lock, flags);
2923 if (active_balance)
2924 wake_up_process(busiest->migration_thread);
2925
2926 /*
2927 * We've kicked active balancing, reset the failure
2928 * counter.
2929 */
2930 sd->nr_balance_failed = sd->cache_nice_tries+1;
2931 }
2932 } else
2933 sd->nr_balance_failed = 0;
2934
2935 if (likely(!active_balance)) {
2936 /* We were unbalanced, so reset the balancing interval */
2937 sd->balance_interval = sd->min_interval;
2938 } else {
2939 /*
2940 * If we've begun active balancing, start to back off. This
2941 * case may not be covered by the all_pinned logic if there
2942 * is only 1 task on the busy runqueue (because we don't call
2943 * move_tasks).
2944 */
2945 if (sd->balance_interval < sd->max_interval)
2946 sd->balance_interval *= 2;
2947 }
2948
2949 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2950 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2951 ld_moved = -1;
2952
2953 goto out;
2954
2955out_balanced:
2956 schedstat_inc(sd, lb_balanced[idle]);
2957
2958 sd->nr_balance_failed = 0;
2959
2960out_one_pinned:
2961 /* tune up the balancing interval */
2962 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
2963 (sd->balance_interval < sd->max_interval))
2964 sd->balance_interval *= 2;
2965
2966 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2967 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2968 ld_moved = -1;
2969 else
2970 ld_moved = 0;
2971out:
2972 if (ld_moved)
2973 update_shares(sd);
2974 return ld_moved;
2975}
2976
2977/*
2978 * idle_balance is called by schedule() if this_cpu is about to become
2979 * idle. Attempts to pull tasks from other CPUs.
2980 */
2981static void idle_balance(int this_cpu, struct rq *this_rq)
2982{
2983 struct sched_domain *sd;
2984 int pulled_task = 0;
2985 unsigned long next_balance = jiffies + HZ;
2986
2987 this_rq->idle_stamp = this_rq->clock;
2988
2989 if (this_rq->avg_idle < sysctl_sched_migration_cost)
2990 return;
2991
2992 /*
2993 * Drop the rq->lock, but keep IRQ/preempt disabled.
2994 */
2995 raw_spin_unlock(&this_rq->lock);
2996
2997 for_each_domain(this_cpu, sd) {
2998 unsigned long interval;
f492e12e 2999 int balance = 1;
3000
3001 if (!(sd->flags & SD_LOAD_BALANCE))
3002 continue;
3003
f492e12e 3004 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 3005 /* If we've pulled tasks over stop searching: */
3006 pulled_task = load_balance(this_cpu, this_rq,
3007 sd, CPU_NEWLY_IDLE, &balance);
3008 }
3009
3010 interval = msecs_to_jiffies(sd->balance_interval);
3011 if (time_after(next_balance, sd->last_balance + interval))
3012 next_balance = sd->last_balance + interval;
3013 if (pulled_task) {
3014 this_rq->idle_stamp = 0;
3015 break;
3016 }
3017 }
3018
3019 raw_spin_lock(&this_rq->lock);
3020
3021 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3022 /*
3023 * We are going idle. next_balance may be set based on
3024 * a busy processor. So reset next_balance.
3025 */
3026 this_rq->next_balance = next_balance;
3027 }
3028}
3029
3030/*
3031 * active_load_balance is run by migration threads. It pushes running tasks
3032 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
3033 * running on each physical CPU where possible, and avoids physical /
3034 * logical imbalances.
3035 *
3036 * Called with busiest_rq locked.
3037 */
3038static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3039{
3040 int target_cpu = busiest_rq->push_cpu;
3041 struct sched_domain *sd;
3042 struct rq *target_rq;
3043
3044 /* Is there any task to move? */
3045 if (busiest_rq->nr_running <= 1)
3046 return;
3047
3048 target_rq = cpu_rq(target_cpu);
3049
3050 /*
3051 * This condition is "impossible", if it occurs
3052 * we need to fix it. Originally reported by
3053 * Bjorn Helgaas on a 128-cpu setup.
3054 */
3055 BUG_ON(busiest_rq == target_rq);
3056
3057 /* move a task from busiest_rq to target_rq */
3058 double_lock_balance(busiest_rq, target_rq);
3059
3060 /* Search for an sd spanning us and the target CPU. */
3061 for_each_domain(target_cpu, sd) {
3062 if ((sd->flags & SD_LOAD_BALANCE) &&
3063 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3064 break;
3065 }
3066
3067 if (likely(sd)) {
3068 schedstat_inc(sd, alb_count);
3069
3070 if (move_one_task(target_rq, target_cpu, busiest_rq,
3071 sd, CPU_IDLE))
3072 schedstat_inc(sd, alb_pushed);
3073 else
3074 schedstat_inc(sd, alb_failed);
3075 }
3076 double_unlock_balance(busiest_rq, target_rq);
3077}
3078
3079#ifdef CONFIG_NO_HZ
3080static struct {
3081 atomic_t load_balancer;
3082 cpumask_var_t cpu_mask;
3083 cpumask_var_t ilb_grp_nohz_mask;
3084} nohz ____cacheline_aligned = {
3085 .load_balancer = ATOMIC_INIT(-1),
3086};
3087
3088int get_nohz_load_balancer(void)
3089{
3090 return atomic_read(&nohz.load_balancer);
3091}
3092
3093#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3094/**
3095 * lowest_flag_domain - Return lowest sched_domain containing flag.
3096 * @cpu: The cpu whose lowest level of sched domain is to
3097 * be returned.
3098 * @flag: The flag to check for the lowest sched_domain
3099 * for the given cpu.
3100 *
3101 * Returns the lowest sched_domain of a cpu which contains the given flag.
3102 */
3103static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3104{
3105 struct sched_domain *sd;
3106
3107 for_each_domain(cpu, sd)
3108 if (sd && (sd->flags & flag))
3109 break;
3110
3111 return sd;
3112}
3113
3114/**
3115 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3116 * @cpu: The cpu whose domains we're iterating over.
3117 * @sd: variable holding the value of the power_savings_sd
3118 * for cpu.
3119 * @flag: The flag to filter the sched_domains to be iterated.
3120 *
3121 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3122 * set, starting from the lowest sched_domain to the highest.
3123 */
3124#define for_each_flag_domain(cpu, sd, flag) \
3125 for (sd = lowest_flag_domain(cpu, flag); \
3126 (sd && (sd->flags & flag)); sd = sd->parent)
3127
3128/**
3129 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3130 * @ilb_group: group to be checked for semi-idleness
3131 *
3132 * Returns: 1 if the group is semi-idle. 0 otherwise.
3133 *
3134 * We define a sched_group to be semi-idle if it has at least one idle CPU
3135 * and at least one non-idle CPU. This helper function checks if the given
3136 * sched_group is semi-idle or not.
3137 */
3138static inline int is_semi_idle_group(struct sched_group *ilb_group)
3139{
3140 cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
3141 sched_group_cpus(ilb_group));
3142
3143 /*
3144 * A sched_group is semi-idle when it has at least one busy cpu
3145 * and at least one idle cpu.
3146 */
3147 if (cpumask_empty(nohz.ilb_grp_nohz_mask))
3148 return 0;
3149
3150 if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
3151 return 0;
3152
3153 return 1;
3154}
3155/**
3156 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3157 * @cpu: The cpu which is nominating a new idle_load_balancer.
3158 *
3159 * Returns: The id of the idle load balancer if it exists,
3160 * Else, returns >= nr_cpu_ids.
3161 *
3162 * This algorithm picks the idle load balancer such that it belongs to a
3163 * semi-idle powersavings sched_domain. The idea is to try and avoid
3164 * completely idle packages/cores just for the purpose of idle load balancing
3165 * when there are other idle cpus which are better suited for that job.
3166 */
3167static int find_new_ilb(int cpu)
3168{
3169 struct sched_domain *sd;
3170 struct sched_group *ilb_group;
3171
3172 /*
3173 * Have idle load balancer selection from semi-idle packages only
3174 * when power-aware load balancing is enabled
3175 */
3176 if (!(sched_smt_power_savings || sched_mc_power_savings))
3177 goto out_done;
3178
3179 /*
3180 * Optimize for the case when we have no idle CPUs or only one
3181 * idle CPU. Don't walk the sched_domain hierarchy in such cases
3182 */
3183 if (cpumask_weight(nohz.cpu_mask) < 2)
3184 goto out_done;
3185
3186 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3187 ilb_group = sd->groups;
3188
3189 do {
3190 if (is_semi_idle_group(ilb_group))
3191 return cpumask_first(nohz.ilb_grp_nohz_mask);
3192
3193 ilb_group = ilb_group->next;
3194
3195 } while (ilb_group != sd->groups);
3196 }
3197
3198out_done:
3199 return cpumask_first(nohz.cpu_mask);
3200}
3201#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3202static inline int find_new_ilb(int call_cpu)
3203{
3204 return cpumask_first(nohz.cpu_mask);
3205}
3206#endif
3207
3208/*
3209 * This routine will try to nominate the ilb (idle load balancing)
3210 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
3211 * load balancing on behalf of all those cpus. If all the cpus in the system
3212 * go into this tickless mode, then there will be no ilb owner (as there is
3213 * no need for one) and all the cpus will sleep till the next wakeup event
3214 * arrives...
3215 *
3216 * For the ilb owner, the tick is not stopped, and it will be used
3217 * for idle load balancing. The ilb owner will still be part of
3218 * nohz.cpu_mask.
3219 *
3220 * While stopping the tick, this cpu will become the ilb owner if there
3221 * is no other owner. It remains the owner until that cpu becomes busy
3222 * or until all cpus in the system stop their ticks, at which point
3223 * there is no need for an ilb owner.
3224 *
3225 * When the ilb owner becomes busy, it nominates another owner, during the
3226 * next busy scheduler_tick()
3227 */
3228int select_nohz_load_balancer(int stop_tick)
3229{
3230 int cpu = smp_processor_id();
3231
3232 if (stop_tick) {
3233 cpu_rq(cpu)->in_nohz_recently = 1;
3234
3235 if (!cpu_active(cpu)) {
3236 if (atomic_read(&nohz.load_balancer) != cpu)
3237 return 0;
3238
3239 /*
3240 * If we are going offline and still the leader,
3241 * give up!
3242 */
3243 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3244 BUG();
3245
3246 return 0;
3247 }
3248
3249 cpumask_set_cpu(cpu, nohz.cpu_mask);
3250
3251 /* time for ilb owner also to sleep */
3252 if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
3253 if (atomic_read(&nohz.load_balancer) == cpu)
3254 atomic_set(&nohz.load_balancer, -1);
3255 return 0;
3256 }
3257
3258 if (atomic_read(&nohz.load_balancer) == -1) {
3259 /* make me the ilb owner */
3260 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
3261 return 1;
3262 } else if (atomic_read(&nohz.load_balancer) == cpu) {
3263 int new_ilb;
3264
3265 if (!(sched_smt_power_savings ||
3266 sched_mc_power_savings))
3267 return 1;
3268 /*
3269 * Check to see if there is a more power-efficient
3270 * ilb.
3271 */
3272 new_ilb = find_new_ilb(cpu);
3273 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
3274 atomic_set(&nohz.load_balancer, -1);
3275 resched_cpu(new_ilb);
3276 return 0;
3277 }
3278 return 1;
3279 }
3280 } else {
3281 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
3282 return 0;
3283
3284 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3285
3286 if (atomic_read(&nohz.load_balancer) == cpu)
3287 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3288 BUG();
3289 }
3290 return 0;
3291}
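
/*
 * The nomination above is a lock-free leader election built on
 * atomic_cmpxchg(). A minimal userspace-style sketch of the same
 * pattern, using C11 atomics and hypothetical names, kept under #if 0
 * so it is never built here:
 */
#if 0
#include <stdatomic.h>

static atomic_int ilb_owner = ATOMIC_VAR_INIT(-1);	/* -1: no owner */

/* Try to become the idle-load-balance owner; returns nonzero on success. */
static int try_claim_ilb(int cpu)
{
	int expected = -1;

	/* Only one contender can swing -1 -> cpu; all others fail. */
	return atomic_compare_exchange_strong(&ilb_owner, &expected, cpu);
}

/* Give up ownership, but only if we actually hold it. */
static void release_ilb(int cpu)
{
	int expected = cpu;

	atomic_compare_exchange_strong(&ilb_owner, &expected, -1);
}
#endif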
3292#endif
3293
3294static DEFINE_SPINLOCK(balancing);
3295
3296/*
3297 * It checks each scheduling domain to see if it is due to be balanced,
3298 * and initiates a balancing operation if so.
3299 *
3300 * Balancing parameters are set up in arch_init_sched_domains.
3301 */
3302static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3303{
3304 int balance = 1;
3305 struct rq *rq = cpu_rq(cpu);
3306 unsigned long interval;
3307 struct sched_domain *sd;
3308 /* Earliest time when we have to do rebalance again */
3309 unsigned long next_balance = jiffies + 60*HZ;
3310 int update_next_balance = 0;
3311 int need_serialize;
3312
3313 for_each_domain(cpu, sd) {
3314 if (!(sd->flags & SD_LOAD_BALANCE))
3315 continue;
3316
3317 interval = sd->balance_interval;
3318 if (idle != CPU_IDLE)
3319 interval *= sd->busy_factor;
3320
3321 /* scale ms to jiffies */
3322 interval = msecs_to_jiffies(interval);
3323 if (unlikely(!interval))
3324 interval = 1;
3325 if (interval > HZ*NR_CPUS/10)
3326 interval = HZ*NR_CPUS/10;
3327
3328 need_serialize = sd->flags & SD_SERIALIZE;
3329
3330 if (need_serialize) {
3331 if (!spin_trylock(&balancing))
3332 goto out;
3333 }
3334
3335 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3336 if (load_balance(cpu, rq, sd, idle, &balance)) {
3337 /*
3338 * We've pulled tasks over so either we're no
3339 * longer idle, or one of our SMT siblings is
3340 * not idle.
3341 */
3342 idle = CPU_NOT_IDLE;
3343 }
3344 sd->last_balance = jiffies;
3345 }
3346 if (need_serialize)
3347 spin_unlock(&balancing);
3348out:
3349 if (time_after(next_balance, sd->last_balance + interval)) {
3350 next_balance = sd->last_balance + interval;
3351 update_next_balance = 1;
3352 }
3353
3354 /*
3355 * Stop the load balance at this level. There is another
3356 * CPU in our sched group which is doing load balancing more
3357 * actively.
3358 */
3359 if (!balance)
3360 break;
3361 }
3362
3363 /*
3364 * next_balance will be updated only when there is a need.
3365 * When the cpu is attached to null domain for ex, it will not be
3366 * updated.
3367 */
3368 if (likely(update_next_balance))
3369 rq->next_balance = next_balance;
3370}
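
/*
 * Worked example (a hedged sketch, hypothetical numbers) for the
 * interval computation above: a domain with balance_interval == 8 (ms)
 * and busy_factor == 64 is rebalanced every 8 * 64 == 512 ms while
 * busy; msecs_to_jiffies(512) == 512 at HZ == 1000. The interval is
 * forced to at least one jiffy, and the HZ*NR_CPUS/10 clamp merely
 * bounds it on very large machines.
 */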
3371
3372/*
3373 * run_rebalance_domains is triggered when needed from the scheduler tick.
3374 * In CONFIG_NO_HZ case, the idle load balance owner will do the
3375 * rebalancing for all the cpus for whom scheduler ticks are stopped.
3376 */
3377static void run_rebalance_domains(struct softirq_action *h)
3378{
3379 int this_cpu = smp_processor_id();
3380 struct rq *this_rq = cpu_rq(this_cpu);
3381 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3382 CPU_IDLE : CPU_NOT_IDLE;
3383
3384 rebalance_domains(this_cpu, idle);
3385
3386#ifdef CONFIG_NO_HZ
3387 /*
3388 * If this cpu is the owner for idle load balancing, then do the
3389 * balancing on behalf of the other idle cpus whose ticks are
3390 * stopped.
3391 */
3392 if (this_rq->idle_at_tick &&
3393 atomic_read(&nohz.load_balancer) == this_cpu) {
3394 struct rq *rq;
3395 int balance_cpu;
3396
3397 for_each_cpu(balance_cpu, nohz.cpu_mask) {
3398 if (balance_cpu == this_cpu)
3399 continue;
3400
3401 /*
3402 * If this cpu gets work to do, stop the load balancing
3403 * work being done for other cpus. Next load
3404 * balancing owner will pick it up.
3405 */
3406 if (need_resched())
3407 break;
3408
3409 rebalance_domains(balance_cpu, CPU_IDLE);
3410
3411 rq = cpu_rq(balance_cpu);
3412 if (time_after(this_rq->next_balance, rq->next_balance))
3413 this_rq->next_balance = rq->next_balance;
3414 }
3415 }
3416#endif
3417}
3418
3419static inline int on_null_domain(int cpu)
3420{
3421 return !rcu_dereference(cpu_rq(cpu)->sd);
3422}
3423
3424/*
3425 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
3426 *
3427 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
3428 * idle load balancing owner or decide to stop the periodic load balancing,
3429 * if the whole system is idle.
3430 */
3431static inline void trigger_load_balance(struct rq *rq, int cpu)
3432{
3433#ifdef CONFIG_NO_HZ
3434 /*
3435 * If we were in the nohz mode recently and busy at the current
3436 * scheduler tick, then check if we need to nominate new idle
3437 * load balancer.
3438 */
3439 if (rq->in_nohz_recently && !rq->idle_at_tick) {
3440 rq->in_nohz_recently = 0;
3441
3442 if (atomic_read(&nohz.load_balancer) == cpu) {
3443 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3444 atomic_set(&nohz.load_balancer, -1);
3445 }
3446
3447 if (atomic_read(&nohz.load_balancer) == -1) {
3448 int ilb = find_new_ilb(cpu);
3449
3450 if (ilb < nr_cpu_ids)
3451 resched_cpu(ilb);
3452 }
3453 }
3454
3455 /*
3456 * If this cpu is idle and doing idle load balancing for all the
3457 * cpus with ticks stopped, is it time for that to stop?
3458 */
3459 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
3460 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
3461 resched_cpu(cpu);
3462 return;
3463 }
3464
3465 /*
3466 * If this cpu is idle and the idle load balancing is done by
3467 * someone else, then there is no need to raise the SCHED_SOFTIRQ.
3468 */
3469 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
3470 cpumask_test_cpu(cpu, nohz.cpu_mask))
3471 return;
3472#endif
3473 /* Don't need to rebalance while attached to NULL domain */
3474 if (time_after_eq(jiffies, rq->next_balance) &&
3475 likely(!on_null_domain(cpu)))
3476 raise_softirq(SCHED_SOFTIRQ);
3477}
3478
3479static void rq_online_fair(struct rq *rq)
3480{
3481 update_sysctl();
3482}
3483
3484static void rq_offline_fair(struct rq *rq)
3485{
3486 update_sysctl();
3487}
3488
3489#else /* CONFIG_SMP */
3490
3491/*
3492 * on UP we do not need to balance between CPUs:
3493 */
3494static inline void idle_balance(int cpu, struct rq *rq)
3495{
3496}
3497
55e12e5e 3498#endif /* CONFIG_SMP */
e1d1484f 3499
3500/*
3501 * scheduler tick hitting a task of our scheduling class:
3502 */
8f4d37ec 3503static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
3504{
3505 struct cfs_rq *cfs_rq;
3506 struct sched_entity *se = &curr->se;
3507
3508 for_each_sched_entity(se) {
3509 cfs_rq = cfs_rq_of(se);
8f4d37ec 3510 entity_tick(cfs_rq, se, queued);
3511 }
3512}
3513
3514/*
3515 * called on fork with the child task as argument from the parent's context
3516 * - child not yet on the tasklist
3517 * - preemption disabled
bf0f6f24 3518 */
cd29fe6f 3519static void task_fork_fair(struct task_struct *p)
bf0f6f24 3520{
cd29fe6f 3521 struct cfs_rq *cfs_rq = task_cfs_rq(current);
429d43bc 3522 struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
00bf7bfc 3523 int this_cpu = smp_processor_id();
3524 struct rq *rq = this_rq();
3525 unsigned long flags;
3526
05fa785c 3527 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 3528
3529 if (unlikely(task_cpu(p) != this_cpu))
3530 __set_task_cpu(p, this_cpu);
bf0f6f24 3531
7109c442 3532 update_curr(cfs_rq);
cd29fe6f 3533
3534 if (curr)
3535 se->vruntime = curr->vruntime;
aeb73b04 3536 place_entity(cfs_rq, se, 1);
4d78e7b6 3537
cd29fe6f 3538 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 3539 /*
3540 * Upon rescheduling, sched_class::put_prev_task() will place
3541 * 'current' within the tree based on its new key value.
3542 */
4d78e7b6 3543 swap(curr->vruntime, se->vruntime);
aec0a514 3544 resched_task(rq->curr);
4d78e7b6 3545 }
bf0f6f24 3546
3547 se->vruntime -= cfs_rq->min_vruntime;
3548
05fa785c 3549 raw_spin_unlock_irqrestore(&rq->lock, flags);
3550}
3551
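/*
 * Worked example (a hedged sketch, hypothetical numbers) for the
 * vruntime handling above: say cfs_rq->min_vruntime == 1000, the parent
 * (curr) has vruntime 1010 and place_entity() gives the child 1015.
 * With sysctl_sched_child_runs_first set, entity_before(curr, se)
 * holds, so the values are swapped: the child ends up at 1010 and
 * preempts the parent. The final "se->vruntime -= cfs_rq->min_vruntime"
 * stores a relative value (10 here), so the child is placed correctly
 * even if it is first enqueued on another cpu with a different
 * min_vruntime.
 */
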
3552/*
3553 * Priority of the task has changed. Check to see if we preempt
3554 * the current task.
3555 */
3556static void prio_changed_fair(struct rq *rq, struct task_struct *p,
3557 int oldprio, int running)
3558{
3559 /*
3560 * Reschedule if we are currently running on this runqueue and
3561 * our priority decreased, or if we are not currently running on
3562 * this runqueue and our priority is higher than the current task's.
3563 */
3564 if (running) {
3565 if (p->prio > oldprio)
3566 resched_task(rq->curr);
3567 } else
15afe09b 3568 check_preempt_curr(rq, p, 0);
3569}
3570
3571/*
3572 * We switched to the sched_fair class.
3573 */
3574static void switched_to_fair(struct rq *rq, struct task_struct *p,
3575 int running)
3576{
3577 /*
3578 * We were most likely switched from sched_rt, so
3579 * kick off the schedule if running, otherwise just see
3580 * if we can still preempt the current task.
3581 */
3582 if (running)
3583 resched_task(rq->curr);
3584 else
15afe09b 3585 check_preempt_curr(rq, p, 0);
3586}
3587
3588/* Account for a task changing its policy or group.
3589 *
3590 * This routine is mostly called to set cfs_rq->curr field when a task
3591 * migrates between groups/classes.
3592 */
3593static void set_curr_task_fair(struct rq *rq)
3594{
3595 struct sched_entity *se = &rq->curr->se;
3596
3597 for_each_sched_entity(se)
3598 set_next_entity(cfs_rq_of(se), se);
3599}
3600
810b3817 3601#ifdef CONFIG_FAIR_GROUP_SCHED
88ec22d3 3602static void moved_group_fair(struct task_struct *p, int on_rq)
3603{
3604 struct cfs_rq *cfs_rq = task_cfs_rq(p);
3605
3606 update_curr(cfs_rq);
3607 if (!on_rq)
3608 place_entity(cfs_rq, &p->se, 1);
3609}
3610#endif
3611
6d686f45 3612static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
3613{
3614 struct sched_entity *se = &task->se;
3615 unsigned int rr_interval = 0;
3616
3617 /*
3618 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
3619 * idle runqueue:
3620 */
3621 if (rq->cfs.load.weight)
3622 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
3623
3624 return rr_interval;
3625}
3626
3627/*
3628 * All the scheduling class methods:
3629 */
3630static const struct sched_class fair_sched_class = {
3631 .next = &idle_sched_class,
3632 .enqueue_task = enqueue_task_fair,
3633 .dequeue_task = dequeue_task_fair,
3634 .yield_task = yield_task_fair,
3635
2e09bf55 3636 .check_preempt_curr = check_preempt_wakeup,
3637
3638 .pick_next_task = pick_next_task_fair,
3639 .put_prev_task = put_prev_task_fair,
3640
681f3e68 3641#ifdef CONFIG_SMP
3642 .select_task_rq = select_task_rq_fair,
3643
3644 .rq_online = rq_online_fair,
3645 .rq_offline = rq_offline_fair,
3646
3647 .task_waking = task_waking_fair,
681f3e68 3648#endif
bf0f6f24 3649
83b699ed 3650 .set_curr_task = set_curr_task_fair,
bf0f6f24 3651 .task_tick = task_tick_fair,
cd29fe6f 3652 .task_fork = task_fork_fair,
3653
3654 .prio_changed = prio_changed_fair,
3655 .switched_to = switched_to_fair,
810b3817 3656
3657 .get_rr_interval = get_rr_interval_fair,
3658
3659#ifdef CONFIG_FAIR_GROUP_SCHED
3660 .moved_group = moved_group_fair,
3661#endif
3662};
3663
3664#ifdef CONFIG_SCHED_DEBUG
5cef9eca 3665static void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 3666{
3667 struct cfs_rq *cfs_rq;
3668
5973e5b9 3669 rcu_read_lock();
c3b64f1e 3670 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 3671 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 3672 rcu_read_unlock();
3673}
3674#endif