kernel/sched/fair.c
bf0f6f24
IM
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
21805085
PZ
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
bf0f6f24
IM
21 */
22
9745512c 23#include <linux/latencytop.h>
1983a922 24#include <linux/sched.h>
3436ae12 25#include <linux/cpumask.h>
029632fb
PZ
26#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
cbee9f88 29#include <linux/mempolicy.h>
e14808b4 30#include <linux/migrate.h>
cbee9f88 31#include <linux/task_work.h>
029632fb
PZ
32
33#include <trace/events/sched.h>
34
35#include "sched.h"
9745512c 36
bf0f6f24 37/*
21805085 38 * Targeted preemption latency for CPU-bound tasks:
864616ee 39 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 40 *
21805085 41 * NOTE: this latency value is not the same as the concept of
d274a4ce
IM
42 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
bf0f6f24 45 *
d274a4ce
IM
46 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
bf0f6f24 48 */
21406928
MG
49unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 51
1983a922
CE
52/*
53 * The initial- and re-scaling of tunables is configurable
54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
2bd8e6d4 64/*
b2be5e96 65 * Minimal preemption granularity for CPU-bound tasks:
864616ee 66 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 67 */
0bf377bb
IM
68unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
21805085
PZ
70
71/*
b2be5e96
PZ
72 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
0bf377bb 74static unsigned int sched_nr_latency = 8;
b2be5e96
PZ
75
76/*
2bba22c5 77 * After fork, child runs first. If set to 0 (default) then
b2be5e96 78 * parent will (try to) run first.
21805085 79 */
2bba22c5 80unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 81
bf0f6f24
IM
82/*
83 * SCHED_OTHER wake-up granularity.
172e082a 84 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24
IM
85 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
172e082a 90unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
0bcdcf28 91unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 92
da84d961
IM
93const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
a7a4f8a7
PT
95/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
ec12cb7f
PT
102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114#endif
115
8527632d
PG
116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
029632fb
PZ
134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
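/*
 * Worked example (illustrative, assuming the default SCHED_TUNABLESCALING_LOG
 * policy and the default normalized values above): on an 8-CPU system,
 * factor = 1 + ilog2(8) = 4, so update_sysctl() below sets
 * sysctl_sched_latency to 4 * 6ms = 24ms, sysctl_sched_min_granularity to
 * 4 * 0.75ms = 3ms and sysctl_sched_wakeup_granularity to 4 * 1ms = 4ms.
 */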
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
181#if BITS_PER_LONG == 32
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32
188
189/*
190 * Shift right and round:
191 */
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193
194/*
195 * delta *= weight / lw
196 */
197static unsigned long
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{
201 u64 tmp;
202
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212
213 if (!lw->inv_weight) {
214 unsigned long w = scale_load_down(lw->weight);
215
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 lw->inv_weight = 1;
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 }
223
224 /*
225 * Check whether we'd overflow the 64-bit multiplication:
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234}
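/*
 * Worked example (illustrative, assuming SCHED_LOAD_RESOLUTION == 0 so that
 * scale_load_down() is a no-op): with delta_exec = 1000000 (ns),
 * weight = NICE_0_LOAD (1024) and lw->weight = 2048, inv_weight becomes
 * 2^32 / 2048 and the result is roughly 1000000 * 1024 / 2048 = 500000,
 * i.e. delta is scaled by weight/lw as stated in the comment above.
 */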
235
236
237const struct sched_class fair_sched_class;
a4c2f00f 238
bf0f6f24
IM
239/**************************************************************
240 * CFS operations on generic schedulable entities:
241 */
242
62160e3f 243#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 244
62160e3f 245/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
246static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247{
62160e3f 248 return cfs_rq->rq;
bf0f6f24
IM
249}
250
62160e3f
IM
251/* An entity is a task if it doesn't "own" a runqueue */
252#define entity_is_task(se) (!se->my_q)
bf0f6f24 253
8f48894f
PZ
254static inline struct task_struct *task_of(struct sched_entity *se)
255{
256#ifdef CONFIG_SCHED_DEBUG
257 WARN_ON_ONCE(!entity_is_task(se));
258#endif
259 return container_of(se, struct task_struct, se);
260}
261
b758149c
PZ
262/* Walk up the scheduling entity hierarchy */
263#define for_each_sched_entity(se) \
264 for (; se; se = se->parent)
265
266static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267{
268 return p->se.cfs_rq;
269}
270
271/* runqueue on which this entity is (to be) queued */
272static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273{
274 return se->cfs_rq;
275}
276
277/* runqueue "owned" by this group */
278static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279{
280 return grp->my_q;
281}
282
aff3e498
PT
283static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 int force_update);
9ee474f5 285
3d4b47b4
PZ
286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
67e86250
PT
289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
3d4b47b4 301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
67e86250 302 }
3d4b47b4
PZ
303
304 cfs_rq->on_list = 1;
9ee474f5 305 /* We should have no load, but we need to update last_decay. */
aff3e498 306 update_cfs_rq_blocked_load(cfs_rq, 0);
3d4b47b4
PZ
307 }
308}
309
310static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311{
312 if (cfs_rq->on_list) {
313 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 cfs_rq->on_list = 0;
315 }
316}
317
b758149c
PZ
318/* Iterate through all leaf cfs_rqs on a runqueue */
319#define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321
322/* Do the two (enqueued) entities belong to the same group? */
323static inline int
324is_same_group(struct sched_entity *se, struct sched_entity *pse)
325{
326 if (se->cfs_rq == pse->cfs_rq)
327 return 1;
328
329 return 0;
330}
331
332static inline struct sched_entity *parent_entity(struct sched_entity *se)
333{
334 return se->parent;
335}
336
464b7527
PZ
337/* return depth at which a sched entity is present in the hierarchy */
338static inline int depth_se(struct sched_entity *se)
339{
340 int depth = 0;
341
342 for_each_sched_entity(se)
343 depth++;
344
345 return depth;
346}
347
348static void
349find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350{
351 int se_depth, pse_depth;
352
353 /*
354 * A preemption test can be made between sibling entities that are in the
355 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
356 * both tasks until we find ancestors that are siblings with a common
357 * parent.
358 */
359
360 /* First walk up until both entities are at same depth */
361 se_depth = depth_se(*se);
362 pse_depth = depth_se(*pse);
363
364 while (se_depth > pse_depth) {
365 se_depth--;
366 *se = parent_entity(*se);
367 }
368
369 while (pse_depth > se_depth) {
370 pse_depth--;
371 *pse = parent_entity(*pse);
372 }
373
374 while (!is_same_group(*se, *pse)) {
375 *se = parent_entity(*se);
376 *pse = parent_entity(*pse);
377 }
378}
379
8f48894f
PZ
380#else /* !CONFIG_FAIR_GROUP_SCHED */
381
382static inline struct task_struct *task_of(struct sched_entity *se)
383{
384 return container_of(se, struct task_struct, se);
385}
bf0f6f24 386
62160e3f
IM
387static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388{
389 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
390}
391
392#define entity_is_task(se) 1
393
b758149c
PZ
394#define for_each_sched_entity(se) \
395 for (; se; se = NULL)
bf0f6f24 396
b758149c 397static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 398{
b758149c 399 return &task_rq(p)->cfs;
bf0f6f24
IM
400}
401
b758149c
PZ
402static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403{
404 struct task_struct *p = task_of(se);
405 struct rq *rq = task_rq(p);
406
407 return &rq->cfs;
408}
409
410/* runqueue "owned" by this group */
411static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412{
413 return NULL;
414}
415
3d4b47b4
PZ
416static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417{
418}
419
420static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421{
422}
423
b758149c
PZ
424#define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426
427static inline int
428is_same_group(struct sched_entity *se, struct sched_entity *pse)
429{
430 return 1;
431}
432
433static inline struct sched_entity *parent_entity(struct sched_entity *se)
434{
435 return NULL;
436}
437
464b7527
PZ
438static inline void
439find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440{
441}
442
b758149c
PZ
443#endif /* CONFIG_FAIR_GROUP_SCHED */
444
6c16a6dc
PZ
445static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
bf0f6f24
IM
447
448/**************************************************************
449 * Scheduling class tree data structure manipulation methods:
450 */
451
1bf08230 452static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
02e0431a 453{
1bf08230 454 s64 delta = (s64)(vruntime - max_vruntime);
368059a9 455 if (delta > 0)
1bf08230 456 max_vruntime = vruntime;
02e0431a 457
1bf08230 458 return max_vruntime;
02e0431a
PZ
459}
460
0702e3eb 461static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
462{
463 s64 delta = (s64)(vruntime - min_vruntime);
464 if (delta < 0)
465 min_vruntime = vruntime;
466
467 return min_vruntime;
468}
469
54fdc581
FC
470static inline int entity_before(struct sched_entity *a,
471 struct sched_entity *b)
472{
473 return (s64)(a->vruntime - b->vruntime) < 0;
474}
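/*
 * Illustrative note on the signed comparison above: doing the compare on the
 * s64 difference keeps the ordering correct even if vruntime wraps the u64
 * range. For example, with a->vruntime = ULLONG_MAX - 10 and b->vruntime = 5
 * (already wrapped), (s64)(a->vruntime - b->vruntime) == -16, so
 * entity_before(a, b) is true, which is the ordering we want.
 */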
475
1af5f730
PZ
476static void update_min_vruntime(struct cfs_rq *cfs_rq)
477{
478 u64 vruntime = cfs_rq->min_vruntime;
479
480 if (cfs_rq->curr)
481 vruntime = cfs_rq->curr->vruntime;
482
483 if (cfs_rq->rb_leftmost) {
484 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 struct sched_entity,
486 run_node);
487
e17036da 488 if (!cfs_rq->curr)
1af5f730
PZ
489 vruntime = se->vruntime;
490 else
491 vruntime = min_vruntime(vruntime, se->vruntime);
492 }
493
1bf08230 494 /* ensure we never gain time by being placed backwards. */
1af5f730 495 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
496#ifndef CONFIG_64BIT
497 smp_wmb();
498 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499#endif
1af5f730
PZ
500}
501
bf0f6f24
IM
502/*
503 * Enqueue an entity into the rb-tree:
504 */
0702e3eb 505static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
506{
507 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 struct rb_node *parent = NULL;
509 struct sched_entity *entry;
bf0f6f24
IM
510 int leftmost = 1;
511
512 /*
513 * Find the right place in the rbtree:
514 */
515 while (*link) {
516 parent = *link;
517 entry = rb_entry(parent, struct sched_entity, run_node);
518 /*
519 * We don't care about collisions. Nodes with
520 * the same key stay together.
521 */
2bd2d6f2 522 if (entity_before(se, entry)) {
bf0f6f24
IM
523 link = &parent->rb_left;
524 } else {
525 link = &parent->rb_right;
526 leftmost = 0;
527 }
528 }
529
530 /*
531 * Maintain a cache of leftmost tree entries (it is frequently
532 * used):
533 */
1af5f730 534 if (leftmost)
57cb499d 535 cfs_rq->rb_leftmost = &se->run_node;
bf0f6f24
IM
536
537 rb_link_node(&se->run_node, parent, link);
538 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
539}
540
0702e3eb 541static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 542{
3fe69747
PZ
543 if (cfs_rq->rb_leftmost == &se->run_node) {
544 struct rb_node *next_node;
3fe69747
PZ
545
546 next_node = rb_next(&se->run_node);
547 cfs_rq->rb_leftmost = next_node;
3fe69747 548 }
e9acbff6 549
bf0f6f24 550 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
551}
552
029632fb 553struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 554{
f4b6755f
PZ
555 struct rb_node *left = cfs_rq->rb_leftmost;
556
557 if (!left)
558 return NULL;
559
560 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
561}
562
ac53db59
RR
563static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564{
565 struct rb_node *next = rb_next(&se->run_node);
566
567 if (!next)
568 return NULL;
569
570 return rb_entry(next, struct sched_entity, run_node);
571}
572
573#ifdef CONFIG_SCHED_DEBUG
029632fb 574struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 575{
7eee3e67 576 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
aeb73b04 577
70eee74b
BS
578 if (!last)
579 return NULL;
7eee3e67
IM
580
581 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
582}
583
bf0f6f24
IM
584/**************************************************************
585 * Scheduling class statistics methods:
586 */
587
acb4a848 588int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 589 void __user *buffer, size_t *lenp,
b2be5e96
PZ
590 loff_t *ppos)
591{
8d65af78 592 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
acb4a848 593 int factor = get_update_sysctl_factor();
b2be5e96
PZ
594
595 if (ret || !write)
596 return ret;
597
598 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 sysctl_sched_min_granularity);
600
acb4a848
CE
601#define WRT_SYSCTL(name) \
602 (normalized_sysctl_##name = sysctl_##name / (factor))
603 WRT_SYSCTL(sched_min_granularity);
604 WRT_SYSCTL(sched_latency);
605 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
606#undef WRT_SYSCTL
607
b2be5e96
PZ
608 return 0;
609}
610#endif
647e7cac 611
a7be37ac 612/*
f9c0b095 613 * delta /= w
a7be37ac
PZ
614 */
615static inline unsigned long
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{
f9c0b095
PZ
618 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
620
621 return delta;
622}
623
647e7cac
IM
624/*
625 * The idea is to set a period in which each task runs once.
626 *
532b1858 627 * When there are too many tasks (sched_nr_latency) we have to stretch
647e7cac
IM
628 * this period because otherwise the slices get too small.
629 *
630 * p = (nr <= nl) ? l : l*nr/nl
631 */
4d78e7b6
PZ
632static u64 __sched_period(unsigned long nr_running)
633{
634 u64 period = sysctl_sched_latency;
b2be5e96 635 unsigned long nr_latency = sched_nr_latency;
4d78e7b6
PZ
636
637 if (unlikely(nr_running > nr_latency)) {
4bf0b771 638 period = sysctl_sched_min_granularity;
4d78e7b6 639 period *= nr_running;
4d78e7b6
PZ
640 }
641
642 return period;
643}
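/*
 * Worked example (illustrative, using the unscaled defaults of 6ms latency,
 * 0.75ms minimum granularity and hence sched_nr_latency == 8): with 4 running
 * tasks the period stays at 6ms; with 16 running tasks it is stretched to
 * 16 * 0.75ms = 12ms so that no slice drops below the minimum granularity.
 */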
644
647e7cac
IM
645/*
646 * We calculate the wall-time slice from the period by taking a part
647 * proportional to the weight.
648 *
f9c0b095 649 * s = p*P[w/rw]
647e7cac 650 */
6d0f0ebd 651static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 652{
0a582440 653 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 654
0a582440 655 for_each_sched_entity(se) {
6272d68c 656 struct load_weight *load;
3104bf03 657 struct load_weight lw;
6272d68c
LM
658
659 cfs_rq = cfs_rq_of(se);
660 load = &cfs_rq->load;
f9c0b095 661
0a582440 662 if (unlikely(!se->on_rq)) {
3104bf03 663 lw = cfs_rq->load;
0a582440
MG
664
665 update_load_add(&lw, se->load.weight);
666 load = &lw;
667 }
668 slice = calc_delta_mine(slice, se->load.weight, load);
669 }
670 return slice;
bf0f6f24
IM
671}
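/*
 * Worked example (illustrative): with a 6ms period and two queued entities of
 * weight 1024 and 2048 on the same cfs_rq (rw = 3072), sched_slice() gives the
 * lighter entity about 6ms * 1024/3072 = 2ms and the heavier one about 4ms,
 * i.e. s = p*P[w/rw] as described above.
 */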
672
647e7cac 673/*
660cc00f 674 * We calculate the vruntime slice of a to-be-inserted task.
647e7cac 675 *
f9c0b095 676 * vs = s/w
647e7cac 677 */
f9c0b095 678static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 679{
f9c0b095 680 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
681}
682
a75cdaa9
AS
683#ifdef CONFIG_SMP
684static inline void __update_task_entity_contrib(struct sched_entity *se);
685
686/* Give a new task initial runnable-average values so its load appears heavy while it is young */
687void init_task_runnable_average(struct task_struct *p)
688{
689 u32 slice;
690
691 p->se.avg.decay_count = 0;
692 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
693 p->se.avg.runnable_avg_sum = slice;
694 p->se.avg.runnable_avg_period = slice;
695 __update_task_entity_contrib(&p->se);
696}
697#else
698void init_task_runnable_average(struct task_struct *p)
699{
700}
701#endif
702
bf0f6f24
IM
703/*
704 * Update the current task's runtime statistics. Skip current tasks that
705 * are not in our scheduling class.
706 */
707static inline void
8ebc91d9
IM
708__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
709 unsigned long delta_exec)
bf0f6f24 710{
bbdba7c0 711 unsigned long delta_exec_weighted;
bf0f6f24 712
41acab88
LDM
713 schedstat_set(curr->statistics.exec_max,
714 max((u64)delta_exec, curr->statistics.exec_max));
bf0f6f24
IM
715
716 curr->sum_exec_runtime += delta_exec;
7a62eabc 717 schedstat_add(cfs_rq, exec_clock, delta_exec);
a7be37ac 718 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
88ec22d3 719
e9acbff6 720 curr->vruntime += delta_exec_weighted;
1af5f730 721 update_min_vruntime(cfs_rq);
bf0f6f24
IM
722}
723
b7cc0896 724static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 725{
429d43bc 726 struct sched_entity *curr = cfs_rq->curr;
78becc27 727 u64 now = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
728 unsigned long delta_exec;
729
730 if (unlikely(!curr))
731 return;
732
733 /*
734 * Get the amount of time the current task was running
735 * since the last time we changed load (this cannot
736 * overflow on 32 bits):
737 */
8ebc91d9 738 delta_exec = (unsigned long)(now - curr->exec_start);
34f28ecd
PZ
739 if (!delta_exec)
740 return;
bf0f6f24 741
8ebc91d9
IM
742 __update_curr(cfs_rq, curr, delta_exec);
743 curr->exec_start = now;
d842de87
SV
744
745 if (entity_is_task(curr)) {
746 struct task_struct *curtask = task_of(curr);
747
f977bb49 748 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d842de87 749 cpuacct_charge(curtask, delta_exec);
f06febc9 750 account_group_exec_runtime(curtask, delta_exec);
d842de87 751 }
ec12cb7f
PT
752
753 account_cfs_rq_runtime(cfs_rq, delta_exec);
bf0f6f24
IM
754}
755
756static inline void
5870db5b 757update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 758{
78becc27 759 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
bf0f6f24
IM
760}
761
bf0f6f24
IM
762/*
763 * Task is being enqueued - update stats:
764 */
d2417e5a 765static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 766{
bf0f6f24
IM
767 /*
768 * Are we enqueueing a waiting task? (for current tasks
769 * a dequeue/enqueue event is a NOP)
770 */
429d43bc 771 if (se != cfs_rq->curr)
5870db5b 772 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
773}
774
bf0f6f24 775static void
9ef0a961 776update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 777{
41acab88 778 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
78becc27 779 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
41acab88
LDM
780 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
781 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
78becc27 782 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
783#ifdef CONFIG_SCHEDSTATS
784 if (entity_is_task(se)) {
785 trace_sched_stat_wait(task_of(se),
78becc27 786 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
787 }
788#endif
41acab88 789 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
790}
791
792static inline void
19b6a2e3 793update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 794{
bf0f6f24
IM
795 /*
796 * Mark the end of the wait period if dequeueing a
797 * waiting task:
798 */
429d43bc 799 if (se != cfs_rq->curr)
9ef0a961 800 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
801}
802
803/*
804 * We are picking a new current task - update its stats:
805 */
806static inline void
79303e9e 807update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
808{
809 /*
810 * We are starting a new run period:
811 */
78becc27 812 se->exec_start = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
813}
814
bf0f6f24
IM
815/**************************************************
816 * Scheduling class queueing methods:
817 */
818
cbee9f88
PZ
819#ifdef CONFIG_NUMA_BALANCING
820/*
598f0ec0
MG
821 * Approximate time to scan a full NUMA task in ms. The task scan period is
822 * calculated based on the task's virtual memory size and
823 * numa_balancing_scan_size.
cbee9f88 824 */
598f0ec0
MG
825unsigned int sysctl_numa_balancing_scan_period_min = 1000;
826unsigned int sysctl_numa_balancing_scan_period_max = 60000;
827unsigned int sysctl_numa_balancing_scan_period_reset = 60000;
6e5fb223
PZ
828
829/* Portion of address space to scan in MB */
830unsigned int sysctl_numa_balancing_scan_size = 256;
cbee9f88 831
4b96a29b
PZ
832/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
833unsigned int sysctl_numa_balancing_scan_delay = 1000;
834
598f0ec0
MG
835static unsigned int task_nr_scan_windows(struct task_struct *p)
836{
837 unsigned long rss = 0;
838 unsigned long nr_scan_pages;
839
840 /*
841 * Calculations based on RSS as non-present and empty pages are skipped
842 * by the PTE scanner and NUMA hinting faults should be trapped based
843 * on resident pages
844 */
845 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
846 rss = get_mm_rss(p->mm);
847 if (!rss)
848 rss = nr_scan_pages;
849
850 rss = round_up(rss, nr_scan_pages);
851 return rss / nr_scan_pages;
852}
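/*
 * Worked example (illustrative, assuming 4KiB pages and the default
 * sysctl_numa_balancing_scan_size of 256MB): nr_scan_pages = 256 << 8 = 65536
 * pages, so a task with a 1GB RSS (262144 pages) needs
 * 262144 / 65536 = 4 scan windows to cover its resident memory.
 */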
853
854/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
855#define MAX_SCAN_WINDOW 2560
856
857static unsigned int task_scan_min(struct task_struct *p)
858{
859 unsigned int scan, floor;
860 unsigned int windows = 1;
861
862 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
863 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
864 floor = 1000 / windows;
865
866 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
867 return max_t(unsigned int, floor, scan);
868}
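/*
 * Worked example (illustrative, with the defaults above): a scan size of 256MB
 * is below MAX_SCAN_WINDOW (2560), so windows = 2560 / 256 = 10 and
 * floor = 1000 / 10 = 100ms. For the 1GB task above (4 scan windows),
 * scan = 1000 / 4 = 250ms, so the minimum scan period is max(100, 250) = 250ms.
 */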
869
870static unsigned int task_scan_max(struct task_struct *p)
871{
872 unsigned int smin = task_scan_min(p);
873 unsigned int smax;
874
875 /* Watch for min being lower than max due to floor calculations */
876 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
877 return max(smin, smax);
878}
879
3a7053b3
MG
880/*
881 * Once a preferred node is selected the scheduler balancer will prefer moving
882 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
883 * scans. This will give the process the chance to accumulate more faults on
884 * the preferred node but still allow the scheduler to move the task again if
885 * the node's CPUs are overloaded.
886 */
6fe6b2d6 887unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
3a7053b3 888
ac8e895b
MG
889static inline int task_faults_idx(int nid, int priv)
890{
891 return 2 * nid + priv;
892}
893
894static inline unsigned long task_faults(struct task_struct *p, int nid)
895{
896 if (!p->numa_faults)
897 return 0;
898
899 return p->numa_faults[task_faults_idx(nid, 0)] +
900 p->numa_faults[task_faults_idx(nid, 1)];
901}
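/*
 * Illustrative layout note: numa_faults[] holds two counters per node, so for
 * node 3 the shared counter sits at task_faults_idx(3, 0) == 6 and the private
 * counter at task_faults_idx(3, 1) == 7; task_faults() above simply sums the
 * pair.
 */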
902
e6628d5b
MG
903static unsigned long weighted_cpuload(const int cpu);
904
905
906static int
907find_idlest_cpu_node(int this_cpu, int nid)
908{
909 unsigned long load, min_load = ULONG_MAX;
910 int i, idlest_cpu = this_cpu;
911
912 BUG_ON(cpu_to_node(this_cpu) == nid);
913
914 rcu_read_lock();
915 for_each_cpu(i, cpumask_of_node(nid)) {
916 load = weighted_cpuload(i);
917
918 if (load < min_load) {
919 min_load = load;
920 idlest_cpu = i;
921 }
922 }
923 rcu_read_unlock();
924
925 return idlest_cpu;
926}
927
cbee9f88
PZ
928static void task_numa_placement(struct task_struct *p)
929{
688b7585
MG
930 int seq, nid, max_nid = -1;
931 unsigned long max_faults = 0;
cbee9f88 932
2832bc19 933 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
934 if (p->numa_scan_seq == seq)
935 return;
936 p->numa_scan_seq = seq;
3a7053b3 937 p->numa_migrate_seq++;
598f0ec0 938 p->numa_scan_period_max = task_scan_max(p);
cbee9f88 939
688b7585
MG
940 /* Find the node with the highest number of faults */
941 for_each_online_node(nid) {
745d6147 942 unsigned long faults;
ac8e895b 943 int priv, i;
745d6147 944
ac8e895b
MG
945 for (priv = 0; priv < 2; priv++) {
946 i = task_faults_idx(nid, priv);
745d6147 947
ac8e895b
MG
948 /* Decay existing window, copy faults since last scan */
949 p->numa_faults[i] >>= 1;
950 p->numa_faults[i] += p->numa_faults_buffer[i];
951 p->numa_faults_buffer[i] = 0;
952 }
953
954 /* Find maximum private faults */
955 faults = p->numa_faults[task_faults_idx(nid, 1)];
688b7585
MG
956 if (faults > max_faults) {
957 max_faults = faults;
958 max_nid = nid;
959 }
960 }
961
e6628d5b
MG
962 /*
963 * Record the preferred node as the node with the most faults,
964 * requeue the task to be running on the idlest CPU on the
965 * preferred node and reset the scanning rate to recheck
966 * the working set placement.
967 */
3a7053b3 968 if (max_faults && max_nid != p->numa_preferred_nid) {
e6628d5b
MG
969 int preferred_cpu;
970
971 /*
972 * If the task is not on the preferred node then find the most
973 * idle CPU to migrate to.
974 */
975 preferred_cpu = task_cpu(p);
976 if (cpu_to_node(preferred_cpu) != max_nid) {
977 preferred_cpu = find_idlest_cpu_node(preferred_cpu,
978 max_nid);
979 }
980
981 /* Update the preferred nid and migrate task if possible */
688b7585 982 p->numa_preferred_nid = max_nid;
6fe6b2d6 983 p->numa_migrate_seq = 1;
e6628d5b 984 migrate_task_to(p, preferred_cpu);
3a7053b3 985 }
cbee9f88
PZ
986}
987
988/*
989 * Got a PROT_NONE fault for a page on @node.
990 */
b795854b 991void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
cbee9f88
PZ
992{
993 struct task_struct *p = current;
ac8e895b 994 int priv;
cbee9f88 995
10e84b97 996 if (!numabalancing_enabled)
1a687c2e
MG
997 return;
998
9ff1d9ff
MG
999 /* for example, ksmd faulting in a user's mm */
1000 if (!p->mm)
1001 return;
1002
b795854b
MG
1003 /*
1004 * First accesses are treated as private, otherwise consider accesses
1005 * to be private if the accessing pid has not changed
1006 */
1007 if (!nidpid_pid_unset(last_nidpid))
1008 priv = ((p->pid & LAST__PID_MASK) == nidpid_to_pid(last_nidpid));
1009 else
1010 priv = 1;
ac8e895b 1011
f809ca9a
MG
1012 /* Allocate buffer to track faults on a per-node basis */
1013 if (unlikely(!p->numa_faults)) {
ac8e895b 1014 int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
f809ca9a 1015
745d6147
MG
1016 /* numa_faults and numa_faults_buffer share the allocation */
1017 p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
f809ca9a
MG
1018 if (!p->numa_faults)
1019 return;
745d6147
MG
1020
1021 BUG_ON(p->numa_faults_buffer);
ac8e895b 1022 p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
f809ca9a 1023 }
cbee9f88 1024
fb003b80 1025 /*
b8593bfd
MG
1026 * If pages are properly placed (did not migrate) then scan slower.
1027 * This is reset periodically in case of phase changes
fb003b80 1028 */
598f0ec0
MG
1029 if (!migrated) {
1030 /* Initialise if necessary */
1031 if (!p->numa_scan_period_max)
1032 p->numa_scan_period_max = task_scan_max(p);
1033
1034 p->numa_scan_period = min(p->numa_scan_period_max,
1035 p->numa_scan_period + 10);
1036 }
fb003b80 1037
cbee9f88 1038 task_numa_placement(p);
f809ca9a 1039
ac8e895b 1040 p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
cbee9f88
PZ
1041}
1042
6e5fb223
PZ
1043static void reset_ptenuma_scan(struct task_struct *p)
1044{
1045 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1046 p->mm->numa_scan_offset = 0;
1047}
1048
cbee9f88
PZ
1049/*
1050 * The expensive part of numa migration is done from task_work context.
1051 * Triggered from task_tick_numa().
1052 */
1053void task_numa_work(struct callback_head *work)
1054{
1055 unsigned long migrate, next_scan, now = jiffies;
1056 struct task_struct *p = current;
1057 struct mm_struct *mm = p->mm;
6e5fb223 1058 struct vm_area_struct *vma;
9f40604c 1059 unsigned long start, end;
598f0ec0 1060 unsigned long nr_pte_updates = 0;
9f40604c 1061 long pages;
cbee9f88
PZ
1062
1063 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1064
1065 work->next = work; /* protect against double add */
1066 /*
1067 * Who cares about NUMA placement when they're dying.
1068 *
1069 * NOTE: make sure not to dereference p->mm before this check,
1070 * exit_task_work() happens _after_ exit_mm() so we could be called
1071 * without p->mm even though we still had it when we enqueued this
1072 * work.
1073 */
1074 if (p->flags & PF_EXITING)
1075 return;
1076
7e8d16b6
MG
1077 if (!mm->numa_next_reset || !mm->numa_next_scan) {
1078 mm->numa_next_scan = now +
1079 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1080 mm->numa_next_reset = now +
1081 msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
1082 }
1083
b8593bfd
MG
1084 /*
1085 * Reset the scan period if enough time has gone by. Objective is that
1086 * scanning will be reduced if pages are properly placed. As tasks
1087 * can enter different phases this needs to be re-examined. Lacking
1088 * proper tracking of reference behaviour, this blunt hammer is used.
1089 */
1090 migrate = mm->numa_next_reset;
1091 if (time_after(now, migrate)) {
598f0ec0 1092 p->numa_scan_period = task_scan_min(p);
b8593bfd
MG
1093 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
1094 xchg(&mm->numa_next_reset, next_scan);
1095 }
1096
cbee9f88
PZ
1097 /*
1098 * Enforce the maximal scan/migration frequency.
1099 */
1100 migrate = mm->numa_next_scan;
1101 if (time_before(now, migrate))
1102 return;
1103
598f0ec0
MG
1104 if (p->numa_scan_period == 0) {
1105 p->numa_scan_period_max = task_scan_max(p);
1106 p->numa_scan_period = task_scan_min(p);
1107 }
cbee9f88 1108
fb003b80 1109 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
1110 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1111 return;
1112
19a78d11
PZ
1113 /*
1114 * Delay this task enough that another task of this mm will likely win
1115 * the next time around.
1116 */
1117 p->node_stamp += 2 * TICK_NSEC;
1118
9f40604c
MG
1119 start = mm->numa_scan_offset;
1120 pages = sysctl_numa_balancing_scan_size;
1121 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1122 if (!pages)
1123 return;
cbee9f88 1124
6e5fb223 1125 down_read(&mm->mmap_sem);
9f40604c 1126 vma = find_vma(mm, start);
6e5fb223
PZ
1127 if (!vma) {
1128 reset_ptenuma_scan(p);
9f40604c 1129 start = 0;
6e5fb223
PZ
1130 vma = mm->mmap;
1131 }
9f40604c 1132 for (; vma; vma = vma->vm_next) {
fc314724 1133 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
6e5fb223
PZ
1134 continue;
1135
9f40604c
MG
1136 do {
1137 start = max(start, vma->vm_start);
1138 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1139 end = min(end, vma->vm_end);
598f0ec0
MG
1140 nr_pte_updates += change_prot_numa(vma, start, end);
1141
1142 /*
1143 * Scan sysctl_numa_balancing_scan_size but ensure that
1144 * at least one PTE is updated so that unused virtual
1145 * address space is quickly skipped.
1146 */
1147 if (nr_pte_updates)
1148 pages -= (end - start) >> PAGE_SHIFT;
6e5fb223 1149
9f40604c
MG
1150 start = end;
1151 if (pages <= 0)
1152 goto out;
1153 } while (end != vma->vm_end);
cbee9f88 1154 }
6e5fb223 1155
9f40604c 1156out:
f307cd1a
MG
1157 /*
1158 * If the whole process was scanned without updates then no NUMA
1159 * hinting faults are being recorded and scan rate should be lower.
1160 */
1161 if (mm->numa_scan_offset == 0 && !nr_pte_updates) {
1162 p->numa_scan_period = min(p->numa_scan_period_max,
1163 p->numa_scan_period << 1);
1164
1165 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1166 mm->numa_next_scan = next_scan;
1167 }
1168
6e5fb223 1169 /*
c69307d5
PZ
1170 * It is possible to reach the end of the VMA list but the last few
1171 * VMAs are not guaranteed to be vma_migratable. If they are not, we
1172 * would find the !migratable VMA on the next scan but not reset the
1173 * scanner to the start so check it now.
6e5fb223
PZ
1174 */
1175 if (vma)
9f40604c 1176 mm->numa_scan_offset = start;
6e5fb223
PZ
1177 else
1178 reset_ptenuma_scan(p);
1179 up_read(&mm->mmap_sem);
cbee9f88
PZ
1180}
1181
1182/*
1183 * Drive the periodic memory faults..
1184 */
1185void task_tick_numa(struct rq *rq, struct task_struct *curr)
1186{
1187 struct callback_head *work = &curr->numa_work;
1188 u64 period, now;
1189
1190 /*
1191 * We don't care about NUMA placement if we don't have memory.
1192 */
1193 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1194 return;
1195
1196 /*
1197 * Using runtime rather than walltime has the dual advantage that
1198 * we (mostly) drive the selection from busy threads and that the
1199 * task needs to have done some actual work before we bother with
1200 * NUMA placement.
1201 */
1202 now = curr->se.sum_exec_runtime;
1203 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1204
1205 if (now - curr->node_stamp > period) {
4b96a29b 1206 if (!curr->node_stamp)
598f0ec0 1207 curr->numa_scan_period = task_scan_min(curr);
19a78d11 1208 curr->node_stamp += period;
cbee9f88
PZ
1209
1210 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1211 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1212 task_work_add(curr, work, true);
1213 }
1214 }
1215}
1216#else
1217static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1218{
1219}
1220#endif /* CONFIG_NUMA_BALANCING */
1221
30cfdcfc
DA
1222static void
1223account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1224{
1225 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 1226 if (!parent_entity(se))
029632fb 1227 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7
PZ
1228#ifdef CONFIG_SMP
1229 if (entity_is_task(se))
eb95308e 1230 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
367456c7 1231#endif
30cfdcfc 1232 cfs_rq->nr_running++;
30cfdcfc
DA
1233}
1234
1235static void
1236account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1237{
1238 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 1239 if (!parent_entity(se))
029632fb 1240 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 1241 if (entity_is_task(se))
b87f1724 1242 list_del_init(&se->group_node);
30cfdcfc 1243 cfs_rq->nr_running--;
30cfdcfc
DA
1244}
1245
3ff6dcac
YZ
1246#ifdef CONFIG_FAIR_GROUP_SCHED
1247# ifdef CONFIG_SMP
cf5f0acf
PZ
1248static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1249{
1250 long tg_weight;
1251
1252 /*
1253 * Use this CPU's actual weight instead of the last load_contribution
1254 * to gain a more accurate current total weight. See
1255 * update_cfs_rq_load_contribution().
1256 */
bf5b986e 1257 tg_weight = atomic_long_read(&tg->load_avg);
82958366 1258 tg_weight -= cfs_rq->tg_load_contrib;
cf5f0acf
PZ
1259 tg_weight += cfs_rq->load.weight;
1260
1261 return tg_weight;
1262}
1263
6d5ab293 1264static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac 1265{
cf5f0acf 1266 long tg_weight, load, shares;
3ff6dcac 1267
cf5f0acf 1268 tg_weight = calc_tg_weight(tg, cfs_rq);
6d5ab293 1269 load = cfs_rq->load.weight;
3ff6dcac 1270
3ff6dcac 1271 shares = (tg->shares * load);
cf5f0acf
PZ
1272 if (tg_weight)
1273 shares /= tg_weight;
3ff6dcac
YZ
1274
1275 if (shares < MIN_SHARES)
1276 shares = MIN_SHARES;
1277 if (shares > tg->shares)
1278 shares = tg->shares;
1279
1280 return shares;
1281}
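/*
 * Worked example (illustrative): if tg->shares is 1024 and this cfs_rq
 * contributes 512 out of a total tg_weight of 1024, the group entity is
 * reweighted to 1024 * 512 / 1024 = 512, i.e. this CPU receives the group's
 * shares in proportion to its share of the group's load, clamped to
 * [MIN_SHARES, tg->shares].
 */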
3ff6dcac 1282# else /* CONFIG_SMP */
6d5ab293 1283static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
1284{
1285 return tg->shares;
1286}
3ff6dcac 1287# endif /* CONFIG_SMP */
2069dd75
PZ
1288static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1289 unsigned long weight)
1290{
19e5eebb
PT
1291 if (se->on_rq) {
1292 /* commit outstanding execution time */
1293 if (cfs_rq->curr == se)
1294 update_curr(cfs_rq);
2069dd75 1295 account_entity_dequeue(cfs_rq, se);
19e5eebb 1296 }
2069dd75
PZ
1297
1298 update_load_set(&se->load, weight);
1299
1300 if (se->on_rq)
1301 account_entity_enqueue(cfs_rq, se);
1302}
1303
82958366
PT
1304static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1305
6d5ab293 1306static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1307{
1308 struct task_group *tg;
1309 struct sched_entity *se;
3ff6dcac 1310 long shares;
2069dd75 1311
2069dd75
PZ
1312 tg = cfs_rq->tg;
1313 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 1314 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 1315 return;
3ff6dcac
YZ
1316#ifndef CONFIG_SMP
1317 if (likely(se->load.weight == tg->shares))
1318 return;
1319#endif
6d5ab293 1320 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
1321
1322 reweight_entity(cfs_rq_of(se), se, shares);
1323}
1324#else /* CONFIG_FAIR_GROUP_SCHED */
6d5ab293 1325static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1326{
1327}
1328#endif /* CONFIG_FAIR_GROUP_SCHED */
1329
141965c7 1330#ifdef CONFIG_SMP
5b51f2f8
PT
1331/*
1332 * We choose a half-life close to 1 scheduling period.
1333 * Note: The tables below are dependent on this value.
1334 */
1335#define LOAD_AVG_PERIOD 32
1336#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1337#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1338
1339/* Precomputed fixed inverse multiplies for multiplication by y^n */
1340static const u32 runnable_avg_yN_inv[] = {
1341 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1342 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1343 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1344 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1345 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1346 0x85aac367, 0x82cd8698,
1347};
1348
1349/*
1350 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1351 * over-estimates when re-combining.
1352 */
1353static const u32 runnable_avg_yN_sum[] = {
1354 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1355 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1356 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1357};
1358
9d85f21c
PT
1359/*
1360 * Approximate:
1361 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1362 */
1363static __always_inline u64 decay_load(u64 val, u64 n)
1364{
5b51f2f8
PT
1365 unsigned int local_n;
1366
1367 if (!n)
1368 return val;
1369 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1370 return 0;
1371
1372 /* after bounds checking we can collapse to 32-bit */
1373 local_n = n;
1374
1375 /*
1376 * As y^PERIOD = 1/2, we can combine
1377 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
1378 * With a look-up table which covers k^n (n<PERIOD)
1379 *
1380 * To achieve constant time decay_load.
1381 */
1382 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1383 val >>= local_n / LOAD_AVG_PERIOD;
1384 local_n %= LOAD_AVG_PERIOD;
9d85f21c
PT
1385 }
1386
5b51f2f8
PT
1387 val *= runnable_avg_yN_inv[local_n];
1388 /* We don't use SRR here since we always want to round down. */
1389 return val >> 32;
1390}
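/*
 * Worked example (illustrative): since y^32 == 1/2, decay_load(4096, 32)
 * returns roughly 2048 and decay_load(4096, 64) roughly 1024. For the
 * fractional part, decay_load(val, 1) multiplies by
 * runnable_avg_yN_inv[1] = 0xfa83b2da and shifts right by 32, i.e. scales
 * val by about 0.9785.
 */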
1391
1392/*
1393 * For updates fully spanning n periods, the contribution to runnable
1394 * average will be: \Sum 1024*y^n
1395 *
1396 * We can compute this reasonably efficiently by combining:
1397 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1398 */
1399static u32 __compute_runnable_contrib(u64 n)
1400{
1401 u32 contrib = 0;
1402
1403 if (likely(n <= LOAD_AVG_PERIOD))
1404 return runnable_avg_yN_sum[n];
1405 else if (unlikely(n >= LOAD_AVG_MAX_N))
1406 return LOAD_AVG_MAX;
1407
1408 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
1409 do {
1410 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1411 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1412
1413 n -= LOAD_AVG_PERIOD;
1414 } while (n > LOAD_AVG_PERIOD);
1415
1416 contrib = decay_load(contrib, n);
1417 return contrib + runnable_avg_yN_sum[n];
9d85f21c
PT
1418}
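/*
 * Worked example (illustrative): for n = 2 fully runnable periods the
 * contribution is runnable_avg_yN_sum[2] = 1982, i.e. roughly
 * 1024 * (y + y^2). Once n reaches LOAD_AVG_MAX_N (345) the series has
 * effectively converged and the function simply returns LOAD_AVG_MAX (47742).
 */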
1419
1420/*
1421 * We can represent the historical contribution to runnable average as the
1422 * coefficients of a geometric series. To do this we sub-divide our runnable
1423 * history into segments of approximately 1ms (1024us); label the segment that
1424 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1425 *
1426 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1427 * p0 p1 p2
1428 * (now) (~1ms ago) (~2ms ago)
1429 *
1430 * Let u_i denote the fraction of p_i that the entity was runnable.
1431 *
1432 * We then designate the fractions u_i as our coefficients, yielding the
1433 * following representation of historical load:
1434 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1435 *
1436 * We choose y based on the width of a reasonable scheduling period, fixing:
1437 * y^32 = 0.5
1438 *
1439 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1440 * approximately half as much as the contribution to load within the last ms
1441 * (u_0).
1442 *
1443 * When a period "rolls over" and we have new u_0`, multiplying the previous
1444 * sum again by y is sufficient to update:
1445 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1446 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1447 */
1448static __always_inline int __update_entity_runnable_avg(u64 now,
1449 struct sched_avg *sa,
1450 int runnable)
1451{
5b51f2f8
PT
1452 u64 delta, periods;
1453 u32 runnable_contrib;
9d85f21c
PT
1454 int delta_w, decayed = 0;
1455
1456 delta = now - sa->last_runnable_update;
1457 /*
1458 * This should only happen when time goes backwards, which it
1459 * unfortunately does during sched clock init when we swap over to TSC.
1460 */
1461 if ((s64)delta < 0) {
1462 sa->last_runnable_update = now;
1463 return 0;
1464 }
1465
1466 /*
1467 * Use 1024ns as the unit of measurement since it's a reasonable
1468 * approximation of 1us and fast to compute.
1469 */
1470 delta >>= 10;
1471 if (!delta)
1472 return 0;
1473 sa->last_runnable_update = now;
1474
1475 /* delta_w is the amount already accumulated against our next period */
1476 delta_w = sa->runnable_avg_period % 1024;
1477 if (delta + delta_w >= 1024) {
1478 /* period roll-over */
1479 decayed = 1;
1480
1481 /*
1482 * Now that we know we're crossing a period boundary, figure
1483 * out how much from delta we need to complete the current
1484 * period and accrue it.
1485 */
1486 delta_w = 1024 - delta_w;
5b51f2f8
PT
1487 if (runnable)
1488 sa->runnable_avg_sum += delta_w;
1489 sa->runnable_avg_period += delta_w;
1490
1491 delta -= delta_w;
1492
1493 /* Figure out how many additional periods this update spans */
1494 periods = delta / 1024;
1495 delta %= 1024;
1496
1497 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1498 periods + 1);
1499 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1500 periods + 1);
1501
1502 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1503 runnable_contrib = __compute_runnable_contrib(periods);
1504 if (runnable)
1505 sa->runnable_avg_sum += runnable_contrib;
1506 sa->runnable_avg_period += runnable_contrib;
9d85f21c
PT
1507 }
1508
1509 /* Remainder of delta accrued against u_0` */
1510 if (runnable)
1511 sa->runnable_avg_sum += delta;
1512 sa->runnable_avg_period += delta;
1513
1514 return decayed;
1515}
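/*
 * Illustrative numbers for the code above: delta is accounted in 1024ns
 * (~1us) units and a period is 1024 such units (~1ms). An entity that is
 * runnable throughout every period sees runnable_avg_sum track
 * runnable_avg_period, both converging towards LOAD_AVG_MAX (47742); an
 * entity runnable for half of every period converges towards roughly half
 * that sum.
 */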
1516
9ee474f5 1517/* Synchronize an entity's decay with its parenting cfs_rq.*/
aff3e498 1518static inline u64 __synchronize_entity_decay(struct sched_entity *se)
9ee474f5
PT
1519{
1520 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1521 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1522
1523 decays -= se->avg.decay_count;
1524 if (!decays)
aff3e498 1525 return 0;
9ee474f5
PT
1526
1527 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1528 se->avg.decay_count = 0;
aff3e498
PT
1529
1530 return decays;
9ee474f5
PT
1531}
1532
c566e8e9
PT
1533#ifdef CONFIG_FAIR_GROUP_SCHED
1534static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1535 int force_update)
1536{
1537 struct task_group *tg = cfs_rq->tg;
bf5b986e 1538 long tg_contrib;
c566e8e9
PT
1539
1540 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1541 tg_contrib -= cfs_rq->tg_load_contrib;
1542
bf5b986e
AS
1543 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1544 atomic_long_add(tg_contrib, &tg->load_avg);
c566e8e9
PT
1545 cfs_rq->tg_load_contrib += tg_contrib;
1546 }
1547}
8165e145 1548
bb17f655
PT
1549/*
1550 * Aggregate cfs_rq runnable averages into an equivalent task_group
1551 * representation for computing load contributions.
1552 */
1553static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1554 struct cfs_rq *cfs_rq)
1555{
1556 struct task_group *tg = cfs_rq->tg;
1557 long contrib;
1558
1559 /* The fraction of a cpu used by this cfs_rq */
1560 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1561 sa->runnable_avg_period + 1);
1562 contrib -= cfs_rq->tg_runnable_contrib;
1563
1564 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1565 atomic_add(contrib, &tg->runnable_avg);
1566 cfs_rq->tg_runnable_contrib += contrib;
1567 }
1568}
1569
8165e145
PT
1570static inline void __update_group_entity_contrib(struct sched_entity *se)
1571{
1572 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1573 struct task_group *tg = cfs_rq->tg;
bb17f655
PT
1574 int runnable_avg;
1575
8165e145
PT
1576 u64 contrib;
1577
1578 contrib = cfs_rq->tg_load_contrib * tg->shares;
bf5b986e
AS
1579 se->avg.load_avg_contrib = div_u64(contrib,
1580 atomic_long_read(&tg->load_avg) + 1);
bb17f655
PT
1581
1582 /*
1583 * For group entities we need to compute a correction term in the case
1584 * that they are consuming <1 cpu so that we would contribute the same
1585 * load as a task of equal weight.
1586 *
1587 * Explicitly co-ordinating this measurement would be expensive, but
1588 * fortunately the sum of each cpus contribution forms a usable
1589 * lower-bound on the true value.
1590 *
1591 * Consider the aggregate of 2 contributions. Either they are disjoint
1592 * (and the sum represents the true value) or they overlap and we are
1593 * understating by the aggregate of their overlap.
1594 *
1595 * Extending this to N cpus, for a given overlap, the maximum amount we
1596 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1597 * cpus that overlap for this interval and w_i is the interval width.
1598 *
1599 * On a small machine, the first term is well-bounded, which bounds the
1600 * total error since w_i is a subset of the period. Whereas on a
1601 * larger machine, while this first term can be larger, if w_i is of
1602 * consequential size it is guaranteed to see n_i*w_i quickly converge to
1603 * our upper bound of 1 cpu.
1604 */
1605 runnable_avg = atomic_read(&tg->runnable_avg);
1606 if (runnable_avg < NICE_0_LOAD) {
1607 se->avg.load_avg_contrib *= runnable_avg;
1608 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1609 }
8165e145 1610}
c566e8e9
PT
1611#else
1612static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1613 int force_update) {}
bb17f655
PT
1614static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1615 struct cfs_rq *cfs_rq) {}
8165e145 1616static inline void __update_group_entity_contrib(struct sched_entity *se) {}
c566e8e9
PT
1617#endif
1618
8165e145
PT
1619static inline void __update_task_entity_contrib(struct sched_entity *se)
1620{
1621 u32 contrib;
1622
1623 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1624 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1625 contrib /= (se->avg.runnable_avg_period + 1);
1626 se->avg.load_avg_contrib = scale_load(contrib);
1627}
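/*
 * Worked example (illustrative): for a nice-0 task (load.weight == 1024,
 * assuming SCHED_LOAD_RESOLUTION == 0) that has been runnable during every
 * recent period, runnable_avg_sum approaches runnable_avg_period, so
 * load_avg_contrib approaches the full weight of 1024; a task runnable only
 * half the time contributes roughly 512.
 */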
1628
2dac754e
PT
1629/* Compute the current contribution to load_avg by se, return any delta */
1630static long __update_entity_load_avg_contrib(struct sched_entity *se)
1631{
1632 long old_contrib = se->avg.load_avg_contrib;
1633
8165e145
PT
1634 if (entity_is_task(se)) {
1635 __update_task_entity_contrib(se);
1636 } else {
bb17f655 1637 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
8165e145
PT
1638 __update_group_entity_contrib(se);
1639 }
2dac754e
PT
1640
1641 return se->avg.load_avg_contrib - old_contrib;
1642}
1643
9ee474f5
PT
1644static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1645 long load_contrib)
1646{
1647 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1648 cfs_rq->blocked_load_avg -= load_contrib;
1649 else
1650 cfs_rq->blocked_load_avg = 0;
1651}
1652
f1b17280
PT
1653static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1654
9d85f21c 1655/* Update a sched_entity's runnable average */
9ee474f5
PT
1656static inline void update_entity_load_avg(struct sched_entity *se,
1657 int update_cfs_rq)
9d85f21c 1658{
2dac754e
PT
1659 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1660 long contrib_delta;
f1b17280 1661 u64 now;
2dac754e 1662
f1b17280
PT
1663 /*
1664 * For a group entity we need to use their owned cfs_rq_clock_task() in
1665 * case they are the parent of a throttled hierarchy.
1666 */
1667 if (entity_is_task(se))
1668 now = cfs_rq_clock_task(cfs_rq);
1669 else
1670 now = cfs_rq_clock_task(group_cfs_rq(se));
1671
1672 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2dac754e
PT
1673 return;
1674
1675 contrib_delta = __update_entity_load_avg_contrib(se);
9ee474f5
PT
1676
1677 if (!update_cfs_rq)
1678 return;
1679
2dac754e
PT
1680 if (se->on_rq)
1681 cfs_rq->runnable_load_avg += contrib_delta;
9ee474f5
PT
1682 else
1683 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1684}
1685
1686/*
1687 * Decay the load contributed by all blocked children and account this so that
1688 * their contribution may be appropriately discounted when they wake up.
1689 */
aff3e498 1690static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
9ee474f5 1691{
f1b17280 1692 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
9ee474f5
PT
1693 u64 decays;
1694
1695 decays = now - cfs_rq->last_decay;
aff3e498 1696 if (!decays && !force_update)
9ee474f5
PT
1697 return;
1698
2509940f
AS
1699 if (atomic_long_read(&cfs_rq->removed_load)) {
1700 unsigned long removed_load;
1701 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
aff3e498
PT
1702 subtract_blocked_load_contrib(cfs_rq, removed_load);
1703 }
9ee474f5 1704
aff3e498
PT
1705 if (decays) {
1706 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1707 decays);
1708 atomic64_add(decays, &cfs_rq->decay_counter);
1709 cfs_rq->last_decay = now;
1710 }
c566e8e9
PT
1711
1712 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
9d85f21c 1713}
18bf2805
BS
1714
1715static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1716{
78becc27 1717 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
bb17f655 1718 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
18bf2805 1719}
2dac754e
PT
1720
1721/* Add the load generated by se into cfs_rq's child load-average */
1722static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1723 struct sched_entity *se,
1724 int wakeup)
2dac754e 1725{
aff3e498
PT
1726 /*
1727 * We track migrations using entity decay_count <= 0, on a wake-up
1728 * migration we use a negative decay count to track the remote decays
1729 * accumulated while sleeping.
a75cdaa9
AS
1730 *
1731 * Newly forked tasks are enqueued with se->avg.decay_count == 0; they
1732 * are seen by enqueue_entity_load_avg() as a migration with an already
1733 * constructed load_avg_contrib.
aff3e498
PT
1734 */
1735 if (unlikely(se->avg.decay_count <= 0)) {
78becc27 1736 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
aff3e498
PT
1737 if (se->avg.decay_count) {
1738 /*
1739 * In a wake-up migration we have to approximate the
1740 * time sleeping. This is because we can't synchronize
1741 * clock_task between the two cpus, and it is not
1742 * guaranteed to be read-safe. Instead, we can
1743 * approximate this using our carried decays, which are
1744 * explicitly atomically readable.
1745 */
1746 se->avg.last_runnable_update -= (-se->avg.decay_count)
1747 << 20;
1748 update_entity_load_avg(se, 0);
1749 /* Indicate that we're now synchronized and on-rq */
1750 se->avg.decay_count = 0;
1751 }
9ee474f5
PT
1752 wakeup = 0;
1753 } else {
282cf499
AS
1754 /*
1755 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1756 * would have made count negative); we must be careful to avoid
1757 * double-accounting blocked time after synchronizing decays.
1758 */
1759 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1760 << 20;
9ee474f5
PT
1761 }
1762
aff3e498
PT
1763 /* migrated tasks did not contribute to our blocked load */
1764 if (wakeup) {
9ee474f5 1765 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
aff3e498
PT
1766 update_entity_load_avg(se, 0);
1767 }
9ee474f5 1768
2dac754e 1769 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
aff3e498
PT
1770 /* we force update consideration on load-balancer moves */
1771 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2dac754e
PT
1772}
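/*
 * Illustrative sketch, not part of the kernel source: on a wake-up
 * migration the remote cpu leaves behind a negative decay_count counting
 * the ~1 ms decay periods accrued while asleep; the code above rewinds
 * last_runnable_update by that many 2^20 ns periods so the following
 * update sees an equivalent stretch of non-runnable time on the new cpu's
 * clock.  Standalone form of that rewind, with hypothetical values:
 */
static unsigned long long demo_rewind_stamp(unsigned long long now_ns,
					    long long decay_count)
{
	/* decay_count <= 0 on migration; -decay_count is periods slept */
	return now_ns - ((unsigned long long)(-decay_count) << 20);
}
/* e.g. decay_count == -5 rewinds the stamp by 5 * 2^20 ns, roughly 5 ms */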
1773
9ee474f5
PT
1774/*
1775 * Remove se's load from this cfs_rq child load-average, if the entity is
1776 * transitioning to a blocked state we track its projected decay using
1777 * blocked_load_avg.
1778 */
2dac754e 1779static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1780 struct sched_entity *se,
1781 int sleep)
2dac754e 1782{
9ee474f5 1783 update_entity_load_avg(se, 1);
aff3e498
PT
1784 /* we force update consideration on load-balancer moves */
1785 update_cfs_rq_blocked_load(cfs_rq, !sleep);
9ee474f5 1786
2dac754e 1787 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
9ee474f5
PT
1788 if (sleep) {
1789 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1790 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1791 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2dac754e 1792}
642dbc39
VG
1793
1794/*
1795 * Update the rq's load with the elapsed running time before entering
1796 * idle. If the last scheduled task is not a CFS task, idle_enter will
1797 * be the only way to update the runnable statistic.
1798 */
1799void idle_enter_fair(struct rq *this_rq)
1800{
1801 update_rq_runnable_avg(this_rq, 1);
1802}
1803
1804/*
1805 * Update the rq's load with the elapsed idle time before a task is
1806 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1807 * be the only way to update the runnable statistic.
1808 */
1809void idle_exit_fair(struct rq *this_rq)
1810{
1811 update_rq_runnable_avg(this_rq, 0);
1812}
1813
9d85f21c 1814#else
9ee474f5
PT
1815static inline void update_entity_load_avg(struct sched_entity *se,
1816 int update_cfs_rq) {}
18bf2805 1817static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2dac754e 1818static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1819 struct sched_entity *se,
1820 int wakeup) {}
2dac754e 1821static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1822 struct sched_entity *se,
1823 int sleep) {}
aff3e498
PT
1824static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1825 int force_update) {}
9d85f21c
PT
1826#endif
1827
2396af69 1828static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1829{
bf0f6f24 1830#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
1831 struct task_struct *tsk = NULL;
1832
1833 if (entity_is_task(se))
1834 tsk = task_of(se);
1835
41acab88 1836 if (se->statistics.sleep_start) {
78becc27 1837 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
bf0f6f24
IM
1838
1839 if ((s64)delta < 0)
1840 delta = 0;
1841
41acab88
LDM
1842 if (unlikely(delta > se->statistics.sleep_max))
1843 se->statistics.sleep_max = delta;
bf0f6f24 1844
8c79a045 1845 se->statistics.sleep_start = 0;
41acab88 1846 se->statistics.sum_sleep_runtime += delta;
9745512c 1847
768d0c27 1848 if (tsk) {
e414314c 1849 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
1850 trace_sched_stat_sleep(tsk, delta);
1851 }
bf0f6f24 1852 }
41acab88 1853 if (se->statistics.block_start) {
78becc27 1854 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
bf0f6f24
IM
1855
1856 if ((s64)delta < 0)
1857 delta = 0;
1858
41acab88
LDM
1859 if (unlikely(delta > se->statistics.block_max))
1860 se->statistics.block_max = delta;
bf0f6f24 1861
8c79a045 1862 se->statistics.block_start = 0;
41acab88 1863 se->statistics.sum_sleep_runtime += delta;
30084fbd 1864
e414314c 1865 if (tsk) {
8f0dfc34 1866 if (tsk->in_iowait) {
41acab88
LDM
1867 se->statistics.iowait_sum += delta;
1868 se->statistics.iowait_count++;
768d0c27 1869 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
1870 }
1871
b781a602
AV
1872 trace_sched_stat_blocked(tsk, delta);
1873
e414314c
PZ
1874 /*
1875 * Blocking time is in units of nanosecs, so shift by
1876 * 20 to get a milliseconds-range estimation of the
1877 * amount of time that the task spent sleeping:
1878 */
1879 if (unlikely(prof_on == SLEEP_PROFILING)) {
1880 profile_hits(SLEEP_PROFILING,
1881 (void *)get_wchan(tsk),
1882 delta >> 20);
1883 }
1884 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 1885 }
bf0f6f24
IM
1886 }
1887#endif
1888}
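/*
 * Illustrative note, not part of the kernel source: the shifts used above
 * trade exactness for cheapness.  delta >> 10 divides by 1024 rather than
 * 1000, so the "microseconds" handed to account_scheduler_latency() run
 * ~2.3% low, and delta >> 20 makes the "milliseconds" handed to
 * profile_hits() ~4.6% low.  For example, a 3 ms block (3,000,000 ns)
 * reports 2929 rather than 3000, and lands in the 2 ms profiling bucket.
 */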
1889
ddc97297
PZ
1890static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1891{
1892#ifdef CONFIG_SCHED_DEBUG
1893 s64 d = se->vruntime - cfs_rq->min_vruntime;
1894
1895 if (d < 0)
1896 d = -d;
1897
1898 if (d > 3*sysctl_sched_latency)
1899 schedstat_inc(cfs_rq, nr_spread_over);
1900#endif
1901}
1902
aeb73b04
PZ
1903static void
1904place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1905{
1af5f730 1906 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 1907
2cb8600e
PZ
1908 /*
1909 * The 'current' period is already promised to the current tasks;
1910 * however, the extra weight of the new task will slow them down a
1911 * little, so place the new task so that it fits in the slot that
1912 * stays open at the end.
1913 */
94dfb5e7 1914 if (initial && sched_feat(START_DEBIT))
f9c0b095 1915 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 1916
a2e7a7eb 1917 /* sleeps up to a single latency don't count. */
5ca9880c 1918 if (!initial) {
a2e7a7eb 1919 unsigned long thresh = sysctl_sched_latency;
a7be37ac 1920
a2e7a7eb
MG
1921 /*
1922 * Halve their sleep time's effect, to allow
1923 * for a gentler effect of sleepers:
1924 */
1925 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1926 thresh >>= 1;
51e0304c 1927
a2e7a7eb 1928 vruntime -= thresh;
aeb73b04
PZ
1929 }
1930
b5d9d734 1931 /* ensure we never gain time by being placed backwards. */
16c8f1c7 1932 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
1933}
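/*
 * Illustrative sketch, not part of the kernel source: the placement rules
 * above in toy numbers, assuming the START_DEBIT and GENTLE_FAIR_SLEEPERS
 * features are enabled (their defaults are assumed here).  A fork is pushed
 * one vslice past min_vruntime, a waking sleeper is credited at most half a
 * latency period, and nobody is ever moved backwards in virtual time.
 */
static unsigned long long demo_place(unsigned long long min_vruntime,
				     unsigned long long se_vruntime,
				     unsigned long long vslice,
				     unsigned long long latency,
				     int initial)
{
	unsigned long long vruntime = min_vruntime;

	if (initial)
		vruntime += vslice;		/* START_DEBIT */
	else
		vruntime -= latency / 2;	/* GENTLE_FAIR_SLEEPERS */

	/* ensure we never gain time by being placed backwards */
	return vruntime > se_vruntime ? vruntime : se_vruntime;
}
/* demo_place(100, 0, 4, 6, 1) == 104; demo_place(100, 0, 4, 6, 0) == 97 */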
1934
d3d9dc33
PT
1935static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1936
bf0f6f24 1937static void
88ec22d3 1938enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1939{
88ec22d3
PZ
1940 /*
1941 * Update the normalized vruntime before updating min_vruntime
0fc576d5 1942 * through calling update_curr().
88ec22d3 1943 */
371fd7e7 1944 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
1945 se->vruntime += cfs_rq->min_vruntime;
1946
bf0f6f24 1947 /*
a2a2d680 1948 * Update run-time statistics of the 'current'.
bf0f6f24 1949 */
b7cc0896 1950 update_curr(cfs_rq);
f269ae04 1951 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
17bc14b7
LT
1952 account_entity_enqueue(cfs_rq, se);
1953 update_cfs_shares(cfs_rq);
bf0f6f24 1954
88ec22d3 1955 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 1956 place_entity(cfs_rq, se, 0);
2396af69 1957 enqueue_sleeper(cfs_rq, se);
e9acbff6 1958 }
bf0f6f24 1959
d2417e5a 1960 update_stats_enqueue(cfs_rq, se);
ddc97297 1961 check_spread(cfs_rq, se);
83b699ed
SV
1962 if (se != cfs_rq->curr)
1963 __enqueue_entity(cfs_rq, se);
2069dd75 1964 se->on_rq = 1;
3d4b47b4 1965
d3d9dc33 1966 if (cfs_rq->nr_running == 1) {
3d4b47b4 1967 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
1968 check_enqueue_throttle(cfs_rq);
1969 }
bf0f6f24
IM
1970}
1971
2c13c919 1972static void __clear_buddies_last(struct sched_entity *se)
2002c695 1973{
2c13c919
RR
1974 for_each_sched_entity(se) {
1975 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1976 if (cfs_rq->last == se)
1977 cfs_rq->last = NULL;
1978 else
1979 break;
1980 }
1981}
2002c695 1982
2c13c919
RR
1983static void __clear_buddies_next(struct sched_entity *se)
1984{
1985 for_each_sched_entity(se) {
1986 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1987 if (cfs_rq->next == se)
1988 cfs_rq->next = NULL;
1989 else
1990 break;
1991 }
2002c695
PZ
1992}
1993
ac53db59
RR
1994static void __clear_buddies_skip(struct sched_entity *se)
1995{
1996 for_each_sched_entity(se) {
1997 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1998 if (cfs_rq->skip == se)
1999 cfs_rq->skip = NULL;
2000 else
2001 break;
2002 }
2003}
2004
a571bbea
PZ
2005static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2006{
2c13c919
RR
2007 if (cfs_rq->last == se)
2008 __clear_buddies_last(se);
2009
2010 if (cfs_rq->next == se)
2011 __clear_buddies_next(se);
ac53db59
RR
2012
2013 if (cfs_rq->skip == se)
2014 __clear_buddies_skip(se);
a571bbea
PZ
2015}
2016
6c16a6dc 2017static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 2018
bf0f6f24 2019static void
371fd7e7 2020dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 2021{
a2a2d680
DA
2022 /*
2023 * Update run-time statistics of the 'current'.
2024 */
2025 update_curr(cfs_rq);
17bc14b7 2026 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 2027
19b6a2e3 2028 update_stats_dequeue(cfs_rq, se);
371fd7e7 2029 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 2030#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
2031 if (entity_is_task(se)) {
2032 struct task_struct *tsk = task_of(se);
2033
2034 if (tsk->state & TASK_INTERRUPTIBLE)
78becc27 2035 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2036 if (tsk->state & TASK_UNINTERRUPTIBLE)
78becc27 2037 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2038 }
db36cc7d 2039#endif
67e9fb2a
PZ
2040 }
2041
2002c695 2042 clear_buddies(cfs_rq, se);
4793241b 2043
83b699ed 2044 if (se != cfs_rq->curr)
30cfdcfc 2045 __dequeue_entity(cfs_rq, se);
17bc14b7 2046 se->on_rq = 0;
30cfdcfc 2047 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
2048
2049 /*
2050 * Normalize the entity after updating the min_vruntime because the
2051 * update can refer to the ->curr item and we need to reflect this
2052 * movement in our normalized position.
2053 */
371fd7e7 2054 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 2055 se->vruntime -= cfs_rq->min_vruntime;
1e876231 2056
d8b4986d
PT
2057 /* return excess runtime on last dequeue */
2058 return_cfs_rq_runtime(cfs_rq);
2059
1e876231 2060 update_min_vruntime(cfs_rq);
17bc14b7 2061 update_cfs_shares(cfs_rq);
bf0f6f24
IM
2062}
2063
2064/*
2065 * Preempt the current task with a newly woken task if needed:
2066 */
7c92e54f 2067static void
2e09bf55 2068check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 2069{
11697830 2070 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
2071 struct sched_entity *se;
2072 s64 delta;
11697830 2073
6d0f0ebd 2074 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 2075 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 2076 if (delta_exec > ideal_runtime) {
bf0f6f24 2077 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
2078 /*
2079 * The current task ran long enough, ensure it doesn't get
2080 * re-elected due to buddy favours.
2081 */
2082 clear_buddies(cfs_rq, curr);
f685ceac
MG
2083 return;
2084 }
2085
2086 /*
2087 * Ensure that a task that missed wakeup preemption by a
2088 * narrow margin doesn't have to wait for a full slice.
2089 * This also mitigates buddy induced latencies under load.
2090 */
f685ceac
MG
2091 if (delta_exec < sysctl_sched_min_granularity)
2092 return;
2093
f4cfb33e
WX
2094 se = __pick_first_entity(cfs_rq);
2095 delta = curr->vruntime - se->vruntime;
f685ceac 2096
f4cfb33e
WX
2097 if (delta < 0)
2098 return;
d7d82944 2099
f4cfb33e
WX
2100 if (delta > ideal_runtime)
2101 resched_task(rq_of(cfs_rq)->curr);
bf0f6f24
IM
2102}
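/*
 * Illustrative numbers, not part of the kernel source: with the default
 * tunables (~6 ms sched_latency, ~0.75 ms minimum granularity -- assumed
 * here) and two runnable tasks of equal weight, sched_slice() comes out to
 * roughly 3 ms, so the rules above behave as follows:
 *
 *  - curr has run 3.5 ms since being picked  -> past its slice, resched
 *  - curr has run only 0.5 ms                -> below min granularity,
 *                                               never preempted here
 *  - curr has run 1 ms but its vruntime is more than ~3 ms ahead of the
 *    leftmost waiter                         -> resched to bound the gap
 */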
2103
83b699ed 2104static void
8494f412 2105set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 2106{
83b699ed
SV
2107 /* 'current' is not kept within the tree. */
2108 if (se->on_rq) {
2109 /*
2110 * Any task has to be enqueued before it gets to execute on
2111 * a CPU. So account for the time it spent waiting on the
2112 * runqueue.
2113 */
2114 update_stats_wait_end(cfs_rq, se);
2115 __dequeue_entity(cfs_rq, se);
2116 }
2117
79303e9e 2118 update_stats_curr_start(cfs_rq, se);
429d43bc 2119 cfs_rq->curr = se;
eba1ed4b
IM
2120#ifdef CONFIG_SCHEDSTATS
2121 /*
2122 * Track our maximum slice length, if the CPU's load is at
2123 * least twice that of our own weight (i.e. don't track it
2124 * when there are only lesser-weight tasks around):
2125 */
495eca49 2126 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 2127 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
2128 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2129 }
2130#endif
4a55b450 2131 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
2132}
2133
3f3a4904
PZ
2134static int
2135wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2136
ac53db59
RR
2137/*
2138 * Pick the next process, keeping these things in mind, in this order:
2139 * 1) keep things fair between processes/task groups
2140 * 2) pick the "next" process, since someone really wants that to run
2141 * 3) pick the "last" process, for cache locality
2142 * 4) do not run the "skip" process, if something else is available
2143 */
f4b6755f 2144static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 2145{
ac53db59 2146 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 2147 struct sched_entity *left = se;
f4b6755f 2148
ac53db59
RR
2149 /*
2150 * Avoid running the skip buddy, if running something else can
2151 * be done without getting too unfair.
2152 */
2153 if (cfs_rq->skip == se) {
2154 struct sched_entity *second = __pick_next_entity(se);
2155 if (second && wakeup_preempt_entity(second, left) < 1)
2156 se = second;
2157 }
aa2ac252 2158
f685ceac
MG
2159 /*
2160 * Prefer last buddy, try to return the CPU to a preempted task.
2161 */
2162 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2163 se = cfs_rq->last;
2164
ac53db59
RR
2165 /*
2166 * Someone really wants this to run. If it's not unfair, run it.
2167 */
2168 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2169 se = cfs_rq->next;
2170
f685ceac 2171 clear_buddies(cfs_rq, se);
4793241b
PZ
2172
2173 return se;
aa2ac252
PZ
2174}
2175
d3d9dc33
PT
2176static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2177
ab6cde26 2178static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
2179{
2180 /*
2181 * If still on the runqueue then deactivate_task()
2182 * was not called and update_curr() has to be done:
2183 */
2184 if (prev->on_rq)
b7cc0896 2185 update_curr(cfs_rq);
bf0f6f24 2186
d3d9dc33
PT
2187 /* throttle cfs_rqs exceeding runtime */
2188 check_cfs_rq_runtime(cfs_rq);
2189
ddc97297 2190 check_spread(cfs_rq, prev);
30cfdcfc 2191 if (prev->on_rq) {
5870db5b 2192 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
2193 /* Put 'current' back into the tree. */
2194 __enqueue_entity(cfs_rq, prev);
9d85f21c 2195 /* in !on_rq case, update occurred at dequeue */
9ee474f5 2196 update_entity_load_avg(prev, 1);
30cfdcfc 2197 }
429d43bc 2198 cfs_rq->curr = NULL;
bf0f6f24
IM
2199}
2200
8f4d37ec
PZ
2201static void
2202entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 2203{
bf0f6f24 2204 /*
30cfdcfc 2205 * Update run-time statistics of the 'current'.
bf0f6f24 2206 */
30cfdcfc 2207 update_curr(cfs_rq);
bf0f6f24 2208
9d85f21c
PT
2209 /*
2210 * Ensure that runnable average is periodically updated.
2211 */
9ee474f5 2212 update_entity_load_avg(curr, 1);
aff3e498 2213 update_cfs_rq_blocked_load(cfs_rq, 1);
bf0bd948 2214 update_cfs_shares(cfs_rq);
9d85f21c 2215
8f4d37ec
PZ
2216#ifdef CONFIG_SCHED_HRTICK
2217 /*
2218 * queued ticks are scheduled to match the slice, so don't bother
2219 * validating it and just reschedule.
2220 */
983ed7a6
HH
2221 if (queued) {
2222 resched_task(rq_of(cfs_rq)->curr);
2223 return;
2224 }
8f4d37ec
PZ
2225 /*
2226 * don't let the period tick interfere with the hrtick preemption
2227 */
2228 if (!sched_feat(DOUBLE_TICK) &&
2229 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2230 return;
2231#endif
2232
2c2efaed 2233 if (cfs_rq->nr_running > 1)
2e09bf55 2234 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
2235}
2236
ab84d31e
PT
2237
2238/**************************************************
2239 * CFS bandwidth control machinery
2240 */
2241
2242#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
2243
2244#ifdef HAVE_JUMP_LABEL
c5905afb 2245static struct static_key __cfs_bandwidth_used;
029632fb
PZ
2246
2247static inline bool cfs_bandwidth_used(void)
2248{
c5905afb 2249 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
2250}
2251
2252void account_cfs_bandwidth_used(int enabled, int was_enabled)
2253{
2254 /* only need to count groups transitioning between enabled/!enabled */
2255 if (enabled && !was_enabled)
c5905afb 2256 static_key_slow_inc(&__cfs_bandwidth_used);
029632fb 2257 else if (!enabled && was_enabled)
c5905afb 2258 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
2259}
2260#else /* HAVE_JUMP_LABEL */
2261static bool cfs_bandwidth_used(void)
2262{
2263 return true;
2264}
2265
2266void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2267#endif /* HAVE_JUMP_LABEL */
2268
ab84d31e
PT
2269/*
2270 * default period for cfs group bandwidth.
2271 * default: 0.1s, units: nanoseconds
2272 */
2273static inline u64 default_cfs_period(void)
2274{
2275 return 100000000ULL;
2276}
ec12cb7f
PT
2277
2278static inline u64 sched_cfs_bandwidth_slice(void)
2279{
2280 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2281}
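/*
 * Illustrative arithmetic, not part of the kernel source: with the 100 ms
 * default period above and a slice sysctl of 5 ms (its usual default -- an
 * assumption here), a group configured with quota = 20 ms refills 20 ms of
 * runtime each period, and local cfs_rqs pull it from the global pool in
 * slice-sized (or larger) grabs via assign_cfs_rq_runtime() below, so at
 * most 20 / 5 = 4 grabs empty the pool.
 */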
2282
a9cf55b2
PT
2283/*
2284 * Replenish runtime according to assigned quota and update expiration time.
2285 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2286 * additional synchronization around rq->lock.
2287 *
2288 * requires cfs_b->lock
2289 */
029632fb 2290void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
2291{
2292 u64 now;
2293
2294 if (cfs_b->quota == RUNTIME_INF)
2295 return;
2296
2297 now = sched_clock_cpu(smp_processor_id());
2298 cfs_b->runtime = cfs_b->quota;
2299 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2300}
2301
029632fb
PZ
2302static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2303{
2304 return &tg->cfs_bandwidth;
2305}
2306
f1b17280
PT
2307/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2308static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2309{
2310 if (unlikely(cfs_rq->throttle_count))
2311 return cfs_rq->throttled_clock_task;
2312
78becc27 2313 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
2314}
2315
85dac906
PT
2316/* returns 0 on failure to allocate runtime */
2317static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
2318{
2319 struct task_group *tg = cfs_rq->tg;
2320 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 2321 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
2322
2323 /* note: this is a positive sum as runtime_remaining <= 0 */
2324 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2325
2326 raw_spin_lock(&cfs_b->lock);
2327 if (cfs_b->quota == RUNTIME_INF)
2328 amount = min_amount;
58088ad0 2329 else {
a9cf55b2
PT
2330 /*
2331 * If the bandwidth pool has become inactive, then at least one
2332 * period must have elapsed since the last consumption.
2333 * Refresh the global state and ensure bandwidth timer becomes
2334 * active.
2335 */
2336 if (!cfs_b->timer_active) {
2337 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 2338 __start_cfs_bandwidth(cfs_b);
a9cf55b2 2339 }
58088ad0
PT
2340
2341 if (cfs_b->runtime > 0) {
2342 amount = min(cfs_b->runtime, min_amount);
2343 cfs_b->runtime -= amount;
2344 cfs_b->idle = 0;
2345 }
ec12cb7f 2346 }
a9cf55b2 2347 expires = cfs_b->runtime_expires;
ec12cb7f
PT
2348 raw_spin_unlock(&cfs_b->lock);
2349
2350 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
2351 /*
2352 * we may have advanced our local expiration to account for allowed
2353 * spread between our sched_clock and the one on which runtime was
2354 * issued.
2355 */
2356 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2357 cfs_rq->runtime_expires = expires;
85dac906
PT
2358
2359 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
2360}
2361
a9cf55b2
PT
2362/*
2363 * Note: This depends on the synchronization provided by sched_clock and the
2364 * fact that rq->clock snapshots this value.
2365 */
2366static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 2367{
a9cf55b2 2368 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
2369
2370 /* if the deadline is ahead of our clock, nothing to do */
78becc27 2371 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
2372 return;
2373
a9cf55b2
PT
2374 if (cfs_rq->runtime_remaining < 0)
2375 return;
2376
2377 /*
2378 * If the local deadline has passed we have to consider the
2379 * possibility that our sched_clock is 'fast' and the global deadline
2380 * has not truly expired.
2381 *
2382 * Fortunately we can determine whether this is the case by checking
2383 * whether the global deadline has advanced.
2384 */
2385
2386 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2387 /* extend local deadline, drift is bounded above by 2 ticks */
2388 cfs_rq->runtime_expires += TICK_NSEC;
2389 } else {
2390 /* global deadline is ahead, expiration has passed */
2391 cfs_rq->runtime_remaining = 0;
2392 }
2393}
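/*
 * Illustrative sketch, not part of the kernel source: the (s64)(a - b)
 * tests in expire_cfs_rq_runtime() are the usual wrap-safe time-comparison
 * idiom -- subtract the unsigned timestamps and look only at the sign, so
 * the check stays correct even across counter wrap.  Standalone form:
 */
static int demo_deadline_passed(unsigned long long now, unsigned long long deadline)
{
	return (long long)(now - deadline) >= 0;
}
/* demo_deadline_passed(1000, 2000) == 0; demo_deadline_passed(3000, 2000) == 1 */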
2394
2395static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2396 unsigned long delta_exec)
2397{
2398 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 2399 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
2400 expire_cfs_rq_runtime(cfs_rq);
2401
2402 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
2403 return;
2404
85dac906
PT
2405 /*
2406 * if we're unable to extend our runtime we resched so that the active
2407 * hierarchy can be throttled
2408 */
2409 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2410 resched_task(rq_of(cfs_rq)->curr);
ec12cb7f
PT
2411}
2412
6c16a6dc
PZ
2413static __always_inline
2414void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
ec12cb7f 2415{
56f570e5 2416 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
2417 return;
2418
2419 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2420}
2421
85dac906
PT
2422static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2423{
56f570e5 2424 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
2425}
2426
64660c86
PT
2427/* check whether cfs_rq, or any parent, is throttled */
2428static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2429{
56f570e5 2430 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
2431}
2432
2433/*
2434 * Ensure that neither of the group entities corresponding to src_cpu or
2435 * dest_cpu are members of a throttled hierarchy when performing group
2436 * load-balance operations.
2437 */
2438static inline int throttled_lb_pair(struct task_group *tg,
2439 int src_cpu, int dest_cpu)
2440{
2441 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2442
2443 src_cfs_rq = tg->cfs_rq[src_cpu];
2444 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2445
2446 return throttled_hierarchy(src_cfs_rq) ||
2447 throttled_hierarchy(dest_cfs_rq);
2448}
2449
2450/* updated child weight may affect parent so we have to do this bottom up */
2451static int tg_unthrottle_up(struct task_group *tg, void *data)
2452{
2453 struct rq *rq = data;
2454 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2455
2456 cfs_rq->throttle_count--;
2457#ifdef CONFIG_SMP
2458 if (!cfs_rq->throttle_count) {
f1b17280 2459 /* adjust cfs_rq_clock_task() */
78becc27 2460 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 2461 cfs_rq->throttled_clock_task;
64660c86
PT
2462 }
2463#endif
2464
2465 return 0;
2466}
2467
2468static int tg_throttle_down(struct task_group *tg, void *data)
2469{
2470 struct rq *rq = data;
2471 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2472
82958366
PT
2473 /* group is entering throttled state, stop time */
2474 if (!cfs_rq->throttle_count)
78becc27 2475 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
2476 cfs_rq->throttle_count++;
2477
2478 return 0;
2479}
2480
d3d9dc33 2481static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
2482{
2483 struct rq *rq = rq_of(cfs_rq);
2484 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2485 struct sched_entity *se;
2486 long task_delta, dequeue = 1;
2487
2488 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2489
f1b17280 2490 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
2491 rcu_read_lock();
2492 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2493 rcu_read_unlock();
85dac906
PT
2494
2495 task_delta = cfs_rq->h_nr_running;
2496 for_each_sched_entity(se) {
2497 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2498 /* throttled entity or throttle-on-deactivate */
2499 if (!se->on_rq)
2500 break;
2501
2502 if (dequeue)
2503 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2504 qcfs_rq->h_nr_running -= task_delta;
2505
2506 if (qcfs_rq->load.weight)
2507 dequeue = 0;
2508 }
2509
2510 if (!se)
2511 rq->nr_running -= task_delta;
2512
2513 cfs_rq->throttled = 1;
78becc27 2514 cfs_rq->throttled_clock = rq_clock(rq);
85dac906
PT
2515 raw_spin_lock(&cfs_b->lock);
2516 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2517 raw_spin_unlock(&cfs_b->lock);
2518}
2519
029632fb 2520void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
2521{
2522 struct rq *rq = rq_of(cfs_rq);
2523 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2524 struct sched_entity *se;
2525 int enqueue = 1;
2526 long task_delta;
2527
22b958d8 2528 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
2529
2530 cfs_rq->throttled = 0;
1a55af2e
FW
2531
2532 update_rq_clock(rq);
2533
671fd9da 2534 raw_spin_lock(&cfs_b->lock);
78becc27 2535 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
2536 list_del_rcu(&cfs_rq->throttled_list);
2537 raw_spin_unlock(&cfs_b->lock);
2538
64660c86
PT
2539 /* update hierarchical throttle state */
2540 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2541
671fd9da
PT
2542 if (!cfs_rq->load.weight)
2543 return;
2544
2545 task_delta = cfs_rq->h_nr_running;
2546 for_each_sched_entity(se) {
2547 if (se->on_rq)
2548 enqueue = 0;
2549
2550 cfs_rq = cfs_rq_of(se);
2551 if (enqueue)
2552 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2553 cfs_rq->h_nr_running += task_delta;
2554
2555 if (cfs_rq_throttled(cfs_rq))
2556 break;
2557 }
2558
2559 if (!se)
2560 rq->nr_running += task_delta;
2561
2562 /* determine whether we need to wake up a potentially idle cpu */
2563 if (rq->curr == rq->idle && rq->cfs.nr_running)
2564 resched_task(rq->curr);
2565}
2566
2567static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2568 u64 remaining, u64 expires)
2569{
2570 struct cfs_rq *cfs_rq;
2571 u64 runtime = remaining;
2572
2573 rcu_read_lock();
2574 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2575 throttled_list) {
2576 struct rq *rq = rq_of(cfs_rq);
2577
2578 raw_spin_lock(&rq->lock);
2579 if (!cfs_rq_throttled(cfs_rq))
2580 goto next;
2581
2582 runtime = -cfs_rq->runtime_remaining + 1;
2583 if (runtime > remaining)
2584 runtime = remaining;
2585 remaining -= runtime;
2586
2587 cfs_rq->runtime_remaining += runtime;
2588 cfs_rq->runtime_expires = expires;
2589
2590 /* we check whether we're throttled above */
2591 if (cfs_rq->runtime_remaining > 0)
2592 unthrottle_cfs_rq(cfs_rq);
2593
2594next:
2595 raw_spin_unlock(&rq->lock);
2596
2597 if (!remaining)
2598 break;
2599 }
2600 rcu_read_unlock();
2601
2602 return remaining;
2603}
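/*
 * Illustrative numbers, not part of the kernel source: a throttled cfs_rq
 * that overran by 2 ms sits at runtime_remaining == -2,000,000 ns, so the
 * loop above hands it 2,000,001 ns -- just enough to go one nanosecond
 * positive and be unthrottled -- and passes whatever is left of "remaining"
 * on to the next throttled cfs_rq in the list.
 */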
2604
58088ad0
PT
2605/*
2606 * Responsible for refilling a task_group's bandwidth and unthrottling its
2607 * cfs_rqs as appropriate. If there has been no activity within the last
2608 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2609 * used to track this state.
2610 */
2611static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2612{
671fd9da
PT
2613 u64 runtime, runtime_expires;
2614 int idle = 1, throttled;
58088ad0
PT
2615
2616 raw_spin_lock(&cfs_b->lock);
2617 /* no need to continue the timer with no bandwidth constraint */
2618 if (cfs_b->quota == RUNTIME_INF)
2619 goto out_unlock;
2620
671fd9da
PT
2621 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2622 /* idle depends on !throttled (for the case of a large deficit) */
2623 idle = cfs_b->idle && !throttled;
e8da1b18 2624 cfs_b->nr_periods += overrun;
671fd9da 2625
a9cf55b2
PT
2626 /* if we're going inactive then everything else can be deferred */
2627 if (idle)
2628 goto out_unlock;
2629
2630 __refill_cfs_bandwidth_runtime(cfs_b);
2631
671fd9da
PT
2632 if (!throttled) {
2633 /* mark as potentially idle for the upcoming period */
2634 cfs_b->idle = 1;
2635 goto out_unlock;
2636 }
2637
e8da1b18
NR
2638 /* account preceding periods in which throttling occurred */
2639 cfs_b->nr_throttled += overrun;
2640
671fd9da
PT
2641 /*
2642 * There are throttled entities so we must first use the new bandwidth
2643 * to unthrottle them before making it generally available. This
2644 * ensures that all existing debts will be paid before a new cfs_rq is
2645 * allowed to run.
2646 */
2647 runtime = cfs_b->runtime;
2648 runtime_expires = cfs_b->runtime_expires;
2649 cfs_b->runtime = 0;
2650
2651 /*
2652 * This check is repeated as we are holding onto the new bandwidth
2653 * while we unthrottle. This can potentially race with an unthrottled
2654 * group trying to acquire new bandwidth from the global pool.
2655 */
2656 while (throttled && runtime > 0) {
2657 raw_spin_unlock(&cfs_b->lock);
2658 /* we can't nest cfs_b->lock while distributing bandwidth */
2659 runtime = distribute_cfs_runtime(cfs_b, runtime,
2660 runtime_expires);
2661 raw_spin_lock(&cfs_b->lock);
2662
2663 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2664 }
58088ad0 2665
671fd9da
PT
2666 /* return (any) remaining runtime */
2667 cfs_b->runtime = runtime;
2668 /*
2669 * While we are ensured activity in the period following an
2670 * unthrottle, this also covers the case in which the new bandwidth is
2671 * insufficient to cover the existing bandwidth deficit. (Forcing the
2672 * timer to remain active while there are any throttled entities.)
2673 */
2674 cfs_b->idle = 0;
58088ad0
PT
2675out_unlock:
2676 if (idle)
2677 cfs_b->timer_active = 0;
2678 raw_spin_unlock(&cfs_b->lock);
2679
2680 return idle;
2681}
d3d9dc33 2682
d8b4986d
PT
2683/* a cfs_rq won't donate quota below this amount */
2684static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2685/* minimum remaining period time to redistribute slack quota */
2686static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2687/* how long we wait to gather additional slack before distributing */
2688static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2689
2690/* are we near the end of the current quota period? */
2691static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2692{
2693 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2694 u64 remaining;
2695
2696 /* if the call-back is running a quota refresh is already occurring */
2697 if (hrtimer_callback_running(refresh_timer))
2698 return 1;
2699
2700 /* is a quota refresh about to occur? */
2701 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2702 if (remaining < min_expire)
2703 return 1;
2704
2705 return 0;
2706}
2707
2708static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2709{
2710 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2711
2712 /* if there's a quota refresh soon don't bother with slack */
2713 if (runtime_refresh_within(cfs_b, min_left))
2714 return;
2715
2716 start_bandwidth_timer(&cfs_b->slack_timer,
2717 ns_to_ktime(cfs_bandwidth_slack_period));
2718}
2719
2720/* we know any runtime found here is valid as update_curr() precedes return */
2721static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2722{
2723 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2724 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2725
2726 if (slack_runtime <= 0)
2727 return;
2728
2729 raw_spin_lock(&cfs_b->lock);
2730 if (cfs_b->quota != RUNTIME_INF &&
2731 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2732 cfs_b->runtime += slack_runtime;
2733
2734 /* we are under rq->lock, defer unthrottling using a timer */
2735 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2736 !list_empty(&cfs_b->throttled_cfs_rq))
2737 start_cfs_slack_bandwidth(cfs_b);
2738 }
2739 raw_spin_unlock(&cfs_b->lock);
2740
2741 /* even if it's not valid for return we don't want to try again */
2742 cfs_rq->runtime_remaining -= slack_runtime;
2743}
2744
2745static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2746{
56f570e5
PT
2747 if (!cfs_bandwidth_used())
2748 return;
2749
fccfdc6f 2750 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
2751 return;
2752
2753 __return_cfs_rq_runtime(cfs_rq);
2754}
2755
2756/*
2757 * This is done with a timer (instead of inline with bandwidth return) since
2758 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2759 */
2760static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2761{
2762 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2763 u64 expires;
2764
2765 /* confirm we're still not at a refresh boundary */
2766 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2767 return;
2768
2769 raw_spin_lock(&cfs_b->lock);
2770 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2771 runtime = cfs_b->runtime;
2772 cfs_b->runtime = 0;
2773 }
2774 expires = cfs_b->runtime_expires;
2775 raw_spin_unlock(&cfs_b->lock);
2776
2777 if (!runtime)
2778 return;
2779
2780 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2781
2782 raw_spin_lock(&cfs_b->lock);
2783 if (expires == cfs_b->runtime_expires)
2784 cfs_b->runtime = runtime;
2785 raw_spin_unlock(&cfs_b->lock);
2786}
2787
d3d9dc33
PT
2788/*
2789 * When a group wakes up we want to make sure that its quota is not already
2790 * expired/exceeded; otherwise it may be allowed to steal additional ticks of
2791 * runtime as update_curr() throttling cannot trigger until it's on-rq.
2792 */
2793static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2794{
56f570e5
PT
2795 if (!cfs_bandwidth_used())
2796 return;
2797
d3d9dc33
PT
2798 /* an active group must be handled by the update_curr()->put() path */
2799 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2800 return;
2801
2802 /* ensure the group is not already throttled */
2803 if (cfs_rq_throttled(cfs_rq))
2804 return;
2805
2806 /* update runtime allocation */
2807 account_cfs_rq_runtime(cfs_rq, 0);
2808 if (cfs_rq->runtime_remaining <= 0)
2809 throttle_cfs_rq(cfs_rq);
2810}
2811
2812/* conditionally throttle active cfs_rq's from put_prev_entity() */
2813static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2814{
56f570e5
PT
2815 if (!cfs_bandwidth_used())
2816 return;
2817
d3d9dc33
PT
2818 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2819 return;
2820
2821 /*
2822 * it's possible for a throttled entity to be forced into a running
2823 * state (e.g. set_curr_task); in this case we're finished.
2824 */
2825 if (cfs_rq_throttled(cfs_rq))
2826 return;
2827
2828 throttle_cfs_rq(cfs_rq);
2829}
029632fb 2830
029632fb
PZ
2831static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2832{
2833 struct cfs_bandwidth *cfs_b =
2834 container_of(timer, struct cfs_bandwidth, slack_timer);
2835 do_sched_cfs_slack_timer(cfs_b);
2836
2837 return HRTIMER_NORESTART;
2838}
2839
2840static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2841{
2842 struct cfs_bandwidth *cfs_b =
2843 container_of(timer, struct cfs_bandwidth, period_timer);
2844 ktime_t now;
2845 int overrun;
2846 int idle = 0;
2847
2848 for (;;) {
2849 now = hrtimer_cb_get_time(timer);
2850 overrun = hrtimer_forward(timer, now, cfs_b->period);
2851
2852 if (!overrun)
2853 break;
2854
2855 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2856 }
2857
2858 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2859}
2860
2861void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2862{
2863 raw_spin_lock_init(&cfs_b->lock);
2864 cfs_b->runtime = 0;
2865 cfs_b->quota = RUNTIME_INF;
2866 cfs_b->period = ns_to_ktime(default_cfs_period());
2867
2868 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2869 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2870 cfs_b->period_timer.function = sched_cfs_period_timer;
2871 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2872 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2873}
2874
2875static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2876{
2877 cfs_rq->runtime_enabled = 0;
2878 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2879}
2880
2881/* requires cfs_b->lock, may release to reprogram timer */
2882void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2883{
2884 /*
2885 * The timer may be active because we're trying to set a new bandwidth
2886 * period or because we're racing with the tear-down path
2887 * (timer_active==0 becomes visible before the hrtimer call-back
2888 * terminates). In either case we ensure that it's re-programmed
2889 */
2890 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2891 raw_spin_unlock(&cfs_b->lock);
2892 /* ensure cfs_b->lock is available while we wait */
2893 hrtimer_cancel(&cfs_b->period_timer);
2894
2895 raw_spin_lock(&cfs_b->lock);
2896 /* if someone else restarted the timer then we're done */
2897 if (cfs_b->timer_active)
2898 return;
2899 }
2900
2901 cfs_b->timer_active = 1;
2902 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2903}
2904
2905static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2906{
2907 hrtimer_cancel(&cfs_b->period_timer);
2908 hrtimer_cancel(&cfs_b->slack_timer);
2909}
2910
38dc3348 2911static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
2912{
2913 struct cfs_rq *cfs_rq;
2914
2915 for_each_leaf_cfs_rq(rq, cfs_rq) {
2916 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2917
2918 if (!cfs_rq->runtime_enabled)
2919 continue;
2920
2921 /*
2922 * clock_task is not advancing so we just need to make sure
2923 * there's some valid quota amount
2924 */
2925 cfs_rq->runtime_remaining = cfs_b->quota;
2926 if (cfs_rq_throttled(cfs_rq))
2927 unthrottle_cfs_rq(cfs_rq);
2928 }
2929}
2930
2931#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
2932static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2933{
78becc27 2934 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
2935}
2936
2937static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2938 unsigned long delta_exec) {}
d3d9dc33
PT
2939static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2940static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 2941static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
2942
2943static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2944{
2945 return 0;
2946}
64660c86
PT
2947
2948static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2949{
2950 return 0;
2951}
2952
2953static inline int throttled_lb_pair(struct task_group *tg,
2954 int src_cpu, int dest_cpu)
2955{
2956 return 0;
2957}
029632fb
PZ
2958
2959void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2960
2961#ifdef CONFIG_FAIR_GROUP_SCHED
2962static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
2963#endif
2964
029632fb
PZ
2965static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2966{
2967 return NULL;
2968}
2969static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
a4c96ae3 2970static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
2971
2972#endif /* CONFIG_CFS_BANDWIDTH */
2973
bf0f6f24
IM
2974/**************************************************
2975 * CFS operations on tasks:
2976 */
2977
8f4d37ec
PZ
2978#ifdef CONFIG_SCHED_HRTICK
2979static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2980{
8f4d37ec
PZ
2981 struct sched_entity *se = &p->se;
2982 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2983
2984 WARN_ON(task_rq(p) != rq);
2985
b39e66ea 2986 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
2987 u64 slice = sched_slice(cfs_rq, se);
2988 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2989 s64 delta = slice - ran;
2990
2991 if (delta < 0) {
2992 if (rq->curr == p)
2993 resched_task(p);
2994 return;
2995 }
2996
2997 /*
2998 * Don't schedule slices shorter than 10000ns, that just
2999 * doesn't make sense. Rely on vruntime for fairness.
3000 */
31656519 3001 if (rq->curr != p)
157124c1 3002 delta = max_t(s64, 10000LL, delta);
8f4d37ec 3003
31656519 3004 hrtick_start(rq, delta);
8f4d37ec
PZ
3005 }
3006}
a4c2f00f
PZ
3007
3008/*
3009 * called from enqueue/dequeue and updates the hrtick when the
3010 * current task is from our class and nr_running is low enough
3011 * to matter.
3012 */
3013static void hrtick_update(struct rq *rq)
3014{
3015 struct task_struct *curr = rq->curr;
3016
b39e66ea 3017 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
3018 return;
3019
3020 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3021 hrtick_start_fair(rq, curr);
3022}
55e12e5e 3023#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
3024static inline void
3025hrtick_start_fair(struct rq *rq, struct task_struct *p)
3026{
3027}
a4c2f00f
PZ
3028
3029static inline void hrtick_update(struct rq *rq)
3030{
3031}
8f4d37ec
PZ
3032#endif
3033
bf0f6f24
IM
3034/*
3035 * The enqueue_task method is called before nr_running is
3036 * increased. Here we update the fair scheduling stats and
3037 * then put the task into the rbtree:
3038 */
ea87bb78 3039static void
371fd7e7 3040enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3041{
3042 struct cfs_rq *cfs_rq;
62fb1851 3043 struct sched_entity *se = &p->se;
bf0f6f24
IM
3044
3045 for_each_sched_entity(se) {
62fb1851 3046 if (se->on_rq)
bf0f6f24
IM
3047 break;
3048 cfs_rq = cfs_rq_of(se);
88ec22d3 3049 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
3050
3051 /*
3052 * end evaluation on encountering a throttled cfs_rq
3053 *
3054 * note: in the case of encountering a throttled cfs_rq we will
3055 * post the final h_nr_running increment below.
3056 */
3057 if (cfs_rq_throttled(cfs_rq))
3058 break;
953bfcd1 3059 cfs_rq->h_nr_running++;
85dac906 3060
88ec22d3 3061 flags = ENQUEUE_WAKEUP;
bf0f6f24 3062 }
8f4d37ec 3063
2069dd75 3064 for_each_sched_entity(se) {
0f317143 3065 cfs_rq = cfs_rq_of(se);
953bfcd1 3066 cfs_rq->h_nr_running++;
2069dd75 3067
85dac906
PT
3068 if (cfs_rq_throttled(cfs_rq))
3069 break;
3070
17bc14b7 3071 update_cfs_shares(cfs_rq);
9ee474f5 3072 update_entity_load_avg(se, 1);
2069dd75
PZ
3073 }
3074
18bf2805
BS
3075 if (!se) {
3076 update_rq_runnable_avg(rq, rq->nr_running);
85dac906 3077 inc_nr_running(rq);
18bf2805 3078 }
a4c2f00f 3079 hrtick_update(rq);
bf0f6f24
IM
3080}
3081
2f36825b
VP
3082static void set_next_buddy(struct sched_entity *se);
3083
bf0f6f24
IM
3084/*
3085 * The dequeue_task method is called before nr_running is
3086 * decreased. We remove the task from the rbtree and
3087 * update the fair scheduling stats:
3088 */
371fd7e7 3089static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3090{
3091 struct cfs_rq *cfs_rq;
62fb1851 3092 struct sched_entity *se = &p->se;
2f36825b 3093 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
3094
3095 for_each_sched_entity(se) {
3096 cfs_rq = cfs_rq_of(se);
371fd7e7 3097 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
3098
3099 /*
3100 * end evaluation on encountering a throttled cfs_rq
3101 *
3102 * note: in the case of encountering a throttled cfs_rq we will
3103 * post the final h_nr_running decrement below.
3104 */
3105 if (cfs_rq_throttled(cfs_rq))
3106 break;
953bfcd1 3107 cfs_rq->h_nr_running--;
2069dd75 3108
bf0f6f24 3109 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
3110 if (cfs_rq->load.weight) {
3111 /*
3112 * Bias pick_next to pick a task from this cfs_rq, as
3113 * p is sleeping when it is within its sched_slice.
3114 */
3115 if (task_sleep && parent_entity(se))
3116 set_next_buddy(parent_entity(se));
9598c82d
PT
3117
3118 /* avoid re-evaluating load for this entity */
3119 se = parent_entity(se);
bf0f6f24 3120 break;
2f36825b 3121 }
371fd7e7 3122 flags |= DEQUEUE_SLEEP;
bf0f6f24 3123 }
8f4d37ec 3124
2069dd75 3125 for_each_sched_entity(se) {
0f317143 3126 cfs_rq = cfs_rq_of(se);
953bfcd1 3127 cfs_rq->h_nr_running--;
2069dd75 3128
85dac906
PT
3129 if (cfs_rq_throttled(cfs_rq))
3130 break;
3131
17bc14b7 3132 update_cfs_shares(cfs_rq);
9ee474f5 3133 update_entity_load_avg(se, 1);
2069dd75
PZ
3134 }
3135
18bf2805 3136 if (!se) {
85dac906 3137 dec_nr_running(rq);
18bf2805
BS
3138 update_rq_runnable_avg(rq, 1);
3139 }
a4c2f00f 3140 hrtick_update(rq);
bf0f6f24
IM
3141}
3142
e7693a36 3143#ifdef CONFIG_SMP
029632fb
PZ
3144/* Used instead of source_load when we know the type == 0 */
3145static unsigned long weighted_cpuload(const int cpu)
3146{
b92486cb 3147 return cpu_rq(cpu)->cfs.runnable_load_avg;
029632fb
PZ
3148}
3149
3150/*
3151 * Return a low guess at the load of a migration-source cpu weighted
3152 * according to the scheduling class and "nice" value.
3153 *
3154 * We want to under-estimate the load of migration sources, to
3155 * balance conservatively.
3156 */
3157static unsigned long source_load(int cpu, int type)
3158{
3159 struct rq *rq = cpu_rq(cpu);
3160 unsigned long total = weighted_cpuload(cpu);
3161
3162 if (type == 0 || !sched_feat(LB_BIAS))
3163 return total;
3164
3165 return min(rq->cpu_load[type-1], total);
3166}
3167
3168/*
3169 * Return a high guess at the load of a migration-target cpu weighted
3170 * according to the scheduling class and "nice" value.
3171 */
3172static unsigned long target_load(int cpu, int type)
3173{
3174 struct rq *rq = cpu_rq(cpu);
3175 unsigned long total = weighted_cpuload(cpu);
3176
3177 if (type == 0 || !sched_feat(LB_BIAS))
3178 return total;
3179
3180 return max(rq->cpu_load[type-1], total);
3181}
3182
3183static unsigned long power_of(int cpu)
3184{
3185 return cpu_rq(cpu)->cpu_power;
3186}
3187
3188static unsigned long cpu_avg_load_per_task(int cpu)
3189{
3190 struct rq *rq = cpu_rq(cpu);
3191 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
b92486cb 3192 unsigned long load_avg = rq->cfs.runnable_load_avg;
029632fb
PZ
3193
3194 if (nr_running)
b92486cb 3195 return load_avg / nr_running;
029632fb
PZ
3196
3197 return 0;
3198}
3199
62470419
MW
3200static void record_wakee(struct task_struct *p)
3201{
3202 /*
3203 * Rough decay (wiping) for cost saving, don't worry
3204 * about the boundary, really active task won't care
3205 * about the loss.
3206 */
3207 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3208 current->wakee_flips = 0;
3209 current->wakee_flip_decay_ts = jiffies;
3210 }
3211
3212 if (current->last_wakee != p) {
3213 current->last_wakee = p;
3214 current->wakee_flips++;
3215 }
3216}
098fb9db 3217
74f8e4b2 3218static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
3219{
3220 struct sched_entity *se = &p->se;
3221 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
3222 u64 min_vruntime;
3223
3224#ifndef CONFIG_64BIT
3225 u64 min_vruntime_copy;
88ec22d3 3226
3fe1698b
PZ
3227 do {
3228 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3229 smp_rmb();
3230 min_vruntime = cfs_rq->min_vruntime;
3231 } while (min_vruntime != min_vruntime_copy);
3232#else
3233 min_vruntime = cfs_rq->min_vruntime;
3234#endif
88ec22d3 3235
3fe1698b 3236 se->vruntime -= min_vruntime;
62470419 3237 record_wakee(p);
88ec22d3
PZ
3238}
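/*
 * Illustrative sketch, not part of the kernel source: on 32-bit a u64 read
 * can tear, so the loop above keeps re-reading until min_vruntime and its
 * copy agree; the writer side (update_min_vruntime(), elsewhere in this
 * file under the same #ifndef CONFIG_64BIT) stores the value, a write
 * barrier, then the copy.  A standalone rendering of the reader, with the
 * barrier marked:
 */
static unsigned long long
demo_read_min_vruntime(const volatile unsigned long long *value,
		       const volatile unsigned long long *copy)
{
	unsigned long long v, cp;

	do {
		cp = *copy;
		/* smp_rmb() sits here in the code above */
		v = *value;
	} while (v != cp);

	return v;
}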
3239
bb3469ac 3240#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
3241/*
3242 * effective_load() calculates the load change as seen from the root_task_group
3243 *
3244 * Adding load to a group doesn't make a group heavier, but can cause movement
3245 * of group shares between cpus. Assuming the shares were perfectly aligned one
3246 * can calculate the shift in shares.
cf5f0acf
PZ
3247 *
3248 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3249 * on this @cpu and results in a total addition (subtraction) of @wg to the
3250 * total group weight.
3251 *
3252 * Given a runqueue weight distribution (rw_i) we can compute a shares
3253 * distribution (s_i) using:
3254 *
3255 * s_i = rw_i / \Sum rw_j (1)
3256 *
3257 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3258 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3259 * shares distribution (s_i):
3260 *
3261 * rw_i = { 2, 4, 1, 0 }
3262 * s_i = { 2/7, 4/7, 1/7, 0 }
3263 *
3264 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3265 * task used to run on and the CPU the waker is running on), we need to
3266 * compute the effect of waking a task on either CPU and, in case of a sync
3267 * wakeup, compute the effect of the current task going to sleep.
3268 *
3269 * So for a change of @wl to the local @cpu with an overall group weight change
3270 * of @wl we can compute the new shares distribution (s'_i) using:
3271 *
3272 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3273 *
3274 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3275 * differences in waking a task to CPU 0. The additional task changes the
3276 * weight and shares distributions like:
3277 *
3278 * rw'_i = { 3, 4, 1, 0 }
3279 * s'_i = { 3/8, 4/8, 1/8, 0 }
3280 *
3281 * We can then compute the difference in effective weight by using:
3282 *
3283 * dw_i = S * (s'_i - s_i) (3)
3284 *
3285 * Where 'S' is the group weight as seen by its parent.
3286 *
3287 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3288 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3289 * 4/7) times the weight of the group.
f5bfb7d9 3290 */
2069dd75 3291static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 3292{
4be9daaa 3293 struct sched_entity *se = tg->se[cpu];
f1d239f7 3294
cf5f0acf 3295 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
3296 return wl;
3297
4be9daaa 3298 for_each_sched_entity(se) {
cf5f0acf 3299 long w, W;
4be9daaa 3300
977dda7c 3301 tg = se->my_q->tg;
bb3469ac 3302
cf5f0acf
PZ
3303 /*
3304 * W = @wg + \Sum rw_j
3305 */
3306 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 3307
cf5f0acf
PZ
3308 /*
3309 * w = rw_i + @wl
3310 */
3311 w = se->my_q->load.weight + wl;
940959e9 3312
cf5f0acf
PZ
3313 /*
3314 * wl = S * s'_i; see (2)
3315 */
3316 if (W > 0 && w < W)
3317 wl = (w * tg->shares) / W;
977dda7c
PT
3318 else
3319 wl = tg->shares;
940959e9 3320
cf5f0acf
PZ
3321 /*
3322 * Per the above, wl is the new se->load.weight value; since
3323 * those are clipped to [MIN_SHARES, ...) do so now. See
3324 * calc_cfs_shares().
3325 */
977dda7c
PT
3326 if (wl < MIN_SHARES)
3327 wl = MIN_SHARES;
cf5f0acf
PZ
3328
3329 /*
3330 * wl = dw_i = S * (s'_i - s_i); see (3)
3331 */
977dda7c 3332 wl -= se->load.weight;
cf5f0acf
PZ
3333
3334 /*
3335 * Recursively apply this logic to all parent groups to compute
3336 * the final effective load change on the root group. Since
3337 * only the @tg group gets extra weight, all parent groups can
3338 * only redistribute existing shares. @wl is the shift in shares
3339 * resulting from this level per the above.
3340 */
4be9daaa 3341 wg = 0;
4be9daaa 3342 }
bb3469ac 3343
4be9daaa 3344 return wl;
bb3469ac
PZ
3345}
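/*
 * Illustrative sketch, not part of the kernel source: one level of the
 * loop above, plugged with the 4-cpu example from the comment block
 * (rw = {2, 4, 1, 0}, a task of weight 1 waking on cpu 0, group weight S
 * as seen by the parent).  The real loop subtracts se->load.weight, the
 * currently materialised share; here the old share is recomputed from the
 * model instead so that the numbers line up with equations (2) and (3).
 */
static long demo_effective_load_level(long rw_cpu, long rw_sum,
				      long wl, long wg, long shares)
{
	long W = wg + rw_sum;			/* W = @wg + \Sum rw_j */
	long w = rw_cpu + wl;			/* w = rw_i + @wl      */
	long new_share, old_share;

	new_share = (W > 0 && w < W) ? w * shares / W : shares;
	old_share = rw_cpu * shares / rw_sum;	/* S * s_i */

	return new_share - old_share;		/* dw_i, see (3) */
}
/*
 * demo_effective_load_level(2, 7, 1, 1, 56) == 5, i.e. dw_0 = 5 * S / 56
 * for S == 56, matching the worked example in the comment above.
 */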
3346#else
4be9daaa 3347
83378269
PZ
3348static inline unsigned long effective_load(struct task_group *tg, int cpu,
3349 unsigned long wl, unsigned long wg)
4be9daaa 3350{
83378269 3351 return wl;
bb3469ac 3352}
4be9daaa 3353
bb3469ac
PZ
3354#endif
3355
62470419
MW
3356static int wake_wide(struct task_struct *p)
3357{
7d9ffa89 3358 int factor = this_cpu_read(sd_llc_size);
62470419
MW
3359
3360 /*
3361 * Yeah, it's the switching-frequency, could means many wakee or
3362 * rapidly switch, use factor here will just help to automatically
3363 * adjust the loose-degree, so bigger node will lead to more pull.
3364 */
3365 if (p->wakee_flips > factor) {
3366 /*
3367 * The wakee is somewhat hot and needs a certain amount of cpu
3368 * resources, so if the waker is far hotter, prefer to leave
3369 * the wakee alone.
3370 */
3371 if (current->wakee_flips > (factor * p->wakee_flips))
3372 return 1;
3373 }
3374
3375 return 0;
3376}
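/*
 * Illustrative numbers, not part of the kernel source: with an LLC
 * spanning 8 cpus (factor == 8), a wakee that has flipped wake partners
 * 10 times is "somewhat hot" (10 > 8); if the waker has flipped more than
 * 8 * 10 = 80 times it counts as far hotter, wake_wide() returns 1, and
 * wake_affine() below gives up on the affine fast path for this wakeup.
 */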
3377
c88d5910 3378static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 3379{
e37b6a7b 3380 s64 this_load, load;
c88d5910 3381 int idx, this_cpu, prev_cpu;
098fb9db 3382 unsigned long tl_per_task;
c88d5910 3383 struct task_group *tg;
83378269 3384 unsigned long weight;
b3137bc8 3385 int balanced;
098fb9db 3386
62470419
MW
3387 /*
3388 * If we wake multiple tasks, be careful not to bounce
3389 * ourselves around too much.
3390 */
3391 if (wake_wide(p))
3392 return 0;
3393
c88d5910
PZ
3394 idx = sd->wake_idx;
3395 this_cpu = smp_processor_id();
3396 prev_cpu = task_cpu(p);
3397 load = source_load(prev_cpu, idx);
3398 this_load = target_load(this_cpu, idx);
098fb9db 3399
b3137bc8
MG
3400 /*
3401 * If sync wakeup then subtract the (maximum possible)
3402 * effect of the currently running task from the load
3403 * of the current CPU:
3404 */
83378269
PZ
3405 if (sync) {
3406 tg = task_group(current);
3407 weight = current->se.load.weight;
3408
c88d5910 3409 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
3410 load += effective_load(tg, prev_cpu, 0, -weight);
3411 }
b3137bc8 3412
83378269
PZ
3413 tg = task_group(p);
3414 weight = p->se.load.weight;
b3137bc8 3415
71a29aa7
PZ
3416 /*
3417 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
3418 * due to the sync cause above having dropped this_load to 0, we'll
3419 * always have an imbalance, but there's really nothing you can do
3420 * about that, so that's good too.
71a29aa7
PZ
3421 *
3422 * Otherwise check whether the two cpus are near enough in load to allow this
3423 * task to be woken on this_cpu.
3424 */
e37b6a7b
PT
3425 if (this_load > 0) {
3426 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
3427
3428 this_eff_load = 100;
3429 this_eff_load *= power_of(prev_cpu);
3430 this_eff_load *= this_load +
3431 effective_load(tg, this_cpu, weight, weight);
3432
3433 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3434 prev_eff_load *= power_of(this_cpu);
3435 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3436
3437 balanced = this_eff_load <= prev_eff_load;
3438 } else
3439 balanced = true;
b3137bc8 3440
098fb9db 3441 /*
4ae7d5ce
IM
3442 * If the currently running task will sleep within
3443 * a reasonable amount of time then attract this newly
3444 * woken task:
098fb9db 3445 */
2fb7635c
PZ
3446 if (sync && balanced)
3447 return 1;
098fb9db 3448
41acab88 3449 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
3450 tl_per_task = cpu_avg_load_per_task(this_cpu);
3451
c88d5910
PZ
3452 if (balanced ||
3453 (this_load <= load &&
3454 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
3455 /*
3456 * This domain has SD_WAKE_AFFINE and
3457 * p is cache cold in this domain, and
3458 * there is no bad imbalance.
3459 */
c88d5910 3460 schedstat_inc(sd, ttwu_move_affine);
41acab88 3461 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
3462
3463 return 1;
3464 }
3465 return 0;
3466}
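/*
 * Illustrative arithmetic, not part of the kernel source: assuming equal
 * cpu power on both sides and a domain imbalance_pct of 125 (a common
 * default -- an assumption here), the previous cpu's projected load is
 * weighted by 100 + (125 - 100) / 2 = 112 against this cpu's 100, so the
 * waking cpu may carry up to ~12% more projected load than prev_cpu and
 * the wakeup is still considered balanced, i.e. kept affine.
 */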
3467
aaee1203
PZ
3468/*
3469 * find_idlest_group finds and returns the least busy CPU group within the
3470 * domain.
3471 */
3472static struct sched_group *
78e7ed53 3473find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 3474 int this_cpu, int load_idx)
e7693a36 3475{
b3bd3de6 3476 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 3477 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 3478 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 3479
aaee1203
PZ
3480 do {
3481 unsigned long load, avg_load;
3482 int local_group;
3483 int i;
e7693a36 3484
aaee1203
PZ
3485 /* Skip over this group if it has no CPUs allowed */
3486 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 3487 tsk_cpus_allowed(p)))
aaee1203
PZ
3488 continue;
3489
3490 local_group = cpumask_test_cpu(this_cpu,
3491 sched_group_cpus(group));
3492
3493 /* Tally up the load of all CPUs in the group */
3494 avg_load = 0;
3495
3496 for_each_cpu(i, sched_group_cpus(group)) {
3497 /* Bias balancing toward cpus of our domain */
3498 if (local_group)
3499 load = source_load(i, load_idx);
3500 else
3501 load = target_load(i, load_idx);
3502
3503 avg_load += load;
3504 }
3505
3506 /* Adjust by relative CPU power of the group */
9c3f75cb 3507 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
3508
3509 if (local_group) {
3510 this_load = avg_load;
aaee1203
PZ
3511 } else if (avg_load < min_load) {
3512 min_load = avg_load;
3513 idlest = group;
3514 }
3515 } while (group = group->next, group != sd->groups);
3516
3517 if (!idlest || 100*this_load < imbalance*min_load)
3518 return NULL;
3519 return idlest;
3520}
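
The cut-off at the end of find_idlest_group() only picks a remote group when it is lighter than the local group by more than the domain's margin. A minimal sketch, assuming hypothetical group loads and a typical imbalance_pct of 125:

/* Not kernel code: same test as "100*this_load < imbalance*min_load". */
#include <stdio.h>

int main(void)
{
	unsigned long this_load = 900;	/* local group's scaled avg_load */
	unsigned long min_load = 800;	/* least loaded remote group */
	int imbalance_pct = 125;
	int imbalance = 100 + (imbalance_pct - 100) / 2;	/* -> 112 */

	if (100 * this_load < (unsigned long)imbalance * min_load)
		printf("stay local: remote group not clearly idler\n");
	else
		printf("use the remote (idlest) group\n");
	return 0;
}
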
3521
3522/*
3523 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3524 */
3525static int
3526find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3527{
3528 unsigned long load, min_load = ULONG_MAX;
3529 int idlest = -1;
3530 int i;
3531
3532 /* Traverse only the allowed CPUs */
fa17b507 3533 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
3534 load = weighted_cpuload(i);
3535
3536 if (load < min_load || (load == min_load && i == this_cpu)) {
3537 min_load = load;
3538 idlest = i;
e7693a36
GH
3539 }
3540 }
3541
aaee1203
PZ
3542 return idlest;
3543}
e7693a36 3544
a50bde51
PZ
3545/*
3546 * Try and locate an idle CPU in the sched_domain.
3547 */
99bd5e2f 3548static int select_idle_sibling(struct task_struct *p, int target)
a50bde51 3549{
99bd5e2f 3550 struct sched_domain *sd;
37407ea7 3551 struct sched_group *sg;
e0a79f52 3552 int i = task_cpu(p);
a50bde51 3553
e0a79f52
MG
3554 if (idle_cpu(target))
3555 return target;
99bd5e2f
SS
3556
3557 /*
e0a79f52 3558 * If the previous cpu is cache affine and idle, don't be stupid.
99bd5e2f 3559 */
e0a79f52
MG
3560 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3561 return i;
a50bde51
PZ
3562
3563 /*
37407ea7 3564 * Otherwise, iterate the domains and find an eligible idle cpu.
a50bde51 3565 */
518cd623 3566 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 3567 for_each_lower_domain(sd) {
37407ea7
LT
3568 sg = sd->groups;
3569 do {
3570 if (!cpumask_intersects(sched_group_cpus(sg),
3571 tsk_cpus_allowed(p)))
3572 goto next;
3573
3574 for_each_cpu(i, sched_group_cpus(sg)) {
e0a79f52 3575 if (i == target || !idle_cpu(i))
37407ea7
LT
3576 goto next;
3577 }
970e1789 3578
37407ea7
LT
3579 target = cpumask_first_and(sched_group_cpus(sg),
3580 tsk_cpus_allowed(p));
3581 goto done;
3582next:
3583 sg = sg->next;
3584 } while (sg != sd->groups);
3585 }
3586done:
a50bde51
PZ
3587 return target;
3588}
3589
aaee1203
PZ
3590/*
3591 * sched_balance_self: balance the current task (running on cpu) in domains
3592 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3593 * SD_BALANCE_EXEC.
3594 *
3595 * Balance, ie. select the least loaded group.
3596 *
3597 * Returns the target CPU number, or the same CPU if no balancing is needed.
3598 *
3599 * preempt must be disabled.
3600 */
0017d735 3601static int
7608dec2 3602select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 3603{
29cd8bae 3604 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
3605 int cpu = smp_processor_id();
3606 int prev_cpu = task_cpu(p);
3607 int new_cpu = cpu;
99bd5e2f 3608 int want_affine = 0;
5158f4e4 3609 int sync = wake_flags & WF_SYNC;
c88d5910 3610
29baa747 3611 if (p->nr_cpus_allowed == 1)
76854c7e
MG
3612 return prev_cpu;
3613
0763a660 3614 if (sd_flag & SD_BALANCE_WAKE) {
fa17b507 3615 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
c88d5910
PZ
3616 want_affine = 1;
3617 new_cpu = prev_cpu;
3618 }
aaee1203 3619
dce840a0 3620 rcu_read_lock();
aaee1203 3621 for_each_domain(cpu, tmp) {
e4f42888
PZ
3622 if (!(tmp->flags & SD_LOAD_BALANCE))
3623 continue;
3624
fe3bcfe1 3625 /*
99bd5e2f
SS
3626 * If both cpu and prev_cpu are part of this domain,
3627 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 3628 */
99bd5e2f
SS
3629 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3630 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3631 affine_sd = tmp;
29cd8bae 3632 break;
f03542a7 3633 }
29cd8bae 3634
f03542a7 3635 if (tmp->flags & sd_flag)
29cd8bae
PZ
3636 sd = tmp;
3637 }
3638
8b911acd 3639 if (affine_sd) {
f03542a7 3640 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
dce840a0
PZ
3641 prev_cpu = cpu;
3642
3643 new_cpu = select_idle_sibling(p, prev_cpu);
3644 goto unlock;
8b911acd 3645 }
e7693a36 3646
aaee1203 3647 while (sd) {
5158f4e4 3648 int load_idx = sd->forkexec_idx;
aaee1203 3649 struct sched_group *group;
c88d5910 3650 int weight;
098fb9db 3651
0763a660 3652 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
3653 sd = sd->child;
3654 continue;
3655 }
098fb9db 3656
5158f4e4
PZ
3657 if (sd_flag & SD_BALANCE_WAKE)
3658 load_idx = sd->wake_idx;
098fb9db 3659
5158f4e4 3660 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
3661 if (!group) {
3662 sd = sd->child;
3663 continue;
3664 }
4ae7d5ce 3665
d7c33c49 3666 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
3667 if (new_cpu == -1 || new_cpu == cpu) {
3668 /* Now try balancing at a lower domain level of cpu */
3669 sd = sd->child;
3670 continue;
e7693a36 3671 }
aaee1203
PZ
3672
3673 /* Now try balancing at a lower domain level of new_cpu */
3674 cpu = new_cpu;
669c55e9 3675 weight = sd->span_weight;
aaee1203
PZ
3676 sd = NULL;
3677 for_each_domain(cpu, tmp) {
669c55e9 3678 if (weight <= tmp->span_weight)
aaee1203 3679 break;
0763a660 3680 if (tmp->flags & sd_flag)
aaee1203
PZ
3681 sd = tmp;
3682 }
3683 /* while loop will break here if sd == NULL */
e7693a36 3684 }
dce840a0
PZ
3685unlock:
3686 rcu_read_unlock();
e7693a36 3687
c88d5910 3688 return new_cpu;
e7693a36 3689}
0a74bef8
PT
3690
3691/*
3692 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3693 * cfs_rq_of(p) references at time of call are still valid and identify the
3694 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3695 * other assumptions, including the state of rq->lock, should be made.
3696 */
3697static void
3698migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3699{
aff3e498
PT
3700 struct sched_entity *se = &p->se;
3701 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3702
3703 /*
3704 * Load tracking: accumulate removed load so that it can be processed
3705 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3706 * to blocked load iff they have a positive decay-count. It can never
3707 * be negative here since on-rq tasks have decay-count == 0.
3708 */
3709 if (se->avg.decay_count) {
3710 se->avg.decay_count = -__synchronize_entity_decay(se);
2509940f
AS
3711 atomic_long_add(se->avg.load_avg_contrib,
3712 &cfs_rq->removed_load);
aff3e498 3713 }
0a74bef8 3714}
e7693a36
GH
3715#endif /* CONFIG_SMP */
3716
e52fb7c0
PZ
3717static unsigned long
3718wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
3719{
3720 unsigned long gran = sysctl_sched_wakeup_granularity;
3721
3722 /*
e52fb7c0
PZ
 3723 * Since it's curr that is running now, convert the gran from real-time
 3724 * to virtual-time in its units.
13814d42
MG
3725 *
3726 * By using 'se' instead of 'curr' we penalize light tasks, so
3727 * they get preempted easier. That is, if 'se' < 'curr' then
3728 * the resulting gran will be larger, therefore penalizing the
3729 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3730 * be smaller, again penalizing the lighter task.
3731 *
3732 * This is especially important for buddies when the leftmost
3733 * task is higher priority than the buddy.
0bbd3336 3734 */
f4ad9bd2 3735 return calc_delta_fair(gran, se);
0bbd3336
PZ
3736}
3737
464b7527
PZ
3738/*
3739 * Should 'se' preempt 'curr'.
3740 *
3741 * |s1
3742 * |s2
3743 * |s3
3744 * g
3745 * |<--->|c
3746 *
3747 * w(c, s1) = -1
3748 * w(c, s2) = 0
3749 * w(c, s3) = 1
3750 *
3751 */
3752static int
3753wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3754{
3755 s64 gran, vdiff = curr->vruntime - se->vruntime;
3756
3757 if (vdiff <= 0)
3758 return -1;
3759
e52fb7c0 3760 gran = wakeup_gran(curr, se);
464b7527
PZ
3761 if (vdiff > gran)
3762 return 1;
3763
3764 return 0;
3765}
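
A minimal worked example of the wakeup_preempt_entity() decision, with hypothetical vruntime values and a granularity standing in for the calc_delta_fair() result:

/* Not kernel code: preempt only when 'se' lags 'curr' by more than gran. */
#include <stdio.h>

int main(void)
{
	long long curr_vruntime = 2000000;	/* ns, hypothetical */
	long long se_vruntime   = 1200000;
	long long gran          =  500000;	/* scaled wakeup granularity */

	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		printf("-1: se is not behind curr, no preemption\n");
	else if (vdiff > gran)
		printf(" 1: se lags by more than gran, preempt curr\n");
	else
		printf(" 0: within the granularity, leave curr running\n");
	return 0;
}
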
3766
02479099
PZ
3767static void set_last_buddy(struct sched_entity *se)
3768{
69c80f3e
VP
3769 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3770 return;
3771
3772 for_each_sched_entity(se)
3773 cfs_rq_of(se)->last = se;
02479099
PZ
3774}
3775
3776static void set_next_buddy(struct sched_entity *se)
3777{
69c80f3e
VP
3778 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3779 return;
3780
3781 for_each_sched_entity(se)
3782 cfs_rq_of(se)->next = se;
02479099
PZ
3783}
3784
ac53db59
RR
3785static void set_skip_buddy(struct sched_entity *se)
3786{
69c80f3e
VP
3787 for_each_sched_entity(se)
3788 cfs_rq_of(se)->skip = se;
ac53db59
RR
3789}
3790
bf0f6f24
IM
3791/*
3792 * Preempt the current task with a newly woken task if needed:
3793 */
5a9b86f6 3794static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
3795{
3796 struct task_struct *curr = rq->curr;
8651a86c 3797 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 3798 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 3799 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 3800 int next_buddy_marked = 0;
bf0f6f24 3801
4ae7d5ce
IM
3802 if (unlikely(se == pse))
3803 return;
3804
5238cdd3 3805 /*
ddcdf6e7 3806 * This is possible from callers such as move_task(), in which we
5238cdd3
PT
 3807 * unconditionally check_preempt_curr() after an enqueue (which may have
 3808 * led to a throttle). This both saves work and prevents false
3809 * next-buddy nomination below.
3810 */
3811 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3812 return;
3813
2f36825b 3814 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 3815 set_next_buddy(pse);
2f36825b
VP
3816 next_buddy_marked = 1;
3817 }
57fdc26d 3818
aec0a514
BR
3819 /*
3820 * We can come here with TIF_NEED_RESCHED already set from new task
3821 * wake up path.
5238cdd3
PT
3822 *
3823 * Note: this also catches the edge-case of curr being in a throttled
3824 * group (e.g. via set_curr_task), since update_curr() (in the
3825 * enqueue of curr) will have resulted in resched being set. This
3826 * prevents us from potentially nominating it as a false LAST_BUDDY
3827 * below.
aec0a514
BR
3828 */
3829 if (test_tsk_need_resched(curr))
3830 return;
3831
a2f5c9ab
DH
3832 /* Idle tasks are by definition preempted by non-idle tasks. */
3833 if (unlikely(curr->policy == SCHED_IDLE) &&
3834 likely(p->policy != SCHED_IDLE))
3835 goto preempt;
3836
91c234b4 3837 /*
a2f5c9ab
DH
3838 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3839 * is driven by the tick):
91c234b4 3840 */
8ed92e51 3841 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 3842 return;
bf0f6f24 3843
464b7527 3844 find_matching_se(&se, &pse);
9bbd7374 3845 update_curr(cfs_rq_of(se));
002f128b 3846 BUG_ON(!pse);
2f36825b
VP
3847 if (wakeup_preempt_entity(se, pse) == 1) {
3848 /*
3849 * Bias pick_next to pick the sched entity that is
3850 * triggering this preemption.
3851 */
3852 if (!next_buddy_marked)
3853 set_next_buddy(pse);
3a7e73a2 3854 goto preempt;
2f36825b 3855 }
464b7527 3856
3a7e73a2 3857 return;
a65ac745 3858
3a7e73a2
PZ
3859preempt:
3860 resched_task(curr);
3861 /*
3862 * Only set the backward buddy when the current task is still
3863 * on the rq. This can happen when a wakeup gets interleaved
3864 * with schedule on the ->pre_schedule() or idle_balance()
 3865 * point, either of which can drop the rq lock.
3866 *
3867 * Also, during early boot the idle thread is in the fair class,
3868 * for obvious reasons its a bad idea to schedule back to it.
3869 */
3870 if (unlikely(!se->on_rq || curr == rq->idle))
3871 return;
3872
3873 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3874 set_last_buddy(se);
bf0f6f24
IM
3875}
3876
fb8d4724 3877static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 3878{
8f4d37ec 3879 struct task_struct *p;
bf0f6f24
IM
3880 struct cfs_rq *cfs_rq = &rq->cfs;
3881 struct sched_entity *se;
3882
36ace27e 3883 if (!cfs_rq->nr_running)
bf0f6f24
IM
3884 return NULL;
3885
3886 do {
9948f4b2 3887 se = pick_next_entity(cfs_rq);
f4b6755f 3888 set_next_entity(cfs_rq, se);
bf0f6f24
IM
3889 cfs_rq = group_cfs_rq(se);
3890 } while (cfs_rq);
3891
8f4d37ec 3892 p = task_of(se);
b39e66ea
MG
3893 if (hrtick_enabled(rq))
3894 hrtick_start_fair(rq, p);
8f4d37ec
PZ
3895
3896 return p;
bf0f6f24
IM
3897}
3898
3899/*
3900 * Account for a descheduled task:
3901 */
31ee529c 3902static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
3903{
3904 struct sched_entity *se = &prev->se;
3905 struct cfs_rq *cfs_rq;
3906
3907 for_each_sched_entity(se) {
3908 cfs_rq = cfs_rq_of(se);
ab6cde26 3909 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
3910 }
3911}
3912
ac53db59
RR
3913/*
3914 * sched_yield() is very simple
3915 *
3916 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3917 */
3918static void yield_task_fair(struct rq *rq)
3919{
3920 struct task_struct *curr = rq->curr;
3921 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3922 struct sched_entity *se = &curr->se;
3923
3924 /*
3925 * Are we the only task in the tree?
3926 */
3927 if (unlikely(rq->nr_running == 1))
3928 return;
3929
3930 clear_buddies(cfs_rq, se);
3931
3932 if (curr->policy != SCHED_BATCH) {
3933 update_rq_clock(rq);
3934 /*
3935 * Update run-time statistics of the 'current'.
3936 */
3937 update_curr(cfs_rq);
916671c0
MG
3938 /*
3939 * Tell update_rq_clock() that we've just updated,
3940 * so we don't do microscopic update in schedule()
3941 * and double the fastpath cost.
3942 */
3943 rq->skip_clock_update = 1;
ac53db59
RR
3944 }
3945
3946 set_skip_buddy(se);
3947}
3948
d95f4122
MG
3949static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3950{
3951 struct sched_entity *se = &p->se;
3952
5238cdd3
PT
3953 /* throttled hierarchies are not runnable */
3954 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
3955 return false;
3956
3957 /* Tell the scheduler that we'd really like pse to run next. */
3958 set_next_buddy(se);
3959
d95f4122
MG
3960 yield_task_fair(rq);
3961
3962 return true;
3963}
3964
681f3e68 3965#ifdef CONFIG_SMP
bf0f6f24 3966/**************************************************
e9c84cb8
PZ
3967 * Fair scheduling class load-balancing methods.
3968 *
3969 * BASICS
3970 *
3971 * The purpose of load-balancing is to achieve the same basic fairness the
3972 * per-cpu scheduler provides, namely provide a proportional amount of compute
3973 * time to each task. This is expressed in the following equation:
3974 *
3975 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3976 *
3977 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3978 * W_i,0 is defined as:
3979 *
3980 * W_i,0 = \Sum_j w_i,j (2)
3981 *
3982 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3983 * is derived from the nice value as per prio_to_weight[].
3984 *
3985 * The weight average is an exponential decay average of the instantaneous
3986 * weight:
3987 *
3988 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3989 *
3990 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3991 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3992 * can also include other factors [XXX].
3993 *
3994 * To achieve this balance we define a measure of imbalance which follows
3995 * directly from (1):
3996 *
3997 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3998 *
 3999 * We then move tasks around to minimize the imbalance. In the continuous
4000 * function space it is obvious this converges, in the discrete case we get
4001 * a few fun cases generally called infeasible weight scenarios.
4002 *
4003 * [XXX expand on:
4004 * - infeasible weights;
4005 * - local vs global optima in the discrete case. ]
4006 *
4007 *
4008 * SCHED DOMAINS
4009 *
4010 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4011 * for all i,j solution, we create a tree of cpus that follows the hardware
4012 * topology where each level pairs two lower groups (or better). This results
4013 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4014 * tree to only the first of the previous level and we decrease the frequency
 4015 * of load-balance at each level inversely proportional to the number of cpus in
4016 * the groups.
4017 *
4018 * This yields:
4019 *
4020 * log_2 n 1 n
4021 * \Sum { --- * --- * 2^i } = O(n) (5)
4022 * i = 0 2^i 2^i
4023 * `- size of each group
4024 * | | `- number of cpus doing load-balance
4025 * | `- freq
4026 * `- sum over all levels
4027 *
4028 * Coupled with a limit on how many tasks we can migrate every balance pass,
4029 * this makes (5) the runtime complexity of the balancer.
4030 *
4031 * An important property here is that each CPU is still (indirectly) connected
4032 * to every other cpu in at most O(log n) steps:
4033 *
4034 * The adjacency matrix of the resulting graph is given by:
4035 *
4036 * log_2 n
4037 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4038 * k = 0
4039 *
4040 * And you'll find that:
4041 *
4042 * A^(log_2 n)_i,j != 0 for all i,j (7)
4043 *
4044 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4045 * The task movement gives a factor of O(m), giving a convergence complexity
4046 * of:
4047 *
4048 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4049 *
4050 *
4051 * WORK CONSERVING
4052 *
4053 * In order to avoid CPUs going idle while there's still work to do, new idle
4054 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4055 * tree itself instead of relying on other CPUs to bring it work.
4056 *
4057 * This adds some complexity to both (5) and (8) but it reduces the total idle
4058 * time.
4059 *
4060 * [XXX more?]
4061 *
4062 *
4063 * CGROUPS
4064 *
4065 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4066 *
4067 * s_k,i
4068 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
4069 * S_k
4070 *
4071 * Where
4072 *
4073 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4074 *
4075 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4076 *
 4077 * The big problem is S_k: it's a global sum needed to compute a local (W_i)
4078 * property.
4079 *
4080 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4081 * rewrite all of this once again.]
4082 */
bf0f6f24 4083
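
As a rough, non-kernel illustration of equations (1)-(4), the sketch below computes W_i/P_i for two CPUs with made-up weights and powers and derives the pairwise imbalance of (4):

#include <stdio.h>

int main(void)
{
	/* Hypothetical instantaneous weights (2) and cpu powers. */
	double W[2] = { 3072.0, 1024.0 };	/* e.g. 3 vs 1 nice-0 tasks */
	double P[2] = { 1024.0, 1024.0 };	/* SCHED_POWER_SCALE each */

	double r0 = W[0] / P[0], r1 = W[1] / P[1];
	double avg = (r0 + r1) / 2.0;

	/* imb_i,j = max{avg(W/P), W_i/P_i} - min{avg(W/P), W_j/P_j}   (4) */
	double imb = (r0 > avg ? r0 : avg) - (r1 < avg ? r1 : avg);

	printf("W0/P0=%.2f W1/P1=%.2f avg=%.2f imb=%.2f\n", r0, r1, avg, imb);
	return 0;
}
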
ed387b78
HS
4084static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4085
ddcdf6e7 4086#define LBF_ALL_PINNED 0x01
367456c7 4087#define LBF_NEED_BREAK 0x02
6263322c
PZ
4088#define LBF_DST_PINNED 0x04
4089#define LBF_SOME_PINNED 0x08
ddcdf6e7
PZ
4090
4091struct lb_env {
4092 struct sched_domain *sd;
4093
ddcdf6e7 4094 struct rq *src_rq;
85c1e7da 4095 int src_cpu;
ddcdf6e7
PZ
4096
4097 int dst_cpu;
4098 struct rq *dst_rq;
4099
88b8dac0
SV
4100 struct cpumask *dst_grpmask;
4101 int new_dst_cpu;
ddcdf6e7 4102 enum cpu_idle_type idle;
bd939f45 4103 long imbalance;
b9403130
MW
4104 /* The set of CPUs under consideration for load-balancing */
4105 struct cpumask *cpus;
4106
ddcdf6e7 4107 unsigned int flags;
367456c7
PZ
4108
4109 unsigned int loop;
4110 unsigned int loop_break;
4111 unsigned int loop_max;
ddcdf6e7
PZ
4112};
4113
1e3c88bd 4114/*
ddcdf6e7 4115 * move_task - move a task from one runqueue to another runqueue.
1e3c88bd
PZ
4116 * Both runqueues must be locked.
4117 */
ddcdf6e7 4118static void move_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 4119{
ddcdf6e7
PZ
4120 deactivate_task(env->src_rq, p, 0);
4121 set_task_cpu(p, env->dst_cpu);
4122 activate_task(env->dst_rq, p, 0);
4123 check_preempt_curr(env->dst_rq, p, 0);
6fe6b2d6
RR
4124#ifdef CONFIG_NUMA_BALANCING
4125 if (p->numa_preferred_nid != -1) {
4126 int src_nid = cpu_to_node(env->src_cpu);
4127 int dst_nid = cpu_to_node(env->dst_cpu);
4128
4129 /*
 4130 * If the load balancer has moved the task then limit
 4131 * further migrations in the short term, in case this
 4132 * is a short-lived migration.
4133 */
4134 if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
4135 p->numa_migrate_seq = 0;
4136 }
4137#endif
1e3c88bd
PZ
4138}
4139
029632fb
PZ
4140/*
4141 * Is this task likely cache-hot:
4142 */
4143static int
4144task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4145{
4146 s64 delta;
4147
4148 if (p->sched_class != &fair_sched_class)
4149 return 0;
4150
4151 if (unlikely(p->policy == SCHED_IDLE))
4152 return 0;
4153
4154 /*
4155 * Buddy candidates are cache hot:
4156 */
4157 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4158 (&p->se == cfs_rq_of(&p->se)->next ||
4159 &p->se == cfs_rq_of(&p->se)->last))
4160 return 1;
4161
4162 if (sysctl_sched_migration_cost == -1)
4163 return 1;
4164 if (sysctl_sched_migration_cost == 0)
4165 return 0;
4166
4167 delta = now - p->se.exec_start;
4168
4169 return delta < (s64)sysctl_sched_migration_cost;
4170}
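
A minimal sketch of the task_hot() cut-off with hypothetical timestamps (sysctl_sched_migration_cost defaults to 0.5 ms):

/* Not kernel code: a task that ran very recently is considered cache hot. */
#include <stdio.h>

int main(void)
{
	long long migration_cost = 500000;	/* 0.5 ms in ns */
	long long now = 10000000;		/* hypothetical clock, ns */
	long long exec_start = 9700000;		/* task last ran 0.3 ms ago */

	long long delta = now - exec_start;

	printf("delta=%lld ns -> %s\n", delta,
	       delta < migration_cost ? "cache hot, resist migration"
				      : "cache cold, free to migrate");
	return 0;
}
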
4171
3a7053b3
MG
4172#ifdef CONFIG_NUMA_BALANCING
4173/* Returns true if the destination node has incurred more faults */
4174static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4175{
4176 int src_nid, dst_nid;
4177
4178 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4179 !(env->sd->flags & SD_NUMA)) {
4180 return false;
4181 }
4182
4183 src_nid = cpu_to_node(env->src_cpu);
4184 dst_nid = cpu_to_node(env->dst_cpu);
4185
4186 if (src_nid == dst_nid ||
4187 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4188 return false;
4189
4190 if (dst_nid == p->numa_preferred_nid ||
ac8e895b 4191 task_faults(p, dst_nid) > task_faults(p, src_nid))
3a7053b3
MG
4192 return true;
4193
4194 return false;
4195}
7a0f3083
MG
4196
4197
4198static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4199{
4200 int src_nid, dst_nid;
4201
4202 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4203 return false;
4204
4205 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4206 return false;
4207
4208 src_nid = cpu_to_node(env->src_cpu);
4209 dst_nid = cpu_to_node(env->dst_cpu);
4210
4211 if (src_nid == dst_nid ||
4212 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4213 return false;
4214
ac8e895b 4215 if (task_faults(p, dst_nid) < task_faults(p, src_nid))
7a0f3083
MG
4216 return true;
4217
4218 return false;
4219}
4220
3a7053b3
MG
4221#else
4222static inline bool migrate_improves_locality(struct task_struct *p,
4223 struct lb_env *env)
4224{
4225 return false;
4226}
7a0f3083
MG
4227
4228static inline bool migrate_degrades_locality(struct task_struct *p,
4229 struct lb_env *env)
4230{
4231 return false;
4232}
3a7053b3
MG
4233#endif
4234
1e3c88bd
PZ
4235/*
 4236 * can_migrate_task - can task p be migrated from runqueue rq to this_cpu?
4237 */
4238static
8e45cb54 4239int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd
PZ
4240{
4241 int tsk_cache_hot = 0;
4242 /*
4243 * We do not migrate tasks that are:
d3198084 4244 * 1) throttled_lb_pair, or
1e3c88bd 4245 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
4246 * 3) running (obviously), or
4247 * 4) are cache-hot on their current CPU.
1e3c88bd 4248 */
d3198084
JK
4249 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4250 return 0;
4251
ddcdf6e7 4252 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
e02e60c1 4253 int cpu;
88b8dac0 4254
41acab88 4255 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
88b8dac0 4256
6263322c
PZ
4257 env->flags |= LBF_SOME_PINNED;
4258
88b8dac0
SV
4259 /*
4260 * Remember if this task can be migrated to any other cpu in
4261 * our sched_group. We may want to revisit it if we couldn't
4262 * meet load balance goals by pulling other tasks on src_cpu.
4263 *
4264 * Also avoid computing new_dst_cpu if we have already computed
4265 * one in current iteration.
4266 */
6263322c 4267 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
4268 return 0;
4269
e02e60c1
JK
 4270 /* Prevent re-selecting dst_cpu via env's cpus */
4271 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4272 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6263322c 4273 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
4274 env->new_dst_cpu = cpu;
4275 break;
4276 }
88b8dac0 4277 }
e02e60c1 4278
1e3c88bd
PZ
4279 return 0;
4280 }
88b8dac0
SV
4281
 4282 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 4283 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 4284
ddcdf6e7 4285 if (task_running(env->src_rq, p)) {
41acab88 4286 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
4287 return 0;
4288 }
4289
4290 /*
4291 * Aggressive migration if:
3a7053b3
MG
4292 * 1) destination numa is preferred
4293 * 2) task is cache cold, or
4294 * 3) too many balance attempts have failed.
1e3c88bd 4295 */
78becc27 4296 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
7a0f3083
MG
4297 if (!tsk_cache_hot)
4298 tsk_cache_hot = migrate_degrades_locality(p, env);
3a7053b3
MG
4299
4300 if (migrate_improves_locality(p, env)) {
4301#ifdef CONFIG_SCHEDSTATS
4302 if (tsk_cache_hot) {
4303 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4304 schedstat_inc(p, se.statistics.nr_forced_migrations);
4305 }
4306#endif
4307 return 1;
4308 }
4309
1e3c88bd 4310 if (!tsk_cache_hot ||
8e45cb54 4311 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
4e2dcb73 4312
1e3c88bd 4313 if (tsk_cache_hot) {
8e45cb54 4314 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
41acab88 4315 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd 4316 }
4e2dcb73 4317
1e3c88bd
PZ
4318 return 1;
4319 }
4320
4e2dcb73
ZH
4321 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4322 return 0;
1e3c88bd
PZ
4323}
4324
897c395f
PZ
4325/*
4326 * move_one_task tries to move exactly one task from busiest to this_rq, as
4327 * part of active balancing operations within "domain".
4328 * Returns 1 if successful and 0 otherwise.
4329 *
4330 * Called with both runqueues locked.
4331 */
8e45cb54 4332static int move_one_task(struct lb_env *env)
897c395f
PZ
4333{
4334 struct task_struct *p, *n;
897c395f 4335
367456c7 4336 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
4337 if (!can_migrate_task(p, env))
4338 continue;
897c395f 4339
367456c7
PZ
4340 move_task(p, env);
4341 /*
4342 * Right now, this is only the second place move_task()
4343 * is called, so we can safely collect move_task()
4344 * stats here rather than inside move_task().
4345 */
4346 schedstat_inc(env->sd, lb_gained[env->idle]);
4347 return 1;
897c395f 4348 }
897c395f
PZ
4349 return 0;
4350}
4351
367456c7
PZ
4352static unsigned long task_h_load(struct task_struct *p);
4353
eb95308e
PZ
4354static const unsigned int sched_nr_migrate_break = 32;
4355
5d6523eb 4356/*
bd939f45 4357 * move_tasks tries to move up to imbalance weighted load from busiest to
5d6523eb
PZ
4358 * this_rq, as part of a balancing operation within domain "sd".
4359 * Returns 1 if successful and 0 otherwise.
4360 *
4361 * Called with both runqueues locked.
4362 */
4363static int move_tasks(struct lb_env *env)
1e3c88bd 4364{
5d6523eb
PZ
4365 struct list_head *tasks = &env->src_rq->cfs_tasks;
4366 struct task_struct *p;
367456c7
PZ
4367 unsigned long load;
4368 int pulled = 0;
1e3c88bd 4369
bd939f45 4370 if (env->imbalance <= 0)
5d6523eb 4371 return 0;
1e3c88bd 4372
5d6523eb
PZ
4373 while (!list_empty(tasks)) {
4374 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 4375
367456c7
PZ
4376 env->loop++;
4377 /* We've more or less seen every task there is, call it quits */
5d6523eb 4378 if (env->loop > env->loop_max)
367456c7 4379 break;
5d6523eb
PZ
4380
4381 /* take a breather every nr_migrate tasks */
367456c7 4382 if (env->loop > env->loop_break) {
eb95308e 4383 env->loop_break += sched_nr_migrate_break;
8e45cb54 4384 env->flags |= LBF_NEED_BREAK;
ee00e66f 4385 break;
a195f004 4386 }
1e3c88bd 4387
d3198084 4388 if (!can_migrate_task(p, env))
367456c7
PZ
4389 goto next;
4390
4391 load = task_h_load(p);
5d6523eb 4392
eb95308e 4393 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
4394 goto next;
4395
bd939f45 4396 if ((load / 2) > env->imbalance)
367456c7 4397 goto next;
1e3c88bd 4398
ddcdf6e7 4399 move_task(p, env);
ee00e66f 4400 pulled++;
bd939f45 4401 env->imbalance -= load;
1e3c88bd
PZ
4402
4403#ifdef CONFIG_PREEMPT
ee00e66f
PZ
4404 /*
4405 * NEWIDLE balancing is a source of latency, so preemptible
4406 * kernels will stop after the first task is pulled to minimize
4407 * the critical section.
4408 */
5d6523eb 4409 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 4410 break;
1e3c88bd
PZ
4411#endif
4412
ee00e66f
PZ
4413 /*
4414 * We only want to steal up to the prescribed amount of
4415 * weighted load.
4416 */
bd939f45 4417 if (env->imbalance <= 0)
ee00e66f 4418 break;
367456c7
PZ
4419
4420 continue;
4421next:
5d6523eb 4422 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 4423 }
5d6523eb 4424
1e3c88bd 4425 /*
ddcdf6e7
PZ
4426 * Right now, this is one of only two places move_task() is called,
4427 * so we can safely collect move_task() stats here rather than
4428 * inside move_task().
1e3c88bd 4429 */
8e45cb54 4430 schedstat_add(env->sd, lb_gained[env->idle], pulled);
1e3c88bd 4431
5d6523eb 4432 return pulled;
1e3c88bd
PZ
4433}
4434
230059de 4435#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
4436/*
4437 * update tg->load_weight by folding this cpu's load_avg
4438 */
48a16753 4439static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
9e3081ca 4440{
48a16753
PT
4441 struct sched_entity *se = tg->se[cpu];
4442 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
9e3081ca 4443
48a16753
PT
4444 /* throttled entities do not contribute to load */
4445 if (throttled_hierarchy(cfs_rq))
4446 return;
9e3081ca 4447
aff3e498 4448 update_cfs_rq_blocked_load(cfs_rq, 1);
9e3081ca 4449
82958366
PT
4450 if (se) {
4451 update_entity_load_avg(se, 1);
4452 /*
4453 * We pivot on our runnable average having decayed to zero for
4454 * list removal. This generally implies that all our children
4455 * have also been removed (modulo rounding error or bandwidth
4456 * control); however, such cases are rare and we can fix these
4457 * at enqueue.
4458 *
4459 * TODO: fix up out-of-order children on enqueue.
4460 */
4461 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4462 list_del_leaf_cfs_rq(cfs_rq);
4463 } else {
48a16753 4464 struct rq *rq = rq_of(cfs_rq);
82958366
PT
4465 update_rq_runnable_avg(rq, rq->nr_running);
4466 }
9e3081ca
PZ
4467}
4468
48a16753 4469static void update_blocked_averages(int cpu)
9e3081ca 4470{
9e3081ca 4471 struct rq *rq = cpu_rq(cpu);
48a16753
PT
4472 struct cfs_rq *cfs_rq;
4473 unsigned long flags;
9e3081ca 4474
48a16753
PT
4475 raw_spin_lock_irqsave(&rq->lock, flags);
4476 update_rq_clock(rq);
9763b67f
PZ
4477 /*
4478 * Iterates the task_group tree in a bottom up fashion, see
4479 * list_add_leaf_cfs_rq() for details.
4480 */
64660c86 4481 for_each_leaf_cfs_rq(rq, cfs_rq) {
48a16753
PT
4482 /*
4483 * Note: We may want to consider periodically releasing
 4484 * rq->lock around these updates so that creating many task
4485 * groups does not result in continually extending hold time.
4486 */
4487 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
64660c86 4488 }
48a16753
PT
4489
4490 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
4491}
4492
9763b67f 4493/*
68520796 4494 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
9763b67f
PZ
4495 * This needs to be done in a top-down fashion because the load of a child
 4496 * group is a fraction of its parent's load.
4497 */
68520796 4498static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 4499{
68520796
VD
4500 struct rq *rq = rq_of(cfs_rq);
4501 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 4502 unsigned long now = jiffies;
68520796 4503 unsigned long load;
a35b6466 4504
68520796 4505 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
4506 return;
4507
68520796
VD
4508 cfs_rq->h_load_next = NULL;
4509 for_each_sched_entity(se) {
4510 cfs_rq = cfs_rq_of(se);
4511 cfs_rq->h_load_next = se;
4512 if (cfs_rq->last_h_load_update == now)
4513 break;
4514 }
a35b6466 4515
68520796 4516 if (!se) {
7e3115ef 4517 cfs_rq->h_load = cfs_rq->runnable_load_avg;
68520796
VD
4518 cfs_rq->last_h_load_update = now;
4519 }
4520
4521 while ((se = cfs_rq->h_load_next) != NULL) {
4522 load = cfs_rq->h_load;
4523 load = div64_ul(load * se->avg.load_avg_contrib,
4524 cfs_rq->runnable_load_avg + 1);
4525 cfs_rq = group_cfs_rq(se);
4526 cfs_rq->h_load = load;
4527 cfs_rq->last_h_load_update = now;
4528 }
9763b67f
PZ
4529}
4530
367456c7 4531static unsigned long task_h_load(struct task_struct *p)
230059de 4532{
367456c7 4533 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 4534
68520796 4535 update_cfs_rq_h_load(cfs_rq);
a003a25b
AS
4536 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4537 cfs_rq->runnable_load_avg + 1);
230059de
PZ
4538}
4539#else
48a16753 4540static inline void update_blocked_averages(int cpu)
9e3081ca
PZ
4541{
4542}
4543
367456c7 4544static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 4545{
a003a25b 4546 return p->se.avg.load_avg_contrib;
1e3c88bd 4547}
230059de 4548#endif
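
A rough standalone sketch of the top-down h_load propagation performed by update_cfs_rq_h_load()/task_h_load() above, using two made-up nesting levels:

/* Not kernel code: each level scales the parent's h_load by this entity's
 * share of its cfs_rq's runnable load, mirroring the loop above. */
#include <stdio.h>

int main(void)
{
	/* Root cfs_rq: h_load starts as its runnable_load_avg. */
	unsigned long long h_load = 2048;

	/* Per level: { se->avg.load_avg_contrib, cfs_rq->runnable_load_avg } */
	unsigned long long level[2][2] = { { 1024, 2048 }, { 512, 1024 } };

	for (int i = 0; i < 2; i++)
		h_load = h_load * level[i][0] / (level[i][1] + 1);

	printf("h_load seen by the deepest group ~ %llu\n", h_load);
	return 0;
}
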
1e3c88bd 4549
1e3c88bd 4550/********** Helpers for find_busiest_group ************************/
1e3c88bd
PZ
4551/*
4552 * sg_lb_stats - stats of a sched_group required for load_balancing
4553 */
4554struct sg_lb_stats {
 4555 unsigned long avg_load; /* Avg load across the CPUs of the group */
4556 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 4557 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 4558 unsigned long load_per_task;
3ae11c90 4559 unsigned long group_power;
147c5fc2
PZ
4560 unsigned int sum_nr_running; /* Nr tasks running in the group */
4561 unsigned int group_capacity;
4562 unsigned int idle_cpus;
4563 unsigned int group_weight;
1e3c88bd 4564 int group_imb; /* Is there an imbalance in the group ? */
fab47622 4565 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
4566};
4567
56cf515b
JK
4568/*
4569 * sd_lb_stats - Structure to store the statistics of a sched_domain
4570 * during load balancing.
4571 */
4572struct sd_lb_stats {
4573 struct sched_group *busiest; /* Busiest group in this sd */
4574 struct sched_group *local; /* Local group in this sd */
4575 unsigned long total_load; /* Total load of all groups in sd */
4576 unsigned long total_pwr; /* Total power of all groups in sd */
4577 unsigned long avg_load; /* Average load across all groups in sd */
4578
56cf515b 4579 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 4580 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
4581};
4582
147c5fc2
PZ
4583static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
4584{
4585 /*
4586 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
4587 * local_stat because update_sg_lb_stats() does a full clear/assignment.
4588 * We must however clear busiest_stat::avg_load because
4589 * update_sd_pick_busiest() reads this before assignment.
4590 */
4591 *sds = (struct sd_lb_stats){
4592 .busiest = NULL,
4593 .local = NULL,
4594 .total_load = 0UL,
4595 .total_pwr = 0UL,
4596 .busiest_stat = {
4597 .avg_load = 0UL,
4598 },
4599 };
4600}
4601
1e3c88bd
PZ
4602/**
4603 * get_sd_load_idx - Obtain the load index for a given sched domain.
4604 * @sd: The sched_domain whose load_idx is to be obtained.
 4605 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
e69f6186
YB
4606 *
4607 * Return: The load index.
1e3c88bd
PZ
4608 */
4609static inline int get_sd_load_idx(struct sched_domain *sd,
4610 enum cpu_idle_type idle)
4611{
4612 int load_idx;
4613
4614 switch (idle) {
4615 case CPU_NOT_IDLE:
4616 load_idx = sd->busy_idx;
4617 break;
4618
4619 case CPU_NEWLY_IDLE:
4620 load_idx = sd->newidle_idx;
4621 break;
4622 default:
4623 load_idx = sd->idle_idx;
4624 break;
4625 }
4626
4627 return load_idx;
4628}
4629
15f803c9 4630static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
1e3c88bd 4631{
1399fa78 4632 return SCHED_POWER_SCALE;
1e3c88bd
PZ
4633}
4634
4635unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4636{
4637 return default_scale_freq_power(sd, cpu);
4638}
4639
15f803c9 4640static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
1e3c88bd 4641{
669c55e9 4642 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
4643 unsigned long smt_gain = sd->smt_gain;
4644
4645 smt_gain /= weight;
4646
4647 return smt_gain;
4648}
4649
4650unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4651{
4652 return default_scale_smt_power(sd, cpu);
4653}
4654
15f803c9 4655static unsigned long scale_rt_power(int cpu)
1e3c88bd
PZ
4656{
4657 struct rq *rq = cpu_rq(cpu);
b654f7de 4658 u64 total, available, age_stamp, avg;
1e3c88bd 4659
b654f7de
PZ
4660 /*
4661 * Since we're reading these variables without serialization make sure
4662 * we read them once before doing sanity checks on them.
4663 */
4664 age_stamp = ACCESS_ONCE(rq->age_stamp);
4665 avg = ACCESS_ONCE(rq->rt_avg);
4666
78becc27 4667 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
aa483808 4668
b654f7de 4669 if (unlikely(total < avg)) {
aa483808
VP
4670 /* Ensures that power won't end up being negative */
4671 available = 0;
4672 } else {
b654f7de 4673 available = total - avg;
aa483808 4674 }
1e3c88bd 4675
1399fa78
NR
4676 if (unlikely((s64)total < SCHED_POWER_SCALE))
4677 total = SCHED_POWER_SCALE;
1e3c88bd 4678
1399fa78 4679 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4680
4681 return div_u64(available, total);
4682}
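
A minimal userspace sketch of the scale_rt_power() fraction with hypothetical period and rt_avg values:

/* Not kernel code: the share of the period left after RT/irq time,
 * expressed so that a fully available cpu reports ~1024. */
#include <stdio.h>

#define SCHED_POWER_SHIFT 10

int main(void)
{
	unsigned long long total = 1000000000ULL;	/* ~1s period, ns */
	unsigned long long rt_avg = 250000000ULL;	/* 25% spent in RT/irq */

	unsigned long long available = total > rt_avg ? total - rt_avg : 0;

	total >>= SCHED_POWER_SHIFT;	/* divide in power units, as above */
	printf("rt scale factor ~ %llu / 1024\n", available / total);
	return 0;
}
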
4683
4684static void update_cpu_power(struct sched_domain *sd, int cpu)
4685{
669c55e9 4686 unsigned long weight = sd->span_weight;
1399fa78 4687 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
4688 struct sched_group *sdg = sd->groups;
4689
1e3c88bd
PZ
4690 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4691 if (sched_feat(ARCH_POWER))
4692 power *= arch_scale_smt_power(sd, cpu);
4693 else
4694 power *= default_scale_smt_power(sd, cpu);
4695
1399fa78 4696 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4697 }
4698
9c3f75cb 4699 sdg->sgp->power_orig = power;
9d5efe05
SV
4700
4701 if (sched_feat(ARCH_POWER))
4702 power *= arch_scale_freq_power(sd, cpu);
4703 else
4704 power *= default_scale_freq_power(sd, cpu);
4705
1399fa78 4706 power >>= SCHED_POWER_SHIFT;
9d5efe05 4707
1e3c88bd 4708 power *= scale_rt_power(cpu);
1399fa78 4709 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4710
4711 if (!power)
4712 power = 1;
4713
e51fd5e2 4714 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 4715 sdg->sgp->power = power;
1e3c88bd
PZ
4716}
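
A rough sketch of how update_cpu_power() composes its scale factors; the SMT, frequency and RT factors below are hypothetical, and the SMT step only applies to SD_SHARE_CPUPOWER domains:

/* Not kernel code: fold each factor in and shift back to power units. */
#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL
#define SCHED_POWER_SHIFT 10

int main(void)
{
	unsigned long power = SCHED_POWER_SCALE;
	unsigned long smt_factor  = 589;	/* e.g. smt_gain 1178 / 2 siblings */
	unsigned long freq_factor = 1024;	/* default_scale_freq_power() */
	unsigned long rt_factor   = 768;	/* 75% left after RT, see scale_rt_power() */

	power = (power * smt_factor)  >> SCHED_POWER_SHIFT;
	power = (power * freq_factor) >> SCHED_POWER_SHIFT;
	power = (power * rt_factor)   >> SCHED_POWER_SHIFT;

	printf("cpu_power ~ %lu (SCHED_POWER_SCALE = %lu)\n",
	       power, SCHED_POWER_SCALE);
	return 0;
}
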
4717
029632fb 4718void update_group_power(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
4719{
4720 struct sched_domain *child = sd->child;
4721 struct sched_group *group, *sdg = sd->groups;
863bffc8 4722 unsigned long power, power_orig;
4ec4412e
VG
4723 unsigned long interval;
4724
4725 interval = msecs_to_jiffies(sd->balance_interval);
4726 interval = clamp(interval, 1UL, max_load_balance_interval);
4727 sdg->sgp->next_update = jiffies + interval;
1e3c88bd
PZ
4728
4729 if (!child) {
4730 update_cpu_power(sd, cpu);
4731 return;
4732 }
4733
863bffc8 4734 power_orig = power = 0;
1e3c88bd 4735
74a5ce20
PZ
4736 if (child->flags & SD_OVERLAP) {
4737 /*
4738 * SD_OVERLAP domains cannot assume that child groups
4739 * span the current group.
4740 */
4741
863bffc8
PZ
4742 for_each_cpu(cpu, sched_group_cpus(sdg)) {
4743 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
4744
4745 power_orig += sg->sgp->power_orig;
4746 power += sg->sgp->power;
4747 }
74a5ce20
PZ
4748 } else {
4749 /*
4750 * !SD_OVERLAP domains can assume that child groups
4751 * span the current group.
4752 */
4753
4754 group = child->groups;
4755 do {
863bffc8 4756 power_orig += group->sgp->power_orig;
74a5ce20
PZ
4757 power += group->sgp->power;
4758 group = group->next;
4759 } while (group != child->groups);
4760 }
1e3c88bd 4761
863bffc8
PZ
4762 sdg->sgp->power_orig = power_orig;
4763 sdg->sgp->power = power;
1e3c88bd
PZ
4764}
4765
9d5efe05
SV
4766/*
 4767 * Try and fix up capacity for tiny siblings; this is needed when
4768 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4769 * which on its own isn't powerful enough.
4770 *
4771 * See update_sd_pick_busiest() and check_asym_packing().
4772 */
4773static inline int
4774fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4775{
4776 /*
1399fa78 4777 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 4778 */
a6c75f2f 4779 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
4780 return 0;
4781
4782 /*
4783 * If ~90% of the cpu_power is still there, we're good.
4784 */
9c3f75cb 4785 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
4786 return 1;
4787
4788 return 0;
4789}
4790
30ce5dab
PZ
4791/*
4792 * Group imbalance indicates (and tries to solve) the problem where balancing
4793 * groups is inadequate due to tsk_cpus_allowed() constraints.
4794 *
4795 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
4796 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
4797 * Something like:
4798 *
4799 * { 0 1 2 3 } { 4 5 6 7 }
4800 * * * * *
4801 *
4802 * If we were to balance group-wise we'd place two tasks in the first group and
4803 * two tasks in the second group. Clearly this is undesired as it will overload
4804 * cpu 3 and leave one of the cpus in the second group unused.
4805 *
4806 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
4807 * by noticing the lower domain failed to reach balance and had difficulty
4808 * moving tasks due to affinity constraints.
30ce5dab
PZ
4809 *
4810 * When this is so detected; this group becomes a candidate for busiest; see
 4811 * update_sd_pick_busiest(). And calculate_imbalance() and
6263322c 4812 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
4813 * to create an effective group imbalance.
4814 *
4815 * This is a somewhat tricky proposition since the next run might not find the
4816 * group imbalance and decide the groups need to be balanced again. A most
4817 * subtle and fragile situation.
4818 */
4819
6263322c 4820static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 4821{
6263322c 4822 return group->sgp->imbalance;
30ce5dab
PZ
4823}
4824
b37d9316
PZ
4825/*
4826 * Compute the group capacity.
4827 *
c61037e9
PZ
4828 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
4829 * first dividing out the smt factor and computing the actual number of cores
 4830 * and limiting the capacity (in power units) with that.
b37d9316
PZ
4831 */
4832static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
4833{
c61037e9
PZ
4834 unsigned int capacity, smt, cpus;
4835 unsigned int power, power_orig;
4836
4837 power = group->sgp->power;
4838 power_orig = group->sgp->power_orig;
4839 cpus = group->group_weight;
b37d9316 4840
c61037e9
PZ
4841 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
4842 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
4843 capacity = cpus / smt; /* cores */
b37d9316 4844
c61037e9 4845 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
b37d9316
PZ
4846 if (!capacity)
4847 capacity = fix_small_capacity(env->sd, group);
4848
4849 return capacity;
4850}
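
A minimal sketch of the sg_capacity() arithmetic for a hypothetical two-sibling SMT group, showing how dividing out the SMT factor avoids counting a phantom second core:

/* Not kernel code: smt = ceil(cpus * 1024 / power_orig), then cap the
 * core count by the group's scaled power. */
#include <stdio.h>

#define SCHED_POWER_SCALE 1024U
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cpus = 2;			/* two SMT siblings */
	unsigned int power_orig = 1178;		/* smt_gain spread over the pair */
	unsigned int power = 1100;		/* after freq/rt scaling */

	unsigned int smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
	unsigned int capacity = cpus / smt;	/* whole cores */
	unsigned int by_power = (power + SCHED_POWER_SCALE / 2) / SCHED_POWER_SCALE;

	if (capacity > by_power)
		capacity = by_power;

	printf("smt=%u -> group capacity = %u task(s)\n", smt, capacity);
	return 0;
}
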
4851
1e3c88bd
PZ
4852/**
4853 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 4854 * @env: The load balancing environment.
1e3c88bd 4855 * @group: sched_group whose statistics are to be updated.
1e3c88bd 4856 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 4857 * @local_group: Does group contain this_cpu.
1e3c88bd
PZ
4858 * @sgs: variable to hold the statistics for this group.
4859 */
bd939f45
PZ
4860static inline void update_sg_lb_stats(struct lb_env *env,
4861 struct sched_group *group, int load_idx,
23f0d209 4862 int local_group, struct sg_lb_stats *sgs)
1e3c88bd 4863{
30ce5dab
PZ
4864 unsigned long nr_running;
4865 unsigned long load;
bd939f45 4866 int i;
1e3c88bd 4867
b72ff13c
PZ
4868 memset(sgs, 0, sizeof(*sgs));
4869
b9403130 4870 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
4871 struct rq *rq = cpu_rq(i);
4872
e44bc5c5
PZ
4873 nr_running = rq->nr_running;
4874
1e3c88bd 4875 /* Bias balancing toward cpus of our domain */
6263322c 4876 if (local_group)
04f733b4 4877 load = target_load(i, load_idx);
6263322c 4878 else
1e3c88bd 4879 load = source_load(i, load_idx);
1e3c88bd
PZ
4880
4881 sgs->group_load += load;
e44bc5c5 4882 sgs->sum_nr_running += nr_running;
1e3c88bd 4883 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
4884 if (idle_cpu(i))
4885 sgs->idle_cpus++;
1e3c88bd
PZ
4886 }
4887
1e3c88bd 4888 /* Adjust by relative CPU power of the group */
3ae11c90
PZ
4889 sgs->group_power = group->sgp->power;
4890 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
1e3c88bd 4891
dd5feea1 4892 if (sgs->sum_nr_running)
38d0f770 4893 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 4894
aae6d3dd 4895 sgs->group_weight = group->group_weight;
fab47622 4896
b37d9316
PZ
4897 sgs->group_imb = sg_imbalanced(group);
4898 sgs->group_capacity = sg_capacity(env, group);
4899
fab47622
NR
4900 if (sgs->group_capacity > sgs->sum_nr_running)
4901 sgs->group_has_capacity = 1;
1e3c88bd
PZ
4902}
4903
532cb4c4
MN
4904/**
4905 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 4906 * @env: The load balancing environment.
532cb4c4
MN
4907 * @sds: sched_domain statistics
4908 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 4909 * @sgs: sched_group statistics
532cb4c4
MN
4910 *
4911 * Determine if @sg is a busier group than the previously selected
4912 * busiest group.
e69f6186
YB
4913 *
4914 * Return: %true if @sg is a busier group than the previously selected
4915 * busiest group. %false otherwise.
532cb4c4 4916 */
bd939f45 4917static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
4918 struct sd_lb_stats *sds,
4919 struct sched_group *sg,
bd939f45 4920 struct sg_lb_stats *sgs)
532cb4c4 4921{
56cf515b 4922 if (sgs->avg_load <= sds->busiest_stat.avg_load)
532cb4c4
MN
4923 return false;
4924
4925 if (sgs->sum_nr_running > sgs->group_capacity)
4926 return true;
4927
4928 if (sgs->group_imb)
4929 return true;
4930
4931 /*
4932 * ASYM_PACKING needs to move all the work to the lowest
4933 * numbered CPUs in the group, therefore mark all groups
4934 * higher than ourself as busy.
4935 */
bd939f45
PZ
4936 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4937 env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
4938 if (!sds->busiest)
4939 return true;
4940
4941 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4942 return true;
4943 }
4944
4945 return false;
4946}
4947
1e3c88bd 4948/**
461819ac 4949 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 4950 * @env: The load balancing environment.
1e3c88bd
PZ
4951 * @balance: Should we balance.
4952 * @sds: variable to hold the statistics for this sched_domain.
4953 */
bd939f45 4954static inline void update_sd_lb_stats(struct lb_env *env,
23f0d209 4955 struct sd_lb_stats *sds)
1e3c88bd 4956{
bd939f45
PZ
4957 struct sched_domain *child = env->sd->child;
4958 struct sched_group *sg = env->sd->groups;
56cf515b 4959 struct sg_lb_stats tmp_sgs;
1e3c88bd
PZ
4960 int load_idx, prefer_sibling = 0;
4961
4962 if (child && child->flags & SD_PREFER_SIBLING)
4963 prefer_sibling = 1;
4964
bd939f45 4965 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
4966
4967 do {
56cf515b 4968 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
4969 int local_group;
4970
bd939f45 4971 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
56cf515b
JK
4972 if (local_group) {
4973 sds->local = sg;
4974 sgs = &sds->local_stat;
b72ff13c
PZ
4975
4976 if (env->idle != CPU_NEWLY_IDLE ||
4977 time_after_eq(jiffies, sg->sgp->next_update))
4978 update_group_power(env->sd, env->dst_cpu);
56cf515b 4979 }
1e3c88bd 4980
56cf515b 4981 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
1e3c88bd 4982
b72ff13c
PZ
4983 if (local_group)
4984 goto next_group;
4985
1e3c88bd
PZ
4986 /*
4987 * In case the child domain prefers tasks go to siblings
532cb4c4 4988 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
4989 * and move all the excess tasks away. We lower the capacity
4990 * of a group only if the local group has the capacity to fit
4991 * these excess tasks, i.e. nr_running < group_capacity. The
4992 * extra check prevents the case where you always pull from the
4993 * heaviest group when it is already under-utilized (possible
 4994 * when a large weight task outweighs the other tasks on the system).
1e3c88bd 4995 */
b72ff13c
PZ
4996 if (prefer_sibling && sds->local &&
4997 sds->local_stat.group_has_capacity)
147c5fc2 4998 sgs->group_capacity = min(sgs->group_capacity, 1U);
1e3c88bd 4999
b72ff13c 5000 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 5001 sds->busiest = sg;
56cf515b 5002 sds->busiest_stat = *sgs;
1e3c88bd
PZ
5003 }
5004
b72ff13c
PZ
5005next_group:
5006 /* Now, start updating sd_lb_stats */
5007 sds->total_load += sgs->group_load;
5008 sds->total_pwr += sgs->group_power;
5009
532cb4c4 5010 sg = sg->next;
bd939f45 5011 } while (sg != env->sd->groups);
532cb4c4
MN
5012}
5013
532cb4c4
MN
5014/**
5015 * check_asym_packing - Check to see if the group is packed into the
 5016 * sched domain.
5017 *
 5018 * This is primarily intended to be used at the sibling level. Some
5019 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5020 * case of POWER7, it can move to lower SMT modes only when higher
5021 * threads are idle. When in lower SMT modes, the threads will
 5022 * perform better since they share fewer core resources. Hence when we
5023 * have idle threads, we want them to be the higher ones.
5024 *
5025 * This packing function is run on idle threads. It checks to see if
5026 * the busiest CPU in this domain (core in the P7 case) has a higher
5027 * CPU number than the packing function is being run on. Here we are
 5028 * assuming a lower CPU number will be equivalent to a lower SMT thread
5029 * number.
5030 *
e69f6186 5031 * Return: 1 when packing is required and a task should be moved to
b6b12294
MN
5032 * this CPU. The amount of the imbalance is returned in *imbalance.
5033 *
cd96891d 5034 * @env: The load balancing environment.
532cb4c4 5035 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 5036 */
bd939f45 5037static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
5038{
5039 int busiest_cpu;
5040
bd939f45 5041 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
5042 return 0;
5043
5044 if (!sds->busiest)
5045 return 0;
5046
5047 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 5048 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
5049 return 0;
5050
bd939f45 5051 env->imbalance = DIV_ROUND_CLOSEST(
3ae11c90
PZ
5052 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5053 SCHED_POWER_SCALE);
bd939f45 5054
532cb4c4 5055 return 1;
1e3c88bd
PZ
5056}
5057
5058/**
5059 * fix_small_imbalance - Calculate the minor imbalance that exists
5060 * amongst the groups of a sched_domain, during
5061 * load balancing.
cd96891d 5062 * @env: The load balancing environment.
1e3c88bd 5063 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 5064 */
bd939f45
PZ
5065static inline
5066void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd
PZ
5067{
5068 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5069 unsigned int imbn = 2;
dd5feea1 5070 unsigned long scaled_busy_load_per_task;
56cf515b 5071 struct sg_lb_stats *local, *busiest;
1e3c88bd 5072
56cf515b
JK
5073 local = &sds->local_stat;
5074 busiest = &sds->busiest_stat;
1e3c88bd 5075
56cf515b
JK
5076 if (!local->sum_nr_running)
5077 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5078 else if (busiest->load_per_task > local->load_per_task)
5079 imbn = 1;
dd5feea1 5080
56cf515b
JK
5081 scaled_busy_load_per_task =
5082 (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5083 busiest->group_power;
56cf515b 5084
3029ede3
VD
5085 if (busiest->avg_load + scaled_busy_load_per_task >=
5086 local->avg_load + (scaled_busy_load_per_task * imbn)) {
56cf515b 5087 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
5088 return;
5089 }
5090
5091 /*
5092 * OK, we don't have enough imbalance to justify moving tasks,
5093 * however we may be able to increase total CPU power used by
5094 * moving them.
5095 */
5096
3ae11c90 5097 pwr_now += busiest->group_power *
56cf515b 5098 min(busiest->load_per_task, busiest->avg_load);
3ae11c90 5099 pwr_now += local->group_power *
56cf515b 5100 min(local->load_per_task, local->avg_load);
1399fa78 5101 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
5102
5103 /* Amount of load we'd subtract */
56cf515b 5104 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5105 busiest->group_power;
56cf515b 5106 if (busiest->avg_load > tmp) {
3ae11c90 5107 pwr_move += busiest->group_power *
56cf515b
JK
5108 min(busiest->load_per_task,
5109 busiest->avg_load - tmp);
5110 }
1e3c88bd
PZ
5111
5112 /* Amount of load we'd add */
3ae11c90 5113 if (busiest->avg_load * busiest->group_power <
56cf515b 5114 busiest->load_per_task * SCHED_POWER_SCALE) {
3ae11c90
PZ
5115 tmp = (busiest->avg_load * busiest->group_power) /
5116 local->group_power;
56cf515b
JK
5117 } else {
5118 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5119 local->group_power;
56cf515b 5120 }
3ae11c90
PZ
5121 pwr_move += local->group_power *
5122 min(local->load_per_task, local->avg_load + tmp);
1399fa78 5123 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
5124
5125 /* Move if we gain throughput */
5126 if (pwr_move > pwr_now)
56cf515b 5127 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
5128}
5129
5130/**
5131 * calculate_imbalance - Calculate the amount of imbalance present within the
5132 * groups of a given sched_domain during load balance.
bd939f45 5133 * @env: load balance environment
1e3c88bd 5134 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 5135 */
bd939f45 5136static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 5137{
dd5feea1 5138 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
5139 struct sg_lb_stats *local, *busiest;
5140
5141 local = &sds->local_stat;
56cf515b 5142 busiest = &sds->busiest_stat;
dd5feea1 5143
56cf515b 5144 if (busiest->group_imb) {
30ce5dab
PZ
5145 /*
5146 * In the group_imb case we cannot rely on group-wide averages
5147 * to ensure cpu-load equilibrium, look at wider averages. XXX
5148 */
56cf515b
JK
5149 busiest->load_per_task =
5150 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
5151 }
5152
1e3c88bd
PZ
5153 /*
5154 * In the presence of smp nice balancing, certain scenarios can have
 5155 * max load less than avg load (as we skip the groups at or below
 5156 * its cpu_power, while calculating max_load...)
5157 */
b1885550
VD
5158 if (busiest->avg_load <= sds->avg_load ||
5159 local->avg_load >= sds->avg_load) {
bd939f45
PZ
5160 env->imbalance = 0;
5161 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
5162 }
5163
56cf515b 5164 if (!busiest->group_imb) {
dd5feea1
SS
5165 /*
5166 * Don't want to pull so many tasks that a group would go idle.
30ce5dab
PZ
5167 * Except of course for the group_imb case, since then we might
5168 * have to drop below capacity to reach cpu-load equilibrium.
dd5feea1 5169 */
56cf515b
JK
5170 load_above_capacity =
5171 (busiest->sum_nr_running - busiest->group_capacity);
dd5feea1 5172
1399fa78 5173 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
3ae11c90 5174 load_above_capacity /= busiest->group_power;
dd5feea1
SS
5175 }
5176
5177 /*
5178 * We're trying to get all the cpus to the average_load, so we don't
5179 * want to push ourselves above the average load, nor do we wish to
5180 * reduce the max loaded cpu below the average load. At the same time,
5181 * we also don't want to reduce the group load below the group capacity
5182 * (so that we can implement power-savings policies etc). Thus we look
5183 * for the minimum possible imbalance.
dd5feea1 5184 */
30ce5dab 5185 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
5186
5187 /* How much load to actually move to equalise the imbalance */
56cf515b 5188 env->imbalance = min(
3ae11c90
PZ
5189 max_pull * busiest->group_power,
5190 (sds->avg_load - local->avg_load) * local->group_power
56cf515b 5191 ) / SCHED_POWER_SCALE;
1e3c88bd
PZ
5192
5193 /*
 5194 * if env->imbalance is less than the average load per runnable task,
25985edc 5195 * there is no guarantee that any tasks will be moved, so we'll have
1e3c88bd
PZ
 5196 * to think about bumping its value to force at least one task to be
 5197 * moved
5198 */
56cf515b 5199 if (env->imbalance < busiest->load_per_task)
bd939f45 5200 return fix_small_imbalance(env, sds);
1e3c88bd 5201}
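/*
 * Editorial illustration (not part of the kernel source): a minimal sketch
 * of the max_pull/imbalance arithmetic above, with invented figures and
 * SCHED_LOAD_SCALE / SCHED_POWER_SCALE taken as 1024. One task above
 * capacity in the busiest group, busiest avg_load 1500, local 600, domain
 * average 1000; the result is 400 (the smaller of the two bounds), which
 * stands as long as it is not below load_per_task.
 */
#if 0	/* illustrative only, never compiled */
static unsigned long calculate_imbalance_example(void)
{
	const unsigned long scale = 1024;
	unsigned long busiest_avg = 1500, local_avg = 600, sds_avg = 1000;
	unsigned long busiest_power = 1024, local_power = 1024;
	/* (sum_nr_running - group_capacity) == 1 task above capacity */
	unsigned long load_above_capacity = (1 * scale * scale) / busiest_power;

	unsigned long max_pull = busiest_avg - sds_avg;		/* 500 */
	if (load_above_capacity < max_pull)			/* 1024: no */
		max_pull = load_above_capacity;

	unsigned long a = max_pull * busiest_power;		/* 500 * 1024 */
	unsigned long b = (sds_avg - local_avg) * local_power;	/* 400 * 1024 */
	return (a < b ? a : b) / scale;				/* 400 */
}
#endif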
fab47622 5202
1e3c88bd
PZ
5203/******* find_busiest_group() helpers end here *********************/
5204
5205/**
5206 * find_busiest_group - Returns the busiest group within the sched_domain
5207 * if there is an imbalance. If there isn't an imbalance, and
5208 * the user has opted for power-savings, it returns a group whose
5209 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5210 * such a group exists.
5211 *
5212 * Also calculates the amount of weighted load which should be moved
5213 * to restore balance.
5214 *
cd96891d 5215 * @env: The load balancing environment.
1e3c88bd 5216 *
e69f6186 5217 * Return: - The busiest group if imbalance exists.
1e3c88bd
PZ
5218 * - If no imbalance and user has opted for power-savings balance,
5219 * return the least loaded group whose CPUs can be
5220 * put to idle by rebalancing its tasks onto our group.
5221 */
56cf515b 5222static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 5223{
56cf515b 5224 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
5225 struct sd_lb_stats sds;
5226
147c5fc2 5227 init_sd_lb_stats(&sds);
1e3c88bd
PZ
5228
5229 /*
 5230 * Compute the various statistics relevant for load balancing at
5231 * this level.
5232 */
23f0d209 5233 update_sd_lb_stats(env, &sds);
56cf515b
JK
5234 local = &sds.local_stat;
5235 busiest = &sds.busiest_stat;
1e3c88bd 5236
bd939f45
PZ
5237 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5238 check_asym_packing(env, &sds))
532cb4c4
MN
5239 return sds.busiest;
5240
cc57aa8f 5241 /* There is no busy sibling group to pull tasks from */
56cf515b 5242 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
5243 goto out_balanced;
5244
1399fa78 5245 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 5246
866ab43e
PZ
5247 /*
5248 * If the busiest group is imbalanced the below checks don't
30ce5dab 5249 * work because they assume all things are equal, which typically
866ab43e
PZ
5250 * isn't true due to cpus_allowed constraints and the like.
5251 */
56cf515b 5252 if (busiest->group_imb)
866ab43e
PZ
5253 goto force_balance;
5254
cc57aa8f 5255 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
56cf515b
JK
5256 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5257 !busiest->group_has_capacity)
fab47622
NR
5258 goto force_balance;
5259
cc57aa8f
PZ
5260 /*
5261 * If the local group is more busy than the selected busiest group
5262 * don't try and pull any tasks.
5263 */
56cf515b 5264 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
5265 goto out_balanced;
5266
cc57aa8f
PZ
5267 /*
5268 * Don't pull any tasks if this group is already above the domain
5269 * average load.
5270 */
56cf515b 5271 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
5272 goto out_balanced;
5273
bd939f45 5274 if (env->idle == CPU_IDLE) {
aae6d3dd
SS
5275 /*
 5276 * This cpu is idle. If the busiest group doesn't have
 5277 * more tasks than the number of available cpus and there
 5278 * is no imbalance between this and the busiest group
 5279 * with respect to idle cpus, it is balanced.
5280 */
56cf515b
JK
5281 if ((local->idle_cpus < busiest->idle_cpus) &&
5282 busiest->sum_nr_running <= busiest->group_weight)
aae6d3dd 5283 goto out_balanced;
c186fafe
PZ
5284 } else {
5285 /*
5286 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5287 * imbalance_pct to be conservative.
5288 */
56cf515b
JK
5289 if (100 * busiest->avg_load <=
5290 env->sd->imbalance_pct * local->avg_load)
c186fafe 5291 goto out_balanced;
aae6d3dd 5292 }
1e3c88bd 5293
fab47622 5294force_balance:
1e3c88bd 5295 /* Looks like there is an imbalance. Compute it */
bd939f45 5296 calculate_imbalance(env, &sds);
1e3c88bd
PZ
5297 return sds.busiest;
5298
5299out_balanced:
bd939f45 5300 env->imbalance = 0;
1e3c88bd
PZ
5301 return NULL;
5302}
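/*
 * Editorial illustration (not part of the kernel source): the non-idle
 * cut-off in find_busiest_group() above. With an imbalance_pct of 125
 * (a common default at non-SMT domain levels), the busiest group must be
 * at least 25% more loaded than the local group before a periodic balance
 * pulls anything. The helper name and figures below are invented.
 */
#if 0	/* illustrative only, never compiled */
static int busiest_worth_pulling_from(unsigned long busiest_avg_load,
				      unsigned long local_avg_load,
				      unsigned int imbalance_pct)
{
	/* mirrors: !(100 * busiest->avg_load <= imbalance_pct * local->avg_load) */
	return 100 * busiest_avg_load > imbalance_pct * local_avg_load;
	/* e.g. busiest 1300 vs local 1000, pct 125: 130000 > 125000 -> pull */
}
#endif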
5303
5304/*
5305 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5306 */
bd939f45 5307static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 5308 struct sched_group *group)
1e3c88bd
PZ
5309{
5310 struct rq *busiest = NULL, *rq;
95a79b80 5311 unsigned long busiest_load = 0, busiest_power = 1;
1e3c88bd
PZ
5312 int i;
5313
6906a408 5314 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd 5315 unsigned long power = power_of(i);
1399fa78
NR
5316 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5317 SCHED_POWER_SCALE);
1e3c88bd
PZ
5318 unsigned long wl;
5319
9d5efe05 5320 if (!capacity)
bd939f45 5321 capacity = fix_small_capacity(env->sd, group);
9d5efe05 5322
1e3c88bd 5323 rq = cpu_rq(i);
6e40f5bb 5324 wl = weighted_cpuload(i);
1e3c88bd 5325
6e40f5bb
TG
5326 /*
5327 * When comparing with imbalance, use weighted_cpuload()
5328 * which is not scaled with the cpu power.
5329 */
bd939f45 5330 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
1e3c88bd
PZ
5331 continue;
5332
6e40f5bb
TG
5333 /*
 5334 * For the load comparisons with the other cpus, consider
5335 * the weighted_cpuload() scaled with the cpu power, so that
5336 * the load can be moved away from the cpu that is potentially
5337 * running at a lower capacity.
95a79b80
JK
5338 *
 5339 * Thus we're looking for max(wl_i / power_i); crosswise
5340 * multiplication to rid ourselves of the division works out
5341 * to: wl_i * power_j > wl_j * power_i; where j is our
5342 * previous maximum.
6e40f5bb 5343 */
95a79b80
JK
5344 if (wl * busiest_power > busiest_load * power) {
5345 busiest_load = wl;
5346 busiest_power = power;
1e3c88bd
PZ
5347 busiest = rq;
5348 }
5349 }
5350
5351 return busiest;
5352}
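/*
 * Editorial illustration (not part of the kernel source): the crosswise
 * comparison used above avoids a division when searching for
 * max(wl_i / power_i). With invented numbers: cpu A has wl = 2048,
 * power = 1024 (ratio 2); cpu B has wl = 1536, power = 512 (ratio 3).
 * B is relatively busier, and indeed 1536 * 1024 > 2048 * 512.
 */
#if 0	/* illustrative only, never compiled */
static int relatively_busier(unsigned long wl_new, unsigned long power_new,
			     unsigned long wl_max, unsigned long power_max)
{
	/* equivalent to wl_new / power_new > wl_max / power_max, sans division */
	return wl_new * power_max > wl_max * power_new;
}
#endif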
5353
5354/*
5355 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5356 * so long as it is large enough.
5357 */
5358#define MAX_PINNED_INTERVAL 512
5359
5360/* Working cpumask for load_balance and load_balance_newidle. */
e6252c3e 5361DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
1e3c88bd 5362
bd939f45 5363static int need_active_balance(struct lb_env *env)
1af3ed3d 5364{
bd939f45
PZ
5365 struct sched_domain *sd = env->sd;
5366
5367 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
5368
5369 /*
5370 * ASYM_PACKING needs to force migrate tasks from busy but
5371 * higher numbered CPUs in order to pack all tasks in the
5372 * lowest numbered CPUs.
5373 */
bd939f45 5374 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 5375 return 1;
1af3ed3d
PZ
5376 }
5377
5378 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5379}
5380
969c7921
TH
5381static int active_load_balance_cpu_stop(void *data);
5382
23f0d209
JK
5383static int should_we_balance(struct lb_env *env)
5384{
5385 struct sched_group *sg = env->sd->groups;
5386 struct cpumask *sg_cpus, *sg_mask;
5387 int cpu, balance_cpu = -1;
5388
5389 /*
5390 * In the newly idle case, we will allow all the cpu's
5391 * to do the newly idle load balance.
5392 */
5393 if (env->idle == CPU_NEWLY_IDLE)
5394 return 1;
5395
5396 sg_cpus = sched_group_cpus(sg);
5397 sg_mask = sched_group_mask(sg);
5398 /* Try to find first idle cpu */
5399 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5400 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5401 continue;
5402
5403 balance_cpu = cpu;
5404 break;
5405 }
5406
5407 if (balance_cpu == -1)
5408 balance_cpu = group_balance_cpu(sg);
5409
5410 /*
5411 * First idle cpu or the first cpu(busiest) in this sched group
5412 * is eligible for doing load balancing at this and above domains.
5413 */
b0cff9d8 5414 return balance_cpu == env->dst_cpu;
23f0d209
JK
5415}
5416
1e3c88bd
PZ
5417/*
5418 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5419 * tasks if there is an imbalance.
5420 */
5421static int load_balance(int this_cpu, struct rq *this_rq,
5422 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 5423 int *continue_balancing)
1e3c88bd 5424{
88b8dac0 5425 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 5426 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 5427 struct sched_group *group;
1e3c88bd
PZ
5428 struct rq *busiest;
5429 unsigned long flags;
e6252c3e 5430 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
1e3c88bd 5431
8e45cb54
PZ
5432 struct lb_env env = {
5433 .sd = sd,
ddcdf6e7
PZ
5434 .dst_cpu = this_cpu,
5435 .dst_rq = this_rq,
88b8dac0 5436 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 5437 .idle = idle,
eb95308e 5438 .loop_break = sched_nr_migrate_break,
b9403130 5439 .cpus = cpus,
8e45cb54
PZ
5440 };
5441
cfc03118
JK
5442 /*
5443 * For NEWLY_IDLE load_balancing, we don't need to consider
5444 * other cpus in our group
5445 */
e02e60c1 5446 if (idle == CPU_NEWLY_IDLE)
cfc03118 5447 env.dst_grpmask = NULL;
cfc03118 5448
1e3c88bd
PZ
5449 cpumask_copy(cpus, cpu_active_mask);
5450
1e3c88bd
PZ
5451 schedstat_inc(sd, lb_count[idle]);
5452
5453redo:
23f0d209
JK
5454 if (!should_we_balance(&env)) {
5455 *continue_balancing = 0;
1e3c88bd 5456 goto out_balanced;
23f0d209 5457 }
1e3c88bd 5458
23f0d209 5459 group = find_busiest_group(&env);
1e3c88bd
PZ
5460 if (!group) {
5461 schedstat_inc(sd, lb_nobusyg[idle]);
5462 goto out_balanced;
5463 }
5464
b9403130 5465 busiest = find_busiest_queue(&env, group);
1e3c88bd
PZ
5466 if (!busiest) {
5467 schedstat_inc(sd, lb_nobusyq[idle]);
5468 goto out_balanced;
5469 }
5470
78feefc5 5471 BUG_ON(busiest == env.dst_rq);
1e3c88bd 5472
bd939f45 5473 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
1e3c88bd
PZ
5474
5475 ld_moved = 0;
5476 if (busiest->nr_running > 1) {
5477 /*
5478 * Attempt to move tasks. If find_busiest_group has found
5479 * an imbalance but busiest->nr_running <= 1, the group is
5480 * still unbalanced. ld_moved simply stays zero, so it is
5481 * correctly treated as an imbalance.
5482 */
8e45cb54 5483 env.flags |= LBF_ALL_PINNED;
c82513e5
PZ
5484 env.src_cpu = busiest->cpu;
5485 env.src_rq = busiest;
5486 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 5487
5d6523eb 5488more_balance:
1e3c88bd 5489 local_irq_save(flags);
78feefc5 5490 double_rq_lock(env.dst_rq, busiest);
88b8dac0
SV
5491
5492 /*
5493 * cur_ld_moved - load moved in current iteration
5494 * ld_moved - cumulative load moved across iterations
5495 */
5496 cur_ld_moved = move_tasks(&env);
5497 ld_moved += cur_ld_moved;
78feefc5 5498 double_rq_unlock(env.dst_rq, busiest);
1e3c88bd
PZ
5499 local_irq_restore(flags);
5500
5501 /*
5502 * some other cpu did the load balance for us.
5503 */
88b8dac0
SV
5504 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5505 resched_cpu(env.dst_cpu);
5506
f1cd0858
JK
5507 if (env.flags & LBF_NEED_BREAK) {
5508 env.flags &= ~LBF_NEED_BREAK;
5509 goto more_balance;
5510 }
5511
88b8dac0
SV
5512 /*
5513 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5514 * us and move them to an alternate dst_cpu in our sched_group
5515 * where they can run. The upper limit on how many times we
5516 * iterate on same src_cpu is dependent on number of cpus in our
5517 * sched_group.
5518 *
5519 * This changes load balance semantics a bit on who can move
5520 * load to a given_cpu. In addition to the given_cpu itself
 5521 * (or an ilb_cpu acting on its behalf where given_cpu is
 5522 * nohz-idle), we now have balance_cpu in a position to move
 5523 * load to given_cpu. In rare situations, this may cause
 5524 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
 5525 * _independently_ and at the _same_ time to move some load to
 5526 * given_cpu), causing excess load to be moved to given_cpu.
 5527 * This, however, should not happen often in practice and
5528 * moreover subsequent load balance cycles should correct the
5529 * excess load moved.
5530 */
6263322c 5531 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 5532
7aff2e3a
VD
 5533 /* Prevent dst_cpu from being re-selected via env's cpus */
5534 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5535
78feefc5 5536 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 5537 env.dst_cpu = env.new_dst_cpu;
6263322c 5538 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
5539 env.loop = 0;
5540 env.loop_break = sched_nr_migrate_break;
e02e60c1 5541
88b8dac0
SV
5542 /*
5543 * Go back to "more_balance" rather than "redo" since we
5544 * need to continue with same src_cpu.
5545 */
5546 goto more_balance;
5547 }
1e3c88bd 5548
6263322c
PZ
5549 /*
5550 * We failed to reach balance because of affinity.
5551 */
5552 if (sd_parent) {
5553 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
5554
5555 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5556 *group_imbalance = 1;
5557 } else if (*group_imbalance)
5558 *group_imbalance = 0;
5559 }
5560
1e3c88bd 5561 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 5562 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 5563 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
5564 if (!cpumask_empty(cpus)) {
5565 env.loop = 0;
5566 env.loop_break = sched_nr_migrate_break;
1e3c88bd 5567 goto redo;
bbf18b19 5568 }
1e3c88bd
PZ
5569 goto out_balanced;
5570 }
5571 }
5572
5573 if (!ld_moved) {
5574 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
5575 /*
5576 * Increment the failure counter only on periodic balance.
5577 * We do not want newidle balance, which can be very
5578 * frequent, pollute the failure counter causing
5579 * excessive cache_hot migrations and active balances.
5580 */
5581 if (idle != CPU_NEWLY_IDLE)
5582 sd->nr_balance_failed++;
1e3c88bd 5583
bd939f45 5584 if (need_active_balance(&env)) {
1e3c88bd
PZ
5585 raw_spin_lock_irqsave(&busiest->lock, flags);
5586
969c7921
TH
5587 /* don't kick the active_load_balance_cpu_stop,
5588 * if the curr task on busiest cpu can't be
5589 * moved to this_cpu
1e3c88bd
PZ
5590 */
5591 if (!cpumask_test_cpu(this_cpu,
fa17b507 5592 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
5593 raw_spin_unlock_irqrestore(&busiest->lock,
5594 flags);
8e45cb54 5595 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
5596 goto out_one_pinned;
5597 }
5598
969c7921
TH
5599 /*
5600 * ->active_balance synchronizes accesses to
5601 * ->active_balance_work. Once set, it's cleared
5602 * only after active load balance is finished.
5603 */
1e3c88bd
PZ
5604 if (!busiest->active_balance) {
5605 busiest->active_balance = 1;
5606 busiest->push_cpu = this_cpu;
5607 active_balance = 1;
5608 }
5609 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 5610
bd939f45 5611 if (active_balance) {
969c7921
TH
5612 stop_one_cpu_nowait(cpu_of(busiest),
5613 active_load_balance_cpu_stop, busiest,
5614 &busiest->active_balance_work);
bd939f45 5615 }
1e3c88bd
PZ
5616
5617 /*
5618 * We've kicked active balancing, reset the failure
5619 * counter.
5620 */
5621 sd->nr_balance_failed = sd->cache_nice_tries+1;
5622 }
5623 } else
5624 sd->nr_balance_failed = 0;
5625
5626 if (likely(!active_balance)) {
5627 /* We were unbalanced, so reset the balancing interval */
5628 sd->balance_interval = sd->min_interval;
5629 } else {
5630 /*
5631 * If we've begun active balancing, start to back off. This
5632 * case may not be covered by the all_pinned logic if there
5633 * is only 1 task on the busy runqueue (because we don't call
5634 * move_tasks).
5635 */
5636 if (sd->balance_interval < sd->max_interval)
5637 sd->balance_interval *= 2;
5638 }
5639
1e3c88bd
PZ
5640 goto out;
5641
5642out_balanced:
5643 schedstat_inc(sd, lb_balanced[idle]);
5644
5645 sd->nr_balance_failed = 0;
5646
5647out_one_pinned:
5648 /* tune up the balancing interval */
8e45cb54 5649 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 5650 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
5651 (sd->balance_interval < sd->max_interval))
5652 sd->balance_interval *= 2;
5653
46e49b38 5654 ld_moved = 0;
1e3c88bd 5655out:
1e3c88bd
PZ
5656 return ld_moved;
5657}
5658
1e3c88bd
PZ
5659/*
5660 * idle_balance is called by schedule() if this_cpu is about to become
5661 * idle. Attempts to pull tasks from other CPUs.
5662 */
029632fb 5663void idle_balance(int this_cpu, struct rq *this_rq)
1e3c88bd
PZ
5664{
5665 struct sched_domain *sd;
5666 int pulled_task = 0;
5667 unsigned long next_balance = jiffies + HZ;
9bd721c5 5668 u64 curr_cost = 0;
1e3c88bd 5669
78becc27 5670 this_rq->idle_stamp = rq_clock(this_rq);
1e3c88bd
PZ
5671
5672 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5673 return;
5674
f492e12e
PZ
5675 /*
5676 * Drop the rq->lock, but keep IRQ/preempt disabled.
5677 */
5678 raw_spin_unlock(&this_rq->lock);
5679
48a16753 5680 update_blocked_averages(this_cpu);
dce840a0 5681 rcu_read_lock();
1e3c88bd
PZ
5682 for_each_domain(this_cpu, sd) {
5683 unsigned long interval;
23f0d209 5684 int continue_balancing = 1;
9bd721c5 5685 u64 t0, domain_cost;
1e3c88bd
PZ
5686
5687 if (!(sd->flags & SD_LOAD_BALANCE))
5688 continue;
5689
9bd721c5
JL
5690 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
5691 break;
5692
f492e12e 5693 if (sd->flags & SD_BALANCE_NEWIDLE) {
9bd721c5
JL
5694 t0 = sched_clock_cpu(this_cpu);
5695
1e3c88bd 5696 /* If we've pulled tasks over stop searching: */
f492e12e 5697 pulled_task = load_balance(this_cpu, this_rq,
23f0d209
JK
5698 sd, CPU_NEWLY_IDLE,
5699 &continue_balancing);
9bd721c5
JL
5700
5701 domain_cost = sched_clock_cpu(this_cpu) - t0;
5702 if (domain_cost > sd->max_newidle_lb_cost)
5703 sd->max_newidle_lb_cost = domain_cost;
5704
5705 curr_cost += domain_cost;
f492e12e 5706 }
1e3c88bd
PZ
5707
5708 interval = msecs_to_jiffies(sd->balance_interval);
5709 if (time_after(next_balance, sd->last_balance + interval))
5710 next_balance = sd->last_balance + interval;
d5ad140b
NR
5711 if (pulled_task) {
5712 this_rq->idle_stamp = 0;
1e3c88bd 5713 break;
d5ad140b 5714 }
1e3c88bd 5715 }
dce840a0 5716 rcu_read_unlock();
f492e12e
PZ
5717
5718 raw_spin_lock(&this_rq->lock);
5719
1e3c88bd
PZ
5720 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5721 /*
5722 * We are going idle. next_balance may be set based on
5723 * a busy processor. So reset next_balance.
5724 */
5725 this_rq->next_balance = next_balance;
5726 }
9bd721c5
JL
5727
5728 if (curr_cost > this_rq->max_idle_balance_cost)
5729 this_rq->max_idle_balance_cost = curr_cost;
1e3c88bd
PZ
5730}
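/*
 * Editorial illustration (not part of the kernel source): idle_balance()
 * stops walking domains once the accumulated newidle balancing cost would
 * exceed the expected idle time. The helper name and figures below are
 * invented.
 */
#if 0	/* illustrative only, never compiled */
static int worth_balancing_domain(unsigned long long avg_idle_ns,
				  unsigned long long curr_cost_ns,
				  unsigned long long domain_max_cost_ns)
{
	/* mirrors: !(this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) */
	return avg_idle_ns >= curr_cost_ns + domain_max_cost_ns;
	/* e.g. 500us expected idle, 200us spent, next domain ~400us: skip it */
}
#endif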
5731
5732/*
969c7921
TH
5733 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5734 * running tasks off the busiest CPU onto idle CPUs. It requires at
5735 * least 1 task to be running on each physical CPU where possible, and
5736 * avoids physical / logical imbalances.
1e3c88bd 5737 */
969c7921 5738static int active_load_balance_cpu_stop(void *data)
1e3c88bd 5739{
969c7921
TH
5740 struct rq *busiest_rq = data;
5741 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 5742 int target_cpu = busiest_rq->push_cpu;
969c7921 5743 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 5744 struct sched_domain *sd;
969c7921
TH
5745
5746 raw_spin_lock_irq(&busiest_rq->lock);
5747
5748 /* make sure the requested cpu hasn't gone down in the meantime */
5749 if (unlikely(busiest_cpu != smp_processor_id() ||
5750 !busiest_rq->active_balance))
5751 goto out_unlock;
1e3c88bd
PZ
5752
5753 /* Is there any task to move? */
5754 if (busiest_rq->nr_running <= 1)
969c7921 5755 goto out_unlock;
1e3c88bd
PZ
5756
5757 /*
 5758 * This condition is "impossible"; if it occurs
5759 * we need to fix it. Originally reported by
5760 * Bjorn Helgaas on a 128-cpu setup.
5761 */
5762 BUG_ON(busiest_rq == target_rq);
5763
5764 /* move a task from busiest_rq to target_rq */
5765 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
5766
5767 /* Search for an sd spanning us and the target CPU. */
dce840a0 5768 rcu_read_lock();
1e3c88bd
PZ
5769 for_each_domain(target_cpu, sd) {
5770 if ((sd->flags & SD_LOAD_BALANCE) &&
5771 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5772 break;
5773 }
5774
5775 if (likely(sd)) {
8e45cb54
PZ
5776 struct lb_env env = {
5777 .sd = sd,
ddcdf6e7
PZ
5778 .dst_cpu = target_cpu,
5779 .dst_rq = target_rq,
5780 .src_cpu = busiest_rq->cpu,
5781 .src_rq = busiest_rq,
8e45cb54
PZ
5782 .idle = CPU_IDLE,
5783 };
5784
1e3c88bd
PZ
5785 schedstat_inc(sd, alb_count);
5786
8e45cb54 5787 if (move_one_task(&env))
1e3c88bd
PZ
5788 schedstat_inc(sd, alb_pushed);
5789 else
5790 schedstat_inc(sd, alb_failed);
5791 }
dce840a0 5792 rcu_read_unlock();
1e3c88bd 5793 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
5794out_unlock:
5795 busiest_rq->active_balance = 0;
5796 raw_spin_unlock_irq(&busiest_rq->lock);
5797 return 0;
1e3c88bd
PZ
5798}
5799
3451d024 5800#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
5801/*
5802 * idle load balancing details
83cd4fe2
VP
 5803 * - When one of the busy CPUs notices that idle rebalancing may be
 5804 * needed, it kicks the idle load balancer, which then does idle
5805 * load balancing for all the idle CPUs.
5806 */
1e3c88bd 5807static struct {
83cd4fe2 5808 cpumask_var_t idle_cpus_mask;
0b005cf5 5809 atomic_t nr_cpus;
83cd4fe2
VP
5810 unsigned long next_balance; /* in jiffy units */
5811} nohz ____cacheline_aligned;
1e3c88bd 5812
8e7fbcbc 5813static inline int find_new_ilb(int call_cpu)
1e3c88bd 5814{
0b005cf5 5815 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 5816
786d6dc7
SS
5817 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5818 return ilb;
5819
5820 return nr_cpu_ids;
1e3c88bd 5821}
1e3c88bd 5822
83cd4fe2
VP
5823/*
5824 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 5825 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5826 * CPU (if there is one).
5827 */
5828static void nohz_balancer_kick(int cpu)
5829{
5830 int ilb_cpu;
5831
5832 nohz.next_balance++;
5833
0b005cf5 5834 ilb_cpu = find_new_ilb(cpu);
83cd4fe2 5835
0b005cf5
SS
5836 if (ilb_cpu >= nr_cpu_ids)
5837 return;
83cd4fe2 5838
cd490c5b 5839 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
5840 return;
5841 /*
5842 * Use smp_send_reschedule() instead of resched_cpu().
5843 * This way we generate a sched IPI on the target cpu which
5844 * is idle. And the softirq performing nohz idle load balance
5845 * will be run before returning from the IPI.
5846 */
5847 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
5848 return;
5849}
5850
c1cc017c 5851static inline void nohz_balance_exit_idle(int cpu)
71325960
SS
5852{
5853 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5854 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5855 atomic_dec(&nohz.nr_cpus);
5856 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5857 }
5858}
5859
69e1e811
SS
5860static inline void set_cpu_sd_state_busy(void)
5861{
5862 struct sched_domain *sd;
69e1e811 5863
69e1e811 5864 rcu_read_lock();
424c93fe 5865 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5866
5867 if (!sd || !sd->nohz_idle)
5868 goto unlock;
5869 sd->nohz_idle = 0;
5870
5871 for (; sd; sd = sd->parent)
69e1e811 5872 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5873unlock:
69e1e811
SS
5874 rcu_read_unlock();
5875}
5876
5877void set_cpu_sd_state_idle(void)
5878{
5879 struct sched_domain *sd;
69e1e811 5880
69e1e811 5881 rcu_read_lock();
424c93fe 5882 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5883
5884 if (!sd || sd->nohz_idle)
5885 goto unlock;
5886 sd->nohz_idle = 1;
5887
5888 for (; sd; sd = sd->parent)
69e1e811 5889 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5890unlock:
69e1e811
SS
5891 rcu_read_unlock();
5892}
5893
1e3c88bd 5894/*
c1cc017c 5895 * This routine will record that the cpu is going idle with tick stopped.
0b005cf5 5896 * This info will be used in performing idle load balancing in the future.
1e3c88bd 5897 */
c1cc017c 5898void nohz_balance_enter_idle(int cpu)
1e3c88bd 5899{
71325960
SS
5900 /*
5901 * If this cpu is going down, then nothing needs to be done.
5902 */
5903 if (!cpu_active(cpu))
5904 return;
5905
c1cc017c
AS
5906 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5907 return;
1e3c88bd 5908
c1cc017c
AS
5909 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5910 atomic_inc(&nohz.nr_cpus);
5911 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd 5912}
71325960 5913
0db0628d 5914static int sched_ilb_notifier(struct notifier_block *nfb,
71325960
SS
5915 unsigned long action, void *hcpu)
5916{
5917 switch (action & ~CPU_TASKS_FROZEN) {
5918 case CPU_DYING:
c1cc017c 5919 nohz_balance_exit_idle(smp_processor_id());
71325960
SS
5920 return NOTIFY_OK;
5921 default:
5922 return NOTIFY_DONE;
5923 }
5924}
1e3c88bd
PZ
5925#endif
5926
5927static DEFINE_SPINLOCK(balancing);
5928
49c022e6
PZ
5929/*
5930 * Scale the max load_balance interval with the number of CPUs in the system.
5931 * This trades load-balance latency on larger machines for less cross talk.
5932 */
029632fb 5933void update_max_interval(void)
49c022e6
PZ
5934{
5935 max_load_balance_interval = HZ*num_online_cpus()/10;
5936}
5937
1e3c88bd
PZ
5938/*
5939 * It checks each scheduling domain to see if it is due to be balanced,
5940 * and initiates a balancing operation if so.
5941 *
b9b0853a 5942 * Balancing parameters are set up in init_sched_domains.
1e3c88bd
PZ
5943 */
5944static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5945{
23f0d209 5946 int continue_balancing = 1;
1e3c88bd
PZ
5947 struct rq *rq = cpu_rq(cpu);
5948 unsigned long interval;
04f733b4 5949 struct sched_domain *sd;
1e3c88bd
PZ
5950 /* Earliest time when we have to do rebalance again */
5951 unsigned long next_balance = jiffies + 60*HZ;
5952 int update_next_balance = 0;
f48627e6
JL
5953 int need_serialize, need_decay = 0;
5954 u64 max_cost = 0;
1e3c88bd 5955
48a16753 5956 update_blocked_averages(cpu);
2069dd75 5957
dce840a0 5958 rcu_read_lock();
1e3c88bd 5959 for_each_domain(cpu, sd) {
f48627e6
JL
5960 /*
5961 * Decay the newidle max times here because this is a regular
5962 * visit to all the domains. Decay ~1% per second.
5963 */
5964 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
5965 sd->max_newidle_lb_cost =
5966 (sd->max_newidle_lb_cost * 253) / 256;
5967 sd->next_decay_max_lb_cost = jiffies + HZ;
5968 need_decay = 1;
5969 }
5970 max_cost += sd->max_newidle_lb_cost;
5971
1e3c88bd
PZ
5972 if (!(sd->flags & SD_LOAD_BALANCE))
5973 continue;
5974
f48627e6
JL
5975 /*
5976 * Stop the load balance at this level. There is another
5977 * CPU in our sched group which is doing load balancing more
5978 * actively.
5979 */
5980 if (!continue_balancing) {
5981 if (need_decay)
5982 continue;
5983 break;
5984 }
5985
1e3c88bd
PZ
5986 interval = sd->balance_interval;
5987 if (idle != CPU_IDLE)
5988 interval *= sd->busy_factor;
5989
5990 /* scale ms to jiffies */
5991 interval = msecs_to_jiffies(interval);
49c022e6 5992 interval = clamp(interval, 1UL, max_load_balance_interval);
1e3c88bd
PZ
5993
5994 need_serialize = sd->flags & SD_SERIALIZE;
5995
5996 if (need_serialize) {
5997 if (!spin_trylock(&balancing))
5998 goto out;
5999 }
6000
6001 if (time_after_eq(jiffies, sd->last_balance + interval)) {
23f0d209 6002 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
1e3c88bd 6003 /*
6263322c 6004 * The LBF_DST_PINNED logic could have changed
de5eb2dd
JK
6005 * env->dst_cpu, so we can't know our idle
6006 * state even if we migrated tasks. Update it.
1e3c88bd 6007 */
de5eb2dd 6008 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
1e3c88bd
PZ
6009 }
6010 sd->last_balance = jiffies;
6011 }
6012 if (need_serialize)
6013 spin_unlock(&balancing);
6014out:
6015 if (time_after(next_balance, sd->last_balance + interval)) {
6016 next_balance = sd->last_balance + interval;
6017 update_next_balance = 1;
6018 }
f48627e6
JL
6019 }
6020 if (need_decay) {
1e3c88bd 6021 /*
f48627e6
JL
6022 * Ensure the rq-wide value also decays but keep it at a
6023 * reasonable floor to avoid funnies with rq->avg_idle.
1e3c88bd 6024 */
f48627e6
JL
6025 rq->max_idle_balance_cost =
6026 max((u64)sysctl_sched_migration_cost, max_cost);
1e3c88bd 6027 }
dce840a0 6028 rcu_read_unlock();
1e3c88bd
PZ
6029
6030 /*
6031 * next_balance will be updated only when there is a need.
6032 * When the cpu is attached to null domain for ex, it will not be
6033 * updated.
6034 */
6035 if (likely(update_next_balance))
6036 rq->next_balance = next_balance;
6037}
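/*
 * Editorial illustration (not part of the kernel source): the
 * "* 253 / 256" step above shaves about 1.2% off max_newidle_lb_cost on
 * each decay (taken at most once per second), so an old cost spike fades
 * over a few minutes instead of suppressing newidle balancing forever.
 * The helper below only demonstrates the arithmetic.
 */
#if 0	/* illustrative only, never compiled */
static unsigned long long decay_newidle_cost(unsigned long long cost,
					     unsigned int seconds)
{
	while (seconds--)
		cost = (cost * 253) / 256;	/* ~1.17% per step */
	return cost;	/* e.g. 1000000ns decays to ~555000ns after 50 steps */
}
#endif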
6038
3451d024 6039#ifdef CONFIG_NO_HZ_COMMON
1e3c88bd 6040/*
3451d024 6041 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
1e3c88bd
PZ
6042 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6043 */
83cd4fe2
VP
6044static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6045{
6046 struct rq *this_rq = cpu_rq(this_cpu);
6047 struct rq *rq;
6048 int balance_cpu;
6049
1c792db7
SS
6050 if (idle != CPU_IDLE ||
6051 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6052 goto end;
83cd4fe2
VP
6053
6054 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 6055 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
6056 continue;
6057
6058 /*
6059 * If this cpu gets work to do, stop the load balancing
6060 * work being done for other cpus. Next load
6061 * balancing owner will pick it up.
6062 */
1c792db7 6063 if (need_resched())
83cd4fe2 6064 break;
83cd4fe2 6065
5ed4f1d9
VG
6066 rq = cpu_rq(balance_cpu);
6067
6068 raw_spin_lock_irq(&rq->lock);
6069 update_rq_clock(rq);
6070 update_idle_cpu_load(rq);
6071 raw_spin_unlock_irq(&rq->lock);
83cd4fe2
VP
6072
6073 rebalance_domains(balance_cpu, CPU_IDLE);
6074
83cd4fe2
VP
6075 if (time_after(this_rq->next_balance, rq->next_balance))
6076 this_rq->next_balance = rq->next_balance;
6077 }
6078 nohz.next_balance = this_rq->next_balance;
1c792db7
SS
6079end:
6080 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
6081}
6082
6083/*
0b005cf5
SS
 6084 * Current heuristic for kicking the idle load balancer in the presence
 6085 * of an idle cpu in the system:
 6086 * - This rq has more than one task.
 6087 * - At any scheduler domain level, this cpu's scheduler group has multiple
 6088 * busy cpus exceeding the group's power.
 6089 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
 6090 * domain span are idle.
83cd4fe2
VP
6091 */
6092static inline int nohz_kick_needed(struct rq *rq, int cpu)
6093{
6094 unsigned long now = jiffies;
0b005cf5 6095 struct sched_domain *sd;
83cd4fe2 6096
1c792db7 6097 if (unlikely(idle_cpu(cpu)))
83cd4fe2
VP
6098 return 0;
6099
1c792db7
SS
6100 /*
 6101 * We may have recently been in ticked or tickless idle mode. At the first
6102 * busy tick after returning from idle, we will update the busy stats.
6103 */
69e1e811 6104 set_cpu_sd_state_busy();
c1cc017c 6105 nohz_balance_exit_idle(cpu);
0b005cf5
SS
6106
6107 /*
6108 * None are in tickless mode and hence no need for NOHZ idle load
6109 * balancing.
6110 */
6111 if (likely(!atomic_read(&nohz.nr_cpus)))
6112 return 0;
1c792db7
SS
6113
6114 if (time_before(now, nohz.next_balance))
83cd4fe2
VP
6115 return 0;
6116
0b005cf5
SS
6117 if (rq->nr_running >= 2)
6118 goto need_kick;
83cd4fe2 6119
067491b7 6120 rcu_read_lock();
0b005cf5
SS
6121 for_each_domain(cpu, sd) {
6122 struct sched_group *sg = sd->groups;
6123 struct sched_group_power *sgp = sg->sgp;
6124 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
83cd4fe2 6125
0b005cf5 6126 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
067491b7 6127 goto need_kick_unlock;
0b005cf5
SS
6128
6129 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
6130 && (cpumask_first_and(nohz.idle_cpus_mask,
6131 sched_domain_span(sd)) < cpu))
067491b7 6132 goto need_kick_unlock;
0b005cf5
SS
6133
6134 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
6135 break;
83cd4fe2 6136 }
067491b7 6137 rcu_read_unlock();
83cd4fe2 6138 return 0;
067491b7
PZ
6139
6140need_kick_unlock:
6141 rcu_read_unlock();
0b005cf5
SS
6142need_kick:
6143 return 1;
83cd4fe2
VP
6144}
6145#else
6146static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6147#endif
6148
6149/*
6150 * run_rebalance_domains is triggered when needed from the scheduler tick.
6151 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
6152 */
1e3c88bd
PZ
6153static void run_rebalance_domains(struct softirq_action *h)
6154{
6155 int this_cpu = smp_processor_id();
6156 struct rq *this_rq = cpu_rq(this_cpu);
6eb57e0d 6157 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
6158 CPU_IDLE : CPU_NOT_IDLE;
6159
6160 rebalance_domains(this_cpu, idle);
6161
1e3c88bd 6162 /*
83cd4fe2 6163 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
6164 * balancing on behalf of the other idle cpus whose ticks are
6165 * stopped.
6166 */
83cd4fe2 6167 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
6168}
6169
6170static inline int on_null_domain(int cpu)
6171{
90a6501f 6172 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
6173}
6174
6175/*
6176 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 6177 */
029632fb 6178void trigger_load_balance(struct rq *rq, int cpu)
1e3c88bd 6179{
1e3c88bd
PZ
6180 /* Don't need to rebalance while attached to NULL domain */
6181 if (time_after_eq(jiffies, rq->next_balance) &&
6182 likely(!on_null_domain(cpu)))
6183 raise_softirq(SCHED_SOFTIRQ);
3451d024 6184#ifdef CONFIG_NO_HZ_COMMON
1c792db7 6185 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
83cd4fe2
VP
6186 nohz_balancer_kick(cpu);
6187#endif
1e3c88bd
PZ
6188}
6189
0bcdcf28
CE
6190static void rq_online_fair(struct rq *rq)
6191{
6192 update_sysctl();
6193}
6194
6195static void rq_offline_fair(struct rq *rq)
6196{
6197 update_sysctl();
a4c96ae3
PB
6198
6199 /* Ensure any throttled groups are reachable by pick_next_task */
6200 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
6201}
6202
55e12e5e 6203#endif /* CONFIG_SMP */
e1d1484f 6204
bf0f6f24
IM
6205/*
6206 * scheduler tick hitting a task of our scheduling class:
6207 */
8f4d37ec 6208static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
6209{
6210 struct cfs_rq *cfs_rq;
6211 struct sched_entity *se = &curr->se;
6212
6213 for_each_sched_entity(se) {
6214 cfs_rq = cfs_rq_of(se);
8f4d37ec 6215 entity_tick(cfs_rq, se, queued);
bf0f6f24 6216 }
18bf2805 6217
10e84b97 6218 if (numabalancing_enabled)
cbee9f88 6219 task_tick_numa(rq, curr);
3d59eebc 6220
18bf2805 6221 update_rq_runnable_avg(rq, 1);
bf0f6f24
IM
6222}
6223
6224/*
cd29fe6f
PZ
6225 * called on fork with the child task as argument from the parent's context
6226 * - child not yet on the tasklist
6227 * - preemption disabled
bf0f6f24 6228 */
cd29fe6f 6229static void task_fork_fair(struct task_struct *p)
bf0f6f24 6230{
4fc420c9
DN
6231 struct cfs_rq *cfs_rq;
6232 struct sched_entity *se = &p->se, *curr;
00bf7bfc 6233 int this_cpu = smp_processor_id();
cd29fe6f
PZ
6234 struct rq *rq = this_rq();
6235 unsigned long flags;
6236
05fa785c 6237 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 6238
861d034e
PZ
6239 update_rq_clock(rq);
6240
4fc420c9
DN
6241 cfs_rq = task_cfs_rq(current);
6242 curr = cfs_rq->curr;
6243
6c9a27f5
DN
6244 /*
6245 * Not only the cpu but also the task_group of the parent might have
6246 * been changed after parent->se.parent,cfs_rq were copied to
6247 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
6248 * of child point to valid ones.
6249 */
6250 rcu_read_lock();
6251 __set_task_cpu(p, this_cpu);
6252 rcu_read_unlock();
bf0f6f24 6253
7109c442 6254 update_curr(cfs_rq);
cd29fe6f 6255
b5d9d734
MG
6256 if (curr)
6257 se->vruntime = curr->vruntime;
aeb73b04 6258 place_entity(cfs_rq, se, 1);
4d78e7b6 6259
cd29fe6f 6260 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 6261 /*
edcb60a3
IM
6262 * Upon rescheduling, sched_class::put_prev_task() will place
6263 * 'current' within the tree based on its new key value.
6264 */
4d78e7b6 6265 swap(curr->vruntime, se->vruntime);
aec0a514 6266 resched_task(rq->curr);
4d78e7b6 6267 }
bf0f6f24 6268
88ec22d3
PZ
6269 se->vruntime -= cfs_rq->min_vruntime;
6270
05fa785c 6271 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
6272}
6273
cb469845
SR
6274/*
6275 * Priority of the task has changed. Check to see if we preempt
6276 * the current task.
6277 */
da7a735e
PZ
6278static void
6279prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 6280{
da7a735e
PZ
6281 if (!p->se.on_rq)
6282 return;
6283
cb469845
SR
6284 /*
6285 * Reschedule if we are currently running on this runqueue and
6286 * our priority decreased, or if we are not currently running on
6287 * this runqueue and our priority is higher than the current's
6288 */
da7a735e 6289 if (rq->curr == p) {
cb469845
SR
6290 if (p->prio > oldprio)
6291 resched_task(rq->curr);
6292 } else
15afe09b 6293 check_preempt_curr(rq, p, 0);
cb469845
SR
6294}
6295
da7a735e
PZ
6296static void switched_from_fair(struct rq *rq, struct task_struct *p)
6297{
6298 struct sched_entity *se = &p->se;
6299 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6300
6301 /*
 6302 * Ensure the task's vruntime is normalized, so that when it's
6303 * switched back to the fair class the enqueue_entity(.flags=0) will
6304 * do the right thing.
6305 *
6306 * If it was on_rq, then the dequeue_entity(.flags=0) will already
 6307 * have normalized the vruntime; if it was !on_rq, then only when
6308 * the task is sleeping will it still have non-normalized vruntime.
6309 */
6310 if (!se->on_rq && p->state != TASK_RUNNING) {
6311 /*
6312 * Fix up our vruntime so that the current sleep doesn't
6313 * cause 'unlimited' sleep bonus.
6314 */
6315 place_entity(cfs_rq, se, 0);
6316 se->vruntime -= cfs_rq->min_vruntime;
6317 }
9ee474f5 6318
141965c7 6319#ifdef CONFIG_SMP
9ee474f5
PT
6320 /*
6321 * Remove our load from contribution when we leave sched_fair
6322 * and ensure we don't carry in an old decay_count if we
6323 * switch back.
6324 */
87e3c8ae
KT
6325 if (se->avg.decay_count) {
6326 __synchronize_entity_decay(se);
6327 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
9ee474f5
PT
6328 }
6329#endif
da7a735e
PZ
6330}
6331
cb469845
SR
6332/*
6333 * We switched to the sched_fair class.
6334 */
da7a735e 6335static void switched_to_fair(struct rq *rq, struct task_struct *p)
cb469845 6336{
da7a735e
PZ
6337 if (!p->se.on_rq)
6338 return;
6339
cb469845
SR
6340 /*
6341 * We were most likely switched from sched_rt, so
6342 * kick off the schedule if running, otherwise just see
6343 * if we can still preempt the current task.
6344 */
da7a735e 6345 if (rq->curr == p)
cb469845
SR
6346 resched_task(rq->curr);
6347 else
15afe09b 6348 check_preempt_curr(rq, p, 0);
cb469845
SR
6349}
6350
83b699ed
SV
6351/* Account for a task changing its policy or group.
6352 *
6353 * This routine is mostly called to set cfs_rq->curr field when a task
6354 * migrates between groups/classes.
6355 */
6356static void set_curr_task_fair(struct rq *rq)
6357{
6358 struct sched_entity *se = &rq->curr->se;
6359
ec12cb7f
PT
6360 for_each_sched_entity(se) {
6361 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6362
6363 set_next_entity(cfs_rq, se);
6364 /* ensure bandwidth has been allocated on our new cfs_rq */
6365 account_cfs_rq_runtime(cfs_rq, 0);
6366 }
83b699ed
SV
6367}
6368
029632fb
PZ
6369void init_cfs_rq(struct cfs_rq *cfs_rq)
6370{
6371 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
6372 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
6373#ifndef CONFIG_64BIT
6374 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
6375#endif
141965c7 6376#ifdef CONFIG_SMP
9ee474f5 6377 atomic64_set(&cfs_rq->decay_counter, 1);
2509940f 6378 atomic_long_set(&cfs_rq->removed_load, 0);
9ee474f5 6379#endif
029632fb
PZ
6380}
6381
810b3817 6382#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6383static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 6384{
aff3e498 6385 struct cfs_rq *cfs_rq;
b2b5ce02
PZ
6386 /*
6387 * If the task was not on the rq at the time of this cgroup movement
 6388 * it must have been asleep; sleeping tasks keep their ->vruntime
6389 * absolute on their old rq until wakeup (needed for the fair sleeper
6390 * bonus in place_entity()).
6391 *
6392 * If it was on the rq, we've just 'preempted' it, which does convert
6393 * ->vruntime to a relative base.
6394 *
6395 * Make sure both cases convert their relative position when migrating
6396 * to another cgroup's rq. This does somewhat interfere with the
6397 * fair sleeper stuff for the first placement, but who cares.
6398 */
7ceff013
DN
6399 /*
6400 * When !on_rq, vruntime of the task has usually NOT been normalized.
6401 * But there are some cases where it has already been normalized:
6402 *
6403 * - Moving a forked child which is waiting for being woken up by
6404 * wake_up_new_task().
62af3783
DN
6405 * - Moving a task which has been woken up by try_to_wake_up() and
6406 * waiting for actually being woken up by sched_ttwu_pending().
7ceff013
DN
6407 *
6408 * To prevent boost or penalty in the new cfs_rq caused by delta
6409 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6410 */
62af3783 6411 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7ceff013
DN
6412 on_rq = 1;
6413
b2b5ce02
PZ
6414 if (!on_rq)
6415 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6416 set_task_rq(p, task_cpu(p));
aff3e498
PT
6417 if (!on_rq) {
6418 cfs_rq = cfs_rq_of(&p->se);
6419 p->se.vruntime += cfs_rq->min_vruntime;
6420#ifdef CONFIG_SMP
6421 /*
6422 * migrate_task_rq_fair() will have removed our previous
6423 * contribution, but we must synchronize for ongoing future
6424 * decay.
6425 */
6426 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6427 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6428#endif
6429 }
810b3817 6430}
029632fb
PZ
6431
6432void free_fair_sched_group(struct task_group *tg)
6433{
6434 int i;
6435
6436 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6437
6438 for_each_possible_cpu(i) {
6439 if (tg->cfs_rq)
6440 kfree(tg->cfs_rq[i]);
6441 if (tg->se)
6442 kfree(tg->se[i]);
6443 }
6444
6445 kfree(tg->cfs_rq);
6446 kfree(tg->se);
6447}
6448
6449int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6450{
6451 struct cfs_rq *cfs_rq;
6452 struct sched_entity *se;
6453 int i;
6454
6455 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6456 if (!tg->cfs_rq)
6457 goto err;
6458 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6459 if (!tg->se)
6460 goto err;
6461
6462 tg->shares = NICE_0_LOAD;
6463
6464 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6465
6466 for_each_possible_cpu(i) {
6467 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6468 GFP_KERNEL, cpu_to_node(i));
6469 if (!cfs_rq)
6470 goto err;
6471
6472 se = kzalloc_node(sizeof(struct sched_entity),
6473 GFP_KERNEL, cpu_to_node(i));
6474 if (!se)
6475 goto err_free_rq;
6476
6477 init_cfs_rq(cfs_rq);
6478 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6479 }
6480
6481 return 1;
6482
6483err_free_rq:
6484 kfree(cfs_rq);
6485err:
6486 return 0;
6487}
6488
6489void unregister_fair_sched_group(struct task_group *tg, int cpu)
6490{
6491 struct rq *rq = cpu_rq(cpu);
6492 unsigned long flags;
6493
6494 /*
6495 * Only empty task groups can be destroyed; so we can speculatively
6496 * check on_list without danger of it being re-added.
6497 */
6498 if (!tg->cfs_rq[cpu]->on_list)
6499 return;
6500
6501 raw_spin_lock_irqsave(&rq->lock, flags);
6502 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6503 raw_spin_unlock_irqrestore(&rq->lock, flags);
6504}
6505
6506void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6507 struct sched_entity *se, int cpu,
6508 struct sched_entity *parent)
6509{
6510 struct rq *rq = cpu_rq(cpu);
6511
6512 cfs_rq->tg = tg;
6513 cfs_rq->rq = rq;
029632fb
PZ
6514 init_cfs_rq_runtime(cfs_rq);
6515
6516 tg->cfs_rq[cpu] = cfs_rq;
6517 tg->se[cpu] = se;
6518
6519 /* se could be NULL for root_task_group */
6520 if (!se)
6521 return;
6522
6523 if (!parent)
6524 se->cfs_rq = &rq->cfs;
6525 else
6526 se->cfs_rq = parent->my_q;
6527
6528 se->my_q = cfs_rq;
6529 update_load_set(&se->load, 0);
6530 se->parent = parent;
6531}
6532
6533static DEFINE_MUTEX(shares_mutex);
6534
6535int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6536{
6537 int i;
6538 unsigned long flags;
6539
6540 /*
6541 * We can't change the weight of the root cgroup.
6542 */
6543 if (!tg->se[0])
6544 return -EINVAL;
6545
6546 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6547
6548 mutex_lock(&shares_mutex);
6549 if (tg->shares == shares)
6550 goto done;
6551
6552 tg->shares = shares;
6553 for_each_possible_cpu(i) {
6554 struct rq *rq = cpu_rq(i);
6555 struct sched_entity *se;
6556
6557 se = tg->se[i];
6558 /* Propagate contribution to hierarchy */
6559 raw_spin_lock_irqsave(&rq->lock, flags);
71b1da46
FW
6560
6561 /* Possible calls to update_curr() need rq clock */
6562 update_rq_clock(rq);
17bc14b7 6563 for_each_sched_entity(se)
029632fb
PZ
6564 update_cfs_shares(group_cfs_rq(se));
6565 raw_spin_unlock_irqrestore(&rq->lock, flags);
6566 }
6567
6568done:
6569 mutex_unlock(&shares_mutex);
6570 return 0;
6571}
6572#else /* CONFIG_FAIR_GROUP_SCHED */
6573
6574void free_fair_sched_group(struct task_group *tg) { }
6575
6576int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6577{
6578 return 1;
6579}
6580
6581void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6582
6583#endif /* CONFIG_FAIR_GROUP_SCHED */
6584
810b3817 6585
6d686f45 6586static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
6587{
6588 struct sched_entity *se = &task->se;
0d721cea
PW
6589 unsigned int rr_interval = 0;
6590
6591 /*
6592 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6593 * idle runqueue:
6594 */
0d721cea 6595 if (rq->cfs.load.weight)
a59f4e07 6596 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
6597
6598 return rr_interval;
6599}
6600
bf0f6f24
IM
6601/*
6602 * All the scheduling class methods:
6603 */
029632fb 6604const struct sched_class fair_sched_class = {
5522d5d5 6605 .next = &idle_sched_class,
bf0f6f24
IM
6606 .enqueue_task = enqueue_task_fair,
6607 .dequeue_task = dequeue_task_fair,
6608 .yield_task = yield_task_fair,
d95f4122 6609 .yield_to_task = yield_to_task_fair,
bf0f6f24 6610
2e09bf55 6611 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
6612
6613 .pick_next_task = pick_next_task_fair,
6614 .put_prev_task = put_prev_task_fair,
6615
681f3e68 6616#ifdef CONFIG_SMP
4ce72a2c 6617 .select_task_rq = select_task_rq_fair,
0a74bef8 6618 .migrate_task_rq = migrate_task_rq_fair,
141965c7 6619
0bcdcf28
CE
6620 .rq_online = rq_online_fair,
6621 .rq_offline = rq_offline_fair,
88ec22d3
PZ
6622
6623 .task_waking = task_waking_fair,
681f3e68 6624#endif
bf0f6f24 6625
83b699ed 6626 .set_curr_task = set_curr_task_fair,
bf0f6f24 6627 .task_tick = task_tick_fair,
cd29fe6f 6628 .task_fork = task_fork_fair,
cb469845
SR
6629
6630 .prio_changed = prio_changed_fair,
da7a735e 6631 .switched_from = switched_from_fair,
cb469845 6632 .switched_to = switched_to_fair,
810b3817 6633
0d721cea
PW
6634 .get_rr_interval = get_rr_interval_fair,
6635
810b3817 6636#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6637 .task_move_group = task_move_group_fair,
810b3817 6638#endif
bf0f6f24
IM
6639};
6640
6641#ifdef CONFIG_SCHED_DEBUG
029632fb 6642void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 6643{
bf0f6f24
IM
6644 struct cfs_rq *cfs_rq;
6645
5973e5b9 6646 rcu_read_lock();
c3b64f1e 6647 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 6648 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 6649 rcu_read_unlock();
bf0f6f24
IM
6650}
6651#endif
029632fb
PZ
6652
6653__init void init_sched_fair_class(void)
6654{
6655#ifdef CONFIG_SMP
6656 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6657
3451d024 6658#ifdef CONFIG_NO_HZ_COMMON
554cecaf 6659 nohz.next_balance = jiffies;
029632fb 6660 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
71325960 6661 cpu_notifier(sched_ilb_notifier, 0);
029632fb
PZ
6662#endif
6663#endif /* SMP */
6664
6665}