git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - kernel/sched/fair.c
sched: Implement smarter wake-affine logic
bf0f6f24
IM
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
21805085
PZ
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
bf0f6f24
IM
21 */
22
9745512c 23#include <linux/latencytop.h>
1983a922 24#include <linux/sched.h>
3436ae12 25#include <linux/cpumask.h>
029632fb
PZ
26#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
cbee9f88 29#include <linux/mempolicy.h>
e14808b4 30#include <linux/migrate.h>
cbee9f88 31#include <linux/task_work.h>
029632fb
PZ
32
33#include <trace/events/sched.h>
34
35#include "sched.h"
9745512c 36
bf0f6f24 37/*
21805085 38 * Targeted preemption latency for CPU-bound tasks:
864616ee 39 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 40 *
21805085 41 * NOTE: this latency value is not the same as the concept of
d274a4ce
IM
42 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
bf0f6f24 45 *
d274a4ce
IM
46 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
bf0f6f24 48 */
21406928
MG
49unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 51
1983a922
CE
52/*
53 * The initial- and re-scaling of tunables is configurable
 54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 58 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
2bd8e6d4 64/*
b2be5e96 65 * Minimal preemption granularity for CPU-bound tasks:
864616ee 66 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 67 */
0bf377bb
IM
68unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
21805085
PZ
70
71/*
b2be5e96
PZ
 72 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
0bf377bb 74static unsigned int sched_nr_latency = 8;
b2be5e96
PZ
75
76/*
2bba22c5 77 * If set, the child runs first after fork. If set to 0 (default) then
b2be5e96 78 * the parent will (try to) run first.
21805085 79 */
2bba22c5 80unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 81
bf0f6f24
IM
82/*
83 * SCHED_OTHER wake-up granularity.
172e082a 84 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24
IM
85 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
172e082a 90unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
0bcdcf28 91unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 92
da84d961
IM
93const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
a7a4f8a7
PT
95/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
ec12cb7f
PT
102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114#endif
115
8527632d
PG
116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
029632fb
PZ
134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
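
/*
 * Editorial sketch (not part of fair.c): the scaling above in standalone
 * form. With SCHED_TUNABLESCALING_LOG, factor = 1 + ilog2(min(ncpus, 8)),
 * so e.g. an 8-cpu box runs with latency 6ms * 4 = 24ms and minimum
 * granularity 0.75ms * 4 = 3ms. local_ilog2() is a stand-in for the
 * kernel's ilog2().
 */
#include <stdio.h>

static unsigned int local_ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	const unsigned int normalized_latency = 6000000;	/* 6ms in ns */
	const unsigned int normalized_min_gran = 750000;	/* 0.75ms in ns */
	unsigned int cpus;

	for (cpus = 1; cpus <= 8; cpus *= 2) {
		unsigned int factor = 1 + local_ilog2(cpus);

		printf("%u cpus: latency=%uns min_granularity=%uns\n",
		       cpus, factor * normalized_latency,
		       factor * normalized_min_gran);
	}
	return 0;
}
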
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
181#if BITS_PER_LONG == 32
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32
188
189/*
190 * Shift right and round:
191 */
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193
194/*
195 * delta *= weight / lw
196 */
197static unsigned long
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{
201 u64 tmp;
202
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212
213 if (!lw->inv_weight) {
214 unsigned long w = scale_load_down(lw->weight);
215
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 lw->inv_weight = 1;
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 }
223
224 /*
225 * Check whether we'd overflow the 64-bit multiplication:
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234}
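
/*
 * Editorial sketch (not part of fair.c): the inverse-weight trick above in
 * userspace. Rather than dividing by lw->weight on every call, a single
 * inv_weight ~= 2^32 / weight is cached, and "delta * weight / lw" becomes
 * a multiply plus shift-right-and-round; the overflow pre-scaling branch
 * is omitted here for brevity.
 */
#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32
#define SRR(x, y)	(((x) + (1ULL << ((y) - 1))) >> (y))

int main(void)
{
	uint64_t delta_exec = 1000000;	/* 1ms of runtime, in ns */
	uint64_t weight = 1024;		/* NICE_0_LOAD for one task */
	uint64_t lw_weight = 3072;	/* runqueue load: three nice-0 tasks */
	uint64_t inv_weight = 0xffffffffULL / lw_weight; /* ~2^32 / weight */
	uint64_t tmp = delta_exec * weight;

	/* expect roughly delta * 1024/3072 ~= 333333ns */
	printf("scaled delta = %llu ns\n",
	       (unsigned long long)SRR(tmp * inv_weight, WMULT_SHIFT));
	return 0;
}
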
235
236
237const struct sched_class fair_sched_class;
a4c2f00f 238
bf0f6f24
IM
239/**************************************************************
240 * CFS operations on generic schedulable entities:
241 */
242
62160e3f 243#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 244
62160e3f 245/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
246static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247{
62160e3f 248 return cfs_rq->rq;
bf0f6f24
IM
249}
250
62160e3f
IM
251/* An entity is a task if it doesn't "own" a runqueue */
252#define entity_is_task(se) (!se->my_q)
bf0f6f24 253
8f48894f
PZ
254static inline struct task_struct *task_of(struct sched_entity *se)
255{
256#ifdef CONFIG_SCHED_DEBUG
257 WARN_ON_ONCE(!entity_is_task(se));
258#endif
259 return container_of(se, struct task_struct, se);
260}
261
b758149c
PZ
262/* Walk up scheduling entities hierarchy */
263#define for_each_sched_entity(se) \
264 for (; se; se = se->parent)
265
266static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267{
268 return p->se.cfs_rq;
269}
270
271/* runqueue on which this entity is (to be) queued */
272static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273{
274 return se->cfs_rq;
275}
276
277/* runqueue "owned" by this group */
278static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279{
280 return grp->my_q;
281}
282
aff3e498
PT
283static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 int force_update);
9ee474f5 285
3d4b47b4
PZ
286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
67e86250
PT
289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
3d4b47b4 301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
67e86250 302 }
3d4b47b4
PZ
303
304 cfs_rq->on_list = 1;
9ee474f5 305 /* We should have no load, but we need to update last_decay. */
aff3e498 306 update_cfs_rq_blocked_load(cfs_rq, 0);
3d4b47b4
PZ
307 }
308}
309
310static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311{
312 if (cfs_rq->on_list) {
313 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 cfs_rq->on_list = 0;
315 }
316}
317
b758149c
PZ
 318/* Iterate through all leaf cfs_rq's on a runqueue */
319#define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321
 322/* Do the two (enqueued) entities belong to the same group? */
323static inline int
324is_same_group(struct sched_entity *se, struct sched_entity *pse)
325{
326 if (se->cfs_rq == pse->cfs_rq)
327 return 1;
328
329 return 0;
330}
331
332static inline struct sched_entity *parent_entity(struct sched_entity *se)
333{
334 return se->parent;
335}
336
464b7527
PZ
337/* return depth at which a sched entity is present in the hierarchy */
338static inline int depth_se(struct sched_entity *se)
339{
340 int depth = 0;
341
342 for_each_sched_entity(se)
343 depth++;
344
345 return depth;
346}
347
348static void
349find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350{
351 int se_depth, pse_depth;
352
353 /*
354 * preemption test can be made between sibling entities who are in the
355 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
356 * both tasks until we find their ancestors who are siblings of common
357 * parent.
358 */
359
360 /* First walk up until both entities are at same depth */
361 se_depth = depth_se(*se);
362 pse_depth = depth_se(*pse);
363
364 while (se_depth > pse_depth) {
365 se_depth--;
366 *se = parent_entity(*se);
367 }
368
369 while (pse_depth > se_depth) {
370 pse_depth--;
371 *pse = parent_entity(*pse);
372 }
373
374 while (!is_same_group(*se, *pse)) {
375 *se = parent_entity(*se);
376 *pse = parent_entity(*pse);
377 }
378}
379
8f48894f
PZ
380#else /* !CONFIG_FAIR_GROUP_SCHED */
381
382static inline struct task_struct *task_of(struct sched_entity *se)
383{
384 return container_of(se, struct task_struct, se);
385}
bf0f6f24 386
62160e3f
IM
387static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388{
389 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
390}
391
392#define entity_is_task(se) 1
393
b758149c
PZ
394#define for_each_sched_entity(se) \
395 for (; se; se = NULL)
bf0f6f24 396
b758149c 397static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 398{
b758149c 399 return &task_rq(p)->cfs;
bf0f6f24
IM
400}
401
b758149c
PZ
402static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403{
404 struct task_struct *p = task_of(se);
405 struct rq *rq = task_rq(p);
406
407 return &rq->cfs;
408}
409
410/* runqueue "owned" by this group */
411static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412{
413 return NULL;
414}
415
3d4b47b4
PZ
416static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417{
418}
419
420static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421{
422}
423
b758149c
PZ
424#define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426
427static inline int
428is_same_group(struct sched_entity *se, struct sched_entity *pse)
429{
430 return 1;
431}
432
433static inline struct sched_entity *parent_entity(struct sched_entity *se)
434{
435 return NULL;
436}
437
464b7527
PZ
438static inline void
439find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440{
441}
442
b758149c
PZ
443#endif /* CONFIG_FAIR_GROUP_SCHED */
444
6c16a6dc
PZ
445static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
bf0f6f24
IM
447
448/**************************************************************
449 * Scheduling class tree data structure manipulation methods:
450 */
451
1bf08230 452static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
02e0431a 453{
1bf08230 454 s64 delta = (s64)(vruntime - max_vruntime);
368059a9 455 if (delta > 0)
1bf08230 456 max_vruntime = vruntime;
02e0431a 457
1bf08230 458 return max_vruntime;
02e0431a
PZ
459}
460
0702e3eb 461static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
462{
463 s64 delta = (s64)(vruntime - min_vruntime);
464 if (delta < 0)
465 min_vruntime = vruntime;
466
467 return min_vruntime;
468}
469
54fdc581
FC
470static inline int entity_before(struct sched_entity *a,
471 struct sched_entity *b)
472{
473 return (s64)(a->vruntime - b->vruntime) < 0;
474}
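
/*
 * Editorial sketch (not part of fair.c): why the comparisons above go
 * through a signed delta. vruntime is u64 and may wrap, but
 * (s64)(a - b) < 0 stays correct as long as the two values are within
 * 2^63 of each other, which a direct a < b does not.
 */
#include <stdio.h>
#include <stdint.h>

static int vruntime_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 100;	/* about to overflow */
	uint64_t wrapped = 50;			/* already wrapped past 0 */

	/* naive compare says 0 (wrong); signed delta says 1 (right) */
	printf("naive: %d, signed delta: %d\n",
	       near_wrap < wrapped, vruntime_before(near_wrap, wrapped));
	return 0;
}
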
475
1af5f730
PZ
476static void update_min_vruntime(struct cfs_rq *cfs_rq)
477{
478 u64 vruntime = cfs_rq->min_vruntime;
479
480 if (cfs_rq->curr)
481 vruntime = cfs_rq->curr->vruntime;
482
483 if (cfs_rq->rb_leftmost) {
484 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 struct sched_entity,
486 run_node);
487
e17036da 488 if (!cfs_rq->curr)
1af5f730
PZ
489 vruntime = se->vruntime;
490 else
491 vruntime = min_vruntime(vruntime, se->vruntime);
492 }
493
1bf08230 494 /* ensure we never gain time by being placed backwards. */
1af5f730 495 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
496#ifndef CONFIG_64BIT
497 smp_wmb();
498 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499#endif
1af5f730
PZ
500}
501
bf0f6f24
IM
502/*
503 * Enqueue an entity into the rb-tree:
504 */
0702e3eb 505static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
506{
507 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 struct rb_node *parent = NULL;
509 struct sched_entity *entry;
bf0f6f24
IM
510 int leftmost = 1;
511
512 /*
513 * Find the right place in the rbtree:
514 */
515 while (*link) {
516 parent = *link;
517 entry = rb_entry(parent, struct sched_entity, run_node);
518 /*
 519 * We don't care about collisions. Nodes with
520 * the same key stay together.
521 */
2bd2d6f2 522 if (entity_before(se, entry)) {
bf0f6f24
IM
523 link = &parent->rb_left;
524 } else {
525 link = &parent->rb_right;
526 leftmost = 0;
527 }
528 }
529
530 /*
531 * Maintain a cache of leftmost tree entries (it is frequently
532 * used):
533 */
1af5f730 534 if (leftmost)
57cb499d 535 cfs_rq->rb_leftmost = &se->run_node;
bf0f6f24
IM
536
537 rb_link_node(&se->run_node, parent, link);
538 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
539}
540
0702e3eb 541static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 542{
3fe69747
PZ
543 if (cfs_rq->rb_leftmost == &se->run_node) {
544 struct rb_node *next_node;
3fe69747
PZ
545
546 next_node = rb_next(&se->run_node);
547 cfs_rq->rb_leftmost = next_node;
3fe69747 548 }
e9acbff6 549
bf0f6f24 550 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
551}
552
029632fb 553struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 554{
f4b6755f
PZ
555 struct rb_node *left = cfs_rq->rb_leftmost;
556
557 if (!left)
558 return NULL;
559
560 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
561}
562
ac53db59
RR
563static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564{
565 struct rb_node *next = rb_next(&se->run_node);
566
567 if (!next)
568 return NULL;
569
570 return rb_entry(next, struct sched_entity, run_node);
571}
572
573#ifdef CONFIG_SCHED_DEBUG
029632fb 574struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 575{
7eee3e67 576 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
aeb73b04 577
70eee74b
BS
578 if (!last)
579 return NULL;
7eee3e67
IM
580
581 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
582}
583
bf0f6f24
IM
584/**************************************************************
585 * Scheduling class statistics methods:
586 */
587
acb4a848 588int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 589 void __user *buffer, size_t *lenp,
b2be5e96
PZ
590 loff_t *ppos)
591{
8d65af78 592 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
acb4a848 593 int factor = get_update_sysctl_factor();
b2be5e96
PZ
594
595 if (ret || !write)
596 return ret;
597
598 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 sysctl_sched_min_granularity);
600
acb4a848
CE
601#define WRT_SYSCTL(name) \
602 (normalized_sysctl_##name = sysctl_##name / (factor))
603 WRT_SYSCTL(sched_min_granularity);
604 WRT_SYSCTL(sched_latency);
605 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
606#undef WRT_SYSCTL
607
b2be5e96
PZ
608 return 0;
609}
610#endif
647e7cac 611
a7be37ac 612/*
f9c0b095 613 * delta /= w
a7be37ac
PZ
614 */
615static inline unsigned long
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{
f9c0b095
PZ
618 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
620
621 return delta;
622}
623
647e7cac
IM
624/*
625 * The idea is to set a period in which each task runs once.
626 *
532b1858 627 * When there are too many tasks (sched_nr_latency) we have to stretch
647e7cac
IM
628 * this period because otherwise the slices get too small.
629 *
630 * p = (nr <= nl) ? l : l*nr/nl
631 */
4d78e7b6
PZ
632static u64 __sched_period(unsigned long nr_running)
633{
634 u64 period = sysctl_sched_latency;
b2be5e96 635 unsigned long nr_latency = sched_nr_latency;
4d78e7b6
PZ
636
637 if (unlikely(nr_running > nr_latency)) {
4bf0b771 638 period = sysctl_sched_min_granularity;
4d78e7b6 639 period *= nr_running;
4d78e7b6
PZ
640 }
641
642 return period;
643}
644
647e7cac
IM
645/*
646 * We calculate the wall-time slice from the period by taking a part
647 * proportional to the weight.
648 *
f9c0b095 649 * s = p*P[w/rw]
647e7cac 650 */
6d0f0ebd 651static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 652{
0a582440 653 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 654
0a582440 655 for_each_sched_entity(se) {
6272d68c 656 struct load_weight *load;
3104bf03 657 struct load_weight lw;
6272d68c
LM
658
659 cfs_rq = cfs_rq_of(se);
660 load = &cfs_rq->load;
f9c0b095 661
0a582440 662 if (unlikely(!se->on_rq)) {
3104bf03 663 lw = cfs_rq->load;
0a582440
MG
664
665 update_load_add(&lw, se->load.weight);
666 load = &lw;
667 }
668 slice = calc_delta_mine(slice, se->load.weight, load);
669 }
670 return slice;
bf0f6f24
IM
671}
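
/*
 * Editorial sketch (not part of fair.c): the period/slice arithmetic above
 * for a flat runqueue of equal-weight tasks. Up to sched_nr_latency tasks
 * share sysctl_sched_latency; beyond that the period stretches so nobody
 * drops below the minimum granularity.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t latency = 6000000;	/* sysctl_sched_latency, ns */
	const uint64_t min_gran = 750000;	/* sysctl_sched_min_granularity */
	const unsigned long nr_latency = 8;	/* latency / min_granularity */
	unsigned long nr;

	for (nr = 2; nr <= 16; nr *= 2) {
		uint64_t period = nr > nr_latency ? min_gran * nr : latency;

		/* equal weights: each task's slice is period / nr */
		printf("nr_running=%2lu period=%8lluns slice=%7lluns\n",
		       nr, (unsigned long long)period,
		       (unsigned long long)(period / nr));
	}
	return 0;
}
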
672
647e7cac 673/*
660cc00f 674 * We calculate the vruntime slice of a to-be-inserted task.
647e7cac 675 *
f9c0b095 676 * vs = s/w
647e7cac 677 */
f9c0b095 678static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 679{
f9c0b095 680 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
681}
682
a75cdaa9
AS
683#ifdef CONFIG_SMP
684static inline void __update_task_entity_contrib(struct sched_entity *se);
685
 686/* Give a new task initial runnable values so its load weighs heavily in its infancy */
687void init_task_runnable_average(struct task_struct *p)
688{
689 u32 slice;
690
691 p->se.avg.decay_count = 0;
692 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
693 p->se.avg.runnable_avg_sum = slice;
694 p->se.avg.runnable_avg_period = slice;
695 __update_task_entity_contrib(&p->se);
696}
697#else
698void init_task_runnable_average(struct task_struct *p)
699{
700}
701#endif
702
bf0f6f24
IM
703/*
704 * Update the current task's runtime statistics. Skip current tasks that
705 * are not in our scheduling class.
706 */
707static inline void
8ebc91d9
IM
708__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
709 unsigned long delta_exec)
bf0f6f24 710{
bbdba7c0 711 unsigned long delta_exec_weighted;
bf0f6f24 712
41acab88
LDM
713 schedstat_set(curr->statistics.exec_max,
714 max((u64)delta_exec, curr->statistics.exec_max));
bf0f6f24
IM
715
716 curr->sum_exec_runtime += delta_exec;
7a62eabc 717 schedstat_add(cfs_rq, exec_clock, delta_exec);
a7be37ac 718 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
88ec22d3 719
e9acbff6 720 curr->vruntime += delta_exec_weighted;
1af5f730 721 update_min_vruntime(cfs_rq);
bf0f6f24
IM
722}
723
b7cc0896 724static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 725{
429d43bc 726 struct sched_entity *curr = cfs_rq->curr;
78becc27 727 u64 now = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
728 unsigned long delta_exec;
729
730 if (unlikely(!curr))
731 return;
732
733 /*
734 * Get the amount of time the current task was running
735 * since the last time we changed load (this cannot
736 * overflow on 32 bits):
737 */
8ebc91d9 738 delta_exec = (unsigned long)(now - curr->exec_start);
34f28ecd
PZ
739 if (!delta_exec)
740 return;
bf0f6f24 741
8ebc91d9
IM
742 __update_curr(cfs_rq, curr, delta_exec);
743 curr->exec_start = now;
d842de87
SV
744
745 if (entity_is_task(curr)) {
746 struct task_struct *curtask = task_of(curr);
747
f977bb49 748 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d842de87 749 cpuacct_charge(curtask, delta_exec);
f06febc9 750 account_group_exec_runtime(curtask, delta_exec);
d842de87 751 }
ec12cb7f
PT
752
753 account_cfs_rq_runtime(cfs_rq, delta_exec);
bf0f6f24
IM
754}
755
756static inline void
5870db5b 757update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 758{
78becc27 759 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
bf0f6f24
IM
760}
761
bf0f6f24
IM
762/*
763 * Task is being enqueued - update stats:
764 */
d2417e5a 765static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 766{
bf0f6f24
IM
767 /*
768 * Are we enqueueing a waiting task? (for current tasks
769 * a dequeue/enqueue event is a NOP)
770 */
429d43bc 771 if (se != cfs_rq->curr)
5870db5b 772 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
773}
774
bf0f6f24 775static void
9ef0a961 776update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 777{
41acab88 778 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
78becc27 779 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
41acab88
LDM
780 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
781 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
78becc27 782 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
783#ifdef CONFIG_SCHEDSTATS
784 if (entity_is_task(se)) {
785 trace_sched_stat_wait(task_of(se),
78becc27 786 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
787 }
788#endif
41acab88 789 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
790}
791
792static inline void
19b6a2e3 793update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 794{
bf0f6f24
IM
795 /*
796 * Mark the end of the wait period if dequeueing a
797 * waiting task:
798 */
429d43bc 799 if (se != cfs_rq->curr)
9ef0a961 800 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
801}
802
803/*
804 * We are picking a new current task - update its stats:
805 */
806static inline void
79303e9e 807update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
808{
809 /*
810 * We are starting a new run period:
811 */
78becc27 812 se->exec_start = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
813}
814
bf0f6f24
IM
815/**************************************************
816 * Scheduling class queueing methods:
817 */
818
cbee9f88
PZ
819#ifdef CONFIG_NUMA_BALANCING
820/*
6e5fb223 821 * numa task sample period in ms
cbee9f88 822 */
6e5fb223 823unsigned int sysctl_numa_balancing_scan_period_min = 100;
b8593bfd
MG
824unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
825unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
6e5fb223
PZ
826
827/* Portion of address space to scan in MB */
828unsigned int sysctl_numa_balancing_scan_size = 256;
cbee9f88 829
4b96a29b
PZ
830/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
831unsigned int sysctl_numa_balancing_scan_delay = 1000;
832
cbee9f88
PZ
833static void task_numa_placement(struct task_struct *p)
834{
2832bc19 835 int seq;
cbee9f88 836
2832bc19
HD
837 if (!p->mm) /* for example, ksmd faulting in a user's mm */
838 return;
839 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
840 if (p->numa_scan_seq == seq)
841 return;
842 p->numa_scan_seq = seq;
843
844 /* FIXME: Scheduling placement policy hints go here */
845}
846
847/*
848 * Got a PROT_NONE fault for a page on @node.
849 */
b8593bfd 850void task_numa_fault(int node, int pages, bool migrated)
cbee9f88
PZ
851{
852 struct task_struct *p = current;
853
1a687c2e
MG
854 if (!sched_feat_numa(NUMA))
855 return;
856
cbee9f88
PZ
857 /* FIXME: Allocate task-specific structure for placement policy here */
858
fb003b80 859 /*
b8593bfd
MG
860 * If pages are properly placed (did not migrate) then scan slower.
861 * This is reset periodically in case of phase changes
fb003b80 862 */
b8593bfd
MG
863 if (!migrated)
864 p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
865 p->numa_scan_period + jiffies_to_msecs(10));
fb003b80 866
cbee9f88
PZ
867 task_numa_placement(p);
868}
869
6e5fb223
PZ
870static void reset_ptenuma_scan(struct task_struct *p)
871{
872 ACCESS_ONCE(p->mm->numa_scan_seq)++;
873 p->mm->numa_scan_offset = 0;
874}
875
cbee9f88
PZ
876/*
877 * The expensive part of numa migration is done from task_work context.
878 * Triggered from task_tick_numa().
879 */
880void task_numa_work(struct callback_head *work)
881{
882 unsigned long migrate, next_scan, now = jiffies;
883 struct task_struct *p = current;
884 struct mm_struct *mm = p->mm;
6e5fb223 885 struct vm_area_struct *vma;
9f40604c
MG
886 unsigned long start, end;
887 long pages;
cbee9f88
PZ
888
889 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
890
891 work->next = work; /* protect against double add */
892 /*
893 * Who cares about NUMA placement when they're dying.
894 *
895 * NOTE: make sure not to dereference p->mm before this check,
896 * exit_task_work() happens _after_ exit_mm() so we could be called
897 * without p->mm even though we still had it when we enqueued this
898 * work.
899 */
900 if (p->flags & PF_EXITING)
901 return;
902
5bca2303
MG
903 /*
904 * We do not care about task placement until a task runs on a node
905 * other than the first one used by the address space. This is
906 * largely because migrations are driven by what CPU the task
907 * is running on. If it's never scheduled on another node, it'll
908 * not migrate so why bother trapping the fault.
909 */
910 if (mm->first_nid == NUMA_PTE_SCAN_INIT)
911 mm->first_nid = numa_node_id();
912 if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
913 /* Are we running on a new node yet? */
914 if (numa_node_id() == mm->first_nid &&
915 !sched_feat_numa(NUMA_FORCE))
916 return;
917
918 mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
919 }
920
b8593bfd
MG
921 /*
922 * Reset the scan period if enough time has gone by. Objective is that
923 * scanning will be reduced if pages are properly placed. As tasks
924 * can enter different phases this needs to be re-examined. Lacking
925 * proper tracking of reference behaviour, this blunt hammer is used.
926 */
927 migrate = mm->numa_next_reset;
928 if (time_after(now, migrate)) {
929 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
930 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
931 xchg(&mm->numa_next_reset, next_scan);
932 }
933
cbee9f88
PZ
934 /*
935 * Enforce maximal scan/migration frequency..
936 */
937 migrate = mm->numa_next_scan;
938 if (time_before(now, migrate))
939 return;
940
941 if (p->numa_scan_period == 0)
942 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
943
fb003b80 944 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
945 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
946 return;
947
e14808b4
MG
948 /*
949 * Do not set pte_numa if the current running node is rate-limited.
950 * This loses statistics on the fault but if we are unwilling to
951 * migrate to this node, it is less likely we can do useful work
952 */
953 if (migrate_ratelimited(numa_node_id()))
954 return;
955
9f40604c
MG
956 start = mm->numa_scan_offset;
957 pages = sysctl_numa_balancing_scan_size;
958 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
959 if (!pages)
960 return;
cbee9f88 961
6e5fb223 962 down_read(&mm->mmap_sem);
9f40604c 963 vma = find_vma(mm, start);
6e5fb223
PZ
964 if (!vma) {
965 reset_ptenuma_scan(p);
9f40604c 966 start = 0;
6e5fb223
PZ
967 vma = mm->mmap;
968 }
9f40604c 969 for (; vma; vma = vma->vm_next) {
6e5fb223
PZ
970 if (!vma_migratable(vma))
971 continue;
972
973 /* Skip small VMAs. They are not likely to be of relevance */
221392c3 974 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
6e5fb223
PZ
975 continue;
976
9f40604c
MG
977 do {
978 start = max(start, vma->vm_start);
979 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
980 end = min(end, vma->vm_end);
981 pages -= change_prot_numa(vma, start, end);
6e5fb223 982
9f40604c
MG
983 start = end;
984 if (pages <= 0)
985 goto out;
986 } while (end != vma->vm_end);
cbee9f88 987 }
6e5fb223 988
9f40604c 989out:
6e5fb223
PZ
990 /*
991 * It is possible to reach the end of the VMA list but the last few VMAs are
 992 * not guaranteed to be vma_migratable. If they are not, we would find the
993 * !migratable VMA on the next scan but not reset the scanner to the start
994 * so check it now.
995 */
996 if (vma)
9f40604c 997 mm->numa_scan_offset = start;
6e5fb223
PZ
998 else
999 reset_ptenuma_scan(p);
1000 up_read(&mm->mmap_sem);
cbee9f88
PZ
1001}
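
/*
 * Editorial sketch (not part of fair.c): the scan-window sizing used
 * above. sysctl_numa_balancing_scan_size is in MB, so shifting left by
 * (20 - PAGE_SHIFT) converts it to pages; 4KiB pages (PAGE_SHIFT = 12)
 * are assumed here.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pages = 256;	/* scan_size in MB */
	const int page_shift = 12;	/* 4KiB pages */

	pages <<= 20 - page_shift;	/* MB -> pages */
	printf("256MB scan window = %lu pages\n", pages);	/* 65536 */
	return 0;
}
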
1002
1003/*
1004 * Drive the periodic memory faults..
1005 */
1006void task_tick_numa(struct rq *rq, struct task_struct *curr)
1007{
1008 struct callback_head *work = &curr->numa_work;
1009 u64 period, now;
1010
1011 /*
1012 * We don't care about NUMA placement if we don't have memory.
1013 */
1014 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1015 return;
1016
1017 /*
1018 * Using runtime rather than walltime has the dual advantage that
1019 * we (mostly) drive the selection from busy threads and that the
1020 * task needs to have done some actual work before we bother with
1021 * NUMA placement.
1022 */
1023 now = curr->se.sum_exec_runtime;
1024 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1025
1026 if (now - curr->node_stamp > period) {
4b96a29b
PZ
1027 if (!curr->node_stamp)
1028 curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
cbee9f88
PZ
1029 curr->node_stamp = now;
1030
1031 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1032 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1033 task_work_add(curr, work, true);
1034 }
1035 }
1036}
1037#else
1038static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1039{
1040}
1041#endif /* CONFIG_NUMA_BALANCING */
1042
30cfdcfc
DA
1043static void
1044account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1045{
1046 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 1047 if (!parent_entity(se))
029632fb 1048 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7
PZ
1049#ifdef CONFIG_SMP
1050 if (entity_is_task(se))
eb95308e 1051 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
367456c7 1052#endif
30cfdcfc 1053 cfs_rq->nr_running++;
30cfdcfc
DA
1054}
1055
1056static void
1057account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1058{
1059 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 1060 if (!parent_entity(se))
029632fb 1061 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 1062 if (entity_is_task(se))
b87f1724 1063 list_del_init(&se->group_node);
30cfdcfc 1064 cfs_rq->nr_running--;
30cfdcfc
DA
1065}
1066
3ff6dcac
YZ
1067#ifdef CONFIG_FAIR_GROUP_SCHED
1068# ifdef CONFIG_SMP
cf5f0acf
PZ
1069static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1070{
1071 long tg_weight;
1072
1073 /*
1074 * Use this CPU's actual weight instead of the last load_contribution
1075 * to gain a more accurate current total weight. See
1076 * update_cfs_rq_load_contribution().
1077 */
bf5b986e 1078 tg_weight = atomic_long_read(&tg->load_avg);
82958366 1079 tg_weight -= cfs_rq->tg_load_contrib;
cf5f0acf
PZ
1080 tg_weight += cfs_rq->load.weight;
1081
1082 return tg_weight;
1083}
1084
6d5ab293 1085static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac 1086{
cf5f0acf 1087 long tg_weight, load, shares;
3ff6dcac 1088
cf5f0acf 1089 tg_weight = calc_tg_weight(tg, cfs_rq);
6d5ab293 1090 load = cfs_rq->load.weight;
3ff6dcac 1091
3ff6dcac 1092 shares = (tg->shares * load);
cf5f0acf
PZ
1093 if (tg_weight)
1094 shares /= tg_weight;
3ff6dcac
YZ
1095
1096 if (shares < MIN_SHARES)
1097 shares = MIN_SHARES;
1098 if (shares > tg->shares)
1099 shares = tg->shares;
1100
1101 return shares;
1102}
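
/*
 * Editorial sketch (not part of fair.c): the shares computation above with
 * illustrative numbers. A group entity's weight on one cpu is tg->shares
 * scaled by that cpu's fraction of the group-wide load, clamped to
 * [MIN_SHARES, tg->shares].
 */
#include <stdio.h>

#define MIN_SHARES	2

int main(void)
{
	long tg_shares = 1024;	/* group's configured weight */
	long local_load = 512;	/* this cpu's cfs_rq load */
	long total_load = 2048;	/* tg_weight: load summed over all cpus */
	long shares = tg_shares * local_load / total_load;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;

	printf("per-cpu shares = %ld\n", shares);	/* 512/2048 -> 256 */
	return 0;
}
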
3ff6dcac 1103# else /* CONFIG_SMP */
6d5ab293 1104static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
1105{
1106 return tg->shares;
1107}
3ff6dcac 1108# endif /* CONFIG_SMP */
2069dd75
PZ
1109static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1110 unsigned long weight)
1111{
19e5eebb
PT
1112 if (se->on_rq) {
1113 /* commit outstanding execution time */
1114 if (cfs_rq->curr == se)
1115 update_curr(cfs_rq);
2069dd75 1116 account_entity_dequeue(cfs_rq, se);
19e5eebb 1117 }
2069dd75
PZ
1118
1119 update_load_set(&se->load, weight);
1120
1121 if (se->on_rq)
1122 account_entity_enqueue(cfs_rq, se);
1123}
1124
82958366
PT
1125static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1126
6d5ab293 1127static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1128{
1129 struct task_group *tg;
1130 struct sched_entity *se;
3ff6dcac 1131 long shares;
2069dd75 1132
2069dd75
PZ
1133 tg = cfs_rq->tg;
1134 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 1135 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 1136 return;
3ff6dcac
YZ
1137#ifndef CONFIG_SMP
1138 if (likely(se->load.weight == tg->shares))
1139 return;
1140#endif
6d5ab293 1141 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
1142
1143 reweight_entity(cfs_rq_of(se), se, shares);
1144}
1145#else /* CONFIG_FAIR_GROUP_SCHED */
6d5ab293 1146static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1147{
1148}
1149#endif /* CONFIG_FAIR_GROUP_SCHED */
1150
141965c7 1151#ifdef CONFIG_SMP
5b51f2f8
PT
1152/*
1153 * We choose a half-life close to 1 scheduling period.
1154 * Note: The tables below are dependent on this value.
1155 */
1156#define LOAD_AVG_PERIOD 32
1157#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1158#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1159
1160/* Precomputed fixed inverse multiplies for multiplication by y^n */
1161static const u32 runnable_avg_yN_inv[] = {
1162 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1163 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1164 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1165 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1166 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1167 0x85aac367, 0x82cd8698,
1168};
1169
1170/*
1171 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1172 * over-estimates when re-combining.
1173 */
1174static const u32 runnable_avg_yN_sum[] = {
1175 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1176 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1177 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1178};
1179
9d85f21c
PT
1180/*
1181 * Approximate:
1182 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1183 */
1184static __always_inline u64 decay_load(u64 val, u64 n)
1185{
5b51f2f8
PT
1186 unsigned int local_n;
1187
1188 if (!n)
1189 return val;
1190 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1191 return 0;
1192
1193 /* after bounds checking we can collapse to 32-bit */
1194 local_n = n;
1195
1196 /*
1197 * As y^PERIOD = 1/2, we can combine
 1198 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
 1199 * with a look-up table which covers y^n (n < PERIOD)
1200 *
1201 * To achieve constant time decay_load.
1202 */
1203 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1204 val >>= local_n / LOAD_AVG_PERIOD;
1205 local_n %= LOAD_AVG_PERIOD;
9d85f21c
PT
1206 }
1207
5b51f2f8
PT
1208 val *= runnable_avg_yN_inv[local_n];
1209 /* We don't use SRR here since we always want to round down. */
1210 return val >> 32;
1211}
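
/*
 * Editorial sketch (not part of fair.c): checking the table's defining
 * property, y^32 = 1/2, with a trimmed copy of decay_load(). Decaying
 * 1024 across 32 periods halves it (modulo flooring), and 16 periods
 * gives 1024/sqrt(2) ~= 724.
 */
#include <stdio.h>
#include <stdint.h>

#define PERIOD 32

static const uint32_t yN_inv[PERIOD] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

static uint64_t decay(uint64_t val, unsigned int n)
{
	val >>= n / PERIOD;	/* whole half-lives are plain shifts */
	return (val * yN_inv[n % PERIOD]) >> 32;
}

int main(void)
{
	printf("decay(1024, 32) = %llu (1024/2, floored)\n",
	       (unsigned long long)decay(1024, 32));
	printf("decay(1024, 16) = %llu (1024/sqrt(2))\n",
	       (unsigned long long)decay(1024, 16));
	return 0;
}
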
1212
1213/*
1214 * For updates fully spanning n periods, the contribution to runnable
1215 * average will be: \Sum 1024*y^n
1216 *
1217 * We can compute this reasonably efficiently by combining:
1218 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1219 */
1220static u32 __compute_runnable_contrib(u64 n)
1221{
1222 u32 contrib = 0;
1223
1224 if (likely(n <= LOAD_AVG_PERIOD))
1225 return runnable_avg_yN_sum[n];
1226 else if (unlikely(n >= LOAD_AVG_MAX_N))
1227 return LOAD_AVG_MAX;
1228
 1229 /* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
1230 do {
1231 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1232 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1233
1234 n -= LOAD_AVG_PERIOD;
1235 } while (n > LOAD_AVG_PERIOD);
1236
1237 contrib = decay_load(contrib, n);
1238 return contrib + runnable_avg_yN_sum[n];
9d85f21c
PT
1239}
1240
1241/*
1242 * We can represent the historical contribution to runnable average as the
1243 * coefficients of a geometric series. To do this we sub-divide our runnable
1244 * history into segments of approximately 1ms (1024us); label the segment that
1245 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1246 *
1247 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1248 * p0 p1 p2
1249 * (now) (~1ms ago) (~2ms ago)
1250 *
1251 * Let u_i denote the fraction of p_i that the entity was runnable.
1252 *
 1253 * We then designate the fractions u_i as our coefficients, yielding the
 1254 * following representation of historical load:
 1255 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 1256 *
 1257 * We choose y based on the width of a reasonable scheduling period, fixing:
1258 * y^32 = 0.5
1259 *
1260 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1261 * approximately half as much as the contribution to load within the last ms
1262 * (u_0).
1263 *
1264 * When a period "rolls over" and we have new u_0`, multiplying the previous
1265 * sum again by y is sufficient to update:
1266 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1267 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1268 */
1269static __always_inline int __update_entity_runnable_avg(u64 now,
1270 struct sched_avg *sa,
1271 int runnable)
1272{
5b51f2f8
PT
1273 u64 delta, periods;
1274 u32 runnable_contrib;
9d85f21c
PT
1275 int delta_w, decayed = 0;
1276
1277 delta = now - sa->last_runnable_update;
1278 /*
1279 * This should only happen when time goes backwards, which it
1280 * unfortunately does during sched clock init when we swap over to TSC.
1281 */
1282 if ((s64)delta < 0) {
1283 sa->last_runnable_update = now;
1284 return 0;
1285 }
1286
1287 /*
1288 * Use 1024ns as the unit of measurement since it's a reasonable
1289 * approximation of 1us and fast to compute.
1290 */
1291 delta >>= 10;
1292 if (!delta)
1293 return 0;
1294 sa->last_runnable_update = now;
1295
1296 /* delta_w is the amount already accumulated against our next period */
1297 delta_w = sa->runnable_avg_period % 1024;
1298 if (delta + delta_w >= 1024) {
1299 /* period roll-over */
1300 decayed = 1;
1301
1302 /*
1303 * Now that we know we're crossing a period boundary, figure
1304 * out how much from delta we need to complete the current
1305 * period and accrue it.
1306 */
1307 delta_w = 1024 - delta_w;
5b51f2f8
PT
1308 if (runnable)
1309 sa->runnable_avg_sum += delta_w;
1310 sa->runnable_avg_period += delta_w;
1311
1312 delta -= delta_w;
1313
1314 /* Figure out how many additional periods this update spans */
1315 periods = delta / 1024;
1316 delta %= 1024;
1317
1318 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1319 periods + 1);
1320 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1321 periods + 1);
1322
1323 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1324 runnable_contrib = __compute_runnable_contrib(periods);
1325 if (runnable)
1326 sa->runnable_avg_sum += runnable_contrib;
1327 sa->runnable_avg_period += runnable_contrib;
9d85f21c
PT
1328 }
1329
1330 /* Remainder of delta accrued against u_0` */
1331 if (runnable)
1332 sa->runnable_avg_sum += delta;
1333 sa->runnable_avg_period += delta;
1334
1335 return decayed;
1336}
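
/*
 * Editorial sketch (not part of fair.c): the steady state the
 * accumulate-and-decay loop above converges to. An always-runnable entity
 * gains 1024 per 1024us period while older history decays by y per
 * period, so the sum approaches 1024/(1-y). This floating-point version
 * lands slightly above the kernel's floored constant LOAD_AVG_MAX = 47742.
 */
#include <stdio.h>

int main(void)
{
	const double y = 0.97857206;	/* y such that y^32 = 0.5 */
	double sum = 0.0;
	int period;

	for (period = 0; period < 345; period++)	/* LOAD_AVG_MAX_N */
		sum = sum * y + 1024.0;

	printf("sum after 345 periods = %.0f (kernel max: 47742)\n", sum);
	return 0;
}
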
1337
9ee474f5 1338/* Synchronize an entity's decay with its parenting cfs_rq.*/
aff3e498 1339static inline u64 __synchronize_entity_decay(struct sched_entity *se)
9ee474f5
PT
1340{
1341 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1342 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1343
1344 decays -= se->avg.decay_count;
1345 if (!decays)
aff3e498 1346 return 0;
9ee474f5
PT
1347
1348 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1349 se->avg.decay_count = 0;
aff3e498
PT
1350
1351 return decays;
9ee474f5
PT
1352}
1353
c566e8e9
PT
1354#ifdef CONFIG_FAIR_GROUP_SCHED
1355static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1356 int force_update)
1357{
1358 struct task_group *tg = cfs_rq->tg;
bf5b986e 1359 long tg_contrib;
c566e8e9
PT
1360
1361 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1362 tg_contrib -= cfs_rq->tg_load_contrib;
1363
bf5b986e
AS
1364 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1365 atomic_long_add(tg_contrib, &tg->load_avg);
c566e8e9
PT
1366 cfs_rq->tg_load_contrib += tg_contrib;
1367 }
1368}
8165e145 1369
bb17f655
PT
1370/*
1371 * Aggregate cfs_rq runnable averages into an equivalent task_group
1372 * representation for computing load contributions.
1373 */
1374static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1375 struct cfs_rq *cfs_rq)
1376{
1377 struct task_group *tg = cfs_rq->tg;
1378 long contrib;
1379
1380 /* The fraction of a cpu used by this cfs_rq */
1381 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1382 sa->runnable_avg_period + 1);
1383 contrib -= cfs_rq->tg_runnable_contrib;
1384
1385 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1386 atomic_add(contrib, &tg->runnable_avg);
1387 cfs_rq->tg_runnable_contrib += contrib;
1388 }
1389}
1390
8165e145
PT
1391static inline void __update_group_entity_contrib(struct sched_entity *se)
1392{
1393 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1394 struct task_group *tg = cfs_rq->tg;
bb17f655
PT
1395 int runnable_avg;
1396
8165e145
PT
1397 u64 contrib;
1398
1399 contrib = cfs_rq->tg_load_contrib * tg->shares;
bf5b986e
AS
1400 se->avg.load_avg_contrib = div_u64(contrib,
1401 atomic_long_read(&tg->load_avg) + 1);
bb17f655
PT
1402
1403 /*
1404 * For group entities we need to compute a correction term in the case
1405 * that they are consuming <1 cpu so that we would contribute the same
1406 * load as a task of equal weight.
1407 *
1408 * Explicitly co-ordinating this measurement would be expensive, but
 1409 * fortunately the sum of each cpu's contribution forms a usable
1410 * lower-bound on the true value.
1411 *
1412 * Consider the aggregate of 2 contributions. Either they are disjoint
 1413 * (and the sum represents the true value) or they overlap and we are
1414 * understating by the aggregate of their overlap.
1415 *
1416 * Extending this to N cpus, for a given overlap, the maximum amount we
 1417 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1418 * cpus that overlap for this interval and w_i is the interval width.
1419 *
 1420 * On a small machine the first term is well-bounded, which bounds the
 1421 * total error since w_i is a subset of the period. Whereas on a
 1422 * larger machine, while this first term can be larger, any w_i of
 1423 * consequential size is guaranteed to see n_i*w_i quickly converge to
 1424 * our upper bound of 1-cpu.
1425 */
1426 runnable_avg = atomic_read(&tg->runnable_avg);
1427 if (runnable_avg < NICE_0_LOAD) {
1428 se->avg.load_avg_contrib *= runnable_avg;
1429 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1430 }
8165e145 1431}
c566e8e9
PT
1432#else
1433static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1434 int force_update) {}
bb17f655
PT
1435static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1436 struct cfs_rq *cfs_rq) {}
8165e145 1437static inline void __update_group_entity_contrib(struct sched_entity *se) {}
c566e8e9
PT
1438#endif
1439
8165e145
PT
1440static inline void __update_task_entity_contrib(struct sched_entity *se)
1441{
1442 u32 contrib;
1443
1444 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1445 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1446 contrib /= (se->avg.runnable_avg_period + 1);
1447 se->avg.load_avg_contrib = scale_load(contrib);
1448}
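
/*
 * Editorial sketch (not part of fair.c): the contribution formula above
 * with illustrative numbers. A task's load_avg_contrib is its weight
 * scaled by the runnable fraction of its tracked history.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t runnable_avg_sum = 23871;	/* runnable half the time */
	uint32_t runnable_avg_period = 47742;	/* fully built-up history */
	uint32_t weight = 1024;			/* NICE_0_LOAD */
	uint32_t contrib = (uint32_t)((uint64_t)runnable_avg_sum * weight /
				      (runnable_avg_period + 1));

	printf("load_avg_contrib = %u\n", contrib);	/* about weight/2 */
	return 0;
}
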
1449
2dac754e
PT
1450/* Compute the current contribution to load_avg by se, return any delta */
1451static long __update_entity_load_avg_contrib(struct sched_entity *se)
1452{
1453 long old_contrib = se->avg.load_avg_contrib;
1454
8165e145
PT
1455 if (entity_is_task(se)) {
1456 __update_task_entity_contrib(se);
1457 } else {
bb17f655 1458 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
8165e145
PT
1459 __update_group_entity_contrib(se);
1460 }
2dac754e
PT
1461
1462 return se->avg.load_avg_contrib - old_contrib;
1463}
1464
9ee474f5
PT
1465static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1466 long load_contrib)
1467{
1468 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1469 cfs_rq->blocked_load_avg -= load_contrib;
1470 else
1471 cfs_rq->blocked_load_avg = 0;
1472}
1473
f1b17280
PT
1474static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1475
9d85f21c 1476/* Update a sched_entity's runnable average */
9ee474f5
PT
1477static inline void update_entity_load_avg(struct sched_entity *se,
1478 int update_cfs_rq)
9d85f21c 1479{
2dac754e
PT
1480 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1481 long contrib_delta;
f1b17280 1482 u64 now;
2dac754e 1483
f1b17280
PT
1484 /*
1485 * For a group entity we need to use their owned cfs_rq_clock_task() in
1486 * case they are the parent of a throttled hierarchy.
1487 */
1488 if (entity_is_task(se))
1489 now = cfs_rq_clock_task(cfs_rq);
1490 else
1491 now = cfs_rq_clock_task(group_cfs_rq(se));
1492
1493 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2dac754e
PT
1494 return;
1495
1496 contrib_delta = __update_entity_load_avg_contrib(se);
9ee474f5
PT
1497
1498 if (!update_cfs_rq)
1499 return;
1500
2dac754e
PT
1501 if (se->on_rq)
1502 cfs_rq->runnable_load_avg += contrib_delta;
9ee474f5
PT
1503 else
1504 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1505}
1506
1507/*
1508 * Decay the load contributed by all blocked children and account this so that
 1509 * their contribution may be appropriately discounted when they wake up.
1510 */
aff3e498 1511static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
9ee474f5 1512{
f1b17280 1513 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
9ee474f5
PT
1514 u64 decays;
1515
1516 decays = now - cfs_rq->last_decay;
aff3e498 1517 if (!decays && !force_update)
9ee474f5
PT
1518 return;
1519
2509940f
AS
1520 if (atomic_long_read(&cfs_rq->removed_load)) {
1521 unsigned long removed_load;
1522 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
aff3e498
PT
1523 subtract_blocked_load_contrib(cfs_rq, removed_load);
1524 }
9ee474f5 1525
aff3e498
PT
1526 if (decays) {
1527 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1528 decays);
1529 atomic64_add(decays, &cfs_rq->decay_counter);
1530 cfs_rq->last_decay = now;
1531 }
c566e8e9
PT
1532
1533 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
9d85f21c 1534}
18bf2805
BS
1535
1536static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1537{
78becc27 1538 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
bb17f655 1539 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
18bf2805 1540}
2dac754e
PT
1541
1542/* Add the load generated by se into cfs_rq's child load-average */
1543static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1544 struct sched_entity *se,
1545 int wakeup)
2dac754e 1546{
aff3e498
PT
1547 /*
1548 * We track migrations using entity decay_count <= 0, on a wake-up
1549 * migration we use a negative decay count to track the remote decays
1550 * accumulated while sleeping.
a75cdaa9
AS
1551 *
1552 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1553 * are seen by enqueue_entity_load_avg() as a migration with an already
1554 * constructed load_avg_contrib.
aff3e498
PT
1555 */
1556 if (unlikely(se->avg.decay_count <= 0)) {
78becc27 1557 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
aff3e498
PT
1558 if (se->avg.decay_count) {
1559 /*
1560 * In a wake-up migration we have to approximate the
1561 * time sleeping. This is because we can't synchronize
1562 * clock_task between the two cpus, and it is not
1563 * guaranteed to be read-safe. Instead, we can
1564 * approximate this using our carried decays, which are
1565 * explicitly atomically readable.
1566 */
1567 se->avg.last_runnable_update -= (-se->avg.decay_count)
1568 << 20;
1569 update_entity_load_avg(se, 0);
1570 /* Indicate that we're now synchronized and on-rq */
1571 se->avg.decay_count = 0;
1572 }
9ee474f5
PT
1573 wakeup = 0;
1574 } else {
282cf499
AS
1575 /*
1576 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1577 * would have made count negative); we must be careful to avoid
1578 * double-accounting blocked time after synchronizing decays.
1579 */
1580 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1581 << 20;
9ee474f5
PT
1582 }
1583
aff3e498
PT
1584 /* migrated tasks did not contribute to our blocked load */
1585 if (wakeup) {
9ee474f5 1586 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
aff3e498
PT
1587 update_entity_load_avg(se, 0);
1588 }
9ee474f5 1589
2dac754e 1590 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
aff3e498
PT
1591 /* we force update consideration on load-balancer moves */
1592 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2dac754e
PT
1593}
1594
9ee474f5
PT
1595/*
1596 * Remove se's load from this cfs_rq child load-average, if the entity is
1597 * transitioning to a blocked state we track its projected decay using
1598 * blocked_load_avg.
1599 */
2dac754e 1600static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1601 struct sched_entity *se,
1602 int sleep)
2dac754e 1603{
9ee474f5 1604 update_entity_load_avg(se, 1);
aff3e498
PT
1605 /* we force update consideration on load-balancer moves */
1606 update_cfs_rq_blocked_load(cfs_rq, !sleep);
9ee474f5 1607
2dac754e 1608 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
9ee474f5
PT
1609 if (sleep) {
1610 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1611 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1612 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2dac754e 1613}
642dbc39
VG
1614
1615/*
1616 * Update the rq's load with the elapsed running time before entering
 1617 * idle. If the last scheduled task is not a CFS task, idle_enter will
1618 * be the only way to update the runnable statistic.
1619 */
1620void idle_enter_fair(struct rq *this_rq)
1621{
1622 update_rq_runnable_avg(this_rq, 1);
1623}
1624
1625/*
1626 * Update the rq's load with the elapsed idle time before a task is
 1627 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1628 * be the only way to update the runnable statistic.
1629 */
1630void idle_exit_fair(struct rq *this_rq)
1631{
1632 update_rq_runnable_avg(this_rq, 0);
1633}
1634
9d85f21c 1635#else
9ee474f5
PT
1636static inline void update_entity_load_avg(struct sched_entity *se,
1637 int update_cfs_rq) {}
18bf2805 1638static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2dac754e 1639static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1640 struct sched_entity *se,
1641 int wakeup) {}
2dac754e 1642static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1643 struct sched_entity *se,
1644 int sleep) {}
aff3e498
PT
1645static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1646 int force_update) {}
9d85f21c
PT
1647#endif
1648
2396af69 1649static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1650{
bf0f6f24 1651#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
1652 struct task_struct *tsk = NULL;
1653
1654 if (entity_is_task(se))
1655 tsk = task_of(se);
1656
41acab88 1657 if (se->statistics.sleep_start) {
78becc27 1658 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
bf0f6f24
IM
1659
1660 if ((s64)delta < 0)
1661 delta = 0;
1662
41acab88
LDM
1663 if (unlikely(delta > se->statistics.sleep_max))
1664 se->statistics.sleep_max = delta;
bf0f6f24 1665
8c79a045 1666 se->statistics.sleep_start = 0;
41acab88 1667 se->statistics.sum_sleep_runtime += delta;
9745512c 1668
768d0c27 1669 if (tsk) {
e414314c 1670 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
1671 trace_sched_stat_sleep(tsk, delta);
1672 }
bf0f6f24 1673 }
41acab88 1674 if (se->statistics.block_start) {
78becc27 1675 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
bf0f6f24
IM
1676
1677 if ((s64)delta < 0)
1678 delta = 0;
1679
41acab88
LDM
1680 if (unlikely(delta > se->statistics.block_max))
1681 se->statistics.block_max = delta;
bf0f6f24 1682
8c79a045 1683 se->statistics.block_start = 0;
41acab88 1684 se->statistics.sum_sleep_runtime += delta;
30084fbd 1685
e414314c 1686 if (tsk) {
8f0dfc34 1687 if (tsk->in_iowait) {
41acab88
LDM
1688 se->statistics.iowait_sum += delta;
1689 se->statistics.iowait_count++;
768d0c27 1690 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
1691 }
1692
b781a602
AV
1693 trace_sched_stat_blocked(tsk, delta);
1694
e414314c
PZ
1695 /*
1696 * Blocking time is in units of nanosecs, so shift by
1697 * 20 to get a milliseconds-range estimation of the
1698 * amount of time that the task spent sleeping:
1699 */
1700 if (unlikely(prof_on == SLEEP_PROFILING)) {
1701 profile_hits(SLEEP_PROFILING,
1702 (void *)get_wchan(tsk),
1703 delta >> 20);
1704 }
1705 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 1706 }
bf0f6f24
IM
1707 }
1708#endif
1709}
1710
ddc97297
PZ
1711static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1712{
1713#ifdef CONFIG_SCHED_DEBUG
1714 s64 d = se->vruntime - cfs_rq->min_vruntime;
1715
1716 if (d < 0)
1717 d = -d;
1718
1719 if (d > 3*sysctl_sched_latency)
1720 schedstat_inc(cfs_rq, nr_spread_over);
1721#endif
1722}
1723
aeb73b04
PZ
1724static void
1725place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1726{
1af5f730 1727 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 1728
2cb8600e
PZ
1729 /*
1730 * The 'current' period is already promised to the current tasks,
1731 * however the extra weight of the new task will slow them down a
1732 * little, place the new task so that it fits in the slot that
1733 * stays open at the end.
1734 */
94dfb5e7 1735 if (initial && sched_feat(START_DEBIT))
f9c0b095 1736 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 1737
a2e7a7eb 1738 /* sleeps up to a single latency don't count. */
5ca9880c 1739 if (!initial) {
a2e7a7eb 1740 unsigned long thresh = sysctl_sched_latency;
a7be37ac 1741
a2e7a7eb
MG
1742 /*
1743 * Halve their sleep time's effect, to allow
1744 * for a gentler effect of sleepers:
1745 */
1746 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1747 thresh >>= 1;
51e0304c 1748
a2e7a7eb 1749 vruntime -= thresh;
aeb73b04
PZ
1750 }
1751
b5d9d734 1752 /* ensure we never gain time by being placed backwards. */
16c8f1c7 1753 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
1754}
1755
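A hedged userspace sketch of the placement policy in place_entity(): sched_vslice() is stubbed as a 1 ms constant and the vruntime values are made up, so this only shows the shape of the arithmetic, not real scheduler state.

/*
 * Illustration only: START_DEBIT pushes a forked entity past the current
 * slot; a woken sleeper gets up to half a latency period of credit.
 */
#include <stdio.h>
#include <stdint.h>

#define SCHED_LATENCY_NS	6000000ULL	/* sysctl_sched_latency default */

static uint64_t place(uint64_t min_vruntime, uint64_t se_vruntime, int initial)
{
	uint64_t vruntime = min_vruntime;

	if (initial) {
		vruntime += 1000000;		/* START_DEBIT: stub for sched_vslice() */
	} else {
		uint64_t thresh = SCHED_LATENCY_NS;

		thresh >>= 1;			/* GENTLE_FAIR_SLEEPERS */
		vruntime -= thresh;		/* sleeper credit */
	}

	/* never gain time by being placed backwards */
	return se_vruntime > vruntime ? se_vruntime : vruntime;
}

int main(void)
{
	printf("wakeup: %llu\n", (unsigned long long)place(100000000ULL, 90000000ULL, 0));
	printf("fork:   %llu\n", (unsigned long long)place(100000000ULL, 0ULL, 1));
	return 0;
}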
d3d9dc33
PT
1756static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1757
bf0f6f24 1758static void
88ec22d3 1759enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1760{
88ec22d3
PZ
1761 /*
1762 * Update the normalized vruntime before updating min_vruntime
0fc576d5 1763 * through calling update_curr().
88ec22d3 1764 */
371fd7e7 1765 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
1766 se->vruntime += cfs_rq->min_vruntime;
1767
bf0f6f24 1768 /*
a2a2d680 1769 * Update run-time statistics of the 'current'.
bf0f6f24 1770 */
b7cc0896 1771 update_curr(cfs_rq);
f269ae04 1772 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
17bc14b7
LT
1773 account_entity_enqueue(cfs_rq, se);
1774 update_cfs_shares(cfs_rq);
bf0f6f24 1775
88ec22d3 1776 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 1777 place_entity(cfs_rq, se, 0);
2396af69 1778 enqueue_sleeper(cfs_rq, se);
e9acbff6 1779 }
bf0f6f24 1780
d2417e5a 1781 update_stats_enqueue(cfs_rq, se);
ddc97297 1782 check_spread(cfs_rq, se);
83b699ed
SV
1783 if (se != cfs_rq->curr)
1784 __enqueue_entity(cfs_rq, se);
2069dd75 1785 se->on_rq = 1;
3d4b47b4 1786
d3d9dc33 1787 if (cfs_rq->nr_running == 1) {
3d4b47b4 1788 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
1789 check_enqueue_throttle(cfs_rq);
1790 }
bf0f6f24
IM
1791}
1792
2c13c919 1793static void __clear_buddies_last(struct sched_entity *se)
2002c695 1794{
2c13c919
RR
1795 for_each_sched_entity(se) {
1796 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1797 if (cfs_rq->last == se)
1798 cfs_rq->last = NULL;
1799 else
1800 break;
1801 }
1802}
2002c695 1803
2c13c919
RR
1804static void __clear_buddies_next(struct sched_entity *se)
1805{
1806 for_each_sched_entity(se) {
1807 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1808 if (cfs_rq->next == se)
1809 cfs_rq->next = NULL;
1810 else
1811 break;
1812 }
2002c695
PZ
1813}
1814
ac53db59
RR
1815static void __clear_buddies_skip(struct sched_entity *se)
1816{
1817 for_each_sched_entity(se) {
1818 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1819 if (cfs_rq->skip == se)
1820 cfs_rq->skip = NULL;
1821 else
1822 break;
1823 }
1824}
1825
a571bbea
PZ
1826static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1827{
2c13c919
RR
1828 if (cfs_rq->last == se)
1829 __clear_buddies_last(se);
1830
1831 if (cfs_rq->next == se)
1832 __clear_buddies_next(se);
ac53db59
RR
1833
1834 if (cfs_rq->skip == se)
1835 __clear_buddies_skip(se);
a571bbea
PZ
1836}
1837
6c16a6dc 1838static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 1839
bf0f6f24 1840static void
371fd7e7 1841dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1842{
a2a2d680
DA
1843 /*
1844 * Update run-time statistics of the 'current'.
1845 */
1846 update_curr(cfs_rq);
17bc14b7 1847 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 1848
19b6a2e3 1849 update_stats_dequeue(cfs_rq, se);
371fd7e7 1850 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 1851#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
1852 if (entity_is_task(se)) {
1853 struct task_struct *tsk = task_of(se);
1854
1855 if (tsk->state & TASK_INTERRUPTIBLE)
78becc27 1856 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 1857 if (tsk->state & TASK_UNINTERRUPTIBLE)
78becc27 1858 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 1859 }
db36cc7d 1860#endif
67e9fb2a
PZ
1861 }
1862
2002c695 1863 clear_buddies(cfs_rq, se);
4793241b 1864
83b699ed 1865 if (se != cfs_rq->curr)
30cfdcfc 1866 __dequeue_entity(cfs_rq, se);
17bc14b7 1867 se->on_rq = 0;
30cfdcfc 1868 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
1869
1870 /*
1871 * Normalize the entity after updating the min_vruntime because the
1872 * update can refer to the ->curr item and we need to reflect this
1873 * movement in our normalized position.
1874 */
371fd7e7 1875 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 1876 se->vruntime -= cfs_rq->min_vruntime;
1e876231 1877
d8b4986d
PT
1878 /* return excess runtime on last dequeue */
1879 return_cfs_rq_runtime(cfs_rq);
1880
1e876231 1881 update_min_vruntime(cfs_rq);
17bc14b7 1882 update_cfs_shares(cfs_rq);
bf0f6f24
IM
1883}
1884
1885/*
1886 * Preempt the current task with a newly woken task if needed:
1887 */
7c92e54f 1888static void
2e09bf55 1889check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 1890{
11697830 1891 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
1892 struct sched_entity *se;
1893 s64 delta;
11697830 1894
6d0f0ebd 1895 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 1896 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 1897 if (delta_exec > ideal_runtime) {
bf0f6f24 1898 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
1899 /*
1900 * The current task ran long enough, ensure it doesn't get
1901 * re-elected due to buddy favours.
1902 */
1903 clear_buddies(cfs_rq, curr);
f685ceac
MG
1904 return;
1905 }
1906
1907 /*
1908 * Ensure that a task that missed wakeup preemption by a
1909 * narrow margin doesn't have to wait for a full slice.
1910 * This also mitigates buddy induced latencies under load.
1911 */
f685ceac
MG
1912 if (delta_exec < sysctl_sched_min_granularity)
1913 return;
1914
f4cfb33e
WX
1915 se = __pick_first_entity(cfs_rq);
1916 delta = curr->vruntime - se->vruntime;
f685ceac 1917
f4cfb33e
WX
1918 if (delta < 0)
1919 return;
d7d82944 1920
f4cfb33e
WX
1921 if (delta > ideal_runtime)
1922 resched_task(rq_of(cfs_rq)->curr);
bf0f6f24
IM
1923}
1924
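The decision order in check_preempt_tick() matters: slice exhaustion is checked first, then the minimum-granularity floor, then the vruntime gap to the leftmost entity. A standalone sketch with illustrative values only:

/* Illustration only: the preemption decision order of check_preempt_tick(). */
#include <stdio.h>
#include <stdint.h>

#define MIN_GRANULARITY	750000LL	/* 0.75 ms, default */

static const char *tick_decision(int64_t delta_exec, int64_t ideal_runtime,
				 int64_t curr_vruntime, int64_t first_vruntime)
{
	int64_t delta;

	if (delta_exec > ideal_runtime)
		return "resched: slice exhausted";
	if (delta_exec < MIN_GRANULARITY)
		return "keep running: below min granularity";
	delta = curr_vruntime - first_vruntime;
	if (delta < 0)
		return "keep running: still leftmost";
	if (delta > ideal_runtime)
		return "resched: leftmost task fell too far behind";
	return "keep running";
}

int main(void)
{
	printf("%s\n", tick_decision(4000000, 3000000, 0, 0));
	printf("%s\n", tick_decision(500000, 3000000, 9000000, 1000000));
	printf("%s\n", tick_decision(2000000, 3000000, 9000000, 1000000));
	return 0;
}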
83b699ed 1925static void
8494f412 1926set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1927{
83b699ed
SV
1928 /* 'current' is not kept within the tree. */
1929 if (se->on_rq) {
1930 /*
1931 * Any task has to be enqueued before it gets to execute on
1932 * a CPU. So account for the time it spent waiting on the
1933 * runqueue.
1934 */
1935 update_stats_wait_end(cfs_rq, se);
1936 __dequeue_entity(cfs_rq, se);
1937 }
1938
79303e9e 1939 update_stats_curr_start(cfs_rq, se);
429d43bc 1940 cfs_rq->curr = se;
eba1ed4b
IM
1941#ifdef CONFIG_SCHEDSTATS
1942 /*
1943 * Track our maximum slice length, if the CPU's load is at
1944 * least twice that of our own weight (i.e. don't track it
1945 * when there are only lesser-weight tasks around):
1946 */
495eca49 1947 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 1948 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
1949 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1950 }
1951#endif
4a55b450 1952 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
1953}
1954
3f3a4904
PZ
1955static int
1956wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1957
ac53db59
RR
1958/*
1959 * Pick the next process, keeping these things in mind, in this order:
1960 * 1) keep things fair between processes/task groups
1961 * 2) pick the "next" process, since someone really wants that to run
1962 * 3) pick the "last" process, for cache locality
1963 * 4) do not run the "skip" process, if something else is available
1964 */
f4b6755f 1965static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 1966{
ac53db59 1967 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 1968 struct sched_entity *left = se;
f4b6755f 1969
ac53db59
RR
1970 /*
1971 * Avoid running the skip buddy, if running something else can
1972 * be done without getting too unfair.
1973 */
1974 if (cfs_rq->skip == se) {
1975 struct sched_entity *second = __pick_next_entity(se);
1976 if (second && wakeup_preempt_entity(second, left) < 1)
1977 se = second;
1978 }
aa2ac252 1979
f685ceac
MG
1980 /*
1981 * Prefer last buddy, try to return the CPU to a preempted task.
1982 */
1983 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1984 se = cfs_rq->last;
1985
ac53db59
RR
1986 /*
1987 * Someone really wants this to run. If it's not unfair, run it.
1988 */
1989 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1990 se = cfs_rq->next;
1991
f685ceac 1992 clear_buddies(cfs_rq, se);
4793241b
PZ
1993
1994 return se;
aa2ac252
PZ
1995}
1996
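A minimal sketch of the preference order in pick_next_entity(), with wakeup_preempt_entity() replaced by a stand-in fairness check; the entity type, names, and the 1 ms threshold are all illustrative assumptions:

/* Illustration only: skip avoidance, then last buddy, then next buddy. */
#include <stdio.h>

struct ent { const char *name; long vruntime; };

/* stand-in: allow a buddy if it is not "too unfair" vs. the leftmost */
static int not_too_unfair(struct ent *buddy, struct ent *left)
{
	return buddy->vruntime - left->vruntime < 1000000;
}

static struct ent *pick(struct ent *left, struct ent *second,
			struct ent *skip, struct ent *last, struct ent *next)
{
	struct ent *se = left;

	if (skip == se && second && not_too_unfair(second, left))
		se = second;			/* avoid the skip buddy */
	if (last && not_too_unfair(last, left))
		se = last;			/* cache locality */
	if (next && not_too_unfair(next, left))
		se = next;			/* someone really wants this */
	return se;
}

int main(void)
{
	struct ent a = { "leftmost", 100 }, b = { "second", 200 };
	struct ent n = { "next-buddy", 500 };

	printf("picked: %s\n", pick(&a, &b, /*skip*/&a, /*last*/NULL, &n)->name);
	return 0;
}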
d3d9dc33
PT
1997static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1998
ab6cde26 1999static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
2000{
2001 /*
2002 * If still on the runqueue then deactivate_task()
2003 * was not called and update_curr() has to be done:
2004 */
2005 if (prev->on_rq)
b7cc0896 2006 update_curr(cfs_rq);
bf0f6f24 2007
d3d9dc33
PT
2008 /* throttle cfs_rqs exceeding runtime */
2009 check_cfs_rq_runtime(cfs_rq);
2010
ddc97297 2011 check_spread(cfs_rq, prev);
30cfdcfc 2012 if (prev->on_rq) {
5870db5b 2013 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
2014 /* Put 'current' back into the tree. */
2015 __enqueue_entity(cfs_rq, prev);
9d85f21c 2016 /* in !on_rq case, update occurred at dequeue */
9ee474f5 2017 update_entity_load_avg(prev, 1);
30cfdcfc 2018 }
429d43bc 2019 cfs_rq->curr = NULL;
bf0f6f24
IM
2020}
2021
8f4d37ec
PZ
2022static void
2023entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 2024{
bf0f6f24 2025 /*
30cfdcfc 2026 * Update run-time statistics of the 'current'.
bf0f6f24 2027 */
30cfdcfc 2028 update_curr(cfs_rq);
bf0f6f24 2029
9d85f21c
PT
2030 /*
2031 * Ensure that runnable average is periodically updated.
2032 */
9ee474f5 2033 update_entity_load_avg(curr, 1);
aff3e498 2034 update_cfs_rq_blocked_load(cfs_rq, 1);
9d85f21c 2035
8f4d37ec
PZ
2036#ifdef CONFIG_SCHED_HRTICK
2037 /*
2038 * queued ticks are scheduled to match the slice, so don't bother
2039 * validating it and just reschedule.
2040 */
983ed7a6
HH
2041 if (queued) {
2042 resched_task(rq_of(cfs_rq)->curr);
2043 return;
2044 }
8f4d37ec
PZ
2045 /*
2046 * don't let the period tick interfere with the hrtick preemption
2047 */
2048 if (!sched_feat(DOUBLE_TICK) &&
2049 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2050 return;
2051#endif
2052
2c2efaed 2053 if (cfs_rq->nr_running > 1)
2e09bf55 2054 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
2055}
2056
ab84d31e
PT
2057
2058/**************************************************
2059 * CFS bandwidth control machinery
2060 */
2061
2062#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
2063
2064#ifdef HAVE_JUMP_LABEL
c5905afb 2065static struct static_key __cfs_bandwidth_used;
029632fb
PZ
2066
2067static inline bool cfs_bandwidth_used(void)
2068{
c5905afb 2069 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
2070}
2071
2072void account_cfs_bandwidth_used(int enabled, int was_enabled)
2073{
2074 /* only need to count groups transitioning between enabled/!enabled */
2075 if (enabled && !was_enabled)
c5905afb 2076 static_key_slow_inc(&__cfs_bandwidth_used);
029632fb 2077 else if (!enabled && was_enabled)
c5905afb 2078 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
2079}
2080#else /* HAVE_JUMP_LABEL */
2081static bool cfs_bandwidth_used(void)
2082{
2083 return true;
2084}
2085
2086void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2087#endif /* HAVE_JUMP_LABEL */
2088
ab84d31e
PT
2089/*
2090 * default period for cfs group bandwidth.
2091 * default: 0.1s, units: nanoseconds
2092 */
2093static inline u64 default_cfs_period(void)
2094{
2095 return 100000000ULL;
2096}
ec12cb7f
PT
2097
2098static inline u64 sched_cfs_bandwidth_slice(void)
2099{
2100 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2101}
2102
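Assuming the documented defaults (100 ms period, and a 5 ms sysctl_sched_cfs_bandwidth_slice), a 50 ms quota allows ten slice grabs per period before the global pool is exhausted. A rough userspace sketch of that arithmetic; the quota value is made up:

/* Illustration only: global-pool refill vs. per-cpu slice consumption. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000ULL
#define PERIOD_NS	100000000ULL		/* default_cfs_period() */
#define SLICE_US	5000ULL			/* assumed slice sysctl default */

int main(void)
{
	uint64_t quota = 50000000ULL;		/* 50 ms quota per period */
	uint64_t slice = SLICE_US * NSEC_PER_USEC;
	uint64_t pool = quota;			/* refilled each period */
	int grabs = 0;

	while (pool >= slice) {			/* cfs_rqs pulling runtime */
		pool -= slice;
		grabs++;
	}
	printf("%d slice grabs of %llu ns per %llu ns period\n",
	       grabs, (unsigned long long)slice,
	       (unsigned long long)PERIOD_NS);
	return 0;
}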
a9cf55b2
PT
2103/*
2104 * Replenish runtime according to assigned quota and update expiration time.
2105 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2106 * additional synchronization around rq->lock.
2107 *
2108 * requires cfs_b->lock
2109 */
029632fb 2110void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
2111{
2112 u64 now;
2113
2114 if (cfs_b->quota == RUNTIME_INF)
2115 return;
2116
2117 now = sched_clock_cpu(smp_processor_id());
2118 cfs_b->runtime = cfs_b->quota;
2119 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2120}
2121
029632fb
PZ
2122static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2123{
2124 return &tg->cfs_bandwidth;
2125}
2126
f1b17280
PT
2127/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2128static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2129{
2130 if (unlikely(cfs_rq->throttle_count))
2131 return cfs_rq->throttled_clock_task;
2132
78becc27 2133 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
2134}
2135
85dac906
PT
2136/* returns 0 on failure to allocate runtime */
2137static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
2138{
2139 struct task_group *tg = cfs_rq->tg;
2140 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 2141 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
2142
2143 /* note: this is a positive sum as runtime_remaining <= 0 */
2144 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2145
2146 raw_spin_lock(&cfs_b->lock);
2147 if (cfs_b->quota == RUNTIME_INF)
2148 amount = min_amount;
58088ad0 2149 else {
a9cf55b2
PT
2150 /*
2151 * If the bandwidth pool has become inactive, then at least one
2152 * period must have elapsed since the last consumption.
2153 * Refresh the global state and ensure bandwidth timer becomes
2154 * active.
2155 */
2156 if (!cfs_b->timer_active) {
2157 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 2158 __start_cfs_bandwidth(cfs_b);
a9cf55b2 2159 }
58088ad0
PT
2160
2161 if (cfs_b->runtime > 0) {
2162 amount = min(cfs_b->runtime, min_amount);
2163 cfs_b->runtime -= amount;
2164 cfs_b->idle = 0;
2165 }
ec12cb7f 2166 }
a9cf55b2 2167 expires = cfs_b->runtime_expires;
ec12cb7f
PT
2168 raw_spin_unlock(&cfs_b->lock);
2169
2170 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
2171 /*
2172 * we may have advanced our local expiration to account for allowed
2173 * spread between our sched_clock and the one on which runtime was
2174 * issued.
2175 */
2176 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2177 cfs_rq->runtime_expires = expires;
85dac906
PT
2178
2179 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
2180}
2181
a9cf55b2
PT
2182/*
2183 * Note: This depends on the synchronization provided by sched_clock and the
2184 * fact that rq->clock snapshots this value.
2185 */
2186static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 2187{
a9cf55b2 2188 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
2189
2190 /* if the deadline is ahead of our clock, nothing to do */
78becc27 2191 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
2192 return;
2193
a9cf55b2
PT
2194 if (cfs_rq->runtime_remaining < 0)
2195 return;
2196
2197 /*
2198 * If the local deadline has passed we have to consider the
2199 * possibility that our sched_clock is 'fast' and the global deadline
2200 * has not truly expired.
2201 *
2202 * Fortunately we can determine whether this is the case by checking
2203 * whether the global deadline has advanced.
2204 */
2205
2206 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2207 /* extend local deadline, drift is bounded above by 2 ticks */
2208 cfs_rq->runtime_expires += TICK_NSEC;
2209 } else {
2210 /* global deadline is ahead, expiration has passed */
2211 cfs_rq->runtime_remaining = 0;
2212 }
2213}
2214
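The comparisons above rely on the signed-difference idiom for free-running clocks: casting the u64 difference to s64 stays correct across wraparound, where a plain ordered compare would not. A small demonstration, illustration only:

#include <stdio.h>
#include <stdint.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;		/* a is earlier than b */
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;

	printf("%d\n", before(100, 200));		   /* 1: earlier */
	printf("%d\n", before(near_wrap, near_wrap + 10)); /* 1: wraps, still earlier */
	printf("%d\n", (near_wrap < near_wrap + 10));	   /* 0: naive compare breaks */
	return 0;
}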
2215static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2216 unsigned long delta_exec)
2217{
2218 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 2219 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
2220 expire_cfs_rq_runtime(cfs_rq);
2221
2222 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
2223 return;
2224
85dac906
PT
2225 /*
2226 * if we're unable to extend our runtime we resched so that the active
2227 * hierarchy can be throttled
2228 */
2229 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2230 resched_task(rq_of(cfs_rq)->curr);
ec12cb7f
PT
2231}
2232
6c16a6dc
PZ
2233static __always_inline
2234void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
ec12cb7f 2235{
56f570e5 2236 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
2237 return;
2238
2239 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2240}
2241
85dac906
PT
2242static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2243{
56f570e5 2244 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
2245}
2246
64660c86
PT
2247/* check whether cfs_rq, or any parent, is throttled */
2248static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2249{
56f570e5 2250 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
2251}
2252
2253/*
2254 * Ensure that neither of the group entities corresponding to src_cpu or
2255 * dest_cpu are members of a throttled hierarchy when performing group
2256 * load-balance operations.
2257 */
2258static inline int throttled_lb_pair(struct task_group *tg,
2259 int src_cpu, int dest_cpu)
2260{
2261 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2262
2263 src_cfs_rq = tg->cfs_rq[src_cpu];
2264 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2265
2266 return throttled_hierarchy(src_cfs_rq) ||
2267 throttled_hierarchy(dest_cfs_rq);
2268}
2269
2270/* updated child weight may affect parent so we have to do this bottom up */
2271static int tg_unthrottle_up(struct task_group *tg, void *data)
2272{
2273 struct rq *rq = data;
2274 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2275
2276 cfs_rq->throttle_count--;
2277#ifdef CONFIG_SMP
2278 if (!cfs_rq->throttle_count) {
f1b17280 2279 /* adjust cfs_rq_clock_task() */
78becc27 2280 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 2281 cfs_rq->throttled_clock_task;
64660c86
PT
2282 }
2283#endif
2284
2285 return 0;
2286}
2287
2288static int tg_throttle_down(struct task_group *tg, void *data)
2289{
2290 struct rq *rq = data;
2291 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2292
82958366
PT
2293 /* group is entering throttled state, stop time */
2294 if (!cfs_rq->throttle_count)
78becc27 2295 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
2296 cfs_rq->throttle_count++;
2297
2298 return 0;
2299}
2300
d3d9dc33 2301static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
2302{
2303 struct rq *rq = rq_of(cfs_rq);
2304 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2305 struct sched_entity *se;
2306 long task_delta, dequeue = 1;
2307
2308 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2309
f1b17280 2310 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
2311 rcu_read_lock();
2312 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2313 rcu_read_unlock();
85dac906
PT
2314
2315 task_delta = cfs_rq->h_nr_running;
2316 for_each_sched_entity(se) {
2317 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2318 /* throttled entity or throttle-on-deactivate */
2319 if (!se->on_rq)
2320 break;
2321
2322 if (dequeue)
2323 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2324 qcfs_rq->h_nr_running -= task_delta;
2325
2326 if (qcfs_rq->load.weight)
2327 dequeue = 0;
2328 }
2329
2330 if (!se)
2331 rq->nr_running -= task_delta;
2332
2333 cfs_rq->throttled = 1;
78becc27 2334 cfs_rq->throttled_clock = rq_clock(rq);
85dac906
PT
2335 raw_spin_lock(&cfs_b->lock);
2336 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2337 raw_spin_unlock(&cfs_b->lock);
2338}
2339
029632fb 2340void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
2341{
2342 struct rq *rq = rq_of(cfs_rq);
2343 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2344 struct sched_entity *se;
2345 int enqueue = 1;
2346 long task_delta;
2347
22b958d8 2348 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
2349
2350 cfs_rq->throttled = 0;
1a55af2e
FW
2351
2352 update_rq_clock(rq);
2353
671fd9da 2354 raw_spin_lock(&cfs_b->lock);
78becc27 2355 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
2356 list_del_rcu(&cfs_rq->throttled_list);
2357 raw_spin_unlock(&cfs_b->lock);
2358
64660c86
PT
2359 /* update hierarchical throttle state */
2360 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2361
671fd9da
PT
2362 if (!cfs_rq->load.weight)
2363 return;
2364
2365 task_delta = cfs_rq->h_nr_running;
2366 for_each_sched_entity(se) {
2367 if (se->on_rq)
2368 enqueue = 0;
2369
2370 cfs_rq = cfs_rq_of(se);
2371 if (enqueue)
2372 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2373 cfs_rq->h_nr_running += task_delta;
2374
2375 if (cfs_rq_throttled(cfs_rq))
2376 break;
2377 }
2378
2379 if (!se)
2380 rq->nr_running += task_delta;
2381
2382 /* determine whether we need to wake up potentially idle cpu */
2383 if (rq->curr == rq->idle && rq->cfs.nr_running)
2384 resched_task(rq->curr);
2385}
2386
2387static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2388 u64 remaining, u64 expires)
2389{
2390 struct cfs_rq *cfs_rq;
2391 u64 runtime = remaining;
2392
2393 rcu_read_lock();
2394 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2395 throttled_list) {
2396 struct rq *rq = rq_of(cfs_rq);
2397
2398 raw_spin_lock(&rq->lock);
2399 if (!cfs_rq_throttled(cfs_rq))
2400 goto next;
2401
2402 runtime = -cfs_rq->runtime_remaining + 1;
2403 if (runtime > remaining)
2404 runtime = remaining;
2405 remaining -= runtime;
2406
2407 cfs_rq->runtime_remaining += runtime;
2408 cfs_rq->runtime_expires = expires;
2409
2410 /* we check whether we're throttled above */
2411 if (cfs_rq->runtime_remaining > 0)
2412 unthrottle_cfs_rq(cfs_rq);
2413
2414next:
2415 raw_spin_unlock(&rq->lock);
2416
2417 if (!remaining)
2418 break;
2419 }
2420 rcu_read_unlock();
2421
2422 return remaining;
2423}
2424
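A hedged sketch of the distribution loop above: each throttled cfs_rq receives just enough runtime to clear its deficit plus one, until the pool runs dry. The deficit and pool values are made up:

/* Illustration only: distribute_cfs_runtime()'s deficit + 1 hand-out. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t deficits[] = { -3000000, -1000000, -6000000 }; /* runtime_remaining */
	uint64_t remaining = 5000000;		/* freshly refilled pool */
	int i;

	for (i = 0; i < 3 && remaining; i++) {
		uint64_t want = -deficits[i] + 1;
		uint64_t give = want > remaining ? remaining : want;

		remaining -= give;
		deficits[i] += give;
		printf("cfs_rq %d: now %lld -> %s\n", i, (long long)deficits[i],
		       deficits[i] > 0 ? "unthrottle" : "still throttled");
	}
	printf("pool left: %llu\n", (unsigned long long)remaining);
	return 0;
}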
58088ad0
PT
2425/*
2426 * Responsible for refilling a task_group's bandwidth and unthrottling its
2427 * cfs_rqs as appropriate. If there has been no activity within the last
2428 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2429 * used to track this state.
2430 */
2431static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2432{
671fd9da
PT
2433 u64 runtime, runtime_expires;
2434 int idle = 1, throttled;
58088ad0
PT
2435
2436 raw_spin_lock(&cfs_b->lock);
2437 /* no need to continue the timer with no bandwidth constraint */
2438 if (cfs_b->quota == RUNTIME_INF)
2439 goto out_unlock;
2440
671fd9da
PT
2441 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2442 /* idle depends on !throttled (for the case of a large deficit) */
2443 idle = cfs_b->idle && !throttled;
e8da1b18 2444 cfs_b->nr_periods += overrun;
671fd9da 2445
a9cf55b2
PT
2446 /* if we're going inactive then everything else can be deferred */
2447 if (idle)
2448 goto out_unlock;
2449
2450 __refill_cfs_bandwidth_runtime(cfs_b);
2451
671fd9da
PT
2452 if (!throttled) {
2453 /* mark as potentially idle for the upcoming period */
2454 cfs_b->idle = 1;
2455 goto out_unlock;
2456 }
2457
e8da1b18
NR
2458 /* account preceding periods in which throttling occurred */
2459 cfs_b->nr_throttled += overrun;
2460
671fd9da
PT
2461 /*
2462 * There are throttled entities so we must first use the new bandwidth
2463 * to unthrottle them before making it generally available. This
2464 * ensures that all existing debts will be paid before a new cfs_rq is
2465 * allowed to run.
2466 */
2467 runtime = cfs_b->runtime;
2468 runtime_expires = cfs_b->runtime_expires;
2469 cfs_b->runtime = 0;
2470
2471 /*
2472 * This check is repeated as we are holding onto the new bandwidth
2473 * while we unthrottle. This can potentially race with an unthrottled
2474 * group trying to acquire new bandwidth from the global pool.
2475 */
2476 while (throttled && runtime > 0) {
2477 raw_spin_unlock(&cfs_b->lock);
2478 /* we can't nest cfs_b->lock while distributing bandwidth */
2479 runtime = distribute_cfs_runtime(cfs_b, runtime,
2480 runtime_expires);
2481 raw_spin_lock(&cfs_b->lock);
2482
2483 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2484 }
58088ad0 2485
671fd9da
PT
2486 /* return (any) remaining runtime */
2487 cfs_b->runtime = runtime;
2488 /*
2489 * While we are ensured activity in the period following an
2490 * unthrottle, this also covers the case in which the new bandwidth is
2491 * insufficient to cover the existing bandwidth deficit. (Forcing the
2492 * timer to remain active while there are any throttled entities.)
2493 */
2494 cfs_b->idle = 0;
58088ad0
PT
2495out_unlock:
2496 if (idle)
2497 cfs_b->timer_active = 0;
2498 raw_spin_unlock(&cfs_b->lock);
2499
2500 return idle;
2501}
d3d9dc33 2502
d8b4986d
PT
2503/* a cfs_rq won't donate quota below this amount */
2504static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2505/* minimum remaining period time to redistribute slack quota */
2506static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2507/* how long we wait to gather additional slack before distributing */
2508static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2509
2510/* are we near the end of the current quota period? */
2511static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2512{
2513 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2514 u64 remaining;
2515
2516 /* if the call-back is running a quota refresh is already occurring */
2517 if (hrtimer_callback_running(refresh_timer))
2518 return 1;
2519
2520 /* is a quota refresh about to occur? */
2521 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2522 if (remaining < min_expire)
2523 return 1;
2524
2525 return 0;
2526}
2527
2528static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2529{
2530 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2531
2532 /* if there's a quota refresh soon don't bother with slack */
2533 if (runtime_refresh_within(cfs_b, min_left))
2534 return;
2535
2536 start_bandwidth_timer(&cfs_b->slack_timer,
2537 ns_to_ktime(cfs_bandwidth_slack_period));
2538}
2539
2540/* we know any runtime found here is valid as update_curr() precedes return */
2541static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2542{
2543 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2544 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2545
2546 if (slack_runtime <= 0)
2547 return;
2548
2549 raw_spin_lock(&cfs_b->lock);
2550 if (cfs_b->quota != RUNTIME_INF &&
2551 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2552 cfs_b->runtime += slack_runtime;
2553
2554 /* we are under rq->lock, defer unthrottling using a timer */
2555 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2556 !list_empty(&cfs_b->throttled_cfs_rq))
2557 start_cfs_slack_bandwidth(cfs_b);
2558 }
2559 raw_spin_unlock(&cfs_b->lock);
2560
2561 /* even if it's not valid for return we don't want to try again */
2562 cfs_rq->runtime_remaining -= slack_runtime;
2563}
2564
2565static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2566{
56f570e5
PT
2567 if (!cfs_bandwidth_used())
2568 return;
2569
fccfdc6f 2570 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
2571 return;
2572
2573 __return_cfs_rq_runtime(cfs_rq);
2574}
2575
2576/*
2577 * This is done with a timer (instead of inline with bandwidth return) since
2578 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2579 */
2580static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2581{
2582 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2583 u64 expires;
2584
2585 /* confirm we're still not at a refresh boundary */
2586 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2587 return;
2588
2589 raw_spin_lock(&cfs_b->lock);
2590 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2591 runtime = cfs_b->runtime;
2592 cfs_b->runtime = 0;
2593 }
2594 expires = cfs_b->runtime_expires;
2595 raw_spin_unlock(&cfs_b->lock);
2596
2597 if (!runtime)
2598 return;
2599
2600 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2601
2602 raw_spin_lock(&cfs_b->lock);
2603 if (expires == cfs_b->runtime_expires)
2604 cfs_b->runtime = runtime;
2605 raw_spin_unlock(&cfs_b->lock);
2606}
2607
d3d9dc33
PT
2608/*
2609 * When a group wakes up we want to make sure that its quota is not already
2610 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2611 * runtime, as update_curr() throttling cannot trigger until it is on-rq.
2612 */
2613static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2614{
56f570e5
PT
2615 if (!cfs_bandwidth_used())
2616 return;
2617
d3d9dc33
PT
2618 /* an active group must be handled by the update_curr()->put() path */
2619 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2620 return;
2621
2622 /* ensure the group is not already throttled */
2623 if (cfs_rq_throttled(cfs_rq))
2624 return;
2625
2626 /* update runtime allocation */
2627 account_cfs_rq_runtime(cfs_rq, 0);
2628 if (cfs_rq->runtime_remaining <= 0)
2629 throttle_cfs_rq(cfs_rq);
2630}
2631
2632/* conditionally throttle active cfs_rq's from put_prev_entity() */
2633static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2634{
56f570e5
PT
2635 if (!cfs_bandwidth_used())
2636 return;
2637
d3d9dc33
PT
2638 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2639 return;
2640
2641 /*
2642 * it's possible for a throttled entity to be forced into a running
2643 * state (e.g. set_curr_task), in this case we're finished.
2644 */
2645 if (cfs_rq_throttled(cfs_rq))
2646 return;
2647
2648 throttle_cfs_rq(cfs_rq);
2649}
029632fb 2650
029632fb
PZ
2651static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2652{
2653 struct cfs_bandwidth *cfs_b =
2654 container_of(timer, struct cfs_bandwidth, slack_timer);
2655 do_sched_cfs_slack_timer(cfs_b);
2656
2657 return HRTIMER_NORESTART;
2658}
2659
2660static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2661{
2662 struct cfs_bandwidth *cfs_b =
2663 container_of(timer, struct cfs_bandwidth, period_timer);
2664 ktime_t now;
2665 int overrun;
2666 int idle = 0;
2667
2668 for (;;) {
2669 now = hrtimer_cb_get_time(timer);
2670 overrun = hrtimer_forward(timer, now, cfs_b->period);
2671
2672 if (!overrun)
2673 break;
2674
2675 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2676 }
2677
2678 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2679}
2680
2681void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2682{
2683 raw_spin_lock_init(&cfs_b->lock);
2684 cfs_b->runtime = 0;
2685 cfs_b->quota = RUNTIME_INF;
2686 cfs_b->period = ns_to_ktime(default_cfs_period());
2687
2688 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2689 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2690 cfs_b->period_timer.function = sched_cfs_period_timer;
2691 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2692 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2693}
2694
2695static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2696{
2697 cfs_rq->runtime_enabled = 0;
2698 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2699}
2700
2701/* requires cfs_b->lock, may release to reprogram timer */
2702void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2703{
2704 /*
2705 * The timer may be active because we're trying to set a new bandwidth
2706 * period or because we're racing with the tear-down path
2707 * (timer_active==0 becomes visible before the hrtimer call-back
2708 * terminates). In either case we ensure that it's re-programmed.
2709 */
2710 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2711 raw_spin_unlock(&cfs_b->lock);
2712 /* ensure cfs_b->lock is available while we wait */
2713 hrtimer_cancel(&cfs_b->period_timer);
2714
2715 raw_spin_lock(&cfs_b->lock);
2716 /* if someone else restarted the timer then we're done */
2717 if (cfs_b->timer_active)
2718 return;
2719 }
2720
2721 cfs_b->timer_active = 1;
2722 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2723}
2724
2725static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2726{
2727 hrtimer_cancel(&cfs_b->period_timer);
2728 hrtimer_cancel(&cfs_b->slack_timer);
2729}
2730
38dc3348 2731static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
2732{
2733 struct cfs_rq *cfs_rq;
2734
2735 for_each_leaf_cfs_rq(rq, cfs_rq) {
2736 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2737
2738 if (!cfs_rq->runtime_enabled)
2739 continue;
2740
2741 /*
2742 * clock_task is not advancing so we just need to make sure
2743 * there's some valid quota amount
2744 */
2745 cfs_rq->runtime_remaining = cfs_b->quota;
2746 if (cfs_rq_throttled(cfs_rq))
2747 unthrottle_cfs_rq(cfs_rq);
2748 }
2749}
2750
2751#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
2752static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2753{
78becc27 2754 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
2755}
2756
2757static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2758 unsigned long delta_exec) {}
d3d9dc33
PT
2759static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2760static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 2761static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
2762
2763static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2764{
2765 return 0;
2766}
64660c86
PT
2767
2768static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2769{
2770 return 0;
2771}
2772
2773static inline int throttled_lb_pair(struct task_group *tg,
2774 int src_cpu, int dest_cpu)
2775{
2776 return 0;
2777}
029632fb
PZ
2778
2779void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2780
2781#ifdef CONFIG_FAIR_GROUP_SCHED
2782static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
2783#endif
2784
029632fb
PZ
2785static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2786{
2787 return NULL;
2788}
2789static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
a4c96ae3 2790static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
2791
2792#endif /* CONFIG_CFS_BANDWIDTH */
2793
bf0f6f24
IM
2794/**************************************************
2795 * CFS operations on tasks:
2796 */
2797
8f4d37ec
PZ
2798#ifdef CONFIG_SCHED_HRTICK
2799static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2800{
8f4d37ec
PZ
2801 struct sched_entity *se = &p->se;
2802 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2803
2804 WARN_ON(task_rq(p) != rq);
2805
b39e66ea 2806 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
2807 u64 slice = sched_slice(cfs_rq, se);
2808 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2809 s64 delta = slice - ran;
2810
2811 if (delta < 0) {
2812 if (rq->curr == p)
2813 resched_task(p);
2814 return;
2815 }
2816
2817 /*
2818 * Don't schedule slices shorter than 10000ns, that just
2819 * doesn't make sense. Rely on vruntime for fairness.
2820 */
31656519 2821 if (rq->curr != p)
157124c1 2822 delta = max_t(s64, 10000LL, delta);
8f4d37ec 2823
31656519 2824 hrtick_start(rq, delta);
8f4d37ec
PZ
2825 }
2826}
a4c2f00f
PZ
2827
2828/*
2829 * called from enqueue/dequeue and updates the hrtick when the
2830 * current task is from our class and nr_running is low enough
2831 * to matter.
2832 */
2833static void hrtick_update(struct rq *rq)
2834{
2835 struct task_struct *curr = rq->curr;
2836
b39e66ea 2837 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
2838 return;
2839
2840 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2841 hrtick_start_fair(rq, curr);
2842}
55e12e5e 2843#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
2844static inline void
2845hrtick_start_fair(struct rq *rq, struct task_struct *p)
2846{
2847}
a4c2f00f
PZ
2848
2849static inline void hrtick_update(struct rq *rq)
2850{
2851}
8f4d37ec
PZ
2852#endif
2853
bf0f6f24
IM
2854/*
2855 * The enqueue_task method is called before nr_running is
2856 * increased. Here we update the fair scheduling stats and
2857 * then put the task into the rbtree:
2858 */
ea87bb78 2859static void
371fd7e7 2860enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2861{
2862 struct cfs_rq *cfs_rq;
62fb1851 2863 struct sched_entity *se = &p->se;
bf0f6f24
IM
2864
2865 for_each_sched_entity(se) {
62fb1851 2866 if (se->on_rq)
bf0f6f24
IM
2867 break;
2868 cfs_rq = cfs_rq_of(se);
88ec22d3 2869 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
2870
2871 /*
2872 * end evaluation on encountering a throttled cfs_rq
2873 *
2874 * note: in the case of encountering a throttled cfs_rq we will
2875 * post the final h_nr_running increment below.
2876 */
2877 if (cfs_rq_throttled(cfs_rq))
2878 break;
953bfcd1 2879 cfs_rq->h_nr_running++;
85dac906 2880
88ec22d3 2881 flags = ENQUEUE_WAKEUP;
bf0f6f24 2882 }
8f4d37ec 2883
2069dd75 2884 for_each_sched_entity(se) {
0f317143 2885 cfs_rq = cfs_rq_of(se);
953bfcd1 2886 cfs_rq->h_nr_running++;
2069dd75 2887
85dac906
PT
2888 if (cfs_rq_throttled(cfs_rq))
2889 break;
2890
17bc14b7 2891 update_cfs_shares(cfs_rq);
9ee474f5 2892 update_entity_load_avg(se, 1);
2069dd75
PZ
2893 }
2894
18bf2805
BS
2895 if (!se) {
2896 update_rq_runnable_avg(rq, rq->nr_running);
85dac906 2897 inc_nr_running(rq);
18bf2805 2898 }
a4c2f00f 2899 hrtick_update(rq);
bf0f6f24
IM
2900}
2901
2f36825b
VP
2902static void set_next_buddy(struct sched_entity *se);
2903
bf0f6f24
IM
2904/*
2905 * The dequeue_task method is called before nr_running is
2906 * decreased. We remove the task from the rbtree and
2907 * update the fair scheduling stats:
2908 */
371fd7e7 2909static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2910{
2911 struct cfs_rq *cfs_rq;
62fb1851 2912 struct sched_entity *se = &p->se;
2f36825b 2913 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
2914
2915 for_each_sched_entity(se) {
2916 cfs_rq = cfs_rq_of(se);
371fd7e7 2917 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
2918
2919 /*
2920 * end evaluation on encountering a throttled cfs_rq
2921 *
2922 * note: in the case of encountering a throttled cfs_rq we will
2923 * post the final h_nr_running decrement below.
2924 */
2925 if (cfs_rq_throttled(cfs_rq))
2926 break;
953bfcd1 2927 cfs_rq->h_nr_running--;
2069dd75 2928
bf0f6f24 2929 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
2930 if (cfs_rq->load.weight) {
2931 /*
2932 * Bias pick_next to pick a task from this cfs_rq, as
2933 * p is sleeping when it is within its sched_slice.
2934 */
2935 if (task_sleep && parent_entity(se))
2936 set_next_buddy(parent_entity(se));
9598c82d
PT
2937
2938 /* avoid re-evaluating load for this entity */
2939 se = parent_entity(se);
bf0f6f24 2940 break;
2f36825b 2941 }
371fd7e7 2942 flags |= DEQUEUE_SLEEP;
bf0f6f24 2943 }
8f4d37ec 2944
2069dd75 2945 for_each_sched_entity(se) {
0f317143 2946 cfs_rq = cfs_rq_of(se);
953bfcd1 2947 cfs_rq->h_nr_running--;
2069dd75 2948
85dac906
PT
2949 if (cfs_rq_throttled(cfs_rq))
2950 break;
2951
17bc14b7 2952 update_cfs_shares(cfs_rq);
9ee474f5 2953 update_entity_load_avg(se, 1);
2069dd75
PZ
2954 }
2955
18bf2805 2956 if (!se) {
85dac906 2957 dec_nr_running(rq);
18bf2805
BS
2958 update_rq_runnable_avg(rq, 1);
2959 }
a4c2f00f 2960 hrtick_update(rq);
bf0f6f24
IM
2961}
2962
e7693a36 2963#ifdef CONFIG_SMP
029632fb
PZ
2964/* Used instead of source_load when we know the type == 0 */
2965static unsigned long weighted_cpuload(const int cpu)
2966{
b92486cb 2967 return cpu_rq(cpu)->cfs.runnable_load_avg;
029632fb
PZ
2968}
2969
2970/*
2971 * Return a low guess at the load of a migration-source cpu weighted
2972 * according to the scheduling class and "nice" value.
2973 *
2974 * We want to under-estimate the load of migration sources, to
2975 * balance conservatively.
2976 */
2977static unsigned long source_load(int cpu, int type)
2978{
2979 struct rq *rq = cpu_rq(cpu);
2980 unsigned long total = weighted_cpuload(cpu);
2981
2982 if (type == 0 || !sched_feat(LB_BIAS))
2983 return total;
2984
2985 return min(rq->cpu_load[type-1], total);
2986}
2987
2988/*
2989 * Return a high guess at the load of a migration-target cpu weighted
2990 * according to the scheduling class and "nice" value.
2991 */
2992static unsigned long target_load(int cpu, int type)
2993{
2994 struct rq *rq = cpu_rq(cpu);
2995 unsigned long total = weighted_cpuload(cpu);
2996
2997 if (type == 0 || !sched_feat(LB_BIAS))
2998 return total;
2999
3000 return max(rq->cpu_load[type-1], total);
3001}
3002
3003static unsigned long power_of(int cpu)
3004{
3005 return cpu_rq(cpu)->cpu_power;
3006}
3007
3008static unsigned long cpu_avg_load_per_task(int cpu)
3009{
3010 struct rq *rq = cpu_rq(cpu);
3011 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
b92486cb 3012 unsigned long load_avg = rq->cfs.runnable_load_avg;
029632fb
PZ
3013
3014 if (nr_running)
b92486cb 3015 return load_avg / nr_running;
029632fb
PZ
3016
3017 return 0;
3018}
3019
62470419
MW
3020static void record_wakee(struct task_struct *p)
3021{
3022 /*
3023 * Rough decay (wiping) for cost saving; don't worry
3024 * about boundary effects, a really active task won't care
3025 * about the loss.
3026 */
3027 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3028 current->wakee_flips = 0;
3029 current->wakee_flip_decay_ts = jiffies;
3030 }
3031
3032 if (current->last_wakee != p) {
3033 current->last_wakee = p;
3034 current->wakee_flips++;
3035 }
3036}
098fb9db 3037
74f8e4b2 3038static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
3039{
3040 struct sched_entity *se = &p->se;
3041 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
3042 u64 min_vruntime;
3043
3044#ifndef CONFIG_64BIT
3045 u64 min_vruntime_copy;
88ec22d3 3046
3fe1698b
PZ
3047 do {
3048 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3049 smp_rmb();
3050 min_vruntime = cfs_rq->min_vruntime;
3051 } while (min_vruntime != min_vruntime_copy);
3052#else
3053 min_vruntime = cfs_rq->min_vruntime;
3054#endif
88ec22d3 3055
3fe1698b 3056 se->vruntime -= min_vruntime;
62470419 3057 record_wakee(p);
88ec22d3
PZ
3058}
3059
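The #ifndef CONFIG_64BIT path above is a seqlock-like copy/retry read of a 64-bit value on 32-bit machines: the writer updates the value and then the copy, and the reader retries until both halves agree. A userspace approximation, illustration only, with a compiler barrier standing in for smp_rmb():

#include <stdio.h>
#include <stdint.h>

static volatile uint64_t min_vruntime = 42;
static volatile uint64_t min_vruntime_copy = 42;

static uint64_t read_min_vruntime(void)
{
	uint64_t val, copy;

	do {
		copy = min_vruntime_copy;
		__asm__ __volatile__("" ::: "memory");	/* stand-in for smp_rmb() */
		val = min_vruntime;
	} while (val != copy);			/* retry on torn read */

	return val;
}

int main(void)
{
	printf("min_vruntime = %llu\n",
	       (unsigned long long)read_min_vruntime());
	return 0;
}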
bb3469ac 3060#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
3061/*
3062 * effective_load() calculates the load change as seen from the root_task_group
3063 *
3064 * Adding load to a group doesn't make a group heavier, but can cause movement
3065 * of group shares between cpus. Assuming the shares were perfectly aligned one
3066 * can calculate the shift in shares.
cf5f0acf
PZ
3067 *
3068 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3069 * on this @cpu and results in a total addition (subtraction) of @wg to the
3070 * total group weight.
3071 *
3072 * Given a runqueue weight distribution (rw_i) we can compute a shares
3073 * distribution (s_i) using:
3074 *
3075 * s_i = rw_i / \Sum rw_j (1)
3076 *
3077 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3078 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3079 * shares distribution (s_i):
3080 *
3081 * rw_i = { 2, 4, 1, 0 }
3082 * s_i = { 2/7, 4/7, 1/7, 0 }
3083 *
3084 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3085 * task used to run on and the CPU the waker is running on), we need to
3086 * compute the effect of waking a task on either CPU and, in case of a sync
3087 * wakeup, compute the effect of the current task going to sleep.
3088 *
3089 * So for a change of @wl to the local @cpu with an overall group weight change
3090 * of @wl we can compute the new shares distribution (s'_i) using:
3091 *
3092 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3093 *
3094 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3095 * differences in waking a task to CPU 0. The additional task changes the
3096 * weight and shares distributions like:
3097 *
3098 * rw'_i = { 3, 4, 1, 0 }
3099 * s'_i = { 3/8, 4/8, 1/8, 0 }
3100 *
3101 * We can then compute the difference in effective weight by using:
3102 *
3103 * dw_i = S * (s'_i - s_i) (3)
3104 *
3105 * Where 'S' is the group weight as seen by its parent.
3106 *
3107 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3108 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3109 * 4/7) times the weight of the group.
f5bfb7d9 3110 */
2069dd75 3111static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 3112{
4be9daaa 3113 struct sched_entity *se = tg->se[cpu];
f1d239f7 3114
cf5f0acf 3115 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
3116 return wl;
3117
4be9daaa 3118 for_each_sched_entity(se) {
cf5f0acf 3119 long w, W;
4be9daaa 3120
977dda7c 3121 tg = se->my_q->tg;
bb3469ac 3122
cf5f0acf
PZ
3123 /*
3124 * W = @wg + \Sum rw_j
3125 */
3126 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 3127
cf5f0acf
PZ
3128 /*
3129 * w = rw_i + @wl
3130 */
3131 w = se->my_q->load.weight + wl;
940959e9 3132
cf5f0acf
PZ
3133 /*
3134 * wl = S * s'_i; see (2)
3135 */
3136 if (W > 0 && w < W)
3137 wl = (w * tg->shares) / W;
977dda7c
PT
3138 else
3139 wl = tg->shares;
940959e9 3140
cf5f0acf
PZ
3141 /*
3142 * Per the above, wl is the new se->load.weight value; since
3143 * those are clipped to [MIN_SHARES, ...) do so now. See
3144 * calc_cfs_shares().
3145 */
977dda7c
PT
3146 if (wl < MIN_SHARES)
3147 wl = MIN_SHARES;
cf5f0acf
PZ
3148
3149 /*
3150 * wl = dw_i = S * (s'_i - s_i); see (3)
3151 */
977dda7c 3152 wl -= se->load.weight;
cf5f0acf
PZ
3153
3154 /*
3155 * Recursively apply this logic to all parent groups to compute
3156 * the final effective load change on the root group. Since
3157 * only the @tg group gets extra weight, all parent groups can
3158 * only redistribute existing shares. @wl is the shift in shares
3159 * resulting from this level per the above.
3160 */
4be9daaa 3161 wg = 0;
4be9daaa 3162 }
bb3469ac 3163
4be9daaa 3164 return wl;
bb3469ac
PZ
3165}
3166#else
4be9daaa 3167
83378269
PZ
3168static inline unsigned long effective_load(struct task_group *tg, int cpu,
3169 unsigned long wl, unsigned long wg)
4be9daaa 3170{
83378269 3171 return wl;
bb3469ac 3172}
4be9daaa 3173
bb3469ac
PZ
3174#endif
3175
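A small program reproducing the worked example from the effective_load() comment: rw_i = {2, 4, 1, 0}, one task of weight 1 added to CPU 0. S = 1024 is an assumed parent-visible group weight (the default shares value), not taken from the source.

#include <stdio.h>

int main(void)
{
	double rw0 = 2, rw1 = 4;		/* loads on CPUs 0 and 1 */
	double sum = 7, new_sum = 8;		/* before/after adding wl = 1 */
	double S = 1024;			/* assumed group weight */

	double d0 = (rw0 + 1) / new_sum - rw0 / sum;	/* 3/8 - 2/7 = 5/56 */
	double d1 = rw1 / new_sum - rw1 / sum;		/* 4/8 - 4/7 = -4/56 */

	printf("dw0 = %+.0f/56 of S = %+.2f\n", d0 * 56, d0 * S);
	printf("dw1 = %+.0f/56 of S = %+.2f\n", d1 * 56, d1 * S);
	return 0;
}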
62470419
MW
3176static int wake_wide(struct task_struct *p)
3177{
3178 int factor = nr_cpus_node(cpu_to_node(smp_processor_id()));
3179
3180 /*
3181 * wakee_flips reflects the switching frequency: a high count can mean
3182 * many distinct wakees or rapid switching between them. Using the node
3183 * size as the factor scales the threshold, so a bigger node leads to more pull.
3184 */
3185 if (p->wakee_flips > factor) {
3186 /*
3187 * The wakee is somewhat hot and needs a certain amount of cpu
3188 * resources, so if the waker is far hotter, prefer to leave
3189 * it alone.
3190 */
3191 if (current->wakee_flips > (factor * p->wakee_flips))
3192 return 1;
3193 }
3194
3195 return 0;
3196}
3197
c88d5910 3198static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 3199{
e37b6a7b 3200 s64 this_load, load;
c88d5910 3201 int idx, this_cpu, prev_cpu;
098fb9db 3202 unsigned long tl_per_task;
c88d5910 3203 struct task_group *tg;
83378269 3204 unsigned long weight;
b3137bc8 3205 int balanced;
098fb9db 3206
62470419
MW
3207 /*
3208 * If we wake multiple tasks be careful to not bounce
3209 * ourselves around too much.
3210 */
3211 if (wake_wide(p))
3212 return 0;
3213
c88d5910
PZ
3214 idx = sd->wake_idx;
3215 this_cpu = smp_processor_id();
3216 prev_cpu = task_cpu(p);
3217 load = source_load(prev_cpu, idx);
3218 this_load = target_load(this_cpu, idx);
098fb9db 3219
b3137bc8
MG
3220 /*
3221 * If sync wakeup then subtract the (maximum possible)
3222 * effect of the currently running task from the load
3223 * of the current CPU:
3224 */
83378269
PZ
3225 if (sync) {
3226 tg = task_group(current);
3227 weight = current->se.load.weight;
3228
c88d5910 3229 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
3230 load += effective_load(tg, prev_cpu, 0, -weight);
3231 }
b3137bc8 3232
83378269
PZ
3233 tg = task_group(p);
3234 weight = p->se.load.weight;
b3137bc8 3235
71a29aa7
PZ
3236 /*
3237 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
3238 * due to the sync cause above having dropped this_load to 0, we'll
3239 * always have an imbalance, but there's really nothing you can do
3240 * about that, so that's good too.
71a29aa7
PZ
3241 *
3242 * Otherwise check if either cpu is near enough in load to allow this
3243 * task to be woken on this_cpu.
3244 */
e37b6a7b
PT
3245 if (this_load > 0) {
3246 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
3247
3248 this_eff_load = 100;
3249 this_eff_load *= power_of(prev_cpu);
3250 this_eff_load *= this_load +
3251 effective_load(tg, this_cpu, weight, weight);
3252
3253 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3254 prev_eff_load *= power_of(this_cpu);
3255 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3256
3257 balanced = this_eff_load <= prev_eff_load;
3258 } else
3259 balanced = true;
b3137bc8 3260
098fb9db 3261 /*
4ae7d5ce
IM
3262 * If the currently running task will sleep within
3263 * a reasonable amount of time then attract this newly
3264 * woken task:
098fb9db 3265 */
2fb7635c
PZ
3266 if (sync && balanced)
3267 return 1;
098fb9db 3268
41acab88 3269 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
3270 tl_per_task = cpu_avg_load_per_task(this_cpu);
3271
c88d5910
PZ
3272 if (balanced ||
3273 (this_load <= load &&
3274 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
3275 /*
3276 * This domain has SD_WAKE_AFFINE and
3277 * p is cache cold in this domain, and
3278 * there is no bad imbalance.
3279 */
c88d5910 3280 schedstat_inc(sd, ttwu_move_affine);
41acab88 3281 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
3282
3283 return 1;
3284 }
3285 return 0;
3286}
3287
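The balanced check above multiplies each side by the other CPU's power so that no division is needed, and prev_eff_load carries the imbalance_pct headroom in favor of prev_cpu. A sketch with made-up loads; imbalance_pct = 125 is an assumed typical domain value:

/* Illustration only: wake_affine()'s scaled, division-free comparison. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t this_load = 1000, prev_load = 950;	/* made-up loads */
	int64_t this_power = 1024, prev_power = 1024;
	int imbalance_pct = 125;

	int64_t this_eff = 100 * prev_power * this_load;
	int64_t prev_eff = (100 + (imbalance_pct - 100) / 2)
			   * this_power * prev_load;

	/* wake to this_cpu only if it is not meaningfully busier */
	printf("balanced = %d\n", this_eff <= prev_eff);
	return 0;
}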
aaee1203
PZ
3288/*
3289 * find_idlest_group finds and returns the least busy CPU group within the
3290 * domain.
3291 */
3292static struct sched_group *
78e7ed53 3293find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 3294 int this_cpu, int load_idx)
e7693a36 3295{
b3bd3de6 3296 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 3297 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 3298 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 3299
aaee1203
PZ
3300 do {
3301 unsigned long load, avg_load;
3302 int local_group;
3303 int i;
e7693a36 3304
aaee1203
PZ
3305 /* Skip over this group if it has no CPUs allowed */
3306 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 3307 tsk_cpus_allowed(p)))
aaee1203
PZ
3308 continue;
3309
3310 local_group = cpumask_test_cpu(this_cpu,
3311 sched_group_cpus(group));
3312
3313 /* Tally up the load of all CPUs in the group */
3314 avg_load = 0;
3315
3316 for_each_cpu(i, sched_group_cpus(group)) {
3317 /* Bias balancing toward cpus of our domain */
3318 if (local_group)
3319 load = source_load(i, load_idx);
3320 else
3321 load = target_load(i, load_idx);
3322
3323 avg_load += load;
3324 }
3325
3326 /* Adjust by relative CPU power of the group */
9c3f75cb 3327 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
3328
3329 if (local_group) {
3330 this_load = avg_load;
aaee1203
PZ
3331 } else if (avg_load < min_load) {
3332 min_load = avg_load;
3333 idlest = group;
3334 }
3335 } while (group = group->next, group != sd->groups);
3336
3337 if (!idlest || 100*this_load < imbalance*min_load)
3338 return NULL;
3339 return idlest;
3340}
3341
3342/*
3343 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3344 */
3345static int
3346find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3347{
3348 unsigned long load, min_load = ULONG_MAX;
3349 int idlest = -1;
3350 int i;
3351
3352 /* Traverse only the allowed CPUs */
fa17b507 3353 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
3354 load = weighted_cpuload(i);
3355
3356 if (load < min_load || (load == min_load && i == this_cpu)) {
3357 min_load = load;
3358 idlest = i;
e7693a36
GH
3359 }
3360 }
3361
aaee1203
PZ
3362 return idlest;
3363}
e7693a36 3364
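A standalone rendering of the selection rule above: the lowest load wins, and a tie breaks toward this_cpu. The loads are illustrative:

/* Illustration only: find_idlest_cpu()'s min-load scan with tie-break. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long load[4] = { 300, 100, 100, 250 };
	int this_cpu = 2, idlest = -1, i;
	unsigned long min_load = ULONG_MAX;

	for (i = 0; i < 4; i++) {
		if (load[i] < min_load ||
		    (load[i] == min_load && i == this_cpu)) {
			min_load = load[i];
			idlest = i;
		}
	}
	printf("idlest cpu: %d\n", idlest);	/* 2: tie goes to this_cpu */
	return 0;
}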
a50bde51
PZ
3365/*
3366 * Try and locate an idle CPU in the sched_domain.
3367 */
99bd5e2f 3368static int select_idle_sibling(struct task_struct *p, int target)
a50bde51 3369{
99bd5e2f 3370 struct sched_domain *sd;
37407ea7 3371 struct sched_group *sg;
e0a79f52 3372 int i = task_cpu(p);
a50bde51 3373
e0a79f52
MG
3374 if (idle_cpu(target))
3375 return target;
99bd5e2f
SS
3376
3377 /*
e0a79f52 3378 * If the prevous cpu is cache affine and idle, don't be stupid.
99bd5e2f 3379 */
e0a79f52
MG
3380 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3381 return i;
a50bde51
PZ
3382
3383 /*
37407ea7 3384 * Otherwise, iterate the domains and find an elegible idle cpu.
a50bde51 3385 */
518cd623 3386 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 3387 for_each_lower_domain(sd) {
37407ea7
LT
3388 sg = sd->groups;
3389 do {
3390 if (!cpumask_intersects(sched_group_cpus(sg),
3391 tsk_cpus_allowed(p)))
3392 goto next;
3393
3394 for_each_cpu(i, sched_group_cpus(sg)) {
e0a79f52 3395 if (i == target || !idle_cpu(i))
37407ea7
LT
3396 goto next;
3397 }
970e1789 3398
37407ea7
LT
3399 target = cpumask_first_and(sched_group_cpus(sg),
3400 tsk_cpus_allowed(p));
3401 goto done;
3402next:
3403 sg = sg->next;
3404 } while (sg != sd->groups);
3405 }
3406done:
a50bde51
PZ
3407 return target;
3408}
3409
aaee1203
PZ
3410/*
3411 * select_task_rq_fair: balance the current task (running on cpu) in domains
3412 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3413 * SD_BALANCE_EXEC.
3414 *
3415 * Balance, ie. select the least loaded group.
3416 *
3417 * Returns the target CPU number, or the same CPU if no balancing is needed.
3418 *
3419 * preempt must be disabled.
3420 */
0017d735 3421static int
7608dec2 3422select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 3423{
29cd8bae 3424 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
3425 int cpu = smp_processor_id();
3426 int prev_cpu = task_cpu(p);
3427 int new_cpu = cpu;
99bd5e2f 3428 int want_affine = 0;
5158f4e4 3429 int sync = wake_flags & WF_SYNC;
c88d5910 3430
29baa747 3431 if (p->nr_cpus_allowed == 1)
76854c7e
MG
3432 return prev_cpu;
3433
0763a660 3434 if (sd_flag & SD_BALANCE_WAKE) {
fa17b507 3435 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
c88d5910
PZ
3436 want_affine = 1;
3437 new_cpu = prev_cpu;
3438 }
aaee1203 3439
dce840a0 3440 rcu_read_lock();
aaee1203 3441 for_each_domain(cpu, tmp) {
e4f42888
PZ
3442 if (!(tmp->flags & SD_LOAD_BALANCE))
3443 continue;
3444
fe3bcfe1 3445 /*
99bd5e2f
SS
3446 * If both cpu and prev_cpu are part of this domain,
3447 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 3448 */
99bd5e2f
SS
3449 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3450 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3451 affine_sd = tmp;
29cd8bae 3452 break;
f03542a7 3453 }
29cd8bae 3454
f03542a7 3455 if (tmp->flags & sd_flag)
29cd8bae
PZ
3456 sd = tmp;
3457 }
3458
8b911acd 3459 if (affine_sd) {
f03542a7 3460 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
dce840a0
PZ
3461 prev_cpu = cpu;
3462
3463 new_cpu = select_idle_sibling(p, prev_cpu);
3464 goto unlock;
8b911acd 3465 }
e7693a36 3466
aaee1203 3467 while (sd) {
5158f4e4 3468 int load_idx = sd->forkexec_idx;
aaee1203 3469 struct sched_group *group;
c88d5910 3470 int weight;
098fb9db 3471
0763a660 3472 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
3473 sd = sd->child;
3474 continue;
3475 }
098fb9db 3476
5158f4e4
PZ
3477 if (sd_flag & SD_BALANCE_WAKE)
3478 load_idx = sd->wake_idx;
098fb9db 3479
5158f4e4 3480 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
3481 if (!group) {
3482 sd = sd->child;
3483 continue;
3484 }
4ae7d5ce 3485
d7c33c49 3486 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
3487 if (new_cpu == -1 || new_cpu == cpu) {
3488 /* Now try balancing at a lower domain level of cpu */
3489 sd = sd->child;
3490 continue;
e7693a36 3491 }
aaee1203
PZ
3492
3493 /* Now try balancing at a lower domain level of new_cpu */
3494 cpu = new_cpu;
669c55e9 3495 weight = sd->span_weight;
aaee1203
PZ
3496 sd = NULL;
3497 for_each_domain(cpu, tmp) {
669c55e9 3498 if (weight <= tmp->span_weight)
aaee1203 3499 break;
0763a660 3500 if (tmp->flags & sd_flag)
aaee1203
PZ
3501 sd = tmp;
3502 }
3503 /* while loop will break here if sd == NULL */
e7693a36 3504 }
dce840a0
PZ
3505unlock:
3506 rcu_read_unlock();
e7693a36 3507
c88d5910 3508 return new_cpu;
e7693a36 3509}
0a74bef8
PT
3510
3511/*
3512 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3513 * cfs_rq_of(p) references at time of call are still valid and identify the
3514 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3515 * other assumptions, including the state of rq->lock, should be made.
3516 */
3517static void
3518migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3519{
aff3e498
PT
3520 struct sched_entity *se = &p->se;
3521 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3522
3523 /*
3524 * Load tracking: accumulate removed load so that it can be processed
3525 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3526 * to blocked load iff they have a positive decay-count. It can never
3527 * be negative here since on-rq tasks have decay-count == 0.
3528 */
3529 if (se->avg.decay_count) {
3530 se->avg.decay_count = -__synchronize_entity_decay(se);
2509940f
AS
3531 atomic_long_add(se->avg.load_avg_contrib,
3532 &cfs_rq->removed_load);
aff3e498 3533 }
0a74bef8 3534}
e7693a36
GH
3535#endif /* CONFIG_SMP */
3536
e52fb7c0
PZ
3537static unsigned long
3538wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
3539{
3540 unsigned long gran = sysctl_sched_wakeup_granularity;
3541
3542 /*
e52fb7c0
PZ
3543 * Since it is curr that is running now, convert the gran from
3544 * real-time to virtual-time in its units.
13814d42
MG
3545 *
3546 * By using 'se' instead of 'curr' we penalize light tasks, so
3547 * they get preempted more easily. That is, if 'se' < 'curr' then
3548 * the resulting gran will be larger, therefore penalizing the
3549 * lighter task; if, on the other hand, 'se' > 'curr' then the
3550 * resulting gran will be smaller, again penalizing the lighter task.
3551 *
3552 * This is especially important for buddies when the leftmost
3553 * task is higher priority than the buddy.
0bbd3336 3554 */
f4ad9bd2 3555 return calc_delta_fair(gran, se);
0bbd3336
PZ
3556}
3557
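/*
 * Worked example of the conversion above (illustrative; assumes
 * NICE_0_LOAD == 1024 and a 1ms wakeup granularity, since
 * calc_delta_fair() scales by NICE_0_LOAD / se->load.weight):
 *
 *   se->load.weight == 1024 (nice 0) -> vgran ~= 1.0 ms
 *   se->load.weight == 2048 (heavy)  -> vgran ~= 0.5 ms, preempts easier
 *   se->load.weight ==  512 (light)  -> vgran ~= 2.0 ms, preempts harder
 */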
464b7527
PZ
3558/*
3559 * Should 'se' preempt 'curr'.
3560 *
3561 * |s1
3562 * |s2
3563 * |s3
3564 * g
3565 * |<--->|c
3566 *
3567 * w(c, s1) = -1
3568 * w(c, s2) = 0
3569 * w(c, s3) = 1
3570 *
3571 */
3572static int
3573wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3574{
3575 s64 gran, vdiff = curr->vruntime - se->vruntime;
3576
3577 if (vdiff <= 0)
3578 return -1;
3579
e52fb7c0 3580 gran = wakeup_gran(curr, se);
464b7527
PZ
3581 if (vdiff > gran)
3582 return 1;
3583
3584 return 0;
3585}
3586
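/*
 * Worked reading of the diagram above with assumed numbers
 * (curr->vruntime == 100, gran == 10):
 *
 *   se->vruntime == 105: vdiff == -5 <= 0        -> -1 (s1, no preempt)
 *   se->vruntime ==  95: vdiff ==  5, not > gran ->  0 (s2, within gran)
 *   se->vruntime ==  85: vdiff == 15 > gran      ->  1 (s3, preempt)
 */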
02479099
PZ
3587static void set_last_buddy(struct sched_entity *se)
3588{
69c80f3e
VP
3589 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3590 return;
3591
3592 for_each_sched_entity(se)
3593 cfs_rq_of(se)->last = se;
02479099
PZ
3594}
3595
3596static void set_next_buddy(struct sched_entity *se)
3597{
69c80f3e
VP
3598 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3599 return;
3600
3601 for_each_sched_entity(se)
3602 cfs_rq_of(se)->next = se;
02479099
PZ
3603}
3604
ac53db59
RR
3605static void set_skip_buddy(struct sched_entity *se)
3606{
69c80f3e
VP
3607 for_each_sched_entity(se)
3608 cfs_rq_of(se)->skip = se;
ac53db59
RR
3609}
3610
bf0f6f24
IM
3611/*
3612 * Preempt the current task with a newly woken task if needed:
3613 */
5a9b86f6 3614static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
3615{
3616 struct task_struct *curr = rq->curr;
8651a86c 3617 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 3618 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 3619 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 3620 int next_buddy_marked = 0;
bf0f6f24 3621
4ae7d5ce
IM
3622 if (unlikely(se == pse))
3623 return;
3624
5238cdd3 3625 /*
ddcdf6e7 3626 * This is possible from callers such as move_task(), in which we
5238cdd3
PT
3627 * unconditionally call check_preempt_curr() after an enqueue (which
3628 * may have led to a throttle). This both saves work and prevents false
3629 * next-buddy nomination below.
3630 */
3631 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3632 return;
3633
2f36825b 3634 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 3635 set_next_buddy(pse);
2f36825b
VP
3636 next_buddy_marked = 1;
3637 }
57fdc26d 3638
aec0a514
BR
3639 /*
3640 * We can come here with TIF_NEED_RESCHED already set from new task
3641 * wake up path.
5238cdd3
PT
3642 *
3643 * Note: this also catches the edge-case of curr being in a throttled
3644 * group (e.g. via set_curr_task), since update_curr() (in the
3645 * enqueue of curr) will have resulted in resched being set. This
3646 * prevents us from potentially nominating it as a false LAST_BUDDY
3647 * below.
aec0a514
BR
3648 */
3649 if (test_tsk_need_resched(curr))
3650 return;
3651
a2f5c9ab
DH
3652 /* Idle tasks are by definition preempted by non-idle tasks. */
3653 if (unlikely(curr->policy == SCHED_IDLE) &&
3654 likely(p->policy != SCHED_IDLE))
3655 goto preempt;
3656
91c234b4 3657 /*
a2f5c9ab
DH
3658 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3659 * is driven by the tick):
91c234b4 3660 */
8ed92e51 3661 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 3662 return;
bf0f6f24 3663
464b7527 3664 find_matching_se(&se, &pse);
9bbd7374 3665 update_curr(cfs_rq_of(se));
002f128b 3666 BUG_ON(!pse);
2f36825b
VP
3667 if (wakeup_preempt_entity(se, pse) == 1) {
3668 /*
3669 * Bias pick_next to pick the sched entity that is
3670 * triggering this preemption.
3671 */
3672 if (!next_buddy_marked)
3673 set_next_buddy(pse);
3a7e73a2 3674 goto preempt;
2f36825b 3675 }
464b7527 3676
3a7e73a2 3677 return;
a65ac745 3678
3a7e73a2
PZ
3679preempt:
3680 resched_task(curr);
3681 /*
3682 * Only set the backward buddy when the current task is still
3683 * on the rq. This can happen when a wakeup gets interleaved
3684 * with schedule on the ->pre_schedule() or idle_balance()
3685 * point, either of which can drop the rq lock.
3686 *
3687 * Also, during early boot the idle thread is in the fair class;
3688 * for obvious reasons it's a bad idea to schedule back to it.
3689 */
3690 if (unlikely(!se->on_rq || curr == rq->idle))
3691 return;
3692
3693 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3694 set_last_buddy(se);
bf0f6f24
IM
3695}
3696
fb8d4724 3697static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 3698{
8f4d37ec 3699 struct task_struct *p;
bf0f6f24
IM
3700 struct cfs_rq *cfs_rq = &rq->cfs;
3701 struct sched_entity *se;
3702
36ace27e 3703 if (!cfs_rq->nr_running)
bf0f6f24
IM
3704 return NULL;
3705
3706 do {
9948f4b2 3707 se = pick_next_entity(cfs_rq);
f4b6755f 3708 set_next_entity(cfs_rq, se);
bf0f6f24
IM
3709 cfs_rq = group_cfs_rq(se);
3710 } while (cfs_rq);
3711
8f4d37ec 3712 p = task_of(se);
b39e66ea
MG
3713 if (hrtick_enabled(rq))
3714 hrtick_start_fair(rq, p);
8f4d37ec
PZ
3715
3716 return p;
bf0f6f24
IM
3717}
3718
3719/*
3720 * Account for a descheduled task:
3721 */
31ee529c 3722static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
3723{
3724 struct sched_entity *se = &prev->se;
3725 struct cfs_rq *cfs_rq;
3726
3727 for_each_sched_entity(se) {
3728 cfs_rq = cfs_rq_of(se);
ab6cde26 3729 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
3730 }
3731}
3732
ac53db59
RR
3733/*
3734 * sched_yield() is very simple
3735 *
3736 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3737 */
3738static void yield_task_fair(struct rq *rq)
3739{
3740 struct task_struct *curr = rq->curr;
3741 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3742 struct sched_entity *se = &curr->se;
3743
3744 /*
3745 * Are we the only task in the tree?
3746 */
3747 if (unlikely(rq->nr_running == 1))
3748 return;
3749
3750 clear_buddies(cfs_rq, se);
3751
3752 if (curr->policy != SCHED_BATCH) {
3753 update_rq_clock(rq);
3754 /*
3755 * Update run-time statistics of the 'current'.
3756 */
3757 update_curr(cfs_rq);
916671c0
MG
3758 /*
3759 * Tell update_rq_clock() that we've just updated,
3760 * so we don't do microscopic update in schedule()
3761 * and double the fastpath cost.
3762 */
3763 rq->skip_clock_update = 1;
ac53db59
RR
3764 }
3765
3766 set_skip_buddy(se);
3767}
3768
d95f4122
MG
3769static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3770{
3771 struct sched_entity *se = &p->se;
3772
5238cdd3
PT
3773 /* throttled hierarchies are not runnable */
3774 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
3775 return false;
3776
3777 /* Tell the scheduler that we'd really like pse to run next. */
3778 set_next_buddy(se);
3779
d95f4122
MG
3780 yield_task_fair(rq);
3781
3782 return true;
3783}
3784
681f3e68 3785#ifdef CONFIG_SMP
bf0f6f24 3786/**************************************************
e9c84cb8
PZ
3787 * Fair scheduling class load-balancing methods.
3788 *
3789 * BASICS
3790 *
3791 * The purpose of load-balancing is to achieve the same basic fairness the
3792 * per-cpu scheduler provides, namely provide a proportional amount of compute
3793 * time to each task. This is expressed in the following equation:
3794 *
3795 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3796 *
3797 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3798 * W_i,0 is defined as:
3799 *
3800 * W_i,0 = \Sum_j w_i,j (2)
3801 *
3802 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3803 * is derived from the nice value as per prio_to_weight[].
3804 *
3805 * The weight average is an exponential decay average of the instantaneous
3806 * weight:
3807 *
3808 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3809 *
3810 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3811 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3812 * can also include other factors [XXX].
3813 *
3814 * To achieve this balance we define a measure of imbalance which follows
3815 * directly from (1):
3816 *
3817 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3818 *
3819 * We then move tasks around to minimize the imbalance. In the continuous
3820 * function space it is obvious this converges, in the discrete case we get
3821 * a few fun cases generally called infeasible weight scenarios.
3822 *
3823 * [XXX expand on:
3824 * - infeasible weights;
3825 * - local vs global optima in the discrete case. ]
3826 *
3827 *
3828 * SCHED DOMAINS
3829 *
3830 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3831 * for all i,j solution, we create a tree of cpus that follows the hardware
3832 * topology where each level pairs two lower groups (or better). This results
3833 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3834 * tree to only the first of the previous level and we decrease the frequency
3835 * of load-balance at each level inv. proportional to the number of cpus in
3836 * the groups.
3837 *
3838 * This yields:
3839 *
3840 * log_2 n 1 n
3841 * \Sum { --- * --- * 2^i } = O(n) (5)
3842 * i = 0 2^i 2^i
3843 * `- size of each group
3844 * | | `- number of cpus doing load-balance
3845 * | `- freq
3846 * `- sum over all levels
3847 *
3848 * Coupled with a limit on how many tasks we can migrate every balance pass,
3849 * this makes (5) the runtime complexity of the balancer.
3850 *
3851 * An important property here is that each CPU is still (indirectly) connected
3852 * to every other cpu in at most O(log n) steps:
3853 *
3854 * The adjacency matrix of the resulting graph is given by:
3855 *
3856 * log_2 n
3857 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3858 * k = 0
3859 *
3860 * And you'll find that:
3861 *
3862 * A^(log_2 n)_i,j != 0 for all i,j (7)
3863 *
3864 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3865 * The task movement gives a factor of O(m), giving a convergence complexity
3866 * of:
3867 *
3868 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3869 *
3870 *
3871 * WORK CONSERVING
3872 *
3873 * In order to avoid CPUs going idle while there's still work to do, new idle
3874 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3875 * tree itself instead of relying on other CPUs to bring it work.
3876 *
3877 * This adds some complexity to both (5) and (8) but it reduces the total idle
3878 * time.
3879 *
3880 * [XXX more?]
3881 *
3882 *
3883 * CGROUPS
3884 *
3885 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3886 *
3887 * s_k,i
3888 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3889 * S_k
3890 *
3891 * Where
3892 *
3893 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3894 *
3895 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3896 *
3897 * The big problem is S_k: it's a global sum needed to compute a local (W_i)
3898 * property.
3899 *
3900 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3901 * rewrite all of this once again.]
3902 */
bf0f6f24 3903
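/*
 * A minimal user-space sketch of equations (1) and (4) above, with
 * assumed sample weights and powers (plain integers scaled by 1024,
 * not the kernel's internal representation):
 */
#if 0 /* illustrative only, not part of the kernel build */
static unsigned long imbalance(unsigned long w_i, unsigned long p_i,
			       unsigned long w_j, unsigned long p_j)
{
	unsigned long hi  = w_i * 1024 / p_i;	/* W_i/P_i, scaled by 1024 */
	unsigned long lo  = w_j * 1024 / p_j;	/* W_j/P_j, scaled by 1024 */
	unsigned long avg = (hi + lo) / 2;	/* avg(W/P) over both cpus */

	/* imb_i,j = max{avg, W_i/P_i} - min{avg, W_j/P_j}	(4) */
	return (hi > avg ? hi : avg) - (lo < avg ? lo : avg);
}
#endif
/*
 * With W_i == 3072, W_j == 1024 and P_i == P_j == 1024 this yields
 * imb == 2048 (i.e. 2.0): moving one nice-0 task's worth of weight
 * (1024) from i to j brings both ratios to the 2.0 average.
 */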
ed387b78
HS
3904static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3905
ddcdf6e7 3906#define LBF_ALL_PINNED 0x01
367456c7 3907#define LBF_NEED_BREAK 0x02
88b8dac0 3908#define LBF_SOME_PINNED 0x04
ddcdf6e7
PZ
3909
3910struct lb_env {
3911 struct sched_domain *sd;
3912
ddcdf6e7 3913 struct rq *src_rq;
85c1e7da 3914 int src_cpu;
ddcdf6e7
PZ
3915
3916 int dst_cpu;
3917 struct rq *dst_rq;
3918
88b8dac0
SV
3919 struct cpumask *dst_grpmask;
3920 int new_dst_cpu;
ddcdf6e7 3921 enum cpu_idle_type idle;
bd939f45 3922 long imbalance;
b9403130
MW
3923 /* The set of CPUs under consideration for load-balancing */
3924 struct cpumask *cpus;
3925
ddcdf6e7 3926 unsigned int flags;
367456c7
PZ
3927
3928 unsigned int loop;
3929 unsigned int loop_break;
3930 unsigned int loop_max;
ddcdf6e7
PZ
3931};
3932
1e3c88bd 3933/*
ddcdf6e7 3934 * move_task - move a task from one runqueue to another runqueue.
1e3c88bd
PZ
3935 * Both runqueues must be locked.
3936 */
ddcdf6e7 3937static void move_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 3938{
ddcdf6e7
PZ
3939 deactivate_task(env->src_rq, p, 0);
3940 set_task_cpu(p, env->dst_cpu);
3941 activate_task(env->dst_rq, p, 0);
3942 check_preempt_curr(env->dst_rq, p, 0);
1e3c88bd
PZ
3943}
3944
029632fb
PZ
3945/*
3946 * Is this task likely cache-hot:
3947 */
3948static int
3949task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3950{
3951 s64 delta;
3952
3953 if (p->sched_class != &fair_sched_class)
3954 return 0;
3955
3956 if (unlikely(p->policy == SCHED_IDLE))
3957 return 0;
3958
3959 /*
3960 * Buddy candidates are cache hot:
3961 */
3962 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3963 (&p->se == cfs_rq_of(&p->se)->next ||
3964 &p->se == cfs_rq_of(&p->se)->last))
3965 return 1;
3966
3967 if (sysctl_sched_migration_cost == -1)
3968 return 1;
3969 if (sysctl_sched_migration_cost == 0)
3970 return 0;
3971
3972 delta = now - p->se.exec_start;
3973
3974 return delta < (s64)sysctl_sched_migration_cost;
3975}
3976
1e3c88bd
PZ
3977/*
3978 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3979 */
3980static
8e45cb54 3981int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd
PZ
3982{
3983 int tsk_cache_hot = 0;
3984 /*
3985 * We do not migrate tasks that are:
d3198084 3986 * 1) throttled_lb_pair, or
1e3c88bd 3987 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
3988 * 3) running (obviously), or
3989 * 4) are cache-hot on their current CPU.
1e3c88bd 3990 */
d3198084
JK
3991 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3992 return 0;
3993
ddcdf6e7 3994 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
e02e60c1 3995 int cpu;
88b8dac0 3996
41acab88 3997 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
88b8dac0
SV
3998
3999 /*
4000 * Remember if this task can be migrated to any other cpu in
4001 * our sched_group. We may want to revisit it if we couldn't
4002 * meet load balance goals by pulling other tasks on src_cpu.
4003 *
4004 * Also avoid computing new_dst_cpu if we have already computed
4005 * one in current iteration.
4006 */
4007 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
4008 return 0;
4009
e02e60c1
JK
4010 /* Avoid re-selecting dst_cpu via env's cpus */
4011 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4012 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
4013 env->flags |= LBF_SOME_PINNED;
4014 env->new_dst_cpu = cpu;
4015 break;
4016 }
88b8dac0 4017 }
e02e60c1 4018
1e3c88bd
PZ
4019 return 0;
4020 }
88b8dac0
SV
4021
4022 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 4023 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 4024
ddcdf6e7 4025 if (task_running(env->src_rq, p)) {
41acab88 4026 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
4027 return 0;
4028 }
4029
4030 /*
4031 * Aggressive migration if:
4032 * 1) task is cache cold, or
4033 * 2) too many balance attempts have failed.
4034 */
4035
78becc27 4036 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
1e3c88bd 4037 if (!tsk_cache_hot ||
8e45cb54 4038 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
4e2dcb73 4039
1e3c88bd 4040 if (tsk_cache_hot) {
8e45cb54 4041 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
41acab88 4042 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd 4043 }
4e2dcb73 4044
1e3c88bd
PZ
4045 return 1;
4046 }
4047
4e2dcb73
ZH
4048 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4049 return 0;
1e3c88bd
PZ
4050}
4051
897c395f
PZ
4052/*
4053 * move_one_task tries to move exactly one task from busiest to this_rq, as
4054 * part of active balancing operations within "domain".
4055 * Returns 1 if successful and 0 otherwise.
4056 *
4057 * Called with both runqueues locked.
4058 */
8e45cb54 4059static int move_one_task(struct lb_env *env)
897c395f
PZ
4060{
4061 struct task_struct *p, *n;
897c395f 4062
367456c7 4063 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
4064 if (!can_migrate_task(p, env))
4065 continue;
897c395f 4066
367456c7
PZ
4067 move_task(p, env);
4068 /*
4069 * Right now, this is only the second place move_task()
4070 * is called, so we can safely collect move_task()
4071 * stats here rather than inside move_task().
4072 */
4073 schedstat_inc(env->sd, lb_gained[env->idle]);
4074 return 1;
897c395f 4075 }
897c395f
PZ
4076 return 0;
4077}
4078
367456c7
PZ
4079static unsigned long task_h_load(struct task_struct *p);
4080
eb95308e
PZ
4081static const unsigned int sched_nr_migrate_break = 32;
4082
5d6523eb 4083/*
bd939f45 4084 * move_tasks tries to move up to imbalance weighted load from busiest to
5d6523eb
PZ
4085 * this_rq, as part of a balancing operation within domain "sd".
4086 * Returns 1 if successful and 0 otherwise.
4087 *
4088 * Called with both runqueues locked.
4089 */
4090static int move_tasks(struct lb_env *env)
1e3c88bd 4091{
5d6523eb
PZ
4092 struct list_head *tasks = &env->src_rq->cfs_tasks;
4093 struct task_struct *p;
367456c7
PZ
4094 unsigned long load;
4095 int pulled = 0;
1e3c88bd 4096
bd939f45 4097 if (env->imbalance <= 0)
5d6523eb 4098 return 0;
1e3c88bd 4099
5d6523eb
PZ
4100 while (!list_empty(tasks)) {
4101 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 4102
367456c7
PZ
4103 env->loop++;
4104 /* We've more or less seen every task there is, call it quits */
5d6523eb 4105 if (env->loop > env->loop_max)
367456c7 4106 break;
5d6523eb
PZ
4107
4108 /* take a breather every nr_migrate tasks */
367456c7 4109 if (env->loop > env->loop_break) {
eb95308e 4110 env->loop_break += sched_nr_migrate_break;
8e45cb54 4111 env->flags |= LBF_NEED_BREAK;
ee00e66f 4112 break;
a195f004 4113 }
1e3c88bd 4114
d3198084 4115 if (!can_migrate_task(p, env))
367456c7
PZ
4116 goto next;
4117
4118 load = task_h_load(p);
5d6523eb 4119
eb95308e 4120 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
4121 goto next;
4122
bd939f45 4123 if ((load / 2) > env->imbalance)
367456c7 4124 goto next;
1e3c88bd 4125
ddcdf6e7 4126 move_task(p, env);
ee00e66f 4127 pulled++;
bd939f45 4128 env->imbalance -= load;
1e3c88bd
PZ
4129
4130#ifdef CONFIG_PREEMPT
ee00e66f
PZ
4131 /*
4132 * NEWIDLE balancing is a source of latency, so preemptible
4133 * kernels will stop after the first task is pulled to minimize
4134 * the critical section.
4135 */
5d6523eb 4136 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 4137 break;
1e3c88bd
PZ
4138#endif
4139
ee00e66f
PZ
4140 /*
4141 * We only want to steal up to the prescribed amount of
4142 * weighted load.
4143 */
bd939f45 4144 if (env->imbalance <= 0)
ee00e66f 4145 break;
367456c7
PZ
4146
4147 continue;
4148next:
5d6523eb 4149 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 4150 }
5d6523eb 4151
1e3c88bd 4152 /*
ddcdf6e7
PZ
4153 * Right now, this is one of only two places move_task() is called,
4154 * so we can safely collect move_task() stats here rather than
4155 * inside move_task().
1e3c88bd 4156 */
8e45cb54 4157 schedstat_add(env->sd, lb_gained[env->idle], pulled);
1e3c88bd 4158
5d6523eb 4159 return pulled;
1e3c88bd
PZ
4160}
4161
230059de 4162#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
4163/*
4164 * update tg->load_weight by folding this cpu's load_avg
4165 */
48a16753 4166static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
9e3081ca 4167{
48a16753
PT
4168 struct sched_entity *se = tg->se[cpu];
4169 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
9e3081ca 4170
48a16753
PT
4171 /* throttled entities do not contribute to load */
4172 if (throttled_hierarchy(cfs_rq))
4173 return;
9e3081ca 4174
aff3e498 4175 update_cfs_rq_blocked_load(cfs_rq, 1);
9e3081ca 4176
82958366
PT
4177 if (se) {
4178 update_entity_load_avg(se, 1);
4179 /*
4180 * We pivot on our runnable average having decayed to zero for
4181 * list removal. This generally implies that all our children
4182 * have also been removed (modulo rounding error or bandwidth
4183 * control); however, such cases are rare and we can fix these
4184 * at enqueue.
4185 *
4186 * TODO: fix up out-of-order children on enqueue.
4187 */
4188 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4189 list_del_leaf_cfs_rq(cfs_rq);
4190 } else {
48a16753 4191 struct rq *rq = rq_of(cfs_rq);
82958366
PT
4192 update_rq_runnable_avg(rq, rq->nr_running);
4193 }
9e3081ca
PZ
4194}
4195
48a16753 4196static void update_blocked_averages(int cpu)
9e3081ca 4197{
9e3081ca 4198 struct rq *rq = cpu_rq(cpu);
48a16753
PT
4199 struct cfs_rq *cfs_rq;
4200 unsigned long flags;
9e3081ca 4201
48a16753
PT
4202 raw_spin_lock_irqsave(&rq->lock, flags);
4203 update_rq_clock(rq);
9763b67f
PZ
4204 /*
4205 * Iterates the task_group tree in a bottom up fashion, see
4206 * list_add_leaf_cfs_rq() for details.
4207 */
64660c86 4208 for_each_leaf_cfs_rq(rq, cfs_rq) {
48a16753
PT
4209 /*
4210 * Note: We may want to consider periodically releasing
4211 * rq->lock around these updates so that creating many task
4212 * groups does not result in continually extending hold time.
4213 */
4214 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
64660c86 4215 }
48a16753
PT
4216
4217 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
4218}
4219
9763b67f 4220/*
68520796 4221 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
9763b67f
PZ
4222 * This needs to be done in a top-down fashion because the load of a child
4223 * group is a fraction of its parent's load.
4224 */
68520796 4225static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 4226{
68520796
VD
4227 struct rq *rq = rq_of(cfs_rq);
4228 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 4229 unsigned long now = jiffies;
68520796 4230 unsigned long load;
a35b6466 4231
68520796 4232 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
4233 return;
4234
68520796
VD
4235 cfs_rq->h_load_next = NULL;
4236 for_each_sched_entity(se) {
4237 cfs_rq = cfs_rq_of(se);
4238 cfs_rq->h_load_next = se;
4239 if (cfs_rq->last_h_load_update == now)
4240 break;
4241 }
a35b6466 4242
68520796
VD
4243 if (!se) {
4244 cfs_rq->h_load = rq->avg.load_avg_contrib;
4245 cfs_rq->last_h_load_update = now;
4246 }
4247
4248 while ((se = cfs_rq->h_load_next) != NULL) {
4249 load = cfs_rq->h_load;
4250 load = div64_ul(load * se->avg.load_avg_contrib,
4251 cfs_rq->runnable_load_avg + 1);
4252 cfs_rq = group_cfs_rq(se);
4253 cfs_rq->h_load = load;
4254 cfs_rq->last_h_load_update = now;
4255 }
9763b67f
PZ
4256}
4257
367456c7 4258static unsigned long task_h_load(struct task_struct *p)
230059de 4259{
367456c7 4260 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 4261
68520796 4262 update_cfs_rq_h_load(cfs_rq);
a003a25b
AS
4263 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4264 cfs_rq->runnable_load_avg + 1);
230059de
PZ
4265}
4266#else
48a16753 4267static inline void update_blocked_averages(int cpu)
9e3081ca
PZ
4268{
4269}
4270
367456c7 4271static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 4272{
a003a25b 4273 return p->se.avg.load_avg_contrib;
1e3c88bd 4274}
230059de 4275#endif
1e3c88bd 4276
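/*
 * Worked example of the h_load propagation above (assumed numbers):
 * a task with load_avg_contrib == 512 in a group cfs_rq whose
 * hierarchical load h_load == 2048 and runnable_load_avg == 1023:
 *
 *   task_h_load = 512 * 2048 / (1023 + 1) = 1024
 *
 * i.e. the task owns half of the group's runnable load, so it is
 * credited with half of the group's hierarchical load.
 */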
1e3c88bd
PZ
4277/********** Helpers for find_busiest_group ************************/
4278/*
4279 * sd_lb_stats - Structure to store the statistics of a sched_domain
4280 * during load balancing.
4281 */
4282struct sd_lb_stats {
4283 struct sched_group *busiest; /* Busiest group in this sd */
4284 struct sched_group *this; /* Local group in this sd */
4285 unsigned long total_load; /* Total load of all groups in sd */
4286 unsigned long total_pwr; /* Total power of all groups in sd */
4287 unsigned long avg_load; /* Average load across all groups in sd */
4288
4289 /** Statistics of this group */
4290 unsigned long this_load;
4291 unsigned long this_load_per_task;
4292 unsigned long this_nr_running;
fab47622 4293 unsigned long this_has_capacity;
aae6d3dd 4294 unsigned int this_idle_cpus;
1e3c88bd
PZ
4295
4296 /* Statistics of the busiest group */
aae6d3dd 4297 unsigned int busiest_idle_cpus;
1e3c88bd
PZ
4298 unsigned long max_load;
4299 unsigned long busiest_load_per_task;
4300 unsigned long busiest_nr_running;
dd5feea1 4301 unsigned long busiest_group_capacity;
fab47622 4302 unsigned long busiest_has_capacity;
aae6d3dd 4303 unsigned int busiest_group_weight;
1e3c88bd
PZ
4304
4305 int group_imb; /* Is there imbalance in this sd */
1e3c88bd
PZ
4306};
4307
4308/*
4309 * sg_lb_stats - stats of a sched_group required for load_balancing
4310 */
4311struct sg_lb_stats {
4312 unsigned long avg_load; /*Avg load across the CPUs of the group */
4313 unsigned long group_load; /* Total load over the CPUs of the group */
4314 unsigned long sum_nr_running; /* Nr tasks running in the group */
4315 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4316 unsigned long group_capacity;
aae6d3dd
SS
4317 unsigned long idle_cpus;
4318 unsigned long group_weight;
1e3c88bd 4319 int group_imb; /* Is there an imbalance in the group ? */
fab47622 4320 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
4321};
4322
1e3c88bd
PZ
4323/**
4324 * get_sd_load_idx - Obtain the load index for a given sched domain.
4325 * @sd: The sched_domain whose load_idx is to be obtained.
4326 * @idle: The idle status of the CPU whose sd load_idx is obtained.
4327 */
4328static inline int get_sd_load_idx(struct sched_domain *sd,
4329 enum cpu_idle_type idle)
4330{
4331 int load_idx;
4332
4333 switch (idle) {
4334 case CPU_NOT_IDLE:
4335 load_idx = sd->busy_idx;
4336 break;
4337
4338 case CPU_NEWLY_IDLE:
4339 load_idx = sd->newidle_idx;
4340 break;
4341 default:
4342 load_idx = sd->idle_idx;
4343 break;
4344 }
4345
4346 return load_idx;
4347}
4348
15f803c9 4349static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
1e3c88bd 4350{
1399fa78 4351 return SCHED_POWER_SCALE;
1e3c88bd
PZ
4352}
4353
4354unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4355{
4356 return default_scale_freq_power(sd, cpu);
4357}
4358
15f803c9 4359static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
1e3c88bd 4360{
669c55e9 4361 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
4362 unsigned long smt_gain = sd->smt_gain;
4363
4364 smt_gain /= weight;
4365
4366 return smt_gain;
4367}
4368
4369unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4370{
4371 return default_scale_smt_power(sd, cpu);
4372}
4373
15f803c9 4374static unsigned long scale_rt_power(int cpu)
1e3c88bd
PZ
4375{
4376 struct rq *rq = cpu_rq(cpu);
b654f7de 4377 u64 total, available, age_stamp, avg;
1e3c88bd 4378
b654f7de
PZ
4379 /*
4380 * Since we're reading these variables without serialization make sure
4381 * we read them once before doing sanity checks on them.
4382 */
4383 age_stamp = ACCESS_ONCE(rq->age_stamp);
4384 avg = ACCESS_ONCE(rq->rt_avg);
4385
78becc27 4386 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
aa483808 4387
b654f7de 4388 if (unlikely(total < avg)) {
aa483808
VP
4389 /* Ensures that power won't end up being negative */
4390 available = 0;
4391 } else {
b654f7de 4392 available = total - avg;
aa483808 4393 }
1e3c88bd 4394
1399fa78
NR
4395 if (unlikely((s64)total < SCHED_POWER_SCALE))
4396 total = SCHED_POWER_SCALE;
1e3c88bd 4397
1399fa78 4398 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4399
4400 return div_u64(available, total);
4401}
4402
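/*
 * Numeric sketch of the scaling above (assumed values): with
 * total == 1000ms of averaged wall time and avg == 250ms spent in
 * RT/IRQ, available == 750ms and the result is roughly
 * 0.75 * SCHED_POWER_SCALE ~= 768, i.e. about three quarters of the
 * CPU is left for CFS tasks.
 */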
4403static void update_cpu_power(struct sched_domain *sd, int cpu)
4404{
669c55e9 4405 unsigned long weight = sd->span_weight;
1399fa78 4406 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
4407 struct sched_group *sdg = sd->groups;
4408
1e3c88bd
PZ
4409 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4410 if (sched_feat(ARCH_POWER))
4411 power *= arch_scale_smt_power(sd, cpu);
4412 else
4413 power *= default_scale_smt_power(sd, cpu);
4414
1399fa78 4415 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4416 }
4417
9c3f75cb 4418 sdg->sgp->power_orig = power;
9d5efe05
SV
4419
4420 if (sched_feat(ARCH_POWER))
4421 power *= arch_scale_freq_power(sd, cpu);
4422 else
4423 power *= default_scale_freq_power(sd, cpu);
4424
1399fa78 4425 power >>= SCHED_POWER_SHIFT;
9d5efe05 4426
1e3c88bd 4427 power *= scale_rt_power(cpu);
1399fa78 4428 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4429
4430 if (!power)
4431 power = 1;
4432
e51fd5e2 4433 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 4434 sdg->sgp->power = power;
1e3c88bd
PZ
4435}
4436
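/*
 * Putting the stages above together (illustrative; assumes the default
 * smt_gain of 1178 on a 2-thread core and the scale_rt_power() result
 * from the sketch above):
 *
 *   power = 1024 * (1178 / 2) >> SCHED_POWER_SHIFT  = 589  (SMT)
 *   power = 589 * 768 >> SCHED_POWER_SHIFT         ~= 441  (RT/IRQ)
 *
 * so one busy SMT sibling counts as well under half a full CPU.
 */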
029632fb 4437void update_group_power(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
4438{
4439 struct sched_domain *child = sd->child;
4440 struct sched_group *group, *sdg = sd->groups;
4441 unsigned long power;
4ec4412e
VG
4442 unsigned long interval;
4443
4444 interval = msecs_to_jiffies(sd->balance_interval);
4445 interval = clamp(interval, 1UL, max_load_balance_interval);
4446 sdg->sgp->next_update = jiffies + interval;
1e3c88bd
PZ
4447
4448 if (!child) {
4449 update_cpu_power(sd, cpu);
4450 return;
4451 }
4452
4453 power = 0;
4454
74a5ce20
PZ
4455 if (child->flags & SD_OVERLAP) {
4456 /*
4457 * SD_OVERLAP domains cannot assume that child groups
4458 * span the current group.
4459 */
4460
4461 for_each_cpu(cpu, sched_group_cpus(sdg))
4462 power += power_of(cpu);
4463 } else {
4464 /*
4465 * !SD_OVERLAP domains can assume that child groups
4466 * span the current group.
4467 */
4468
4469 group = child->groups;
4470 do {
4471 power += group->sgp->power;
4472 group = group->next;
4473 } while (group != child->groups);
4474 }
1e3c88bd 4475
c3decf0d 4476 sdg->sgp->power_orig = sdg->sgp->power = power;
1e3c88bd
PZ
4477}
4478
9d5efe05
SV
4479/*
4480 * Try and fix up capacity for tiny siblings, this is needed when
4481 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4482 * which on its own isn't powerful enough.
4483 *
4484 * See update_sd_pick_busiest() and check_asym_packing().
4485 */
4486static inline int
4487fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4488{
4489 /*
1399fa78 4490 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 4491 */
a6c75f2f 4492 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
4493 return 0;
4494
4495 /*
4496 * If ~90% of the cpu_power is still there, we're good.
4497 */
9c3f75cb 4498 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
4499 return 1;
4500
4501 return 0;
4502}
4503
1e3c88bd
PZ
4504/**
4505 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 4506 * @env: The load balancing environment.
1e3c88bd 4507 * @group: sched_group whose statistics are to be updated.
1e3c88bd 4508 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 4509 * @local_group: Does group contain this_cpu.
1e3c88bd
PZ
4510 * @balance: Should we balance.
4511 * @sgs: variable to hold the statistics for this group.
4512 */
bd939f45
PZ
4513static inline void update_sg_lb_stats(struct lb_env *env,
4514 struct sched_group *group, int load_idx,
b9403130 4515 int local_group, int *balance, struct sg_lb_stats *sgs)
1e3c88bd 4516{
e44bc5c5
PZ
4517 unsigned long nr_running, max_nr_running, min_nr_running;
4518 unsigned long load, max_cpu_load, min_cpu_load;
04f733b4 4519 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 4520 unsigned long avg_load_per_task = 0;
bd939f45 4521 int i;
1e3c88bd 4522
871e35bc 4523 if (local_group)
c1174876 4524 balance_cpu = group_balance_cpu(group);
1e3c88bd
PZ
4525
4526 /* Tally up the load of all CPUs in the group */
1e3c88bd
PZ
4527 max_cpu_load = 0;
4528 min_cpu_load = ~0UL;
2582f0eb 4529 max_nr_running = 0;
e44bc5c5 4530 min_nr_running = ~0UL;
1e3c88bd 4531
b9403130 4532 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
4533 struct rq *rq = cpu_rq(i);
4534
e44bc5c5
PZ
4535 nr_running = rq->nr_running;
4536
1e3c88bd
PZ
4537 /* Bias balancing toward cpus of our domain */
4538 if (local_group) {
c1174876
PZ
4539 if (idle_cpu(i) && !first_idle_cpu &&
4540 cpumask_test_cpu(i, sched_group_mask(group))) {
04f733b4 4541 first_idle_cpu = 1;
1e3c88bd
PZ
4542 balance_cpu = i;
4543 }
04f733b4
PZ
4544
4545 load = target_load(i, load_idx);
1e3c88bd
PZ
4546 } else {
4547 load = source_load(i, load_idx);
e44bc5c5 4548 if (load > max_cpu_load)
1e3c88bd
PZ
4549 max_cpu_load = load;
4550 if (min_cpu_load > load)
4551 min_cpu_load = load;
e44bc5c5
PZ
4552
4553 if (nr_running > max_nr_running)
4554 max_nr_running = nr_running;
4555 if (min_nr_running > nr_running)
4556 min_nr_running = nr_running;
1e3c88bd
PZ
4557 }
4558
4559 sgs->group_load += load;
e44bc5c5 4560 sgs->sum_nr_running += nr_running;
1e3c88bd 4561 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
4562 if (idle_cpu(i))
4563 sgs->idle_cpus++;
1e3c88bd
PZ
4564 }
4565
4566 /*
4567 * The first idle cpu or the first cpu (busiest) in this sched group
4568 * is eligible for doing load balancing at this and above
4569 * domains. In the newly idle case, we will allow all the cpu's
4570 * to do the newly idle load balance.
4571 */
4ec4412e 4572 if (local_group) {
bd939f45 4573 if (env->idle != CPU_NEWLY_IDLE) {
04f733b4 4574 if (balance_cpu != env->dst_cpu) {
4ec4412e
VG
4575 *balance = 0;
4576 return;
4577 }
bd939f45 4578 update_group_power(env->sd, env->dst_cpu);
4ec4412e 4579 } else if (time_after_eq(jiffies, group->sgp->next_update))
bd939f45 4580 update_group_power(env->sd, env->dst_cpu);
1e3c88bd
PZ
4581 }
4582
4583 /* Adjust by relative CPU power of the group */
9c3f75cb 4584 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
1e3c88bd 4585
1e3c88bd
PZ
4586 /*
4587 * Consider the group unbalanced when the imbalance is larger
866ab43e 4588 * than the average weight of a task.
1e3c88bd
PZ
4589 *
4590 * APZ: with cgroup the avg task weight can vary wildly and
4591 * might not be a suitable number - should we keep a
4592 * normalized nr_running number somewhere that negates
4593 * the hierarchy?
4594 */
dd5feea1
SS
4595 if (sgs->sum_nr_running)
4596 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 4597
e44bc5c5
PZ
4598 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4599 (max_nr_running - min_nr_running) > 1)
1e3c88bd
PZ
4600 sgs->group_imb = 1;
4601
9c3f75cb 4602 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
1399fa78 4603 SCHED_POWER_SCALE);
9d5efe05 4604 if (!sgs->group_capacity)
bd939f45 4605 sgs->group_capacity = fix_small_capacity(env->sd, group);
aae6d3dd 4606 sgs->group_weight = group->group_weight;
fab47622
NR
4607
4608 if (sgs->group_capacity > sgs->sum_nr_running)
4609 sgs->group_has_capacity = 1;
1e3c88bd
PZ
4610}
4611
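/*
 * Example of the group_imb test above (assumed numbers): a group with
 * per-cpu loads {3072, 1024} and one task on each cpu has
 * avg_load_per_task == 2048; the load spread (3072 - 1024) >= 2048
 * holds, but (max_nr_running - min_nr_running) == 0 is not > 1, so
 * the group is not marked imbalanced -- both a large load spread and
 * a nr_running spread greater than one are required.
 */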
532cb4c4
MN
4612/**
4613 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 4614 * @env: The load balancing environment.
532cb4c4
MN
4615 * @sds: sched_domain statistics
4616 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 4617 * @sgs: sched_group statistics
532cb4c4
MN
4618 *
4619 * Determine if @sg is a busier group than the previously selected
4620 * busiest group.
4621 */
bd939f45 4622static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
4623 struct sd_lb_stats *sds,
4624 struct sched_group *sg,
bd939f45 4625 struct sg_lb_stats *sgs)
532cb4c4
MN
4626{
4627 if (sgs->avg_load <= sds->max_load)
4628 return false;
4629
4630 if (sgs->sum_nr_running > sgs->group_capacity)
4631 return true;
4632
4633 if (sgs->group_imb)
4634 return true;
4635
4636 /*
4637 * ASYM_PACKING needs to move all the work to the lowest
4638 * numbered CPUs in the group, therefore mark all groups
4639 * higher than ourself as busy.
4640 */
bd939f45
PZ
4641 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4642 env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
4643 if (!sds->busiest)
4644 return true;
4645
4646 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4647 return true;
4648 }
4649
4650 return false;
4651}
4652
1e3c88bd 4653/**
461819ac 4654 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 4655 * @env: The load balancing environment.
1e3c88bd
PZ
4656 * @balance: Should we balance.
4657 * @sds: variable to hold the statistics for this sched_domain.
4658 */
bd939f45 4659static inline void update_sd_lb_stats(struct lb_env *env,
b9403130 4660 int *balance, struct sd_lb_stats *sds)
1e3c88bd 4661{
bd939f45
PZ
4662 struct sched_domain *child = env->sd->child;
4663 struct sched_group *sg = env->sd->groups;
1e3c88bd
PZ
4664 struct sg_lb_stats sgs;
4665 int load_idx, prefer_sibling = 0;
4666
4667 if (child && child->flags & SD_PREFER_SIBLING)
4668 prefer_sibling = 1;
4669
bd939f45 4670 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
4671
4672 do {
4673 int local_group;
4674
bd939f45 4675 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
1e3c88bd 4676 memset(&sgs, 0, sizeof(sgs));
b9403130 4677 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
1e3c88bd 4678
8f190fb3 4679 if (local_group && !(*balance))
1e3c88bd
PZ
4680 return;
4681
4682 sds->total_load += sgs.group_load;
9c3f75cb 4683 sds->total_pwr += sg->sgp->power;
1e3c88bd
PZ
4684
4685 /*
4686 * In case the child domain prefers tasks go to siblings
532cb4c4 4687 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
4688 * and move all the excess tasks away. We lower the capacity
4689 * of a group only if the local group has the capacity to fit
4690 * these excess tasks, i.e. nr_running < group_capacity. The
4691 * extra check prevents the case where you always pull from the
4692 * heaviest group when it is already under-utilized (possible
4693 * when a large weight task outweighs the rest of the tasks on the system).
1e3c88bd 4694 */
75dd321d 4695 if (prefer_sibling && !local_group && sds->this_has_capacity)
1e3c88bd
PZ
4696 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4697
4698 if (local_group) {
4699 sds->this_load = sgs.avg_load;
532cb4c4 4700 sds->this = sg;
1e3c88bd
PZ
4701 sds->this_nr_running = sgs.sum_nr_running;
4702 sds->this_load_per_task = sgs.sum_weighted_load;
fab47622 4703 sds->this_has_capacity = sgs.group_has_capacity;
aae6d3dd 4704 sds->this_idle_cpus = sgs.idle_cpus;
bd939f45 4705 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
1e3c88bd 4706 sds->max_load = sgs.avg_load;
532cb4c4 4707 sds->busiest = sg;
1e3c88bd 4708 sds->busiest_nr_running = sgs.sum_nr_running;
aae6d3dd 4709 sds->busiest_idle_cpus = sgs.idle_cpus;
dd5feea1 4710 sds->busiest_group_capacity = sgs.group_capacity;
1e3c88bd 4711 sds->busiest_load_per_task = sgs.sum_weighted_load;
fab47622 4712 sds->busiest_has_capacity = sgs.group_has_capacity;
aae6d3dd 4713 sds->busiest_group_weight = sgs.group_weight;
1e3c88bd
PZ
4714 sds->group_imb = sgs.group_imb;
4715 }
4716
532cb4c4 4717 sg = sg->next;
bd939f45 4718 } while (sg != env->sd->groups);
532cb4c4
MN
4719}
4720
532cb4c4
MN
4721/**
4722 * check_asym_packing - Check to see if the group is packed into the
4723 * sched domain.
4724 *
4725 * This is primarily intended to be used at the sibling level. Some
4726 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4727 * case of POWER7, it can move to lower SMT modes only when higher
4728 * threads are idle. When in lower SMT modes, the threads will
4729 * perform better since they share fewer core resources. Hence when we
4730 * have idle threads, we want them to be the higher ones.
4731 *
4732 * This packing function is run on idle threads. It checks to see if
4733 * the busiest CPU in this domain (core in the P7 case) has a higher
4734 * CPU number than the packing function is being run on. Here we are
4735 * assuming a lower CPU number is equivalent to a lower SMT thread
4736 * number.
4737 *
b6b12294
MN
4738 * Returns 1 when packing is required and a task should be moved to
4739 * this CPU. The amount of the imbalance is returned in *imbalance.
4740 *
cd96891d 4741 * @env: The load balancing environment.
532cb4c4 4742 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 4743 */
bd939f45 4744static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
4745{
4746 int busiest_cpu;
4747
bd939f45 4748 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
4749 return 0;
4750
4751 if (!sds->busiest)
4752 return 0;
4753
4754 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 4755 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
4756 return 0;
4757
bd939f45
PZ
4758 env->imbalance = DIV_ROUND_CLOSEST(
4759 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4760
532cb4c4 4761 return 1;
1e3c88bd
PZ
4762}
4763
4764/**
4765 * fix_small_imbalance - Calculate the minor imbalance that exists
4766 * amongst the groups of a sched_domain, during
4767 * load balancing.
cd96891d 4768 * @env: The load balancing environment.
1e3c88bd 4769 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4770 */
bd939f45
PZ
4771static inline
4772void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd
PZ
4773{
4774 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4775 unsigned int imbn = 2;
dd5feea1 4776 unsigned long scaled_busy_load_per_task;
1e3c88bd
PZ
4777
4778 if (sds->this_nr_running) {
4779 sds->this_load_per_task /= sds->this_nr_running;
4780 if (sds->busiest_load_per_task >
4781 sds->this_load_per_task)
4782 imbn = 1;
bd939f45 4783 } else {
1e3c88bd 4784 sds->this_load_per_task =
bd939f45
PZ
4785 cpu_avg_load_per_task(env->dst_cpu);
4786 }
1e3c88bd 4787
dd5feea1 4788 scaled_busy_load_per_task = sds->busiest_load_per_task
1399fa78 4789 * SCHED_POWER_SCALE;
9c3f75cb 4790 scaled_busy_load_per_task /= sds->busiest->sgp->power;
dd5feea1
SS
4791
4792 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4793 (scaled_busy_load_per_task * imbn)) {
bd939f45 4794 env->imbalance = sds->busiest_load_per_task;
1e3c88bd
PZ
4795 return;
4796 }
4797
4798 /*
4799 * OK, we don't have enough imbalance to justify moving tasks,
4800 * however we may be able to increase total CPU power used by
4801 * moving them.
4802 */
4803
9c3f75cb 4804 pwr_now += sds->busiest->sgp->power *
1e3c88bd 4805 min(sds->busiest_load_per_task, sds->max_load);
9c3f75cb 4806 pwr_now += sds->this->sgp->power *
1e3c88bd 4807 min(sds->this_load_per_task, sds->this_load);
1399fa78 4808 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4809
4810 /* Amount of load we'd subtract */
1399fa78 4811 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb 4812 sds->busiest->sgp->power;
1e3c88bd 4813 if (sds->max_load > tmp)
9c3f75cb 4814 pwr_move += sds->busiest->sgp->power *
1e3c88bd
PZ
4815 min(sds->busiest_load_per_task, sds->max_load - tmp);
4816
4817 /* Amount of load we'd add */
9c3f75cb 4818 if (sds->max_load * sds->busiest->sgp->power <
1399fa78 4819 sds->busiest_load_per_task * SCHED_POWER_SCALE)
9c3f75cb
PZ
4820 tmp = (sds->max_load * sds->busiest->sgp->power) /
4821 sds->this->sgp->power;
1e3c88bd 4822 else
1399fa78 4823 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb
PZ
4824 sds->this->sgp->power;
4825 pwr_move += sds->this->sgp->power *
1e3c88bd 4826 min(sds->this_load_per_task, sds->this_load + tmp);
1399fa78 4827 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4828
4829 /* Move if we gain throughput */
4830 if (pwr_move > pwr_now)
bd939f45 4831 env->imbalance = sds->busiest_load_per_task;
1e3c88bd
PZ
4832}
4833
4834/**
4835 * calculate_imbalance - Calculate the amount of imbalance present within the
4836 * groups of a given sched_domain during load balance.
bd939f45 4837 * @env: load balance environment
1e3c88bd 4838 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4839 */
bd939f45 4840static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 4841{
dd5feea1
SS
4842 unsigned long max_pull, load_above_capacity = ~0UL;
4843
4844 sds->busiest_load_per_task /= sds->busiest_nr_running;
4845 if (sds->group_imb) {
4846 sds->busiest_load_per_task =
4847 min(sds->busiest_load_per_task, sds->avg_load);
4848 }
4849
1e3c88bd
PZ
4850 /*
4851 * In the presence of smp nice balancing, certain scenarios can have
4852 * max load less than avg load (as we skip the groups at or below
4853 * their cpu_power while calculating max_load).
4854 */
4855 if (sds->max_load < sds->avg_load) {
bd939f45
PZ
4856 env->imbalance = 0;
4857 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
4858 }
4859
dd5feea1
SS
4860 if (!sds->group_imb) {
4861 /*
4862 * Don't want to pull so many tasks that a group would go idle.
4863 */
4864 load_above_capacity = (sds->busiest_nr_running -
4865 sds->busiest_group_capacity);
4866
1399fa78 4867 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
dd5feea1 4868
9c3f75cb 4869 load_above_capacity /= sds->busiest->sgp->power;
dd5feea1
SS
4870 }
4871
4872 /*
4873 * We're trying to get all the cpus to the average_load, so we don't
4874 * want to push ourselves above the average load, nor do we wish to
4875 * reduce the max loaded cpu below the average load. At the same time,
4876 * we also don't want to reduce the group load below the group capacity
4877 * (so that we can implement power-savings policies etc). Thus we look
4878 * for the minimum possible imbalance.
4879 * Be careful of negative numbers as they'll appear as very large values
4880 * with unsigned longs.
4881 */
4882 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
4883
4884 /* How much load to actually move to equalise the imbalance */
bd939f45 4885 env->imbalance = min(max_pull * sds->busiest->sgp->power,
9c3f75cb 4886 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
1399fa78 4887 / SCHED_POWER_SCALE;
1e3c88bd
PZ
4888
4889 /*
4890 * if *imbalance is less than the average load per runnable task
25985edc 4891 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
4892 * a think about bumping its value to force at least one task to be
4893 * moved
4894 */
bd939f45
PZ
4895 if (env->imbalance < sds->busiest_load_per_task)
4896 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
4897
4898}
fab47622 4899
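/*
 * Worked example for the min() above (assumed loads in
 * SCHED_POWER_SCALE units, all group powers == 1024, and
 * load_above_capacity large enough not to clamp):
 *
 *   max_load == 3072, avg_load == 2048, this_load == 1536
 *   max_pull  = min(3072 - 2048, load_above_capacity) = 1024
 *   imbalance = min(1024 * 1024, (2048 - 1536) * 1024) / 1024 = 512
 *
 * we pull only enough (512) to lift the local group to the average,
 * not the full 1024 the busiest group sits above it.
 */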
1e3c88bd
PZ
4900/******* find_busiest_group() helpers end here *********************/
4901
4902/**
4903 * find_busiest_group - Returns the busiest group within the sched_domain
4904 * if there is an imbalance. If there isn't an imbalance, and
4905 * the user has opted for power-savings, it returns a group whose
4906 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4907 * such a group exists.
4908 *
4909 * Also calculates the amount of weighted load which should be moved
4910 * to restore balance.
4911 *
cd96891d 4912 * @env: The load balancing environment.
1e3c88bd
PZ
4913 * @balance: Pointer to a variable indicating if this_cpu
4914 * is the appropriate cpu to perform load balancing at this_level.
4915 *
4916 * Returns: - the busiest group if imbalance exists.
4917 * - If no imbalance and user has opted for power-savings balance,
4918 * return the least loaded group whose CPUs can be
4919 * put to idle by rebalancing its tasks onto our group.
4920 */
4921static struct sched_group *
b9403130 4922find_busiest_group(struct lb_env *env, int *balance)
1e3c88bd
PZ
4923{
4924 struct sd_lb_stats sds;
4925
4926 memset(&sds, 0, sizeof(sds));
4927
4928 /*
4929 * Compute the various statistics relevant for load balancing at
4930 * this level.
4931 */
b9403130 4932 update_sd_lb_stats(env, balance, &sds);
1e3c88bd 4933
cc57aa8f
PZ
4934 /*
4935 * this_cpu is not the appropriate cpu to perform load balancing at
4936 * this level.
1e3c88bd 4937 */
8f190fb3 4938 if (!(*balance))
1e3c88bd
PZ
4939 goto ret;
4940
bd939f45
PZ
4941 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4942 check_asym_packing(env, &sds))
532cb4c4
MN
4943 return sds.busiest;
4944
cc57aa8f 4945 /* There is no busy sibling group to pull tasks from */
1e3c88bd
PZ
4946 if (!sds.busiest || sds.busiest_nr_running == 0)
4947 goto out_balanced;
4948
1399fa78 4949 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 4950
866ab43e
PZ
4951 /*
4952 * If the busiest group is imbalanced the below checks don't
4953 * work because they assume all things are equal, which typically
4954 * isn't true due to cpus_allowed constraints and the like.
4955 */
4956 if (sds.group_imb)
4957 goto force_balance;
4958
cc57aa8f 4959 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
bd939f45 4960 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
fab47622
NR
4961 !sds.busiest_has_capacity)
4962 goto force_balance;
4963
cc57aa8f
PZ
4964 /*
4965 * If the local group is more busy than the selected busiest group
4966 * don't try and pull any tasks.
4967 */
1e3c88bd
PZ
4968 if (sds.this_load >= sds.max_load)
4969 goto out_balanced;
4970
cc57aa8f
PZ
4971 /*
4972 * Don't pull any tasks if this group is already above the domain
4973 * average load.
4974 */
1e3c88bd
PZ
4975 if (sds.this_load >= sds.avg_load)
4976 goto out_balanced;
4977
bd939f45 4978 if (env->idle == CPU_IDLE) {
aae6d3dd
SS
4979 /*
4980 * This cpu is idle. If the busiest group doesn't have
4981 * more tasks than the number of available cpu's and
4982 * there is no imbalance between this and the busiest
4983 * group wrt idle cpu's, it is balanced.
4984 */
c186fafe 4985 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
aae6d3dd
SS
4986 sds.busiest_nr_running <= sds.busiest_group_weight)
4987 goto out_balanced;
c186fafe
PZ
4988 } else {
4989 /*
4990 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4991 * imbalance_pct to be conservative.
4992 */
bd939f45 4993 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
c186fafe 4994 goto out_balanced;
aae6d3dd 4995 }
1e3c88bd 4996
fab47622 4997force_balance:
1e3c88bd 4998 /* Looks like there is an imbalance. Compute it */
bd939f45 4999 calculate_imbalance(env, &sds);
1e3c88bd
PZ
5000 return sds.busiest;
5001
5002out_balanced:
1e3c88bd 5003ret:
bd939f45 5004 env->imbalance = 0;
1e3c88bd
PZ
5005 return NULL;
5006}
5007
5008/*
5009 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5010 */
bd939f45 5011static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 5012 struct sched_group *group)
1e3c88bd
PZ
5013{
5014 struct rq *busiest = NULL, *rq;
5015 unsigned long max_load = 0;
5016 int i;
5017
5018 for_each_cpu(i, sched_group_cpus(group)) {
5019 unsigned long power = power_of(i);
1399fa78
NR
5020 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5021 SCHED_POWER_SCALE);
1e3c88bd
PZ
5022 unsigned long wl;
5023
9d5efe05 5024 if (!capacity)
bd939f45 5025 capacity = fix_small_capacity(env->sd, group);
9d5efe05 5026
b9403130 5027 if (!cpumask_test_cpu(i, env->cpus))
1e3c88bd
PZ
5028 continue;
5029
5030 rq = cpu_rq(i);
6e40f5bb 5031 wl = weighted_cpuload(i);
1e3c88bd 5032
6e40f5bb
TG
5033 /*
5034 * When comparing with imbalance, use weighted_cpuload()
5035 * which is not scaled with the cpu power.
5036 */
bd939f45 5037 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
1e3c88bd
PZ
5038 continue;
5039
6e40f5bb
TG
5040 /*
5041 * For the load comparisons with the other cpu's, consider
5042 * the weighted_cpuload() scaled with the cpu power, so that
5043 * the load can be moved away from the cpu that is potentially
5044 * running at a lower capacity.
5045 */
1399fa78 5046 wl = (wl * SCHED_POWER_SCALE) / power;
6e40f5bb 5047
1e3c88bd
PZ
5048 if (wl > max_load) {
5049 max_load = wl;
5050 busiest = rq;
5051 }
5052 }
5053
5054 return busiest;
5055}
5056
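/*
 * Example of the power scaling above (assumed numbers): cpu A with
 * wl == 2048 at full power 1024 scores 2048 * 1024 / 1024 == 2048,
 * while cpu B with wl == 1536 at reduced power 512 scores
 * 1536 * 1024 / 512 == 3072, so the less capable but relatively
 * busier cpu B is picked as busiest.
 */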
5057/*
5058 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5059 * any value works so long as it is large enough.
5060 */
5061#define MAX_PINNED_INTERVAL 512
5062
5063/* Working cpumask for load_balance and load_balance_newidle. */
e6252c3e 5064DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
1e3c88bd 5065
bd939f45 5066static int need_active_balance(struct lb_env *env)
1af3ed3d 5067{
bd939f45
PZ
5068 struct sched_domain *sd = env->sd;
5069
5070 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
5071
5072 /*
5073 * ASYM_PACKING needs to force migrate tasks from busy but
5074 * higher numbered CPUs in order to pack all tasks in the
5075 * lowest numbered CPUs.
5076 */
bd939f45 5077 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 5078 return 1;
1af3ed3d
PZ
5079 }
5080
5081 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5082}
5083
969c7921
TH
5084static int active_load_balance_cpu_stop(void *data);
5085
1e3c88bd
PZ
5086/*
5087 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5088 * tasks if there is an imbalance.
5089 */
5090static int load_balance(int this_cpu, struct rq *this_rq,
5091 struct sched_domain *sd, enum cpu_idle_type idle,
5092 int *balance)
5093{
88b8dac0 5094 int ld_moved, cur_ld_moved, active_balance = 0;
1e3c88bd 5095 struct sched_group *group;
1e3c88bd
PZ
5096 struct rq *busiest;
5097 unsigned long flags;
e6252c3e 5098 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
1e3c88bd 5099
8e45cb54
PZ
5100 struct lb_env env = {
5101 .sd = sd,
ddcdf6e7
PZ
5102 .dst_cpu = this_cpu,
5103 .dst_rq = this_rq,
88b8dac0 5104 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 5105 .idle = idle,
eb95308e 5106 .loop_break = sched_nr_migrate_break,
b9403130 5107 .cpus = cpus,
8e45cb54
PZ
5108 };
5109
cfc03118
JK
5110 /*
5111 * For NEWLY_IDLE load_balancing, we don't need to consider
5112 * other cpus in our group
5113 */
e02e60c1 5114 if (idle == CPU_NEWLY_IDLE)
cfc03118 5115 env.dst_grpmask = NULL;
cfc03118 5116
1e3c88bd
PZ
5117 cpumask_copy(cpus, cpu_active_mask);
5118
1e3c88bd
PZ
5119 schedstat_inc(sd, lb_count[idle]);
5120
5121redo:
b9403130 5122 group = find_busiest_group(&env, balance);
1e3c88bd
PZ
5123
5124 if (*balance == 0)
5125 goto out_balanced;
5126
5127 if (!group) {
5128 schedstat_inc(sd, lb_nobusyg[idle]);
5129 goto out_balanced;
5130 }
5131
b9403130 5132 busiest = find_busiest_queue(&env, group);
1e3c88bd
PZ
5133 if (!busiest) {
5134 schedstat_inc(sd, lb_nobusyq[idle]);
5135 goto out_balanced;
5136 }
5137
78feefc5 5138 BUG_ON(busiest == env.dst_rq);
1e3c88bd 5139
bd939f45 5140 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
1e3c88bd
PZ
5141
5142 ld_moved = 0;
5143 if (busiest->nr_running > 1) {
5144 /*
5145 * Attempt to move tasks. If find_busiest_group has found
5146 * an imbalance but busiest->nr_running <= 1, the group is
5147 * still unbalanced. ld_moved simply stays zero, so it is
5148 * correctly treated as an imbalance.
5149 */
8e45cb54 5150 env.flags |= LBF_ALL_PINNED;
c82513e5
PZ
5151 env.src_cpu = busiest->cpu;
5152 env.src_rq = busiest;
5153 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 5154
5d6523eb 5155more_balance:
1e3c88bd 5156 local_irq_save(flags);
78feefc5 5157 double_rq_lock(env.dst_rq, busiest);
88b8dac0
SV
5158
5159 /*
5160 * cur_ld_moved - load moved in current iteration
5161 * ld_moved - cumulative load moved across iterations
5162 */
5163 cur_ld_moved = move_tasks(&env);
5164 ld_moved += cur_ld_moved;
78feefc5 5165 double_rq_unlock(env.dst_rq, busiest);
1e3c88bd
PZ
5166 local_irq_restore(flags);
5167
5168 /*
5169 * some other cpu did the load balance for us.
5170 */
88b8dac0
SV
5171 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5172 resched_cpu(env.dst_cpu);
5173
f1cd0858
JK
5174 if (env.flags & LBF_NEED_BREAK) {
5175 env.flags &= ~LBF_NEED_BREAK;
5176 goto more_balance;
5177 }
5178
88b8dac0
SV
5179 /*
5180 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5181 * us and move them to an alternate dst_cpu in our sched_group
5182 * where they can run. The upper limit on how many times we
 5183 * iterate on the same src_cpu depends on the number of cpus in our
5184 * sched_group.
5185 *
5186 * This changes load balance semantics a bit on who can move
5187 * load to a given_cpu. In addition to the given_cpu itself
 5188 * (or an ilb_cpu acting on its behalf where given_cpu is
 5189 * nohz-idle), we now have balance_cpu in a position to move
 5190 * load to given_cpu. In rare situations, this may cause
 5191 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
 5192 * _independently_ and at the _same_ time to move some load to
 5193 * given_cpu), causing excess load to be moved to given_cpu.
 5194 * This, however, should rarely happen in practice; moreover,
 5195 * subsequent load balance cycles should correct the
 5196 * excess load moved.
5197 */
e02e60c1 5198 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
88b8dac0 5199
78feefc5 5200 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0
SV
5201 env.dst_cpu = env.new_dst_cpu;
5202 env.flags &= ~LBF_SOME_PINNED;
5203 env.loop = 0;
5204 env.loop_break = sched_nr_migrate_break;
e02e60c1
JK
5205
 5206 /* Prevent dst_cpu from being re-selected via env's cpus */
5207 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5208
88b8dac0
SV
5209 /*
5210 * Go back to "more_balance" rather than "redo" since we
5211 * need to continue with same src_cpu.
5212 */
5213 goto more_balance;
5214 }
1e3c88bd
PZ
5215
5216 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 5217 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 5218 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
5219 if (!cpumask_empty(cpus)) {
5220 env.loop = 0;
5221 env.loop_break = sched_nr_migrate_break;
1e3c88bd 5222 goto redo;
bbf18b19 5223 }
1e3c88bd
PZ
5224 goto out_balanced;
5225 }
5226 }
5227
5228 if (!ld_moved) {
5229 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
5230 /*
5231 * Increment the failure counter only on periodic balance.
5232 * We do not want newidle balance, which can be very
 5233 * frequent, to pollute the failure counter, causing
5234 * excessive cache_hot migrations and active balances.
5235 */
5236 if (idle != CPU_NEWLY_IDLE)
5237 sd->nr_balance_failed++;
1e3c88bd 5238
bd939f45 5239 if (need_active_balance(&env)) {
1e3c88bd
PZ
5240 raw_spin_lock_irqsave(&busiest->lock, flags);
5241
969c7921
TH
 5242 /* don't kick the active_load_balance_cpu_stop
 5243 * if the curr task on the busiest cpu can't be
 5244 * moved to this_cpu
1e3c88bd
PZ
5245 */
5246 if (!cpumask_test_cpu(this_cpu,
fa17b507 5247 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
5248 raw_spin_unlock_irqrestore(&busiest->lock,
5249 flags);
8e45cb54 5250 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
5251 goto out_one_pinned;
5252 }
5253
969c7921
TH
5254 /*
5255 * ->active_balance synchronizes accesses to
5256 * ->active_balance_work. Once set, it's cleared
5257 * only after active load balance is finished.
5258 */
1e3c88bd
PZ
5259 if (!busiest->active_balance) {
5260 busiest->active_balance = 1;
5261 busiest->push_cpu = this_cpu;
5262 active_balance = 1;
5263 }
5264 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 5265
bd939f45 5266 if (active_balance) {
969c7921
TH
5267 stop_one_cpu_nowait(cpu_of(busiest),
5268 active_load_balance_cpu_stop, busiest,
5269 &busiest->active_balance_work);
bd939f45 5270 }
1e3c88bd
PZ
5271
5272 /*
5273 * We've kicked active balancing, reset the failure
5274 * counter.
5275 */
5276 sd->nr_balance_failed = sd->cache_nice_tries+1;
5277 }
5278 } else
5279 sd->nr_balance_failed = 0;
5280
5281 if (likely(!active_balance)) {
5282 /* We were unbalanced, so reset the balancing interval */
5283 sd->balance_interval = sd->min_interval;
5284 } else {
5285 /*
5286 * If we've begun active balancing, start to back off. This
5287 * case may not be covered by the all_pinned logic if there
5288 * is only 1 task on the busy runqueue (because we don't call
5289 * move_tasks).
5290 */
5291 if (sd->balance_interval < sd->max_interval)
5292 sd->balance_interval *= 2;
5293 }
5294
1e3c88bd
PZ
5295 goto out;
5296
5297out_balanced:
5298 schedstat_inc(sd, lb_balanced[idle]);
5299
5300 sd->nr_balance_failed = 0;
5301
5302out_one_pinned:
5303 /* tune up the balancing interval */
8e45cb54 5304 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 5305 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
5306 (sd->balance_interval < sd->max_interval))
5307 sd->balance_interval *= 2;
5308
46e49b38 5309 ld_moved = 0;
1e3c88bd 5310out:
1e3c88bd
PZ
5311 return ld_moved;
5312}
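/*
 * Control-flow summary of load_balance() above (a sketch, no new logic):
 *
 *   redo:         pick the busiest group/queue among the cpus left in env.cpus
 *   more_balance: move_tasks() under both rq locks; loop again on
 *                 LBF_NEED_BREAK, or retarget dst_cpu on LBF_SOME_PINNED
 *   all pinned:   drop the busiest cpu from env.cpus and redo, else give up
 *   no progress:  bump nr_balance_failed and, if need_active_balance() says
 *                 so, punt to active_load_balance_cpu_stop()
 */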
5313
1e3c88bd
PZ
5314/*
5315 * idle_balance is called by schedule() if this_cpu is about to become
5316 * idle. Attempts to pull tasks from other CPUs.
5317 */
029632fb 5318void idle_balance(int this_cpu, struct rq *this_rq)
1e3c88bd
PZ
5319{
5320 struct sched_domain *sd;
5321 int pulled_task = 0;
5322 unsigned long next_balance = jiffies + HZ;
5323
78becc27 5324 this_rq->idle_stamp = rq_clock(this_rq);
1e3c88bd
PZ
5325
5326 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5327 return;
5328
f492e12e
PZ
5329 /*
5330 * Drop the rq->lock, but keep IRQ/preempt disabled.
5331 */
5332 raw_spin_unlock(&this_rq->lock);
5333
48a16753 5334 update_blocked_averages(this_cpu);
dce840a0 5335 rcu_read_lock();
1e3c88bd
PZ
5336 for_each_domain(this_cpu, sd) {
5337 unsigned long interval;
f492e12e 5338 int balance = 1;
1e3c88bd
PZ
5339
5340 if (!(sd->flags & SD_LOAD_BALANCE))
5341 continue;
5342
f492e12e 5343 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 5344 /* If we've pulled tasks over stop searching: */
f492e12e
PZ
5345 pulled_task = load_balance(this_cpu, this_rq,
5346 sd, CPU_NEWLY_IDLE, &balance);
5347 }
1e3c88bd
PZ
5348
5349 interval = msecs_to_jiffies(sd->balance_interval);
5350 if (time_after(next_balance, sd->last_balance + interval))
5351 next_balance = sd->last_balance + interval;
d5ad140b
NR
5352 if (pulled_task) {
5353 this_rq->idle_stamp = 0;
1e3c88bd 5354 break;
d5ad140b 5355 }
1e3c88bd 5356 }
dce840a0 5357 rcu_read_unlock();
f492e12e
PZ
5358
5359 raw_spin_lock(&this_rq->lock);
5360
1e3c88bd
PZ
5361 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5362 /*
 5363 * We are going idle. next_balance may have been set based on
 5364 * a busy processor, so reset it.
5365 */
5366 this_rq->next_balance = next_balance;
5367 }
5368}
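/*
 * Example of the avg_idle cut-off above, assuming the default
 * sysctl_sched_migration_cost of 0.5 ms: if this cpu's recent idle periods
 * averaged less than 0.5 ms, a newidle balance would likely cost more than
 * the idle time it could fill, so idle_balance() bails out immediately.
 */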
5369
5370/*
969c7921
TH
5371 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5372 * running tasks off the busiest CPU onto idle CPUs. It requires at
5373 * least 1 task to be running on each physical CPU where possible, and
5374 * avoids physical / logical imbalances.
1e3c88bd 5375 */
969c7921 5376static int active_load_balance_cpu_stop(void *data)
1e3c88bd 5377{
969c7921
TH
5378 struct rq *busiest_rq = data;
5379 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 5380 int target_cpu = busiest_rq->push_cpu;
969c7921 5381 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 5382 struct sched_domain *sd;
969c7921
TH
5383
5384 raw_spin_lock_irq(&busiest_rq->lock);
5385
5386 /* make sure the requested cpu hasn't gone down in the meantime */
5387 if (unlikely(busiest_cpu != smp_processor_id() ||
5388 !busiest_rq->active_balance))
5389 goto out_unlock;
1e3c88bd
PZ
5390
5391 /* Is there any task to move? */
5392 if (busiest_rq->nr_running <= 1)
969c7921 5393 goto out_unlock;
1e3c88bd
PZ
5394
5395 /*
 5396 * This condition is "impossible"; if it occurs
5397 * we need to fix it. Originally reported by
5398 * Bjorn Helgaas on a 128-cpu setup.
5399 */
5400 BUG_ON(busiest_rq == target_rq);
5401
5402 /* move a task from busiest_rq to target_rq */
5403 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
5404
5405 /* Search for an sd spanning us and the target CPU. */
dce840a0 5406 rcu_read_lock();
1e3c88bd
PZ
5407 for_each_domain(target_cpu, sd) {
5408 if ((sd->flags & SD_LOAD_BALANCE) &&
5409 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5410 break;
5411 }
5412
5413 if (likely(sd)) {
8e45cb54
PZ
5414 struct lb_env env = {
5415 .sd = sd,
ddcdf6e7
PZ
5416 .dst_cpu = target_cpu,
5417 .dst_rq = target_rq,
5418 .src_cpu = busiest_rq->cpu,
5419 .src_rq = busiest_rq,
8e45cb54
PZ
5420 .idle = CPU_IDLE,
5421 };
5422
1e3c88bd
PZ
5423 schedstat_inc(sd, alb_count);
5424
8e45cb54 5425 if (move_one_task(&env))
1e3c88bd
PZ
5426 schedstat_inc(sd, alb_pushed);
5427 else
5428 schedstat_inc(sd, alb_failed);
5429 }
dce840a0 5430 rcu_read_unlock();
1e3c88bd 5431 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
5432out_unlock:
5433 busiest_rq->active_balance = 0;
5434 raw_spin_unlock_irq(&busiest_rq->lock);
5435 return 0;
1e3c88bd
PZ
5436}
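/*
 * Usage note: this callback runs in stopper context on the busiest cpu
 * (queued via stop_one_cpu_nowait() from load_balance()), so the formerly
 * running task has already been preempted and can itself be migrated;
 * move_one_task() pushes exactly one task to busiest_rq->push_cpu and the
 * outcome is counted as alb_pushed or alb_failed in the schedstats.
 */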
5437
3451d024 5438#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
5439/*
5440 * idle load balancing details
83cd4fe2
VP
 5441 * - When one of the busy CPUs notices that there may be an idle rebalancing
5442 * needed, they will kick the idle load balancer, which then does idle
5443 * load balancing for all the idle CPUs.
5444 */
1e3c88bd 5445static struct {
83cd4fe2 5446 cpumask_var_t idle_cpus_mask;
0b005cf5 5447 atomic_t nr_cpus;
83cd4fe2
VP
5448 unsigned long next_balance; /* in jiffy units */
5449} nohz ____cacheline_aligned;
1e3c88bd 5450
8e7fbcbc 5451static inline int find_new_ilb(int call_cpu)
1e3c88bd 5452{
0b005cf5 5453 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 5454
786d6dc7
SS
5455 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5456 return ilb;
5457
5458 return nr_cpu_ids;
1e3c88bd 5459}
1e3c88bd 5460
83cd4fe2
VP
5461/*
5462 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 5463 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5464 * CPU (if there is one).
5465 */
5466static void nohz_balancer_kick(int cpu)
5467{
5468 int ilb_cpu;
5469
5470 nohz.next_balance++;
5471
0b005cf5 5472 ilb_cpu = find_new_ilb(cpu);
83cd4fe2 5473
0b005cf5
SS
5474 if (ilb_cpu >= nr_cpu_ids)
5475 return;
83cd4fe2 5476
cd490c5b 5477 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
5478 return;
5479 /*
5480 * Use smp_send_reschedule() instead of resched_cpu().
5481 * This way we generate a sched IPI on the target cpu which
5482 * is idle. And the softirq performing nohz idle load balance
5483 * will be run before returning from the IPI.
5484 */
5485 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
5486 return;
5487}
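/*
 * Kicker/kickee handshake sketch: the busy cpu sets NOHZ_BALANCE_KICK in
 * the ilb cpu's nohz_flags and sends it a reschedule IPI; coming out of
 * the IPI the idle cpu runs SCHED_SOFTIRQ, and run_rebalance_domains() ->
 * nohz_idle_balance() then rebalances on behalf of every cpu in
 * nohz.idle_cpus_mask before clearing NOHZ_BALANCE_KICK again.
 */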
5488
c1cc017c 5489static inline void nohz_balance_exit_idle(int cpu)
71325960
SS
5490{
5491 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5492 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5493 atomic_dec(&nohz.nr_cpus);
5494 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5495 }
5496}
5497
69e1e811
SS
5498static inline void set_cpu_sd_state_busy(void)
5499{
5500 struct sched_domain *sd;
69e1e811 5501
69e1e811 5502 rcu_read_lock();
424c93fe 5503 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5504
5505 if (!sd || !sd->nohz_idle)
5506 goto unlock;
5507 sd->nohz_idle = 0;
5508
5509 for (; sd; sd = sd->parent)
69e1e811 5510 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5511unlock:
69e1e811
SS
5512 rcu_read_unlock();
5513}
5514
5515void set_cpu_sd_state_idle(void)
5516{
5517 struct sched_domain *sd;
69e1e811 5518
69e1e811 5519 rcu_read_lock();
424c93fe 5520 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5521
5522 if (!sd || sd->nohz_idle)
5523 goto unlock;
5524 sd->nohz_idle = 1;
5525
5526 for (; sd; sd = sd->parent)
69e1e811 5527 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5528unlock:
69e1e811
SS
5529 rcu_read_unlock();
5530}
5531
1e3c88bd 5532/*
c1cc017c 5533 * This routine records that the cpu is going idle with its tick stopped.
0b005cf5 5534 * This info will be used in performing idle load balancing in the future.
1e3c88bd 5535 */
c1cc017c 5536void nohz_balance_enter_idle(int cpu)
1e3c88bd 5537{
71325960
SS
5538 /*
5539 * If this cpu is going down, then nothing needs to be done.
5540 */
5541 if (!cpu_active(cpu))
5542 return;
5543
c1cc017c
AS
5544 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5545 return;
1e3c88bd 5546
c1cc017c
AS
5547 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5548 atomic_inc(&nohz.nr_cpus);
5549 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd 5550}
71325960 5551
0db0628d 5552static int sched_ilb_notifier(struct notifier_block *nfb,
71325960
SS
5553 unsigned long action, void *hcpu)
5554{
5555 switch (action & ~CPU_TASKS_FROZEN) {
5556 case CPU_DYING:
c1cc017c 5557 nohz_balance_exit_idle(smp_processor_id());
71325960
SS
5558 return NOTIFY_OK;
5559 default:
5560 return NOTIFY_DONE;
5561 }
5562}
1e3c88bd
PZ
5563#endif
5564
5565static DEFINE_SPINLOCK(balancing);
5566
49c022e6
PZ
5567/*
5568 * Scale the max load_balance interval with the number of CPUs in the system.
5569 * This trades load-balance latency on larger machines for less cross talk.
5570 */
029632fb 5571void update_max_interval(void)
49c022e6
PZ
5572{
5573 max_load_balance_interval = HZ*num_online_cpus()/10;
5574}
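/*
 * Worked example: with HZ == 250 and 8 online cpus,
 * max_load_balance_interval = 250 * 8 / 10 = 200 jiffies (800 ms), so every
 * domain's balance interval is clamped to at most 800 ms.
 */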
5575
1e3c88bd
PZ
5576/*
5577 * It checks each scheduling domain to see if it is due to be balanced,
5578 * and initiates a balancing operation if so.
5579 *
b9b0853a 5580 * Balancing parameters are set up in init_sched_domains.
1e3c88bd
PZ
5581 */
5582static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5583{
5584 int balance = 1;
5585 struct rq *rq = cpu_rq(cpu);
5586 unsigned long interval;
04f733b4 5587 struct sched_domain *sd;
1e3c88bd
PZ
5588 /* Earliest time when we have to do rebalance again */
5589 unsigned long next_balance = jiffies + 60*HZ;
5590 int update_next_balance = 0;
5591 int need_serialize;
5592
48a16753 5593 update_blocked_averages(cpu);
2069dd75 5594
dce840a0 5595 rcu_read_lock();
1e3c88bd
PZ
5596 for_each_domain(cpu, sd) {
5597 if (!(sd->flags & SD_LOAD_BALANCE))
5598 continue;
5599
5600 interval = sd->balance_interval;
5601 if (idle != CPU_IDLE)
5602 interval *= sd->busy_factor;
5603
5604 /* scale ms to jiffies */
5605 interval = msecs_to_jiffies(interval);
49c022e6 5606 interval = clamp(interval, 1UL, max_load_balance_interval);
1e3c88bd
PZ
5607
5608 need_serialize = sd->flags & SD_SERIALIZE;
5609
5610 if (need_serialize) {
5611 if (!spin_trylock(&balancing))
5612 goto out;
5613 }
5614
5615 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5616 if (load_balance(cpu, rq, sd, idle, &balance)) {
5617 /*
de5eb2dd
JK
5618 * The LBF_SOME_PINNED logic could have changed
5619 * env->dst_cpu, so we can't know our idle
5620 * state even if we migrated tasks. Update it.
1e3c88bd 5621 */
de5eb2dd 5622 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
1e3c88bd
PZ
5623 }
5624 sd->last_balance = jiffies;
5625 }
5626 if (need_serialize)
5627 spin_unlock(&balancing);
5628out:
5629 if (time_after(next_balance, sd->last_balance + interval)) {
5630 next_balance = sd->last_balance + interval;
5631 update_next_balance = 1;
5632 }
5633
5634 /*
5635 * Stop the load balance at this level. There is another
5636 * CPU in our sched group which is doing load balancing more
5637 * actively.
5638 */
5639 if (!balance)
5640 break;
5641 }
dce840a0 5642 rcu_read_unlock();
1e3c88bd
PZ
5643
5644 /*
5645 * next_balance will be updated only when there is a need.
 5646 * When the cpu is attached to a null domain, for example, it will not be
5647 * updated.
5648 */
5649 if (likely(update_next_balance))
5650 rq->next_balance = next_balance;
5651}
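/*
 * Interval arithmetic example for the loop above, assuming
 * sd->balance_interval = 8 ms and a busy_factor of 32: a busy cpu checks
 * this domain every 8 * 32 = 256 ms (converted to jiffies and clamped to
 * max_load_balance_interval), while an idle cpu keeps the raw 8 ms period.
 */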
5652
3451d024 5653#ifdef CONFIG_NO_HZ_COMMON
1e3c88bd 5654/*
3451d024 5655 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
1e3c88bd
PZ
5656 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5657 */
83cd4fe2
VP
5658static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5659{
5660 struct rq *this_rq = cpu_rq(this_cpu);
5661 struct rq *rq;
5662 int balance_cpu;
5663
1c792db7
SS
5664 if (idle != CPU_IDLE ||
5665 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5666 goto end;
83cd4fe2
VP
5667
5668 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 5669 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
5670 continue;
5671
5672 /*
5673 * If this cpu gets work to do, stop the load balancing
 5674 * work being done for other cpus. The next load
 5675 * balancing owner will pick it up.
5676 */
1c792db7 5677 if (need_resched())
83cd4fe2 5678 break;
83cd4fe2 5679
5ed4f1d9
VG
5680 rq = cpu_rq(balance_cpu);
5681
5682 raw_spin_lock_irq(&rq->lock);
5683 update_rq_clock(rq);
5684 update_idle_cpu_load(rq);
5685 raw_spin_unlock_irq(&rq->lock);
83cd4fe2
VP
5686
5687 rebalance_domains(balance_cpu, CPU_IDLE);
5688
83cd4fe2
VP
5689 if (time_after(this_rq->next_balance, rq->next_balance))
5690 this_rq->next_balance = rq->next_balance;
5691 }
5692 nohz.next_balance = this_rq->next_balance;
1c792db7
SS
5693end:
5694 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
5695}
5696
5697/*
0b005cf5
SS
 5698 * Current heuristic for kicking the idle load balancer in the presence
 5699 * of an idle cpu in the system:
 5700 * - This rq has more than one task.
 5701 * - At any scheduler domain level, this cpu's scheduler group has multiple
 5702 * busy cpus exceeding the group's power.
 5703 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
 5704 * domain span are idle.
83cd4fe2
VP
5705 */
5706static inline int nohz_kick_needed(struct rq *rq, int cpu)
5707{
5708 unsigned long now = jiffies;
0b005cf5 5709 struct sched_domain *sd;
83cd4fe2 5710
1c792db7 5711 if (unlikely(idle_cpu(cpu)))
83cd4fe2
VP
5712 return 0;
5713
1c792db7
SS
5714 /*
 5715 * We may have recently been in ticked or tickless idle mode. At the first
5716 * busy tick after returning from idle, we will update the busy stats.
5717 */
69e1e811 5718 set_cpu_sd_state_busy();
c1cc017c 5719 nohz_balance_exit_idle(cpu);
0b005cf5
SS
5720
5721 /*
5722 * None are in tickless mode and hence no need for NOHZ idle load
5723 * balancing.
5724 */
5725 if (likely(!atomic_read(&nohz.nr_cpus)))
5726 return 0;
1c792db7
SS
5727
5728 if (time_before(now, nohz.next_balance))
83cd4fe2
VP
5729 return 0;
5730
0b005cf5
SS
5731 if (rq->nr_running >= 2)
5732 goto need_kick;
83cd4fe2 5733
067491b7 5734 rcu_read_lock();
0b005cf5
SS
5735 for_each_domain(cpu, sd) {
5736 struct sched_group *sg = sd->groups;
5737 struct sched_group_power *sgp = sg->sgp;
5738 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
83cd4fe2 5739
0b005cf5 5740 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
067491b7 5741 goto need_kick_unlock;
0b005cf5
SS
5742
5743 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5744 && (cpumask_first_and(nohz.idle_cpus_mask,
5745 sched_domain_span(sd)) < cpu))
067491b7 5746 goto need_kick_unlock;
0b005cf5
SS
5747
5748 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5749 break;
83cd4fe2 5750 }
067491b7 5751 rcu_read_unlock();
83cd4fe2 5752 return 0;
067491b7
PZ
5753
5754need_kick_unlock:
5755 rcu_read_unlock();
0b005cf5
SS
5756need_kick:
5757 return 1;
83cd4fe2
VP
5758}
5759#else
5760static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5761#endif
5762
5763/*
5764 * run_rebalance_domains is triggered when needed from the scheduler tick.
5765 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5766 */
1e3c88bd
PZ
5767static void run_rebalance_domains(struct softirq_action *h)
5768{
5769 int this_cpu = smp_processor_id();
5770 struct rq *this_rq = cpu_rq(this_cpu);
6eb57e0d 5771 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
5772 CPU_IDLE : CPU_NOT_IDLE;
5773
5774 rebalance_domains(this_cpu, idle);
5775
1e3c88bd 5776 /*
83cd4fe2 5777 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
5778 * balancing on behalf of the other idle cpus whose ticks are
5779 * stopped.
5780 */
83cd4fe2 5781 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
5782}
5783
5784static inline int on_null_domain(int cpu)
5785{
90a6501f 5786 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
5787}
5788
5789/*
5790 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 5791 */
029632fb 5792void trigger_load_balance(struct rq *rq, int cpu)
1e3c88bd 5793{
1e3c88bd
PZ
5794 /* Don't need to rebalance while attached to NULL domain */
5795 if (time_after_eq(jiffies, rq->next_balance) &&
5796 likely(!on_null_domain(cpu)))
5797 raise_softirq(SCHED_SOFTIRQ);
3451d024 5798#ifdef CONFIG_NO_HZ_COMMON
1c792db7 5799 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
83cd4fe2
VP
5800 nohz_balancer_kick(cpu);
5801#endif
1e3c88bd
PZ
5802}
5803
0bcdcf28
CE
5804static void rq_online_fair(struct rq *rq)
5805{
5806 update_sysctl();
5807}
5808
5809static void rq_offline_fair(struct rq *rq)
5810{
5811 update_sysctl();
a4c96ae3
PB
5812
5813 /* Ensure any throttled groups are reachable by pick_next_task */
5814 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
5815}
5816
55e12e5e 5817#endif /* CONFIG_SMP */
e1d1484f 5818
bf0f6f24
IM
5819/*
5820 * scheduler tick hitting a task of our scheduling class:
5821 */
8f4d37ec 5822static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
5823{
5824 struct cfs_rq *cfs_rq;
5825 struct sched_entity *se = &curr->se;
5826
5827 for_each_sched_entity(se) {
5828 cfs_rq = cfs_rq_of(se);
8f4d37ec 5829 entity_tick(cfs_rq, se, queued);
bf0f6f24 5830 }
18bf2805 5831
cbee9f88
PZ
5832 if (sched_feat_numa(NUMA))
5833 task_tick_numa(rq, curr);
3d59eebc 5834
18bf2805 5835 update_rq_runnable_avg(rq, 1);
bf0f6f24
IM
5836}
5837
5838/*
cd29fe6f
PZ
5839 * called on fork with the child task as argument from the parent's context
5840 * - child not yet on the tasklist
5841 * - preemption disabled
bf0f6f24 5842 */
cd29fe6f 5843static void task_fork_fair(struct task_struct *p)
bf0f6f24 5844{
4fc420c9
DN
5845 struct cfs_rq *cfs_rq;
5846 struct sched_entity *se = &p->se, *curr;
00bf7bfc 5847 int this_cpu = smp_processor_id();
cd29fe6f
PZ
5848 struct rq *rq = this_rq();
5849 unsigned long flags;
5850
05fa785c 5851 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 5852
861d034e
PZ
5853 update_rq_clock(rq);
5854
4fc420c9
DN
5855 cfs_rq = task_cfs_rq(current);
5856 curr = cfs_rq->curr;
5857
b0a0f667
PM
5858 if (unlikely(task_cpu(p) != this_cpu)) {
5859 rcu_read_lock();
cd29fe6f 5860 __set_task_cpu(p, this_cpu);
b0a0f667
PM
5861 rcu_read_unlock();
5862 }
bf0f6f24 5863
7109c442 5864 update_curr(cfs_rq);
cd29fe6f 5865
b5d9d734
MG
5866 if (curr)
5867 se->vruntime = curr->vruntime;
aeb73b04 5868 place_entity(cfs_rq, se, 1);
4d78e7b6 5869
cd29fe6f 5870 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 5871 /*
edcb60a3
IM
5872 * Upon rescheduling, sched_class::put_prev_task() will place
5873 * 'current' within the tree based on its new key value.
5874 */
4d78e7b6 5875 swap(curr->vruntime, se->vruntime);
aec0a514 5876 resched_task(rq->curr);
4d78e7b6 5877 }
bf0f6f24 5878
88ec22d3
PZ
5879 se->vruntime -= cfs_rq->min_vruntime;
5880
05fa785c 5881 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
5882}
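/*
 * Example of the child_runs_first swap above: if the parent sits at a
 * vruntime of 100 ms and place_entity() put the child at 103 ms, setting
 * sysctl_sched_child_runs_first swaps the two values so the child (now at
 * 100 ms) is picked before the parent on the next schedule.
 */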
5883
cb469845
SR
5884/*
5885 * Priority of the task has changed. Check to see if we preempt
5886 * the current task.
5887 */
da7a735e
PZ
5888static void
5889prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 5890{
da7a735e
PZ
5891 if (!p->se.on_rq)
5892 return;
5893
cb469845
SR
5894 /*
5895 * Reschedule if we are currently running on this runqueue and
5896 * our priority decreased, or if we are not currently running on
5897 * this runqueue and our priority is higher than the current's
5898 */
da7a735e 5899 if (rq->curr == p) {
cb469845
SR
5900 if (p->prio > oldprio)
5901 resched_task(rq->curr);
5902 } else
15afe09b 5903 check_preempt_curr(rq, p, 0);
cb469845
SR
5904}
5905
da7a735e
PZ
5906static void switched_from_fair(struct rq *rq, struct task_struct *p)
5907{
5908 struct sched_entity *se = &p->se;
5909 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5910
5911 /*
 5912 * Ensure the task's vruntime is normalized, so that when it is
5913 * switched back to the fair class the enqueue_entity(.flags=0) will
5914 * do the right thing.
5915 *
5916 * If it was on_rq, then the dequeue_entity(.flags=0) will already
 5917 * have normalized the vruntime; if it was !on_rq, then only when
5918 * the task is sleeping will it still have non-normalized vruntime.
5919 */
5920 if (!se->on_rq && p->state != TASK_RUNNING) {
5921 /*
5922 * Fix up our vruntime so that the current sleep doesn't
5923 * cause 'unlimited' sleep bonus.
5924 */
5925 place_entity(cfs_rq, se, 0);
5926 se->vruntime -= cfs_rq->min_vruntime;
5927 }
9ee474f5 5928
141965c7 5929#ifdef CONFIG_SMP
9ee474f5
PT
5930 /*
5931 * Remove our load from contribution when we leave sched_fair
5932 * and ensure we don't carry in an old decay_count if we
5933 * switch back.
5934 */
5935 if (p->se.avg.decay_count) {
5936 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5937 __synchronize_entity_decay(&p->se);
5938 subtract_blocked_load_contrib(cfs_rq,
5939 p->se.avg.load_avg_contrib);
5940 }
5941#endif
da7a735e
PZ
5942}
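/*
 * Normalization example: vruntime is kept relative while a task is off the
 * rq. A sleeper at 105 ms on a cfs_rq whose min_vruntime is 100 ms is
 * stored as +5 ms, so enqueueing it later on a cfs_rq with min_vruntime at
 * 900 ms places it at 905 ms rather than at a stale absolute 105 ms.
 */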
5943
cb469845
SR
5944/*
5945 * We switched to the sched_fair class.
5946 */
da7a735e 5947static void switched_to_fair(struct rq *rq, struct task_struct *p)
cb469845 5948{
da7a735e
PZ
5949 if (!p->se.on_rq)
5950 return;
5951
cb469845
SR
5952 /*
5953 * We were most likely switched from sched_rt, so
5954 * kick off the schedule if running, otherwise just see
5955 * if we can still preempt the current task.
5956 */
da7a735e 5957 if (rq->curr == p)
cb469845
SR
5958 resched_task(rq->curr);
5959 else
15afe09b 5960 check_preempt_curr(rq, p, 0);
cb469845
SR
5961}
5962
83b699ed
SV
5963/* Account for a task changing its policy or group.
5964 *
5965 * This routine is mostly called to set cfs_rq->curr field when a task
5966 * migrates between groups/classes.
5967 */
5968static void set_curr_task_fair(struct rq *rq)
5969{
5970 struct sched_entity *se = &rq->curr->se;
5971
ec12cb7f
PT
5972 for_each_sched_entity(se) {
5973 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5974
5975 set_next_entity(cfs_rq, se);
5976 /* ensure bandwidth has been allocated on our new cfs_rq */
5977 account_cfs_rq_runtime(cfs_rq, 0);
5978 }
83b699ed
SV
5979}
5980
029632fb
PZ
5981void init_cfs_rq(struct cfs_rq *cfs_rq)
5982{
5983 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
5984 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5985#ifndef CONFIG_64BIT
5986 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5987#endif
141965c7 5988#ifdef CONFIG_SMP
9ee474f5 5989 atomic64_set(&cfs_rq->decay_counter, 1);
2509940f 5990 atomic_long_set(&cfs_rq->removed_load, 0);
9ee474f5 5991#endif
029632fb
PZ
5992}
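/*
 * Note on the min_vruntime seed above: -(1 << 20) starts the clock roughly
 * 1 ms of vruntime before the u64 wraps, presumably so that wrap-around
 * handling bugs surface early, much like the traditional jiffies init value.
 */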
5993
810b3817 5994#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 5995static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 5996{
aff3e498 5997 struct cfs_rq *cfs_rq;
b2b5ce02
PZ
5998 /*
 5999 * If the task was not on the rq at the time of this cgroup movement,
 6000 * it must have been asleep; sleeping tasks keep their ->vruntime
6001 * absolute on their old rq until wakeup (needed for the fair sleeper
6002 * bonus in place_entity()).
6003 *
6004 * If it was on the rq, we've just 'preempted' it, which does convert
6005 * ->vruntime to a relative base.
6006 *
6007 * Make sure both cases convert their relative position when migrating
6008 * to another cgroup's rq. This does somewhat interfere with the
6009 * fair sleeper stuff for the first placement, but who cares.
6010 */
7ceff013
DN
6011 /*
6012 * When !on_rq, vruntime of the task has usually NOT been normalized.
6013 * But there are some cases where it has already been normalized:
6014 *
 6015 * - Moving a forked child which is waiting to be woken up by
6016 * wake_up_new_task().
62af3783
DN
6017 * - Moving a task which has been woken up by try_to_wake_up() and
 6018 * is waiting to actually be woken up by sched_ttwu_pending().
7ceff013
DN
6019 *
6020 * To prevent boost or penalty in the new cfs_rq caused by delta
6021 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6022 */
62af3783 6023 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7ceff013
DN
6024 on_rq = 1;
6025
b2b5ce02
PZ
6026 if (!on_rq)
6027 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6028 set_task_rq(p, task_cpu(p));
aff3e498
PT
6029 if (!on_rq) {
6030 cfs_rq = cfs_rq_of(&p->se);
6031 p->se.vruntime += cfs_rq->min_vruntime;
6032#ifdef CONFIG_SMP
6033 /*
6034 * migrate_task_rq_fair() will have removed our previous
6035 * contribution, but we must synchronize for ongoing future
6036 * decay.
6037 */
6038 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6039 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6040#endif
6041 }
810b3817 6042}
029632fb
PZ
6043
6044void free_fair_sched_group(struct task_group *tg)
6045{
6046 int i;
6047
6048 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6049
6050 for_each_possible_cpu(i) {
6051 if (tg->cfs_rq)
6052 kfree(tg->cfs_rq[i]);
6053 if (tg->se)
6054 kfree(tg->se[i]);
6055 }
6056
6057 kfree(tg->cfs_rq);
6058 kfree(tg->se);
6059}
6060
6061int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6062{
6063 struct cfs_rq *cfs_rq;
6064 struct sched_entity *se;
6065 int i;
6066
6067 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6068 if (!tg->cfs_rq)
6069 goto err;
6070 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6071 if (!tg->se)
6072 goto err;
6073
6074 tg->shares = NICE_0_LOAD;
6075
6076 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6077
6078 for_each_possible_cpu(i) {
6079 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6080 GFP_KERNEL, cpu_to_node(i));
6081 if (!cfs_rq)
6082 goto err;
6083
6084 se = kzalloc_node(sizeof(struct sched_entity),
6085 GFP_KERNEL, cpu_to_node(i));
6086 if (!se)
6087 goto err_free_rq;
6088
6089 init_cfs_rq(cfs_rq);
6090 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6091 }
6092
6093 return 1;
6094
6095err_free_rq:
6096 kfree(cfs_rq);
6097err:
6098 return 0;
6099}
6100
6101void unregister_fair_sched_group(struct task_group *tg, int cpu)
6102{
6103 struct rq *rq = cpu_rq(cpu);
6104 unsigned long flags;
6105
6106 /*
6107 * Only empty task groups can be destroyed; so we can speculatively
6108 * check on_list without danger of it being re-added.
6109 */
6110 if (!tg->cfs_rq[cpu]->on_list)
6111 return;
6112
6113 raw_spin_lock_irqsave(&rq->lock, flags);
6114 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6115 raw_spin_unlock_irqrestore(&rq->lock, flags);
6116}
6117
6118void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6119 struct sched_entity *se, int cpu,
6120 struct sched_entity *parent)
6121{
6122 struct rq *rq = cpu_rq(cpu);
6123
6124 cfs_rq->tg = tg;
6125 cfs_rq->rq = rq;
029632fb
PZ
6126 init_cfs_rq_runtime(cfs_rq);
6127
6128 tg->cfs_rq[cpu] = cfs_rq;
6129 tg->se[cpu] = se;
6130
6131 /* se could be NULL for root_task_group */
6132 if (!se)
6133 return;
6134
6135 if (!parent)
6136 se->cfs_rq = &rq->cfs;
6137 else
6138 se->cfs_rq = parent->my_q;
6139
6140 se->my_q = cfs_rq;
6141 update_load_set(&se->load, 0);
6142 se->parent = parent;
6143}
6144
6145static DEFINE_MUTEX(shares_mutex);
6146
6147int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6148{
6149 int i;
6150 unsigned long flags;
6151
6152 /*
6153 * We can't change the weight of the root cgroup.
6154 */
6155 if (!tg->se[0])
6156 return -EINVAL;
6157
6158 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6159
6160 mutex_lock(&shares_mutex);
6161 if (tg->shares == shares)
6162 goto done;
6163
6164 tg->shares = shares;
6165 for_each_possible_cpu(i) {
6166 struct rq *rq = cpu_rq(i);
6167 struct sched_entity *se;
6168
6169 se = tg->se[i];
6170 /* Propagate contribution to hierarchy */
6171 raw_spin_lock_irqsave(&rq->lock, flags);
71b1da46
FW
6172
6173 /* Possible calls to update_curr() need rq clock */
6174 update_rq_clock(rq);
17bc14b7 6175 for_each_sched_entity(se)
029632fb
PZ
6176 update_cfs_shares(group_cfs_rq(se));
6177 raw_spin_unlock_irqrestore(&rq->lock, flags);
6178 }
6179
6180done:
6181 mutex_unlock(&shares_mutex);
6182 return 0;
6183}
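/*
 * Usage sketch: this is the backend of a cgroup's cpu.shares file; writing
 * 2048 there ends up here and gives the group twice the weight of the
 * default 1024 (NICE_0_LOAD), after clamping to [MIN_SHARES, MAX_SHARES]
 * and propagating the change up the hierarchy via update_cfs_shares().
 */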
6184#else /* CONFIG_FAIR_GROUP_SCHED */
6185
6186void free_fair_sched_group(struct task_group *tg) { }
6187
6188int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6189{
6190 return 1;
6191}
6192
6193void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6194
6195#endif /* CONFIG_FAIR_GROUP_SCHED */
6196
810b3817 6197
6d686f45 6198static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
6199{
6200 struct sched_entity *se = &task->se;
0d721cea
PW
6201 unsigned int rr_interval = 0;
6202
6203 /*
6204 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6205 * idle runqueue:
6206 */
0d721cea 6207 if (rq->cfs.load.weight)
a59f4e07 6208 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
6209
6210 return rr_interval;
6211}
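/*
 * Example, assuming a 6 ms scheduling period and two equally weighted
 * runnable tasks: sched_slice() yields 3 ms each, so sched_rr_get_interval()
 * reports a ~3 ms "timeslice" for a SCHED_OTHER task even though CFS has no
 * fixed slices at all.
 */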
6212
bf0f6f24
IM
6213/*
6214 * All the scheduling class methods:
6215 */
029632fb 6216const struct sched_class fair_sched_class = {
5522d5d5 6217 .next = &idle_sched_class,
bf0f6f24
IM
6218 .enqueue_task = enqueue_task_fair,
6219 .dequeue_task = dequeue_task_fair,
6220 .yield_task = yield_task_fair,
d95f4122 6221 .yield_to_task = yield_to_task_fair,
bf0f6f24 6222
2e09bf55 6223 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
6224
6225 .pick_next_task = pick_next_task_fair,
6226 .put_prev_task = put_prev_task_fair,
6227
681f3e68 6228#ifdef CONFIG_SMP
4ce72a2c 6229 .select_task_rq = select_task_rq_fair,
0a74bef8 6230 .migrate_task_rq = migrate_task_rq_fair,
141965c7 6231
0bcdcf28
CE
6232 .rq_online = rq_online_fair,
6233 .rq_offline = rq_offline_fair,
88ec22d3
PZ
6234
6235 .task_waking = task_waking_fair,
681f3e68 6236#endif
bf0f6f24 6237
83b699ed 6238 .set_curr_task = set_curr_task_fair,
bf0f6f24 6239 .task_tick = task_tick_fair,
cd29fe6f 6240 .task_fork = task_fork_fair,
cb469845
SR
6241
6242 .prio_changed = prio_changed_fair,
da7a735e 6243 .switched_from = switched_from_fair,
cb469845 6244 .switched_to = switched_to_fair,
810b3817 6245
0d721cea
PW
6246 .get_rr_interval = get_rr_interval_fair,
6247
810b3817 6248#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6249 .task_move_group = task_move_group_fair,
810b3817 6250#endif
bf0f6f24
IM
6251};
6252
6253#ifdef CONFIG_SCHED_DEBUG
029632fb 6254void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 6255{
bf0f6f24
IM
6256 struct cfs_rq *cfs_rq;
6257
5973e5b9 6258 rcu_read_lock();
c3b64f1e 6259 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 6260 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 6261 rcu_read_unlock();
bf0f6f24
IM
6262}
6263#endif
029632fb
PZ
6264
6265__init void init_sched_fair_class(void)
6266{
6267#ifdef CONFIG_SMP
6268 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6269
3451d024 6270#ifdef CONFIG_NO_HZ_COMMON
554cecaf 6271 nohz.next_balance = jiffies;
029632fb 6272 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
71325960 6273 cpu_notifier(sched_ilb_notifier, 0);
029632fb
PZ
6274#endif
6275#endif /* SMP */
6276
6277}