]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - kernel/sched/fair.c
sched/numa: Prevent parallel updates to group stats during placement
[mirror_ubuntu-kernels.git] / kernel / sched / fair.c
CommitLineData
bf0f6f24
IM
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
21805085
PZ
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
bf0f6f24
IM
21 */
22
9745512c 23#include <linux/latencytop.h>
1983a922 24#include <linux/sched.h>
3436ae12 25#include <linux/cpumask.h>
029632fb
PZ
26#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
cbee9f88 29#include <linux/mempolicy.h>
e14808b4 30#include <linux/migrate.h>
cbee9f88 31#include <linux/task_work.h>
029632fb
PZ
32
33#include <trace/events/sched.h>
34
35#include "sched.h"
9745512c 36
bf0f6f24 37/*
21805085 38 * Targeted preemption latency for CPU-bound tasks:
864616ee 39 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 40 *
21805085 41 * NOTE: this latency value is not the same as the concept of
d274a4ce
IM
42 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
bf0f6f24 45 *
d274a4ce
IM
46 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
bf0f6f24 48 */
21406928
MG
49unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 51
1983a922
CE
52/*
53 * The initial- and re-scaling of tunables is configurable
54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
2bd8e6d4 64/*
b2be5e96 65 * Minimal preemption granularity for CPU-bound tasks:
864616ee 66 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 67 */
0bf377bb
IM
68unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
21805085
PZ
70
71/*
b2be5e96
PZ
72 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
0bf377bb 74static unsigned int sched_nr_latency = 8;
b2be5e96
PZ
75
76/*
2bba22c5 77 * After fork, child runs first. If set to 0 (default) then
b2be5e96 78 * parent will (try to) run first.
21805085 79 */
2bba22c5 80unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 81
bf0f6f24
IM
82/*
83 * SCHED_OTHER wake-up granularity.
172e082a 84 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24
IM
85 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
172e082a 90unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
0bcdcf28 91unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 92
da84d961
IM
93const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
a7a4f8a7
PT
95/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
ec12cb7f
PT
102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114#endif
115
8527632d
PG
116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
029632fb
PZ
134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
181#if BITS_PER_LONG == 32
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32
188
189/*
190 * Shift right and round:
191 */
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193
194/*
195 * delta *= weight / lw
196 */
197static unsigned long
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{
201 u64 tmp;
202
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212
213 if (!lw->inv_weight) {
214 unsigned long w = scale_load_down(lw->weight);
215
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 lw->inv_weight = 1;
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 }
223
224 /*
225 * Check whether we'd overflow the 64-bit multiplication:
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234}
235
236
237const struct sched_class fair_sched_class;
a4c2f00f 238
bf0f6f24
IM
239/**************************************************************
240 * CFS operations on generic schedulable entities:
241 */
242
62160e3f 243#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 244
62160e3f 245/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
246static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247{
62160e3f 248 return cfs_rq->rq;
bf0f6f24
IM
249}
250
62160e3f
IM
251/* An entity is a task if it doesn't "own" a runqueue */
252#define entity_is_task(se) (!se->my_q)
bf0f6f24 253
8f48894f
PZ
254static inline struct task_struct *task_of(struct sched_entity *se)
255{
256#ifdef CONFIG_SCHED_DEBUG
257 WARN_ON_ONCE(!entity_is_task(se));
258#endif
259 return container_of(se, struct task_struct, se);
260}
261
b758149c
PZ
262/* Walk up scheduling entities hierarchy */
263#define for_each_sched_entity(se) \
264 for (; se; se = se->parent)
265
266static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267{
268 return p->se.cfs_rq;
269}
270
271/* runqueue on which this entity is (to be) queued */
272static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273{
274 return se->cfs_rq;
275}
276
277/* runqueue "owned" by this group */
278static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279{
280 return grp->my_q;
281}
282
aff3e498
PT
283static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 int force_update);
9ee474f5 285
3d4b47b4
PZ
286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
67e86250
PT
289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
3d4b47b4 301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
67e86250 302 }
3d4b47b4
PZ
303
304 cfs_rq->on_list = 1;
9ee474f5 305 /* We should have no load, but we need to update last_decay. */
aff3e498 306 update_cfs_rq_blocked_load(cfs_rq, 0);
3d4b47b4
PZ
307 }
308}
309
310static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311{
312 if (cfs_rq->on_list) {
313 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 cfs_rq->on_list = 0;
315 }
316}
317
b758149c
PZ
318/* Iterate thr' all leaf cfs_rq's on a runqueue */
319#define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321
322/* Do the two (enqueued) entities belong to the same group ? */
323static inline int
324is_same_group(struct sched_entity *se, struct sched_entity *pse)
325{
326 if (se->cfs_rq == pse->cfs_rq)
327 return 1;
328
329 return 0;
330}
331
332static inline struct sched_entity *parent_entity(struct sched_entity *se)
333{
334 return se->parent;
335}
336
464b7527
PZ
337/* return depth at which a sched entity is present in the hierarchy */
338static inline int depth_se(struct sched_entity *se)
339{
340 int depth = 0;
341
342 for_each_sched_entity(se)
343 depth++;
344
345 return depth;
346}
347
348static void
349find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350{
351 int se_depth, pse_depth;
352
353 /*
354 * preemption test can be made between sibling entities who are in the
355 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
356 * both tasks until we find their ancestors who are siblings of common
357 * parent.
358 */
359
360 /* First walk up until both entities are at same depth */
361 se_depth = depth_se(*se);
362 pse_depth = depth_se(*pse);
363
364 while (se_depth > pse_depth) {
365 se_depth--;
366 *se = parent_entity(*se);
367 }
368
369 while (pse_depth > se_depth) {
370 pse_depth--;
371 *pse = parent_entity(*pse);
372 }
373
374 while (!is_same_group(*se, *pse)) {
375 *se = parent_entity(*se);
376 *pse = parent_entity(*pse);
377 }
378}
379
8f48894f
PZ
380#else /* !CONFIG_FAIR_GROUP_SCHED */
381
382static inline struct task_struct *task_of(struct sched_entity *se)
383{
384 return container_of(se, struct task_struct, se);
385}
bf0f6f24 386
62160e3f
IM
387static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388{
389 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
390}
391
392#define entity_is_task(se) 1
393
b758149c
PZ
394#define for_each_sched_entity(se) \
395 for (; se; se = NULL)
bf0f6f24 396
b758149c 397static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 398{
b758149c 399 return &task_rq(p)->cfs;
bf0f6f24
IM
400}
401
b758149c
PZ
402static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403{
404 struct task_struct *p = task_of(se);
405 struct rq *rq = task_rq(p);
406
407 return &rq->cfs;
408}
409
410/* runqueue "owned" by this group */
411static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412{
413 return NULL;
414}
415
3d4b47b4
PZ
416static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417{
418}
419
420static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421{
422}
423
b758149c
PZ
424#define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426
427static inline int
428is_same_group(struct sched_entity *se, struct sched_entity *pse)
429{
430 return 1;
431}
432
433static inline struct sched_entity *parent_entity(struct sched_entity *se)
434{
435 return NULL;
436}
437
464b7527
PZ
438static inline void
439find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440{
441}
442
b758149c
PZ
443#endif /* CONFIG_FAIR_GROUP_SCHED */
444
6c16a6dc
PZ
445static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
bf0f6f24
IM
447
448/**************************************************************
449 * Scheduling class tree data structure manipulation methods:
450 */
451
1bf08230 452static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
02e0431a 453{
1bf08230 454 s64 delta = (s64)(vruntime - max_vruntime);
368059a9 455 if (delta > 0)
1bf08230 456 max_vruntime = vruntime;
02e0431a 457
1bf08230 458 return max_vruntime;
02e0431a
PZ
459}
460
0702e3eb 461static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
462{
463 s64 delta = (s64)(vruntime - min_vruntime);
464 if (delta < 0)
465 min_vruntime = vruntime;
466
467 return min_vruntime;
468}
469
54fdc581
FC
470static inline int entity_before(struct sched_entity *a,
471 struct sched_entity *b)
472{
473 return (s64)(a->vruntime - b->vruntime) < 0;
474}
475
1af5f730
PZ
476static void update_min_vruntime(struct cfs_rq *cfs_rq)
477{
478 u64 vruntime = cfs_rq->min_vruntime;
479
480 if (cfs_rq->curr)
481 vruntime = cfs_rq->curr->vruntime;
482
483 if (cfs_rq->rb_leftmost) {
484 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 struct sched_entity,
486 run_node);
487
e17036da 488 if (!cfs_rq->curr)
1af5f730
PZ
489 vruntime = se->vruntime;
490 else
491 vruntime = min_vruntime(vruntime, se->vruntime);
492 }
493
1bf08230 494 /* ensure we never gain time by being placed backwards. */
1af5f730 495 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
496#ifndef CONFIG_64BIT
497 smp_wmb();
498 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499#endif
1af5f730
PZ
500}
501
bf0f6f24
IM
502/*
503 * Enqueue an entity into the rb-tree:
504 */
0702e3eb 505static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
506{
507 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 struct rb_node *parent = NULL;
509 struct sched_entity *entry;
bf0f6f24
IM
510 int leftmost = 1;
511
512 /*
513 * Find the right place in the rbtree:
514 */
515 while (*link) {
516 parent = *link;
517 entry = rb_entry(parent, struct sched_entity, run_node);
518 /*
519 * We dont care about collisions. Nodes with
520 * the same key stay together.
521 */
2bd2d6f2 522 if (entity_before(se, entry)) {
bf0f6f24
IM
523 link = &parent->rb_left;
524 } else {
525 link = &parent->rb_right;
526 leftmost = 0;
527 }
528 }
529
530 /*
531 * Maintain a cache of leftmost tree entries (it is frequently
532 * used):
533 */
1af5f730 534 if (leftmost)
57cb499d 535 cfs_rq->rb_leftmost = &se->run_node;
bf0f6f24
IM
536
537 rb_link_node(&se->run_node, parent, link);
538 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
539}
540
0702e3eb 541static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 542{
3fe69747
PZ
543 if (cfs_rq->rb_leftmost == &se->run_node) {
544 struct rb_node *next_node;
3fe69747
PZ
545
546 next_node = rb_next(&se->run_node);
547 cfs_rq->rb_leftmost = next_node;
3fe69747 548 }
e9acbff6 549
bf0f6f24 550 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
551}
552
029632fb 553struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 554{
f4b6755f
PZ
555 struct rb_node *left = cfs_rq->rb_leftmost;
556
557 if (!left)
558 return NULL;
559
560 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
561}
562
ac53db59
RR
563static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564{
565 struct rb_node *next = rb_next(&se->run_node);
566
567 if (!next)
568 return NULL;
569
570 return rb_entry(next, struct sched_entity, run_node);
571}
572
573#ifdef CONFIG_SCHED_DEBUG
029632fb 574struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 575{
7eee3e67 576 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
aeb73b04 577
70eee74b
BS
578 if (!last)
579 return NULL;
7eee3e67
IM
580
581 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
582}
583
bf0f6f24
IM
584/**************************************************************
585 * Scheduling class statistics methods:
586 */
587
acb4a848 588int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 589 void __user *buffer, size_t *lenp,
b2be5e96
PZ
590 loff_t *ppos)
591{
8d65af78 592 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
acb4a848 593 int factor = get_update_sysctl_factor();
b2be5e96
PZ
594
595 if (ret || !write)
596 return ret;
597
598 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 sysctl_sched_min_granularity);
600
acb4a848
CE
601#define WRT_SYSCTL(name) \
602 (normalized_sysctl_##name = sysctl_##name / (factor))
603 WRT_SYSCTL(sched_min_granularity);
604 WRT_SYSCTL(sched_latency);
605 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
606#undef WRT_SYSCTL
607
b2be5e96
PZ
608 return 0;
609}
610#endif
647e7cac 611
a7be37ac 612/*
f9c0b095 613 * delta /= w
a7be37ac
PZ
614 */
615static inline unsigned long
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{
f9c0b095
PZ
618 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
620
621 return delta;
622}
623
647e7cac
IM
624/*
625 * The idea is to set a period in which each task runs once.
626 *
532b1858 627 * When there are too many tasks (sched_nr_latency) we have to stretch
647e7cac
IM
628 * this period because otherwise the slices get too small.
629 *
630 * p = (nr <= nl) ? l : l*nr/nl
631 */
4d78e7b6
PZ
632static u64 __sched_period(unsigned long nr_running)
633{
634 u64 period = sysctl_sched_latency;
b2be5e96 635 unsigned long nr_latency = sched_nr_latency;
4d78e7b6
PZ
636
637 if (unlikely(nr_running > nr_latency)) {
4bf0b771 638 period = sysctl_sched_min_granularity;
4d78e7b6 639 period *= nr_running;
4d78e7b6
PZ
640 }
641
642 return period;
643}
644
647e7cac
IM
645/*
646 * We calculate the wall-time slice from the period by taking a part
647 * proportional to the weight.
648 *
f9c0b095 649 * s = p*P[w/rw]
647e7cac 650 */
6d0f0ebd 651static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 652{
0a582440 653 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 654
0a582440 655 for_each_sched_entity(se) {
6272d68c 656 struct load_weight *load;
3104bf03 657 struct load_weight lw;
6272d68c
LM
658
659 cfs_rq = cfs_rq_of(se);
660 load = &cfs_rq->load;
f9c0b095 661
0a582440 662 if (unlikely(!se->on_rq)) {
3104bf03 663 lw = cfs_rq->load;
0a582440
MG
664
665 update_load_add(&lw, se->load.weight);
666 load = &lw;
667 }
668 slice = calc_delta_mine(slice, se->load.weight, load);
669 }
670 return slice;
bf0f6f24
IM
671}
672
647e7cac 673/*
660cc00f 674 * We calculate the vruntime slice of a to-be-inserted task.
647e7cac 675 *
f9c0b095 676 * vs = s/w
647e7cac 677 */
f9c0b095 678static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 679{
f9c0b095 680 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
681}
682
a75cdaa9 683#ifdef CONFIG_SMP
fb13c7ee
MG
684static unsigned long task_h_load(struct task_struct *p);
685
a75cdaa9
AS
686static inline void __update_task_entity_contrib(struct sched_entity *se);
687
688/* Give new task start runnable values to heavy its load in infant time */
689void init_task_runnable_average(struct task_struct *p)
690{
691 u32 slice;
692
693 p->se.avg.decay_count = 0;
694 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
695 p->se.avg.runnable_avg_sum = slice;
696 p->se.avg.runnable_avg_period = slice;
697 __update_task_entity_contrib(&p->se);
698}
699#else
700void init_task_runnable_average(struct task_struct *p)
701{
702}
703#endif
704
bf0f6f24
IM
705/*
706 * Update the current task's runtime statistics. Skip current tasks that
707 * are not in our scheduling class.
708 */
709static inline void
8ebc91d9
IM
710__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
711 unsigned long delta_exec)
bf0f6f24 712{
bbdba7c0 713 unsigned long delta_exec_weighted;
bf0f6f24 714
41acab88
LDM
715 schedstat_set(curr->statistics.exec_max,
716 max((u64)delta_exec, curr->statistics.exec_max));
bf0f6f24
IM
717
718 curr->sum_exec_runtime += delta_exec;
7a62eabc 719 schedstat_add(cfs_rq, exec_clock, delta_exec);
a7be37ac 720 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
88ec22d3 721
e9acbff6 722 curr->vruntime += delta_exec_weighted;
1af5f730 723 update_min_vruntime(cfs_rq);
bf0f6f24
IM
724}
725
b7cc0896 726static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 727{
429d43bc 728 struct sched_entity *curr = cfs_rq->curr;
78becc27 729 u64 now = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
730 unsigned long delta_exec;
731
732 if (unlikely(!curr))
733 return;
734
735 /*
736 * Get the amount of time the current task was running
737 * since the last time we changed load (this cannot
738 * overflow on 32 bits):
739 */
8ebc91d9 740 delta_exec = (unsigned long)(now - curr->exec_start);
34f28ecd
PZ
741 if (!delta_exec)
742 return;
bf0f6f24 743
8ebc91d9
IM
744 __update_curr(cfs_rq, curr, delta_exec);
745 curr->exec_start = now;
d842de87
SV
746
747 if (entity_is_task(curr)) {
748 struct task_struct *curtask = task_of(curr);
749
f977bb49 750 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d842de87 751 cpuacct_charge(curtask, delta_exec);
f06febc9 752 account_group_exec_runtime(curtask, delta_exec);
d842de87 753 }
ec12cb7f
PT
754
755 account_cfs_rq_runtime(cfs_rq, delta_exec);
bf0f6f24
IM
756}
757
758static inline void
5870db5b 759update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 760{
78becc27 761 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
bf0f6f24
IM
762}
763
bf0f6f24
IM
764/*
765 * Task is being enqueued - update stats:
766 */
d2417e5a 767static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 768{
bf0f6f24
IM
769 /*
770 * Are we enqueueing a waiting task? (for current tasks
771 * a dequeue/enqueue event is a NOP)
772 */
429d43bc 773 if (se != cfs_rq->curr)
5870db5b 774 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
775}
776
bf0f6f24 777static void
9ef0a961 778update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 779{
41acab88 780 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
78becc27 781 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
41acab88
LDM
782 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
783 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
78becc27 784 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
785#ifdef CONFIG_SCHEDSTATS
786 if (entity_is_task(se)) {
787 trace_sched_stat_wait(task_of(se),
78becc27 788 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
768d0c27
PZ
789 }
790#endif
41acab88 791 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
792}
793
794static inline void
19b6a2e3 795update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 796{
bf0f6f24
IM
797 /*
798 * Mark the end of the wait period if dequeueing a
799 * waiting task:
800 */
429d43bc 801 if (se != cfs_rq->curr)
9ef0a961 802 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
803}
804
805/*
806 * We are picking a new current task - update its stats:
807 */
808static inline void
79303e9e 809update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
810{
811 /*
812 * We are starting a new run period:
813 */
78becc27 814 se->exec_start = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
815}
816
bf0f6f24
IM
817/**************************************************
818 * Scheduling class queueing methods:
819 */
820
cbee9f88
PZ
821#ifdef CONFIG_NUMA_BALANCING
822/*
598f0ec0
MG
823 * Approximate time to scan a full NUMA task in ms. The task scan period is
824 * calculated based on the tasks virtual memory size and
825 * numa_balancing_scan_size.
cbee9f88 826 */
598f0ec0
MG
827unsigned int sysctl_numa_balancing_scan_period_min = 1000;
828unsigned int sysctl_numa_balancing_scan_period_max = 60000;
829unsigned int sysctl_numa_balancing_scan_period_reset = 60000;
6e5fb223
PZ
830
831/* Portion of address space to scan in MB */
832unsigned int sysctl_numa_balancing_scan_size = 256;
cbee9f88 833
4b96a29b
PZ
834/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
835unsigned int sysctl_numa_balancing_scan_delay = 1000;
836
598f0ec0
MG
837static unsigned int task_nr_scan_windows(struct task_struct *p)
838{
839 unsigned long rss = 0;
840 unsigned long nr_scan_pages;
841
842 /*
843 * Calculations based on RSS as non-present and empty pages are skipped
844 * by the PTE scanner and NUMA hinting faults should be trapped based
845 * on resident pages
846 */
847 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
848 rss = get_mm_rss(p->mm);
849 if (!rss)
850 rss = nr_scan_pages;
851
852 rss = round_up(rss, nr_scan_pages);
853 return rss / nr_scan_pages;
854}
855
856/* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
857#define MAX_SCAN_WINDOW 2560
858
859static unsigned int task_scan_min(struct task_struct *p)
860{
861 unsigned int scan, floor;
862 unsigned int windows = 1;
863
864 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
865 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
866 floor = 1000 / windows;
867
868 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
869 return max_t(unsigned int, floor, scan);
870}
871
872static unsigned int task_scan_max(struct task_struct *p)
873{
874 unsigned int smin = task_scan_min(p);
875 unsigned int smax;
876
877 /* Watch for min being lower than max due to floor calculations */
878 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
879 return max(smin, smax);
880}
881
3a7053b3
MG
882/*
883 * Once a preferred node is selected the scheduler balancer will prefer moving
884 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
885 * scans. This will give the process the chance to accumulate more faults on
886 * the preferred node but still allow the scheduler to move the task again if
887 * the nodes CPUs are overloaded.
888 */
6fe6b2d6 889unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
3a7053b3 890
8c8a743c
PZ
891struct numa_group {
892 atomic_t refcount;
893
894 spinlock_t lock; /* nr_tasks, tasks */
895 int nr_tasks;
e29cf08b 896 pid_t gid;
8c8a743c
PZ
897 struct list_head task_list;
898
899 struct rcu_head rcu;
83e1d2cd 900 atomic_long_t total_faults;
8c8a743c
PZ
901 atomic_long_t faults[0];
902};
903
e29cf08b
MG
904pid_t task_numa_group_id(struct task_struct *p)
905{
906 return p->numa_group ? p->numa_group->gid : 0;
907}
908
ac8e895b
MG
909static inline int task_faults_idx(int nid, int priv)
910{
911 return 2 * nid + priv;
912}
913
914static inline unsigned long task_faults(struct task_struct *p, int nid)
915{
916 if (!p->numa_faults)
917 return 0;
918
919 return p->numa_faults[task_faults_idx(nid, 0)] +
920 p->numa_faults[task_faults_idx(nid, 1)];
921}
922
83e1d2cd
MG
923static inline unsigned long group_faults(struct task_struct *p, int nid)
924{
925 if (!p->numa_group)
926 return 0;
927
928 return atomic_long_read(&p->numa_group->faults[2*nid]) +
929 atomic_long_read(&p->numa_group->faults[2*nid+1]);
930}
931
932/*
933 * These return the fraction of accesses done by a particular task, or
934 * task group, on a particular numa node. The group weight is given a
935 * larger multiplier, in order to group tasks together that are almost
936 * evenly spread out between numa nodes.
937 */
938static inline unsigned long task_weight(struct task_struct *p, int nid)
939{
940 unsigned long total_faults;
941
942 if (!p->numa_faults)
943 return 0;
944
945 total_faults = p->total_numa_faults;
946
947 if (!total_faults)
948 return 0;
949
950 return 1000 * task_faults(p, nid) / total_faults;
951}
952
953static inline unsigned long group_weight(struct task_struct *p, int nid)
954{
955 unsigned long total_faults;
956
957 if (!p->numa_group)
958 return 0;
959
960 total_faults = atomic_long_read(&p->numa_group->total_faults);
961
962 if (!total_faults)
963 return 0;
964
965 return 1200 * group_faults(p, nid) / total_faults;
966}
967
e6628d5b 968static unsigned long weighted_cpuload(const int cpu);
58d081b5
MG
969static unsigned long source_load(int cpu, int type);
970static unsigned long target_load(int cpu, int type);
971static unsigned long power_of(int cpu);
972static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
973
fb13c7ee 974/* Cached statistics for all CPUs within a node */
58d081b5 975struct numa_stats {
fb13c7ee 976 unsigned long nr_running;
58d081b5 977 unsigned long load;
fb13c7ee
MG
978
979 /* Total compute capacity of CPUs on a node */
980 unsigned long power;
981
982 /* Approximate capacity in terms of runnable tasks on a node */
983 unsigned long capacity;
984 int has_capacity;
58d081b5 985};
e6628d5b 986
fb13c7ee
MG
987/*
988 * XXX borrowed from update_sg_lb_stats
989 */
990static void update_numa_stats(struct numa_stats *ns, int nid)
991{
992 int cpu;
993
994 memset(ns, 0, sizeof(*ns));
995 for_each_cpu(cpu, cpumask_of_node(nid)) {
996 struct rq *rq = cpu_rq(cpu);
997
998 ns->nr_running += rq->nr_running;
999 ns->load += weighted_cpuload(cpu);
1000 ns->power += power_of(cpu);
1001 }
1002
1003 ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
1004 ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
1005 ns->has_capacity = (ns->nr_running < ns->capacity);
1006}
1007
58d081b5
MG
1008struct task_numa_env {
1009 struct task_struct *p;
e6628d5b 1010
58d081b5
MG
1011 int src_cpu, src_nid;
1012 int dst_cpu, dst_nid;
e6628d5b 1013
58d081b5 1014 struct numa_stats src_stats, dst_stats;
e6628d5b 1015
fb13c7ee
MG
1016 int imbalance_pct, idx;
1017
1018 struct task_struct *best_task;
1019 long best_imp;
58d081b5
MG
1020 int best_cpu;
1021};
1022
fb13c7ee
MG
1023static void task_numa_assign(struct task_numa_env *env,
1024 struct task_struct *p, long imp)
1025{
1026 if (env->best_task)
1027 put_task_struct(env->best_task);
1028 if (p)
1029 get_task_struct(p);
1030
1031 env->best_task = p;
1032 env->best_imp = imp;
1033 env->best_cpu = env->dst_cpu;
1034}
1035
1036/*
1037 * This checks if the overall compute and NUMA accesses of the system would
1038 * be improved if the source tasks was migrated to the target dst_cpu taking
1039 * into account that it might be best if task running on the dst_cpu should
1040 * be exchanged with the source task
1041 */
1042static void task_numa_compare(struct task_numa_env *env, long imp)
1043{
1044 struct rq *src_rq = cpu_rq(env->src_cpu);
1045 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1046 struct task_struct *cur;
1047 long dst_load, src_load;
1048 long load;
1049
1050 rcu_read_lock();
1051 cur = ACCESS_ONCE(dst_rq->curr);
1052 if (cur->pid == 0) /* idle */
1053 cur = NULL;
1054
1055 /*
1056 * "imp" is the fault differential for the source task between the
1057 * source and destination node. Calculate the total differential for
1058 * the source task and potential destination task. The more negative
1059 * the value is, the more rmeote accesses that would be expected to
1060 * be incurred if the tasks were swapped.
1061 */
1062 if (cur) {
1063 /* Skip this swap candidate if cannot move to the source cpu */
1064 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1065 goto unlock;
1066
83e1d2cd
MG
1067 imp += task_weight(cur, env->src_nid) +
1068 group_weight(cur, env->src_nid) -
1069 task_weight(cur, env->dst_nid) -
1070 group_weight(cur, env->dst_nid);
fb13c7ee
MG
1071 }
1072
1073 if (imp < env->best_imp)
1074 goto unlock;
1075
1076 if (!cur) {
1077 /* Is there capacity at our destination? */
1078 if (env->src_stats.has_capacity &&
1079 !env->dst_stats.has_capacity)
1080 goto unlock;
1081
1082 goto balance;
1083 }
1084
1085 /* Balance doesn't matter much if we're running a task per cpu */
1086 if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
1087 goto assign;
1088
1089 /*
1090 * In the overloaded case, try and keep the load balanced.
1091 */
1092balance:
1093 dst_load = env->dst_stats.load;
1094 src_load = env->src_stats.load;
1095
1096 /* XXX missing power terms */
1097 load = task_h_load(env->p);
1098 dst_load += load;
1099 src_load -= load;
1100
1101 if (cur) {
1102 load = task_h_load(cur);
1103 dst_load -= load;
1104 src_load += load;
1105 }
1106
1107 /* make src_load the smaller */
1108 if (dst_load < src_load)
1109 swap(dst_load, src_load);
1110
1111 if (src_load * env->imbalance_pct < dst_load * 100)
1112 goto unlock;
1113
1114assign:
1115 task_numa_assign(env, cur, imp);
1116unlock:
1117 rcu_read_unlock();
1118}
1119
2c8a50aa
MG
1120static void task_numa_find_cpu(struct task_numa_env *env, long imp)
1121{
1122 int cpu;
1123
1124 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1125 /* Skip this CPU if the source task cannot migrate */
1126 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1127 continue;
1128
1129 env->dst_cpu = cpu;
1130 task_numa_compare(env, imp);
1131 }
1132}
1133
58d081b5
MG
1134static int task_numa_migrate(struct task_struct *p)
1135{
58d081b5
MG
1136 struct task_numa_env env = {
1137 .p = p,
fb13c7ee 1138
58d081b5
MG
1139 .src_cpu = task_cpu(p),
1140 .src_nid = cpu_to_node(task_cpu(p)),
fb13c7ee
MG
1141
1142 .imbalance_pct = 112,
1143
1144 .best_task = NULL,
1145 .best_imp = 0,
1146 .best_cpu = -1
58d081b5
MG
1147 };
1148 struct sched_domain *sd;
83e1d2cd 1149 unsigned long weight;
2c8a50aa
MG
1150 int nid, ret;
1151 long imp;
e6628d5b 1152
58d081b5 1153 /*
fb13c7ee
MG
1154 * Pick the lowest SD_NUMA domain, as that would have the smallest
1155 * imbalance and would be the first to start moving tasks about.
1156 *
1157 * And we want to avoid any moving of tasks about, as that would create
1158 * random movement of tasks -- counter the numa conditions we're trying
1159 * to satisfy here.
58d081b5
MG
1160 */
1161 rcu_read_lock();
fb13c7ee
MG
1162 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1163 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
e6628d5b
MG
1164 rcu_read_unlock();
1165
83e1d2cd 1166 weight = task_weight(p, env.src_nid) + group_weight(p, env.src_nid);
fb13c7ee 1167 update_numa_stats(&env.src_stats, env.src_nid);
2c8a50aa 1168 env.dst_nid = p->numa_preferred_nid;
83e1d2cd 1169 imp = task_weight(p, env.dst_nid) + group_weight(p, env.dst_nid) - weight;
2c8a50aa 1170 update_numa_stats(&env.dst_stats, env.dst_nid);
58d081b5 1171
e1dda8a7
RR
1172 /* If the preferred nid has capacity, try to use it. */
1173 if (env.dst_stats.has_capacity)
2c8a50aa 1174 task_numa_find_cpu(&env, imp);
e1dda8a7
RR
1175
1176 /* No space available on the preferred nid. Look elsewhere. */
1177 if (env.best_cpu == -1) {
2c8a50aa
MG
1178 for_each_online_node(nid) {
1179 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1180 continue;
58d081b5 1181
83e1d2cd
MG
1182 /* Only consider nodes where both task and groups benefit */
1183 imp = task_weight(p, nid) + group_weight(p, nid) - weight;
2c8a50aa 1184 if (imp < 0)
fb13c7ee
MG
1185 continue;
1186
2c8a50aa
MG
1187 env.dst_nid = nid;
1188 update_numa_stats(&env.dst_stats, env.dst_nid);
1189 task_numa_find_cpu(&env, imp);
58d081b5
MG
1190 }
1191 }
1192
fb13c7ee
MG
1193 /* No better CPU than the current one was found. */
1194 if (env.best_cpu == -1)
1195 return -EAGAIN;
1196
1197 if (env.best_task == NULL) {
1198 int ret = migrate_task_to(p, env.best_cpu);
1199 return ret;
1200 }
1201
1202 ret = migrate_swap(p, env.best_task);
1203 put_task_struct(env.best_task);
1204 return ret;
e6628d5b
MG
1205}
1206
6b9a7460
MG
1207/* Attempt to migrate a task to a CPU on the preferred node. */
1208static void numa_migrate_preferred(struct task_struct *p)
1209{
1210 /* Success if task is already running on preferred CPU */
1211 p->numa_migrate_retry = 0;
06ea5e03
RR
1212 if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) {
1213 /*
1214 * If migration is temporarily disabled due to a task migration
1215 * then re-enable it now as the task is running on its
1216 * preferred node and memory should migrate locally
1217 */
1218 if (!p->numa_migrate_seq)
1219 p->numa_migrate_seq++;
6b9a7460 1220 return;
06ea5e03 1221 }
6b9a7460
MG
1222
1223 /* This task has no NUMA fault statistics yet */
1224 if (unlikely(p->numa_preferred_nid == -1))
1225 return;
1226
1227 /* Otherwise, try migrate to a CPU on the preferred node */
1228 if (task_numa_migrate(p) != 0)
1229 p->numa_migrate_retry = jiffies + HZ*5;
1230}
1231
cbee9f88
PZ
1232static void task_numa_placement(struct task_struct *p)
1233{
83e1d2cd
MG
1234 int seq, nid, max_nid = -1, max_group_nid = -1;
1235 unsigned long max_faults = 0, max_group_faults = 0;
7dbd13ed 1236 spinlock_t *group_lock = NULL;
cbee9f88 1237
2832bc19 1238 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
1239 if (p->numa_scan_seq == seq)
1240 return;
1241 p->numa_scan_seq = seq;
3a7053b3 1242 p->numa_migrate_seq++;
598f0ec0 1243 p->numa_scan_period_max = task_scan_max(p);
cbee9f88 1244
7dbd13ed
MG
1245 /* If the task is part of a group prevent parallel updates to group stats */
1246 if (p->numa_group) {
1247 group_lock = &p->numa_group->lock;
1248 spin_lock(group_lock);
1249 }
1250
688b7585
MG
1251 /* Find the node with the highest number of faults */
1252 for_each_online_node(nid) {
83e1d2cd 1253 unsigned long faults = 0, group_faults = 0;
ac8e895b 1254 int priv, i;
745d6147 1255
ac8e895b 1256 for (priv = 0; priv < 2; priv++) {
8c8a743c
PZ
1257 long diff;
1258
ac8e895b 1259 i = task_faults_idx(nid, priv);
8c8a743c 1260 diff = -p->numa_faults[i];
745d6147 1261
ac8e895b
MG
1262 /* Decay existing window, copy faults since last scan */
1263 p->numa_faults[i] >>= 1;
1264 p->numa_faults[i] += p->numa_faults_buffer[i];
1265 p->numa_faults_buffer[i] = 0;
fb13c7ee
MG
1266
1267 faults += p->numa_faults[i];
8c8a743c 1268 diff += p->numa_faults[i];
83e1d2cd 1269 p->total_numa_faults += diff;
8c8a743c
PZ
1270 if (p->numa_group) {
1271 /* safe because we can only change our own group */
1272 atomic_long_add(diff, &p->numa_group->faults[i]);
83e1d2cd
MG
1273 atomic_long_add(diff, &p->numa_group->total_faults);
1274 group_faults += atomic_long_read(&p->numa_group->faults[i]);
8c8a743c 1275 }
ac8e895b
MG
1276 }
1277
688b7585
MG
1278 if (faults > max_faults) {
1279 max_faults = faults;
1280 max_nid = nid;
1281 }
83e1d2cd
MG
1282
1283 if (group_faults > max_group_faults) {
1284 max_group_faults = group_faults;
1285 max_group_nid = nid;
1286 }
1287 }
1288
7dbd13ed
MG
1289 if (p->numa_group) {
1290 /*
1291 * If the preferred task and group nids are different,
1292 * iterate over the nodes again to find the best place.
1293 */
1294 if (max_nid != max_group_nid) {
1295 unsigned long weight, max_weight = 0;
1296
1297 for_each_online_node(nid) {
1298 weight = task_weight(p, nid) + group_weight(p, nid);
1299 if (weight > max_weight) {
1300 max_weight = weight;
1301 max_nid = nid;
1302 }
83e1d2cd
MG
1303 }
1304 }
7dbd13ed
MG
1305
1306 spin_unlock(group_lock);
688b7585
MG
1307 }
1308
6b9a7460 1309 /* Preferred node as the node with the most faults */
3a7053b3 1310 if (max_faults && max_nid != p->numa_preferred_nid) {
e6628d5b 1311 /* Update the preferred nid and migrate task if possible */
688b7585 1312 p->numa_preferred_nid = max_nid;
6fe6b2d6 1313 p->numa_migrate_seq = 1;
6b9a7460 1314 numa_migrate_preferred(p);
3a7053b3 1315 }
cbee9f88
PZ
1316}
1317
8c8a743c
PZ
1318static inline int get_numa_group(struct numa_group *grp)
1319{
1320 return atomic_inc_not_zero(&grp->refcount);
1321}
1322
1323static inline void put_numa_group(struct numa_group *grp)
1324{
1325 if (atomic_dec_and_test(&grp->refcount))
1326 kfree_rcu(grp, rcu);
1327}
1328
1329static void double_lock(spinlock_t *l1, spinlock_t *l2)
1330{
1331 if (l1 > l2)
1332 swap(l1, l2);
1333
1334 spin_lock(l1);
1335 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1336}
1337
1338static void task_numa_group(struct task_struct *p, int cpupid)
1339{
1340 struct numa_group *grp, *my_grp;
1341 struct task_struct *tsk;
1342 bool join = false;
1343 int cpu = cpupid_to_cpu(cpupid);
1344 int i;
1345
1346 if (unlikely(!p->numa_group)) {
1347 unsigned int size = sizeof(struct numa_group) +
1348 2*nr_node_ids*sizeof(atomic_long_t);
1349
1350 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1351 if (!grp)
1352 return;
1353
1354 atomic_set(&grp->refcount, 1);
1355 spin_lock_init(&grp->lock);
1356 INIT_LIST_HEAD(&grp->task_list);
e29cf08b 1357 grp->gid = p->pid;
8c8a743c
PZ
1358
1359 for (i = 0; i < 2*nr_node_ids; i++)
1360 atomic_long_set(&grp->faults[i], p->numa_faults[i]);
1361
83e1d2cd
MG
1362 atomic_long_set(&grp->total_faults, p->total_numa_faults);
1363
8c8a743c
PZ
1364 list_add(&p->numa_entry, &grp->task_list);
1365 grp->nr_tasks++;
1366 rcu_assign_pointer(p->numa_group, grp);
1367 }
1368
1369 rcu_read_lock();
1370 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1371
1372 if (!cpupid_match_pid(tsk, cpupid))
1373 goto unlock;
1374
1375 grp = rcu_dereference(tsk->numa_group);
1376 if (!grp)
1377 goto unlock;
1378
1379 my_grp = p->numa_group;
1380 if (grp == my_grp)
1381 goto unlock;
1382
1383 /*
1384 * Only join the other group if its bigger; if we're the bigger group,
1385 * the other task will join us.
1386 */
1387 if (my_grp->nr_tasks > grp->nr_tasks)
1388 goto unlock;
1389
1390 /*
1391 * Tie-break on the grp address.
1392 */
1393 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1394 goto unlock;
1395
1396 if (!get_numa_group(grp))
1397 goto unlock;
1398
1399 join = true;
1400
1401unlock:
1402 rcu_read_unlock();
1403
1404 if (!join)
1405 return;
1406
1407 for (i = 0; i < 2*nr_node_ids; i++) {
1408 atomic_long_sub(p->numa_faults[i], &my_grp->faults[i]);
1409 atomic_long_add(p->numa_faults[i], &grp->faults[i]);
1410 }
83e1d2cd
MG
1411 atomic_long_sub(p->total_numa_faults, &my_grp->total_faults);
1412 atomic_long_add(p->total_numa_faults, &grp->total_faults);
8c8a743c
PZ
1413
1414 double_lock(&my_grp->lock, &grp->lock);
1415
1416 list_move(&p->numa_entry, &grp->task_list);
1417 my_grp->nr_tasks--;
1418 grp->nr_tasks++;
1419
1420 spin_unlock(&my_grp->lock);
1421 spin_unlock(&grp->lock);
1422
1423 rcu_assign_pointer(p->numa_group, grp);
1424
1425 put_numa_group(my_grp);
1426}
1427
1428void task_numa_free(struct task_struct *p)
1429{
1430 struct numa_group *grp = p->numa_group;
1431 int i;
82727018 1432 void *numa_faults = p->numa_faults;
8c8a743c
PZ
1433
1434 if (grp) {
1435 for (i = 0; i < 2*nr_node_ids; i++)
1436 atomic_long_sub(p->numa_faults[i], &grp->faults[i]);
1437
83e1d2cd
MG
1438 atomic_long_sub(p->total_numa_faults, &grp->total_faults);
1439
8c8a743c
PZ
1440 spin_lock(&grp->lock);
1441 list_del(&p->numa_entry);
1442 grp->nr_tasks--;
1443 spin_unlock(&grp->lock);
1444 rcu_assign_pointer(p->numa_group, NULL);
1445 put_numa_group(grp);
1446 }
1447
82727018
RR
1448 p->numa_faults = NULL;
1449 p->numa_faults_buffer = NULL;
1450 kfree(numa_faults);
8c8a743c
PZ
1451}
1452
cbee9f88
PZ
1453/*
1454 * Got a PROT_NONE fault for a page on @node.
1455 */
6688cc05 1456void task_numa_fault(int last_cpupid, int node, int pages, int flags)
cbee9f88
PZ
1457{
1458 struct task_struct *p = current;
6688cc05 1459 bool migrated = flags & TNF_MIGRATED;
ac8e895b 1460 int priv;
cbee9f88 1461
10e84b97 1462 if (!numabalancing_enabled)
1a687c2e
MG
1463 return;
1464
9ff1d9ff
MG
1465 /* for example, ksmd faulting in a user's mm */
1466 if (!p->mm)
1467 return;
1468
82727018
RR
1469 /* Do not worry about placement if exiting */
1470 if (p->state == TASK_DEAD)
1471 return;
1472
f809ca9a
MG
1473 /* Allocate buffer to track faults on a per-node basis */
1474 if (unlikely(!p->numa_faults)) {
ac8e895b 1475 int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
f809ca9a 1476
745d6147
MG
1477 /* numa_faults and numa_faults_buffer share the allocation */
1478 p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
f809ca9a
MG
1479 if (!p->numa_faults)
1480 return;
745d6147
MG
1481
1482 BUG_ON(p->numa_faults_buffer);
ac8e895b 1483 p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
83e1d2cd 1484 p->total_numa_faults = 0;
f809ca9a 1485 }
cbee9f88 1486
8c8a743c
PZ
1487 /*
1488 * First accesses are treated as private, otherwise consider accesses
1489 * to be private if the accessing pid has not changed
1490 */
1491 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1492 priv = 1;
1493 } else {
1494 priv = cpupid_match_pid(p, last_cpupid);
6688cc05 1495 if (!priv && !(flags & TNF_NO_GROUP))
8c8a743c
PZ
1496 task_numa_group(p, last_cpupid);
1497 }
1498
fb003b80 1499 /*
b8593bfd
MG
1500 * If pages are properly placed (did not migrate) then scan slower.
1501 * This is reset periodically in case of phase changes
fb003b80 1502 */
598f0ec0
MG
1503 if (!migrated) {
1504 /* Initialise if necessary */
1505 if (!p->numa_scan_period_max)
1506 p->numa_scan_period_max = task_scan_max(p);
1507
1508 p->numa_scan_period = min(p->numa_scan_period_max,
1509 p->numa_scan_period + 10);
1510 }
fb003b80 1511
cbee9f88 1512 task_numa_placement(p);
f809ca9a 1513
6b9a7460
MG
1514 /* Retry task to preferred node migration if it previously failed */
1515 if (p->numa_migrate_retry && time_after(jiffies, p->numa_migrate_retry))
1516 numa_migrate_preferred(p);
1517
ac8e895b 1518 p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
cbee9f88
PZ
1519}
1520
6e5fb223
PZ
1521static void reset_ptenuma_scan(struct task_struct *p)
1522{
1523 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1524 p->mm->numa_scan_offset = 0;
1525}
1526
cbee9f88
PZ
1527/*
1528 * The expensive part of numa migration is done from task_work context.
1529 * Triggered from task_tick_numa().
1530 */
1531void task_numa_work(struct callback_head *work)
1532{
1533 unsigned long migrate, next_scan, now = jiffies;
1534 struct task_struct *p = current;
1535 struct mm_struct *mm = p->mm;
6e5fb223 1536 struct vm_area_struct *vma;
9f40604c 1537 unsigned long start, end;
598f0ec0 1538 unsigned long nr_pte_updates = 0;
9f40604c 1539 long pages;
cbee9f88
PZ
1540
1541 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1542
1543 work->next = work; /* protect against double add */
1544 /*
1545 * Who cares about NUMA placement when they're dying.
1546 *
1547 * NOTE: make sure not to dereference p->mm before this check,
1548 * exit_task_work() happens _after_ exit_mm() so we could be called
1549 * without p->mm even though we still had it when we enqueued this
1550 * work.
1551 */
1552 if (p->flags & PF_EXITING)
1553 return;
1554
7e8d16b6
MG
1555 if (!mm->numa_next_reset || !mm->numa_next_scan) {
1556 mm->numa_next_scan = now +
1557 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1558 mm->numa_next_reset = now +
1559 msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
1560 }
1561
b8593bfd
MG
1562 /*
1563 * Reset the scan period if enough time has gone by. Objective is that
1564 * scanning will be reduced if pages are properly placed. As tasks
1565 * can enter different phases this needs to be re-examined. Lacking
1566 * proper tracking of reference behaviour, this blunt hammer is used.
1567 */
1568 migrate = mm->numa_next_reset;
1569 if (time_after(now, migrate)) {
598f0ec0 1570 p->numa_scan_period = task_scan_min(p);
b8593bfd
MG
1571 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
1572 xchg(&mm->numa_next_reset, next_scan);
1573 }
1574
cbee9f88
PZ
1575 /*
1576 * Enforce maximal scan/migration frequency..
1577 */
1578 migrate = mm->numa_next_scan;
1579 if (time_before(now, migrate))
1580 return;
1581
598f0ec0
MG
1582 if (p->numa_scan_period == 0) {
1583 p->numa_scan_period_max = task_scan_max(p);
1584 p->numa_scan_period = task_scan_min(p);
1585 }
cbee9f88 1586
fb003b80 1587 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
1588 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1589 return;
1590
19a78d11
PZ
1591 /*
1592 * Delay this task enough that another task of this mm will likely win
1593 * the next time around.
1594 */
1595 p->node_stamp += 2 * TICK_NSEC;
1596
9f40604c
MG
1597 start = mm->numa_scan_offset;
1598 pages = sysctl_numa_balancing_scan_size;
1599 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1600 if (!pages)
1601 return;
cbee9f88 1602
6e5fb223 1603 down_read(&mm->mmap_sem);
9f40604c 1604 vma = find_vma(mm, start);
6e5fb223
PZ
1605 if (!vma) {
1606 reset_ptenuma_scan(p);
9f40604c 1607 start = 0;
6e5fb223
PZ
1608 vma = mm->mmap;
1609 }
9f40604c 1610 for (; vma; vma = vma->vm_next) {
fc314724 1611 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
6e5fb223
PZ
1612 continue;
1613
4591ce4f
MG
1614 /*
1615 * Shared library pages mapped by multiple processes are not
1616 * migrated as it is expected they are cache replicated. Avoid
1617 * hinting faults in read-only file-backed mappings or the vdso
1618 * as migrating the pages will be of marginal benefit.
1619 */
1620 if (!vma->vm_mm ||
1621 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1622 continue;
1623
9f40604c
MG
1624 do {
1625 start = max(start, vma->vm_start);
1626 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1627 end = min(end, vma->vm_end);
598f0ec0
MG
1628 nr_pte_updates += change_prot_numa(vma, start, end);
1629
1630 /*
1631 * Scan sysctl_numa_balancing_scan_size but ensure that
1632 * at least one PTE is updated so that unused virtual
1633 * address space is quickly skipped.
1634 */
1635 if (nr_pte_updates)
1636 pages -= (end - start) >> PAGE_SHIFT;
6e5fb223 1637
9f40604c
MG
1638 start = end;
1639 if (pages <= 0)
1640 goto out;
1641 } while (end != vma->vm_end);
cbee9f88 1642 }
6e5fb223 1643
9f40604c 1644out:
f307cd1a
MG
1645 /*
1646 * If the whole process was scanned without updates then no NUMA
1647 * hinting faults are being recorded and scan rate should be lower.
1648 */
1649 if (mm->numa_scan_offset == 0 && !nr_pte_updates) {
1650 p->numa_scan_period = min(p->numa_scan_period_max,
1651 p->numa_scan_period << 1);
1652
1653 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1654 mm->numa_next_scan = next_scan;
1655 }
1656
6e5fb223 1657 /*
c69307d5
PZ
1658 * It is possible to reach the end of the VMA list but the last few
1659 * VMAs are not guaranteed to the vma_migratable. If they are not, we
1660 * would find the !migratable VMA on the next scan but not reset the
1661 * scanner to the start so check it now.
6e5fb223
PZ
1662 */
1663 if (vma)
9f40604c 1664 mm->numa_scan_offset = start;
6e5fb223
PZ
1665 else
1666 reset_ptenuma_scan(p);
1667 up_read(&mm->mmap_sem);
cbee9f88
PZ
1668}
1669
1670/*
1671 * Drive the periodic memory faults..
1672 */
1673void task_tick_numa(struct rq *rq, struct task_struct *curr)
1674{
1675 struct callback_head *work = &curr->numa_work;
1676 u64 period, now;
1677
1678 /*
1679 * We don't care about NUMA placement if we don't have memory.
1680 */
1681 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1682 return;
1683
1684 /*
1685 * Using runtime rather than walltime has the dual advantage that
1686 * we (mostly) drive the selection from busy threads and that the
1687 * task needs to have done some actual work before we bother with
1688 * NUMA placement.
1689 */
1690 now = curr->se.sum_exec_runtime;
1691 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1692
1693 if (now - curr->node_stamp > period) {
4b96a29b 1694 if (!curr->node_stamp)
598f0ec0 1695 curr->numa_scan_period = task_scan_min(curr);
19a78d11 1696 curr->node_stamp += period;
cbee9f88
PZ
1697
1698 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1699 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1700 task_work_add(curr, work, true);
1701 }
1702 }
1703}
1704#else
1705static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1706{
1707}
1708#endif /* CONFIG_NUMA_BALANCING */
1709
30cfdcfc
DA
1710static void
1711account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1712{
1713 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 1714 if (!parent_entity(se))
029632fb 1715 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7
PZ
1716#ifdef CONFIG_SMP
1717 if (entity_is_task(se))
eb95308e 1718 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
367456c7 1719#endif
30cfdcfc 1720 cfs_rq->nr_running++;
30cfdcfc
DA
1721}
1722
1723static void
1724account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1725{
1726 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 1727 if (!parent_entity(se))
029632fb 1728 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 1729 if (entity_is_task(se))
b87f1724 1730 list_del_init(&se->group_node);
30cfdcfc 1731 cfs_rq->nr_running--;
30cfdcfc
DA
1732}
1733
3ff6dcac
YZ
1734#ifdef CONFIG_FAIR_GROUP_SCHED
1735# ifdef CONFIG_SMP
cf5f0acf
PZ
1736static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1737{
1738 long tg_weight;
1739
1740 /*
1741 * Use this CPU's actual weight instead of the last load_contribution
1742 * to gain a more accurate current total weight. See
1743 * update_cfs_rq_load_contribution().
1744 */
bf5b986e 1745 tg_weight = atomic_long_read(&tg->load_avg);
82958366 1746 tg_weight -= cfs_rq->tg_load_contrib;
cf5f0acf
PZ
1747 tg_weight += cfs_rq->load.weight;
1748
1749 return tg_weight;
1750}
1751
6d5ab293 1752static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac 1753{
cf5f0acf 1754 long tg_weight, load, shares;
3ff6dcac 1755
cf5f0acf 1756 tg_weight = calc_tg_weight(tg, cfs_rq);
6d5ab293 1757 load = cfs_rq->load.weight;
3ff6dcac 1758
3ff6dcac 1759 shares = (tg->shares * load);
cf5f0acf
PZ
1760 if (tg_weight)
1761 shares /= tg_weight;
3ff6dcac
YZ
1762
1763 if (shares < MIN_SHARES)
1764 shares = MIN_SHARES;
1765 if (shares > tg->shares)
1766 shares = tg->shares;
1767
1768 return shares;
1769}
3ff6dcac 1770# else /* CONFIG_SMP */
6d5ab293 1771static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
1772{
1773 return tg->shares;
1774}
3ff6dcac 1775# endif /* CONFIG_SMP */
2069dd75
PZ
1776static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1777 unsigned long weight)
1778{
19e5eebb
PT
1779 if (se->on_rq) {
1780 /* commit outstanding execution time */
1781 if (cfs_rq->curr == se)
1782 update_curr(cfs_rq);
2069dd75 1783 account_entity_dequeue(cfs_rq, se);
19e5eebb 1784 }
2069dd75
PZ
1785
1786 update_load_set(&se->load, weight);
1787
1788 if (se->on_rq)
1789 account_entity_enqueue(cfs_rq, se);
1790}
1791
82958366
PT
1792static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1793
6d5ab293 1794static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1795{
1796 struct task_group *tg;
1797 struct sched_entity *se;
3ff6dcac 1798 long shares;
2069dd75 1799
2069dd75
PZ
1800 tg = cfs_rq->tg;
1801 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 1802 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 1803 return;
3ff6dcac
YZ
1804#ifndef CONFIG_SMP
1805 if (likely(se->load.weight == tg->shares))
1806 return;
1807#endif
6d5ab293 1808 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
1809
1810 reweight_entity(cfs_rq_of(se), se, shares);
1811}
1812#else /* CONFIG_FAIR_GROUP_SCHED */
6d5ab293 1813static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
1814{
1815}
1816#endif /* CONFIG_FAIR_GROUP_SCHED */
1817
141965c7 1818#ifdef CONFIG_SMP
5b51f2f8
PT
1819/*
1820 * We choose a half-life close to 1 scheduling period.
1821 * Note: The tables below are dependent on this value.
1822 */
1823#define LOAD_AVG_PERIOD 32
1824#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1825#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1826
1827/* Precomputed fixed inverse multiplies for multiplication by y^n */
1828static const u32 runnable_avg_yN_inv[] = {
1829 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1830 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1831 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1832 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1833 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1834 0x85aac367, 0x82cd8698,
1835};
1836
1837/*
1838 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1839 * over-estimates when re-combining.
1840 */
1841static const u32 runnable_avg_yN_sum[] = {
1842 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1843 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1844 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1845};
1846
9d85f21c
PT
1847/*
1848 * Approximate:
1849 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1850 */
1851static __always_inline u64 decay_load(u64 val, u64 n)
1852{
5b51f2f8
PT
1853 unsigned int local_n;
1854
1855 if (!n)
1856 return val;
1857 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1858 return 0;
1859
1860 /* after bounds checking we can collapse to 32-bit */
1861 local_n = n;
1862
1863 /*
1864 * As y^PERIOD = 1/2, we can combine
1865 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
1866 * With a look-up table which covers k^n (n<PERIOD)
1867 *
1868 * To achieve constant time decay_load.
1869 */
1870 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1871 val >>= local_n / LOAD_AVG_PERIOD;
1872 local_n %= LOAD_AVG_PERIOD;
9d85f21c
PT
1873 }
1874
5b51f2f8
PT
1875 val *= runnable_avg_yN_inv[local_n];
1876 /* We don't use SRR here since we always want to round down. */
1877 return val >> 32;
1878}
1879
1880/*
1881 * For updates fully spanning n periods, the contribution to runnable
1882 * average will be: \Sum 1024*y^n
1883 *
1884 * We can compute this reasonably efficiently by combining:
1885 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1886 */
1887static u32 __compute_runnable_contrib(u64 n)
1888{
1889 u32 contrib = 0;
1890
1891 if (likely(n <= LOAD_AVG_PERIOD))
1892 return runnable_avg_yN_sum[n];
1893 else if (unlikely(n >= LOAD_AVG_MAX_N))
1894 return LOAD_AVG_MAX;
1895
1896 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
1897 do {
1898 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1899 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1900
1901 n -= LOAD_AVG_PERIOD;
1902 } while (n > LOAD_AVG_PERIOD);
1903
1904 contrib = decay_load(contrib, n);
1905 return contrib + runnable_avg_yN_sum[n];
9d85f21c
PT
1906}
1907
1908/*
1909 * We can represent the historical contribution to runnable average as the
1910 * coefficients of a geometric series. To do this we sub-divide our runnable
1911 * history into segments of approximately 1ms (1024us); label the segment that
1912 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1913 *
1914 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1915 * p0 p1 p2
1916 * (now) (~1ms ago) (~2ms ago)
1917 *
1918 * Let u_i denote the fraction of p_i that the entity was runnable.
1919 *
1920 * We then designate the fractions u_i as our co-efficients, yielding the
1921 * following representation of historical load:
1922 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1923 *
1924 * We choose y based on the with of a reasonably scheduling period, fixing:
1925 * y^32 = 0.5
1926 *
1927 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1928 * approximately half as much as the contribution to load within the last ms
1929 * (u_0).
1930 *
1931 * When a period "rolls over" and we have new u_0`, multiplying the previous
1932 * sum again by y is sufficient to update:
1933 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1934 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1935 */
1936static __always_inline int __update_entity_runnable_avg(u64 now,
1937 struct sched_avg *sa,
1938 int runnable)
1939{
5b51f2f8
PT
1940 u64 delta, periods;
1941 u32 runnable_contrib;
9d85f21c
PT
1942 int delta_w, decayed = 0;
1943
1944 delta = now - sa->last_runnable_update;
1945 /*
1946 * This should only happen when time goes backwards, which it
1947 * unfortunately does during sched clock init when we swap over to TSC.
1948 */
1949 if ((s64)delta < 0) {
1950 sa->last_runnable_update = now;
1951 return 0;
1952 }
1953
1954 /*
1955 * Use 1024ns as the unit of measurement since it's a reasonable
1956 * approximation of 1us and fast to compute.
1957 */
1958 delta >>= 10;
1959 if (!delta)
1960 return 0;
1961 sa->last_runnable_update = now;
1962
1963 /* delta_w is the amount already accumulated against our next period */
1964 delta_w = sa->runnable_avg_period % 1024;
1965 if (delta + delta_w >= 1024) {
1966 /* period roll-over */
1967 decayed = 1;
1968
1969 /*
1970 * Now that we know we're crossing a period boundary, figure
1971 * out how much from delta we need to complete the current
1972 * period and accrue it.
1973 */
1974 delta_w = 1024 - delta_w;
5b51f2f8
PT
1975 if (runnable)
1976 sa->runnable_avg_sum += delta_w;
1977 sa->runnable_avg_period += delta_w;
1978
1979 delta -= delta_w;
1980
1981 /* Figure out how many additional periods this update spans */
1982 periods = delta / 1024;
1983 delta %= 1024;
1984
1985 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1986 periods + 1);
1987 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1988 periods + 1);
1989
1990 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1991 runnable_contrib = __compute_runnable_contrib(periods);
1992 if (runnable)
1993 sa->runnable_avg_sum += runnable_contrib;
1994 sa->runnable_avg_period += runnable_contrib;
9d85f21c
PT
1995 }
1996
1997 /* Remainder of delta accrued against u_0` */
1998 if (runnable)
1999 sa->runnable_avg_sum += delta;
2000 sa->runnable_avg_period += delta;
2001
2002 return decayed;
2003}
2004
9ee474f5 2005/* Synchronize an entity's decay with its parenting cfs_rq.*/
aff3e498 2006static inline u64 __synchronize_entity_decay(struct sched_entity *se)
9ee474f5
PT
2007{
2008 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2009 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2010
2011 decays -= se->avg.decay_count;
2012 if (!decays)
aff3e498 2013 return 0;
9ee474f5
PT
2014
2015 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2016 se->avg.decay_count = 0;
aff3e498
PT
2017
2018 return decays;
9ee474f5
PT
2019}
2020
c566e8e9
PT
2021#ifdef CONFIG_FAIR_GROUP_SCHED
2022static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2023 int force_update)
2024{
2025 struct task_group *tg = cfs_rq->tg;
bf5b986e 2026 long tg_contrib;
c566e8e9
PT
2027
2028 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2029 tg_contrib -= cfs_rq->tg_load_contrib;
2030
bf5b986e
AS
2031 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2032 atomic_long_add(tg_contrib, &tg->load_avg);
c566e8e9
PT
2033 cfs_rq->tg_load_contrib += tg_contrib;
2034 }
2035}
8165e145 2036
bb17f655
PT
2037/*
2038 * Aggregate cfs_rq runnable averages into an equivalent task_group
2039 * representation for computing load contributions.
2040 */
2041static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2042 struct cfs_rq *cfs_rq)
2043{
2044 struct task_group *tg = cfs_rq->tg;
2045 long contrib;
2046
2047 /* The fraction of a cpu used by this cfs_rq */
2048 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
2049 sa->runnable_avg_period + 1);
2050 contrib -= cfs_rq->tg_runnable_contrib;
2051
2052 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2053 atomic_add(contrib, &tg->runnable_avg);
2054 cfs_rq->tg_runnable_contrib += contrib;
2055 }
2056}
2057
8165e145
PT
2058static inline void __update_group_entity_contrib(struct sched_entity *se)
2059{
2060 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2061 struct task_group *tg = cfs_rq->tg;
bb17f655
PT
2062 int runnable_avg;
2063
8165e145
PT
2064 u64 contrib;
2065
2066 contrib = cfs_rq->tg_load_contrib * tg->shares;
bf5b986e
AS
2067 se->avg.load_avg_contrib = div_u64(contrib,
2068 atomic_long_read(&tg->load_avg) + 1);
bb17f655
PT
2069
2070 /*
2071 * For group entities we need to compute a correction term in the case
2072 * that they are consuming <1 cpu so that we would contribute the same
2073 * load as a task of equal weight.
2074 *
2075 * Explicitly co-ordinating this measurement would be expensive, but
2076 * fortunately the sum of each cpus contribution forms a usable
2077 * lower-bound on the true value.
2078 *
2079 * Consider the aggregate of 2 contributions. Either they are disjoint
2080 * (and the sum represents true value) or they are disjoint and we are
2081 * understating by the aggregate of their overlap.
2082 *
2083 * Extending this to N cpus, for a given overlap, the maximum amount we
2084 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
2085 * cpus that overlap for this interval and w_i is the interval width.
2086 *
2087 * On a small machine; the first term is well-bounded which bounds the
2088 * total error since w_i is a subset of the period. Whereas on a
2089 * larger machine, while this first term can be larger, if w_i is the
2090 * of consequential size guaranteed to see n_i*w_i quickly converge to
2091 * our upper bound of 1-cpu.
2092 */
2093 runnable_avg = atomic_read(&tg->runnable_avg);
2094 if (runnable_avg < NICE_0_LOAD) {
2095 se->avg.load_avg_contrib *= runnable_avg;
2096 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2097 }
8165e145 2098}
c566e8e9
PT
2099#else
2100static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2101 int force_update) {}
bb17f655
PT
2102static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2103 struct cfs_rq *cfs_rq) {}
8165e145 2104static inline void __update_group_entity_contrib(struct sched_entity *se) {}
c566e8e9
PT
2105#endif
2106
8165e145
PT
2107static inline void __update_task_entity_contrib(struct sched_entity *se)
2108{
2109 u32 contrib;
2110
2111 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2112 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2113 contrib /= (se->avg.runnable_avg_period + 1);
2114 se->avg.load_avg_contrib = scale_load(contrib);
2115}
2116
2dac754e
PT
2117/* Compute the current contribution to load_avg by se, return any delta */
2118static long __update_entity_load_avg_contrib(struct sched_entity *se)
2119{
2120 long old_contrib = se->avg.load_avg_contrib;
2121
8165e145
PT
2122 if (entity_is_task(se)) {
2123 __update_task_entity_contrib(se);
2124 } else {
bb17f655 2125 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
8165e145
PT
2126 __update_group_entity_contrib(se);
2127 }
2dac754e
PT
2128
2129 return se->avg.load_avg_contrib - old_contrib;
2130}
2131
9ee474f5
PT
2132static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2133 long load_contrib)
2134{
2135 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2136 cfs_rq->blocked_load_avg -= load_contrib;
2137 else
2138 cfs_rq->blocked_load_avg = 0;
2139}
2140
f1b17280
PT
2141static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2142
9d85f21c 2143/* Update a sched_entity's runnable average */
9ee474f5
PT
2144static inline void update_entity_load_avg(struct sched_entity *se,
2145 int update_cfs_rq)
9d85f21c 2146{
2dac754e
PT
2147 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2148 long contrib_delta;
f1b17280 2149 u64 now;
2dac754e 2150
f1b17280
PT
2151 /*
2152 * For a group entity we need to use their owned cfs_rq_clock_task() in
2153 * case they are the parent of a throttled hierarchy.
2154 */
2155 if (entity_is_task(se))
2156 now = cfs_rq_clock_task(cfs_rq);
2157 else
2158 now = cfs_rq_clock_task(group_cfs_rq(se));
2159
2160 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2dac754e
PT
2161 return;
2162
2163 contrib_delta = __update_entity_load_avg_contrib(se);
9ee474f5
PT
2164
2165 if (!update_cfs_rq)
2166 return;
2167
2dac754e
PT
2168 if (se->on_rq)
2169 cfs_rq->runnable_load_avg += contrib_delta;
9ee474f5
PT
2170 else
2171 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2172}
2173
2174/*
2175 * Decay the load contributed by all blocked children and account this so that
2176 * their contribution may appropriately discounted when they wake up.
2177 */
aff3e498 2178static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
9ee474f5 2179{
f1b17280 2180 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
9ee474f5
PT
2181 u64 decays;
2182
2183 decays = now - cfs_rq->last_decay;
aff3e498 2184 if (!decays && !force_update)
9ee474f5
PT
2185 return;
2186
2509940f
AS
2187 if (atomic_long_read(&cfs_rq->removed_load)) {
2188 unsigned long removed_load;
2189 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
aff3e498
PT
2190 subtract_blocked_load_contrib(cfs_rq, removed_load);
2191 }
9ee474f5 2192
aff3e498
PT
2193 if (decays) {
2194 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2195 decays);
2196 atomic64_add(decays, &cfs_rq->decay_counter);
2197 cfs_rq->last_decay = now;
2198 }
c566e8e9
PT
2199
2200 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
9d85f21c 2201}
18bf2805
BS
2202
2203static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2204{
78becc27 2205 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
bb17f655 2206 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
18bf2805 2207}
2dac754e
PT
2208
2209/* Add the load generated by se into cfs_rq's child load-average */
2210static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2211 struct sched_entity *se,
2212 int wakeup)
2dac754e 2213{
aff3e498
PT
2214 /*
2215 * We track migrations using entity decay_count <= 0, on a wake-up
2216 * migration we use a negative decay count to track the remote decays
2217 * accumulated while sleeping.
a75cdaa9
AS
2218 *
2219 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2220 * are seen by enqueue_entity_load_avg() as a migration with an already
2221 * constructed load_avg_contrib.
aff3e498
PT
2222 */
2223 if (unlikely(se->avg.decay_count <= 0)) {
78becc27 2224 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
aff3e498
PT
2225 if (se->avg.decay_count) {
2226 /*
2227 * In a wake-up migration we have to approximate the
2228 * time sleeping. This is because we can't synchronize
2229 * clock_task between the two cpus, and it is not
2230 * guaranteed to be read-safe. Instead, we can
2231 * approximate this using our carried decays, which are
2232 * explicitly atomically readable.
2233 */
2234 se->avg.last_runnable_update -= (-se->avg.decay_count)
2235 << 20;
2236 update_entity_load_avg(se, 0);
2237 /* Indicate that we're now synchronized and on-rq */
2238 se->avg.decay_count = 0;
2239 }
9ee474f5
PT
2240 wakeup = 0;
2241 } else {
282cf499
AS
2242 /*
2243 * Task re-woke on same cpu (or else migrate_task_rq_fair()
2244 * would have made count negative); we must be careful to avoid
2245 * double-accounting blocked time after synchronizing decays.
2246 */
2247 se->avg.last_runnable_update += __synchronize_entity_decay(se)
2248 << 20;
9ee474f5
PT
2249 }
2250
aff3e498
PT
2251 /* migrated tasks did not contribute to our blocked load */
2252 if (wakeup) {
9ee474f5 2253 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
aff3e498
PT
2254 update_entity_load_avg(se, 0);
2255 }
9ee474f5 2256
2dac754e 2257 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
aff3e498
PT
2258 /* we force update consideration on load-balancer moves */
2259 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2dac754e
PT
2260}
2261
9ee474f5
PT
2262/*
2263 * Remove se's load from this cfs_rq child load-average, if the entity is
2264 * transitioning to a blocked state we track its projected decay using
2265 * blocked_load_avg.
2266 */
2dac754e 2267static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2268 struct sched_entity *se,
2269 int sleep)
2dac754e 2270{
9ee474f5 2271 update_entity_load_avg(se, 1);
aff3e498
PT
2272 /* we force update consideration on load-balancer moves */
2273 update_cfs_rq_blocked_load(cfs_rq, !sleep);
9ee474f5 2274
2dac754e 2275 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
9ee474f5
PT
2276 if (sleep) {
2277 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2278 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2279 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2dac754e 2280}
642dbc39
VG
2281
2282/*
2283 * Update the rq's load with the elapsed running time before entering
2284 * idle. if the last scheduled task is not a CFS task, idle_enter will
2285 * be the only way to update the runnable statistic.
2286 */
2287void idle_enter_fair(struct rq *this_rq)
2288{
2289 update_rq_runnable_avg(this_rq, 1);
2290}
2291
2292/*
2293 * Update the rq's load with the elapsed idle time before a task is
2294 * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
2295 * be the only way to update the runnable statistic.
2296 */
2297void idle_exit_fair(struct rq *this_rq)
2298{
2299 update_rq_runnable_avg(this_rq, 0);
2300}
2301
9d85f21c 2302#else
9ee474f5
PT
2303static inline void update_entity_load_avg(struct sched_entity *se,
2304 int update_cfs_rq) {}
18bf2805 2305static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2dac754e 2306static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2307 struct sched_entity *se,
2308 int wakeup) {}
2dac754e 2309static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2310 struct sched_entity *se,
2311 int sleep) {}
aff3e498
PT
2312static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2313 int force_update) {}
9d85f21c
PT
2314#endif
2315
2396af69 2316static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 2317{
bf0f6f24 2318#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
2319 struct task_struct *tsk = NULL;
2320
2321 if (entity_is_task(se))
2322 tsk = task_of(se);
2323
41acab88 2324 if (se->statistics.sleep_start) {
78becc27 2325 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
bf0f6f24
IM
2326
2327 if ((s64)delta < 0)
2328 delta = 0;
2329
41acab88
LDM
2330 if (unlikely(delta > se->statistics.sleep_max))
2331 se->statistics.sleep_max = delta;
bf0f6f24 2332
8c79a045 2333 se->statistics.sleep_start = 0;
41acab88 2334 se->statistics.sum_sleep_runtime += delta;
9745512c 2335
768d0c27 2336 if (tsk) {
e414314c 2337 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
2338 trace_sched_stat_sleep(tsk, delta);
2339 }
bf0f6f24 2340 }
41acab88 2341 if (se->statistics.block_start) {
78becc27 2342 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
bf0f6f24
IM
2343
2344 if ((s64)delta < 0)
2345 delta = 0;
2346
41acab88
LDM
2347 if (unlikely(delta > se->statistics.block_max))
2348 se->statistics.block_max = delta;
bf0f6f24 2349
8c79a045 2350 se->statistics.block_start = 0;
41acab88 2351 se->statistics.sum_sleep_runtime += delta;
30084fbd 2352
e414314c 2353 if (tsk) {
8f0dfc34 2354 if (tsk->in_iowait) {
41acab88
LDM
2355 se->statistics.iowait_sum += delta;
2356 se->statistics.iowait_count++;
768d0c27 2357 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
2358 }
2359
b781a602
AV
2360 trace_sched_stat_blocked(tsk, delta);
2361
e414314c
PZ
2362 /*
2363 * Blocking time is in units of nanosecs, so shift by
2364 * 20 to get a milliseconds-range estimation of the
2365 * amount of time that the task spent sleeping:
2366 */
2367 if (unlikely(prof_on == SLEEP_PROFILING)) {
2368 profile_hits(SLEEP_PROFILING,
2369 (void *)get_wchan(tsk),
2370 delta >> 20);
2371 }
2372 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 2373 }
bf0f6f24
IM
2374 }
2375#endif
2376}
2377
ddc97297
PZ
2378static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2379{
2380#ifdef CONFIG_SCHED_DEBUG
2381 s64 d = se->vruntime - cfs_rq->min_vruntime;
2382
2383 if (d < 0)
2384 d = -d;
2385
2386 if (d > 3*sysctl_sched_latency)
2387 schedstat_inc(cfs_rq, nr_spread_over);
2388#endif
2389}
2390
aeb73b04
PZ
2391static void
2392place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2393{
1af5f730 2394 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 2395
2cb8600e
PZ
2396 /*
2397 * The 'current' period is already promised to the current tasks,
2398 * however the extra weight of the new task will slow them down a
2399 * little, place the new task so that it fits in the slot that
2400 * stays open at the end.
2401 */
94dfb5e7 2402 if (initial && sched_feat(START_DEBIT))
f9c0b095 2403 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 2404
a2e7a7eb 2405 /* sleeps up to a single latency don't count. */
5ca9880c 2406 if (!initial) {
a2e7a7eb 2407 unsigned long thresh = sysctl_sched_latency;
a7be37ac 2408
a2e7a7eb
MG
2409 /*
2410 * Halve their sleep time's effect, to allow
2411 * for a gentler effect of sleepers:
2412 */
2413 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2414 thresh >>= 1;
51e0304c 2415
a2e7a7eb 2416 vruntime -= thresh;
aeb73b04
PZ
2417 }
2418
b5d9d734 2419 /* ensure we never gain time by being placed backwards. */
16c8f1c7 2420 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
2421}
2422
d3d9dc33
PT
2423static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2424
bf0f6f24 2425static void
88ec22d3 2426enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 2427{
88ec22d3
PZ
2428 /*
2429 * Update the normalized vruntime before updating min_vruntime
0fc576d5 2430 * through calling update_curr().
88ec22d3 2431 */
371fd7e7 2432 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
2433 se->vruntime += cfs_rq->min_vruntime;
2434
bf0f6f24 2435 /*
a2a2d680 2436 * Update run-time statistics of the 'current'.
bf0f6f24 2437 */
b7cc0896 2438 update_curr(cfs_rq);
f269ae04 2439 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
17bc14b7
LT
2440 account_entity_enqueue(cfs_rq, se);
2441 update_cfs_shares(cfs_rq);
bf0f6f24 2442
88ec22d3 2443 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 2444 place_entity(cfs_rq, se, 0);
2396af69 2445 enqueue_sleeper(cfs_rq, se);
e9acbff6 2446 }
bf0f6f24 2447
d2417e5a 2448 update_stats_enqueue(cfs_rq, se);
ddc97297 2449 check_spread(cfs_rq, se);
83b699ed
SV
2450 if (se != cfs_rq->curr)
2451 __enqueue_entity(cfs_rq, se);
2069dd75 2452 se->on_rq = 1;
3d4b47b4 2453
d3d9dc33 2454 if (cfs_rq->nr_running == 1) {
3d4b47b4 2455 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
2456 check_enqueue_throttle(cfs_rq);
2457 }
bf0f6f24
IM
2458}
2459
2c13c919 2460static void __clear_buddies_last(struct sched_entity *se)
2002c695 2461{
2c13c919
RR
2462 for_each_sched_entity(se) {
2463 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2464 if (cfs_rq->last == se)
2465 cfs_rq->last = NULL;
2466 else
2467 break;
2468 }
2469}
2002c695 2470
2c13c919
RR
2471static void __clear_buddies_next(struct sched_entity *se)
2472{
2473 for_each_sched_entity(se) {
2474 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2475 if (cfs_rq->next == se)
2476 cfs_rq->next = NULL;
2477 else
2478 break;
2479 }
2002c695
PZ
2480}
2481
ac53db59
RR
2482static void __clear_buddies_skip(struct sched_entity *se)
2483{
2484 for_each_sched_entity(se) {
2485 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2486 if (cfs_rq->skip == se)
2487 cfs_rq->skip = NULL;
2488 else
2489 break;
2490 }
2491}
2492
a571bbea
PZ
2493static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2494{
2c13c919
RR
2495 if (cfs_rq->last == se)
2496 __clear_buddies_last(se);
2497
2498 if (cfs_rq->next == se)
2499 __clear_buddies_next(se);
ac53db59
RR
2500
2501 if (cfs_rq->skip == se)
2502 __clear_buddies_skip(se);
a571bbea
PZ
2503}
2504
6c16a6dc 2505static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 2506
bf0f6f24 2507static void
371fd7e7 2508dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 2509{
a2a2d680
DA
2510 /*
2511 * Update run-time statistics of the 'current'.
2512 */
2513 update_curr(cfs_rq);
17bc14b7 2514 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 2515
19b6a2e3 2516 update_stats_dequeue(cfs_rq, se);
371fd7e7 2517 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 2518#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
2519 if (entity_is_task(se)) {
2520 struct task_struct *tsk = task_of(se);
2521
2522 if (tsk->state & TASK_INTERRUPTIBLE)
78becc27 2523 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2524 if (tsk->state & TASK_UNINTERRUPTIBLE)
78becc27 2525 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2526 }
db36cc7d 2527#endif
67e9fb2a
PZ
2528 }
2529
2002c695 2530 clear_buddies(cfs_rq, se);
4793241b 2531
83b699ed 2532 if (se != cfs_rq->curr)
30cfdcfc 2533 __dequeue_entity(cfs_rq, se);
17bc14b7 2534 se->on_rq = 0;
30cfdcfc 2535 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
2536
2537 /*
2538 * Normalize the entity after updating the min_vruntime because the
2539 * update can refer to the ->curr item and we need to reflect this
2540 * movement in our normalized position.
2541 */
371fd7e7 2542 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 2543 se->vruntime -= cfs_rq->min_vruntime;
1e876231 2544
d8b4986d
PT
2545 /* return excess runtime on last dequeue */
2546 return_cfs_rq_runtime(cfs_rq);
2547
1e876231 2548 update_min_vruntime(cfs_rq);
17bc14b7 2549 update_cfs_shares(cfs_rq);
bf0f6f24
IM
2550}
2551
2552/*
2553 * Preempt the current task with a newly woken task if needed:
2554 */
7c92e54f 2555static void
2e09bf55 2556check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 2557{
11697830 2558 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
2559 struct sched_entity *se;
2560 s64 delta;
11697830 2561
6d0f0ebd 2562 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 2563 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 2564 if (delta_exec > ideal_runtime) {
bf0f6f24 2565 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
2566 /*
2567 * The current task ran long enough, ensure it doesn't get
2568 * re-elected due to buddy favours.
2569 */
2570 clear_buddies(cfs_rq, curr);
f685ceac
MG
2571 return;
2572 }
2573
2574 /*
2575 * Ensure that a task that missed wakeup preemption by a
2576 * narrow margin doesn't have to wait for a full slice.
2577 * This also mitigates buddy induced latencies under load.
2578 */
f685ceac
MG
2579 if (delta_exec < sysctl_sched_min_granularity)
2580 return;
2581
f4cfb33e
WX
2582 se = __pick_first_entity(cfs_rq);
2583 delta = curr->vruntime - se->vruntime;
f685ceac 2584
f4cfb33e
WX
2585 if (delta < 0)
2586 return;
d7d82944 2587
f4cfb33e
WX
2588 if (delta > ideal_runtime)
2589 resched_task(rq_of(cfs_rq)->curr);
bf0f6f24
IM
2590}
2591
83b699ed 2592static void
8494f412 2593set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 2594{
83b699ed
SV
2595 /* 'current' is not kept within the tree. */
2596 if (se->on_rq) {
2597 /*
2598 * Any task has to be enqueued before it get to execute on
2599 * a CPU. So account for the time it spent waiting on the
2600 * runqueue.
2601 */
2602 update_stats_wait_end(cfs_rq, se);
2603 __dequeue_entity(cfs_rq, se);
2604 }
2605
79303e9e 2606 update_stats_curr_start(cfs_rq, se);
429d43bc 2607 cfs_rq->curr = se;
eba1ed4b
IM
2608#ifdef CONFIG_SCHEDSTATS
2609 /*
2610 * Track our maximum slice length, if the CPU's load is at
2611 * least twice that of our own weight (i.e. dont track it
2612 * when there are only lesser-weight tasks around):
2613 */
495eca49 2614 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 2615 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
2616 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2617 }
2618#endif
4a55b450 2619 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
2620}
2621
3f3a4904
PZ
2622static int
2623wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2624
ac53db59
RR
2625/*
2626 * Pick the next process, keeping these things in mind, in this order:
2627 * 1) keep things fair between processes/task groups
2628 * 2) pick the "next" process, since someone really wants that to run
2629 * 3) pick the "last" process, for cache locality
2630 * 4) do not run the "skip" process, if something else is available
2631 */
f4b6755f 2632static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 2633{
ac53db59 2634 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 2635 struct sched_entity *left = se;
f4b6755f 2636
ac53db59
RR
2637 /*
2638 * Avoid running the skip buddy, if running something else can
2639 * be done without getting too unfair.
2640 */
2641 if (cfs_rq->skip == se) {
2642 struct sched_entity *second = __pick_next_entity(se);
2643 if (second && wakeup_preempt_entity(second, left) < 1)
2644 se = second;
2645 }
aa2ac252 2646
f685ceac
MG
2647 /*
2648 * Prefer last buddy, try to return the CPU to a preempted task.
2649 */
2650 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2651 se = cfs_rq->last;
2652
ac53db59
RR
2653 /*
2654 * Someone really wants this to run. If it's not unfair, run it.
2655 */
2656 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2657 se = cfs_rq->next;
2658
f685ceac 2659 clear_buddies(cfs_rq, se);
4793241b
PZ
2660
2661 return se;
aa2ac252
PZ
2662}
2663
d3d9dc33
PT
2664static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2665
ab6cde26 2666static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
2667{
2668 /*
2669 * If still on the runqueue then deactivate_task()
2670 * was not called and update_curr() has to be done:
2671 */
2672 if (prev->on_rq)
b7cc0896 2673 update_curr(cfs_rq);
bf0f6f24 2674
d3d9dc33
PT
2675 /* throttle cfs_rqs exceeding runtime */
2676 check_cfs_rq_runtime(cfs_rq);
2677
ddc97297 2678 check_spread(cfs_rq, prev);
30cfdcfc 2679 if (prev->on_rq) {
5870db5b 2680 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
2681 /* Put 'current' back into the tree. */
2682 __enqueue_entity(cfs_rq, prev);
9d85f21c 2683 /* in !on_rq case, update occurred at dequeue */
9ee474f5 2684 update_entity_load_avg(prev, 1);
30cfdcfc 2685 }
429d43bc 2686 cfs_rq->curr = NULL;
bf0f6f24
IM
2687}
2688
8f4d37ec
PZ
2689static void
2690entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 2691{
bf0f6f24 2692 /*
30cfdcfc 2693 * Update run-time statistics of the 'current'.
bf0f6f24 2694 */
30cfdcfc 2695 update_curr(cfs_rq);
bf0f6f24 2696
9d85f21c
PT
2697 /*
2698 * Ensure that runnable average is periodically updated.
2699 */
9ee474f5 2700 update_entity_load_avg(curr, 1);
aff3e498 2701 update_cfs_rq_blocked_load(cfs_rq, 1);
bf0bd948 2702 update_cfs_shares(cfs_rq);
9d85f21c 2703
8f4d37ec
PZ
2704#ifdef CONFIG_SCHED_HRTICK
2705 /*
2706 * queued ticks are scheduled to match the slice, so don't bother
2707 * validating it and just reschedule.
2708 */
983ed7a6
HH
2709 if (queued) {
2710 resched_task(rq_of(cfs_rq)->curr);
2711 return;
2712 }
8f4d37ec
PZ
2713 /*
2714 * don't let the period tick interfere with the hrtick preemption
2715 */
2716 if (!sched_feat(DOUBLE_TICK) &&
2717 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2718 return;
2719#endif
2720
2c2efaed 2721 if (cfs_rq->nr_running > 1)
2e09bf55 2722 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
2723}
2724
ab84d31e
PT
2725
2726/**************************************************
2727 * CFS bandwidth control machinery
2728 */
2729
2730#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
2731
2732#ifdef HAVE_JUMP_LABEL
c5905afb 2733static struct static_key __cfs_bandwidth_used;
029632fb
PZ
2734
2735static inline bool cfs_bandwidth_used(void)
2736{
c5905afb 2737 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
2738}
2739
2740void account_cfs_bandwidth_used(int enabled, int was_enabled)
2741{
2742 /* only need to count groups transitioning between enabled/!enabled */
2743 if (enabled && !was_enabled)
c5905afb 2744 static_key_slow_inc(&__cfs_bandwidth_used);
029632fb 2745 else if (!enabled && was_enabled)
c5905afb 2746 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
2747}
2748#else /* HAVE_JUMP_LABEL */
2749static bool cfs_bandwidth_used(void)
2750{
2751 return true;
2752}
2753
2754void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2755#endif /* HAVE_JUMP_LABEL */
2756
ab84d31e
PT
2757/*
2758 * default period for cfs group bandwidth.
2759 * default: 0.1s, units: nanoseconds
2760 */
2761static inline u64 default_cfs_period(void)
2762{
2763 return 100000000ULL;
2764}
ec12cb7f
PT
2765
2766static inline u64 sched_cfs_bandwidth_slice(void)
2767{
2768 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2769}
2770
a9cf55b2
PT
2771/*
2772 * Replenish runtime according to assigned quota and update expiration time.
2773 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2774 * additional synchronization around rq->lock.
2775 *
2776 * requires cfs_b->lock
2777 */
029632fb 2778void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
2779{
2780 u64 now;
2781
2782 if (cfs_b->quota == RUNTIME_INF)
2783 return;
2784
2785 now = sched_clock_cpu(smp_processor_id());
2786 cfs_b->runtime = cfs_b->quota;
2787 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2788}
2789
029632fb
PZ
2790static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2791{
2792 return &tg->cfs_bandwidth;
2793}
2794
f1b17280
PT
2795/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
2796static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2797{
2798 if (unlikely(cfs_rq->throttle_count))
2799 return cfs_rq->throttled_clock_task;
2800
78becc27 2801 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
2802}
2803
85dac906
PT
2804/* returns 0 on failure to allocate runtime */
2805static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
2806{
2807 struct task_group *tg = cfs_rq->tg;
2808 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 2809 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
2810
2811 /* note: this is a positive sum as runtime_remaining <= 0 */
2812 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2813
2814 raw_spin_lock(&cfs_b->lock);
2815 if (cfs_b->quota == RUNTIME_INF)
2816 amount = min_amount;
58088ad0 2817 else {
a9cf55b2
PT
2818 /*
2819 * If the bandwidth pool has become inactive, then at least one
2820 * period must have elapsed since the last consumption.
2821 * Refresh the global state and ensure bandwidth timer becomes
2822 * active.
2823 */
2824 if (!cfs_b->timer_active) {
2825 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 2826 __start_cfs_bandwidth(cfs_b);
a9cf55b2 2827 }
58088ad0
PT
2828
2829 if (cfs_b->runtime > 0) {
2830 amount = min(cfs_b->runtime, min_amount);
2831 cfs_b->runtime -= amount;
2832 cfs_b->idle = 0;
2833 }
ec12cb7f 2834 }
a9cf55b2 2835 expires = cfs_b->runtime_expires;
ec12cb7f
PT
2836 raw_spin_unlock(&cfs_b->lock);
2837
2838 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
2839 /*
2840 * we may have advanced our local expiration to account for allowed
2841 * spread between our sched_clock and the one on which runtime was
2842 * issued.
2843 */
2844 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2845 cfs_rq->runtime_expires = expires;
85dac906
PT
2846
2847 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
2848}
2849
a9cf55b2
PT
2850/*
2851 * Note: This depends on the synchronization provided by sched_clock and the
2852 * fact that rq->clock snapshots this value.
2853 */
2854static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 2855{
a9cf55b2 2856 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
2857
2858 /* if the deadline is ahead of our clock, nothing to do */
78becc27 2859 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
2860 return;
2861
a9cf55b2
PT
2862 if (cfs_rq->runtime_remaining < 0)
2863 return;
2864
2865 /*
2866 * If the local deadline has passed we have to consider the
2867 * possibility that our sched_clock is 'fast' and the global deadline
2868 * has not truly expired.
2869 *
2870 * Fortunately we can check determine whether this the case by checking
2871 * whether the global deadline has advanced.
2872 */
2873
2874 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2875 /* extend local deadline, drift is bounded above by 2 ticks */
2876 cfs_rq->runtime_expires += TICK_NSEC;
2877 } else {
2878 /* global deadline is ahead, expiration has passed */
2879 cfs_rq->runtime_remaining = 0;
2880 }
2881}
2882
2883static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2884 unsigned long delta_exec)
2885{
2886 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 2887 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
2888 expire_cfs_rq_runtime(cfs_rq);
2889
2890 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
2891 return;
2892
85dac906
PT
2893 /*
2894 * if we're unable to extend our runtime we resched so that the active
2895 * hierarchy can be throttled
2896 */
2897 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2898 resched_task(rq_of(cfs_rq)->curr);
ec12cb7f
PT
2899}
2900
6c16a6dc
PZ
2901static __always_inline
2902void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
ec12cb7f 2903{
56f570e5 2904 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
2905 return;
2906
2907 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2908}
2909
85dac906
PT
2910static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2911{
56f570e5 2912 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
2913}
2914
64660c86
PT
2915/* check whether cfs_rq, or any parent, is throttled */
2916static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2917{
56f570e5 2918 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
2919}
2920
2921/*
2922 * Ensure that neither of the group entities corresponding to src_cpu or
2923 * dest_cpu are members of a throttled hierarchy when performing group
2924 * load-balance operations.
2925 */
2926static inline int throttled_lb_pair(struct task_group *tg,
2927 int src_cpu, int dest_cpu)
2928{
2929 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2930
2931 src_cfs_rq = tg->cfs_rq[src_cpu];
2932 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2933
2934 return throttled_hierarchy(src_cfs_rq) ||
2935 throttled_hierarchy(dest_cfs_rq);
2936}
2937
2938/* updated child weight may affect parent so we have to do this bottom up */
2939static int tg_unthrottle_up(struct task_group *tg, void *data)
2940{
2941 struct rq *rq = data;
2942 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2943
2944 cfs_rq->throttle_count--;
2945#ifdef CONFIG_SMP
2946 if (!cfs_rq->throttle_count) {
f1b17280 2947 /* adjust cfs_rq_clock_task() */
78becc27 2948 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 2949 cfs_rq->throttled_clock_task;
64660c86
PT
2950 }
2951#endif
2952
2953 return 0;
2954}
2955
2956static int tg_throttle_down(struct task_group *tg, void *data)
2957{
2958 struct rq *rq = data;
2959 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2960
82958366
PT
2961 /* group is entering throttled state, stop time */
2962 if (!cfs_rq->throttle_count)
78becc27 2963 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
2964 cfs_rq->throttle_count++;
2965
2966 return 0;
2967}
2968
d3d9dc33 2969static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
2970{
2971 struct rq *rq = rq_of(cfs_rq);
2972 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2973 struct sched_entity *se;
2974 long task_delta, dequeue = 1;
2975
2976 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2977
f1b17280 2978 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
2979 rcu_read_lock();
2980 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2981 rcu_read_unlock();
85dac906
PT
2982
2983 task_delta = cfs_rq->h_nr_running;
2984 for_each_sched_entity(se) {
2985 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2986 /* throttled entity or throttle-on-deactivate */
2987 if (!se->on_rq)
2988 break;
2989
2990 if (dequeue)
2991 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2992 qcfs_rq->h_nr_running -= task_delta;
2993
2994 if (qcfs_rq->load.weight)
2995 dequeue = 0;
2996 }
2997
2998 if (!se)
2999 rq->nr_running -= task_delta;
3000
3001 cfs_rq->throttled = 1;
78becc27 3002 cfs_rq->throttled_clock = rq_clock(rq);
85dac906
PT
3003 raw_spin_lock(&cfs_b->lock);
3004 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3005 raw_spin_unlock(&cfs_b->lock);
3006}
3007
029632fb 3008void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
3009{
3010 struct rq *rq = rq_of(cfs_rq);
3011 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3012 struct sched_entity *se;
3013 int enqueue = 1;
3014 long task_delta;
3015
22b958d8 3016 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
3017
3018 cfs_rq->throttled = 0;
1a55af2e
FW
3019
3020 update_rq_clock(rq);
3021
671fd9da 3022 raw_spin_lock(&cfs_b->lock);
78becc27 3023 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
3024 list_del_rcu(&cfs_rq->throttled_list);
3025 raw_spin_unlock(&cfs_b->lock);
3026
64660c86
PT
3027 /* update hierarchical throttle state */
3028 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3029
671fd9da
PT
3030 if (!cfs_rq->load.weight)
3031 return;
3032
3033 task_delta = cfs_rq->h_nr_running;
3034 for_each_sched_entity(se) {
3035 if (se->on_rq)
3036 enqueue = 0;
3037
3038 cfs_rq = cfs_rq_of(se);
3039 if (enqueue)
3040 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3041 cfs_rq->h_nr_running += task_delta;
3042
3043 if (cfs_rq_throttled(cfs_rq))
3044 break;
3045 }
3046
3047 if (!se)
3048 rq->nr_running += task_delta;
3049
3050 /* determine whether we need to wake up potentially idle cpu */
3051 if (rq->curr == rq->idle && rq->cfs.nr_running)
3052 resched_task(rq->curr);
3053}
3054
3055static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3056 u64 remaining, u64 expires)
3057{
3058 struct cfs_rq *cfs_rq;
3059 u64 runtime = remaining;
3060
3061 rcu_read_lock();
3062 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3063 throttled_list) {
3064 struct rq *rq = rq_of(cfs_rq);
3065
3066 raw_spin_lock(&rq->lock);
3067 if (!cfs_rq_throttled(cfs_rq))
3068 goto next;
3069
3070 runtime = -cfs_rq->runtime_remaining + 1;
3071 if (runtime > remaining)
3072 runtime = remaining;
3073 remaining -= runtime;
3074
3075 cfs_rq->runtime_remaining += runtime;
3076 cfs_rq->runtime_expires = expires;
3077
3078 /* we check whether we're throttled above */
3079 if (cfs_rq->runtime_remaining > 0)
3080 unthrottle_cfs_rq(cfs_rq);
3081
3082next:
3083 raw_spin_unlock(&rq->lock);
3084
3085 if (!remaining)
3086 break;
3087 }
3088 rcu_read_unlock();
3089
3090 return remaining;
3091}
3092
58088ad0
PT
3093/*
3094 * Responsible for refilling a task_group's bandwidth and unthrottling its
3095 * cfs_rqs as appropriate. If there has been no activity within the last
3096 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3097 * used to track this state.
3098 */
3099static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3100{
671fd9da
PT
3101 u64 runtime, runtime_expires;
3102 int idle = 1, throttled;
58088ad0
PT
3103
3104 raw_spin_lock(&cfs_b->lock);
3105 /* no need to continue the timer with no bandwidth constraint */
3106 if (cfs_b->quota == RUNTIME_INF)
3107 goto out_unlock;
3108
671fd9da
PT
3109 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3110 /* idle depends on !throttled (for the case of a large deficit) */
3111 idle = cfs_b->idle && !throttled;
e8da1b18 3112 cfs_b->nr_periods += overrun;
671fd9da 3113
a9cf55b2
PT
3114 /* if we're going inactive then everything else can be deferred */
3115 if (idle)
3116 goto out_unlock;
3117
3118 __refill_cfs_bandwidth_runtime(cfs_b);
3119
671fd9da
PT
3120 if (!throttled) {
3121 /* mark as potentially idle for the upcoming period */
3122 cfs_b->idle = 1;
3123 goto out_unlock;
3124 }
3125
e8da1b18
NR
3126 /* account preceding periods in which throttling occurred */
3127 cfs_b->nr_throttled += overrun;
3128
671fd9da
PT
3129 /*
3130 * There are throttled entities so we must first use the new bandwidth
3131 * to unthrottle them before making it generally available. This
3132 * ensures that all existing debts will be paid before a new cfs_rq is
3133 * allowed to run.
3134 */
3135 runtime = cfs_b->runtime;
3136 runtime_expires = cfs_b->runtime_expires;
3137 cfs_b->runtime = 0;
3138
3139 /*
3140 * This check is repeated as we are holding onto the new bandwidth
3141 * while we unthrottle. This can potentially race with an unthrottled
3142 * group trying to acquire new bandwidth from the global pool.
3143 */
3144 while (throttled && runtime > 0) {
3145 raw_spin_unlock(&cfs_b->lock);
3146 /* we can't nest cfs_b->lock while distributing bandwidth */
3147 runtime = distribute_cfs_runtime(cfs_b, runtime,
3148 runtime_expires);
3149 raw_spin_lock(&cfs_b->lock);
3150
3151 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3152 }
58088ad0 3153
671fd9da
PT
3154 /* return (any) remaining runtime */
3155 cfs_b->runtime = runtime;
3156 /*
3157 * While we are ensured activity in the period following an
3158 * unthrottle, this also covers the case in which the new bandwidth is
3159 * insufficient to cover the existing bandwidth deficit. (Forcing the
3160 * timer to remain active while there are any throttled entities.)
3161 */
3162 cfs_b->idle = 0;
58088ad0
PT
3163out_unlock:
3164 if (idle)
3165 cfs_b->timer_active = 0;
3166 raw_spin_unlock(&cfs_b->lock);
3167
3168 return idle;
3169}
d3d9dc33 3170
d8b4986d
PT
3171/* a cfs_rq won't donate quota below this amount */
3172static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3173/* minimum remaining period time to redistribute slack quota */
3174static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3175/* how long we wait to gather additional slack before distributing */
3176static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3177
3178/* are we near the end of the current quota period? */
3179static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3180{
3181 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3182 u64 remaining;
3183
3184 /* if the call-back is running a quota refresh is already occurring */
3185 if (hrtimer_callback_running(refresh_timer))
3186 return 1;
3187
3188 /* is a quota refresh about to occur? */
3189 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3190 if (remaining < min_expire)
3191 return 1;
3192
3193 return 0;
3194}
3195
3196static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3197{
3198 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3199
3200 /* if there's a quota refresh soon don't bother with slack */
3201 if (runtime_refresh_within(cfs_b, min_left))
3202 return;
3203
3204 start_bandwidth_timer(&cfs_b->slack_timer,
3205 ns_to_ktime(cfs_bandwidth_slack_period));
3206}
3207
3208/* we know any runtime found here is valid as update_curr() precedes return */
3209static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3210{
3211 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3212 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3213
3214 if (slack_runtime <= 0)
3215 return;
3216
3217 raw_spin_lock(&cfs_b->lock);
3218 if (cfs_b->quota != RUNTIME_INF &&
3219 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3220 cfs_b->runtime += slack_runtime;
3221
3222 /* we are under rq->lock, defer unthrottling using a timer */
3223 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3224 !list_empty(&cfs_b->throttled_cfs_rq))
3225 start_cfs_slack_bandwidth(cfs_b);
3226 }
3227 raw_spin_unlock(&cfs_b->lock);
3228
3229 /* even if it's not valid for return we don't want to try again */
3230 cfs_rq->runtime_remaining -= slack_runtime;
3231}
3232
3233static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3234{
56f570e5
PT
3235 if (!cfs_bandwidth_used())
3236 return;
3237
fccfdc6f 3238 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
3239 return;
3240
3241 __return_cfs_rq_runtime(cfs_rq);
3242}
3243
3244/*
3245 * This is done with a timer (instead of inline with bandwidth return) since
3246 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3247 */
3248static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3249{
3250 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3251 u64 expires;
3252
3253 /* confirm we're still not at a refresh boundary */
3254 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
3255 return;
3256
3257 raw_spin_lock(&cfs_b->lock);
3258 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3259 runtime = cfs_b->runtime;
3260 cfs_b->runtime = 0;
3261 }
3262 expires = cfs_b->runtime_expires;
3263 raw_spin_unlock(&cfs_b->lock);
3264
3265 if (!runtime)
3266 return;
3267
3268 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3269
3270 raw_spin_lock(&cfs_b->lock);
3271 if (expires == cfs_b->runtime_expires)
3272 cfs_b->runtime = runtime;
3273 raw_spin_unlock(&cfs_b->lock);
3274}
3275
d3d9dc33
PT
3276/*
3277 * When a group wakes up we want to make sure that its quota is not already
3278 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3279 * runtime as update_curr() throttling can not not trigger until it's on-rq.
3280 */
3281static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3282{
56f570e5
PT
3283 if (!cfs_bandwidth_used())
3284 return;
3285
d3d9dc33
PT
3286 /* an active group must be handled by the update_curr()->put() path */
3287 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3288 return;
3289
3290 /* ensure the group is not already throttled */
3291 if (cfs_rq_throttled(cfs_rq))
3292 return;
3293
3294 /* update runtime allocation */
3295 account_cfs_rq_runtime(cfs_rq, 0);
3296 if (cfs_rq->runtime_remaining <= 0)
3297 throttle_cfs_rq(cfs_rq);
3298}
3299
3300/* conditionally throttle active cfs_rq's from put_prev_entity() */
3301static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3302{
56f570e5
PT
3303 if (!cfs_bandwidth_used())
3304 return;
3305
d3d9dc33
PT
3306 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3307 return;
3308
3309 /*
3310 * it's possible for a throttled entity to be forced into a running
3311 * state (e.g. set_curr_task), in this case we're finished.
3312 */
3313 if (cfs_rq_throttled(cfs_rq))
3314 return;
3315
3316 throttle_cfs_rq(cfs_rq);
3317}
029632fb 3318
029632fb
PZ
3319static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3320{
3321 struct cfs_bandwidth *cfs_b =
3322 container_of(timer, struct cfs_bandwidth, slack_timer);
3323 do_sched_cfs_slack_timer(cfs_b);
3324
3325 return HRTIMER_NORESTART;
3326}
3327
3328static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3329{
3330 struct cfs_bandwidth *cfs_b =
3331 container_of(timer, struct cfs_bandwidth, period_timer);
3332 ktime_t now;
3333 int overrun;
3334 int idle = 0;
3335
3336 for (;;) {
3337 now = hrtimer_cb_get_time(timer);
3338 overrun = hrtimer_forward(timer, now, cfs_b->period);
3339
3340 if (!overrun)
3341 break;
3342
3343 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3344 }
3345
3346 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3347}
3348
3349void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3350{
3351 raw_spin_lock_init(&cfs_b->lock);
3352 cfs_b->runtime = 0;
3353 cfs_b->quota = RUNTIME_INF;
3354 cfs_b->period = ns_to_ktime(default_cfs_period());
3355
3356 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3357 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3358 cfs_b->period_timer.function = sched_cfs_period_timer;
3359 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3360 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3361}
3362
3363static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3364{
3365 cfs_rq->runtime_enabled = 0;
3366 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3367}
3368
3369/* requires cfs_b->lock, may release to reprogram timer */
3370void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3371{
3372 /*
3373 * The timer may be active because we're trying to set a new bandwidth
3374 * period or because we're racing with the tear-down path
3375 * (timer_active==0 becomes visible before the hrtimer call-back
3376 * terminates). In either case we ensure that it's re-programmed
3377 */
3378 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
3379 raw_spin_unlock(&cfs_b->lock);
3380 /* ensure cfs_b->lock is available while we wait */
3381 hrtimer_cancel(&cfs_b->period_timer);
3382
3383 raw_spin_lock(&cfs_b->lock);
3384 /* if someone else restarted the timer then we're done */
3385 if (cfs_b->timer_active)
3386 return;
3387 }
3388
3389 cfs_b->timer_active = 1;
3390 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3391}
3392
3393static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3394{
3395 hrtimer_cancel(&cfs_b->period_timer);
3396 hrtimer_cancel(&cfs_b->slack_timer);
3397}
3398
38dc3348 3399static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
3400{
3401 struct cfs_rq *cfs_rq;
3402
3403 for_each_leaf_cfs_rq(rq, cfs_rq) {
3404 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3405
3406 if (!cfs_rq->runtime_enabled)
3407 continue;
3408
3409 /*
3410 * clock_task is not advancing so we just need to make sure
3411 * there's some valid quota amount
3412 */
3413 cfs_rq->runtime_remaining = cfs_b->quota;
3414 if (cfs_rq_throttled(cfs_rq))
3415 unthrottle_cfs_rq(cfs_rq);
3416 }
3417}
3418
3419#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
3420static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3421{
78becc27 3422 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
3423}
3424
3425static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3426 unsigned long delta_exec) {}
d3d9dc33
PT
3427static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3428static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 3429static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
3430
3431static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3432{
3433 return 0;
3434}
64660c86
PT
3435
3436static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3437{
3438 return 0;
3439}
3440
3441static inline int throttled_lb_pair(struct task_group *tg,
3442 int src_cpu, int dest_cpu)
3443{
3444 return 0;
3445}
029632fb
PZ
3446
3447void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3448
3449#ifdef CONFIG_FAIR_GROUP_SCHED
3450static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
3451#endif
3452
029632fb
PZ
3453static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3454{
3455 return NULL;
3456}
3457static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
a4c96ae3 3458static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
3459
3460#endif /* CONFIG_CFS_BANDWIDTH */
3461
bf0f6f24
IM
3462/**************************************************
3463 * CFS operations on tasks:
3464 */
3465
8f4d37ec
PZ
3466#ifdef CONFIG_SCHED_HRTICK
3467static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3468{
8f4d37ec
PZ
3469 struct sched_entity *se = &p->se;
3470 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3471
3472 WARN_ON(task_rq(p) != rq);
3473
b39e66ea 3474 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
3475 u64 slice = sched_slice(cfs_rq, se);
3476 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3477 s64 delta = slice - ran;
3478
3479 if (delta < 0) {
3480 if (rq->curr == p)
3481 resched_task(p);
3482 return;
3483 }
3484
3485 /*
3486 * Don't schedule slices shorter than 10000ns, that just
3487 * doesn't make sense. Rely on vruntime for fairness.
3488 */
31656519 3489 if (rq->curr != p)
157124c1 3490 delta = max_t(s64, 10000LL, delta);
8f4d37ec 3491
31656519 3492 hrtick_start(rq, delta);
8f4d37ec
PZ
3493 }
3494}
a4c2f00f
PZ
3495
3496/*
3497 * called from enqueue/dequeue and updates the hrtick when the
3498 * current task is from our class and nr_running is low enough
3499 * to matter.
3500 */
3501static void hrtick_update(struct rq *rq)
3502{
3503 struct task_struct *curr = rq->curr;
3504
b39e66ea 3505 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
3506 return;
3507
3508 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3509 hrtick_start_fair(rq, curr);
3510}
55e12e5e 3511#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
3512static inline void
3513hrtick_start_fair(struct rq *rq, struct task_struct *p)
3514{
3515}
a4c2f00f
PZ
3516
3517static inline void hrtick_update(struct rq *rq)
3518{
3519}
8f4d37ec
PZ
3520#endif
3521
bf0f6f24
IM
3522/*
3523 * The enqueue_task method is called before nr_running is
3524 * increased. Here we update the fair scheduling stats and
3525 * then put the task into the rbtree:
3526 */
ea87bb78 3527static void
371fd7e7 3528enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3529{
3530 struct cfs_rq *cfs_rq;
62fb1851 3531 struct sched_entity *se = &p->se;
bf0f6f24
IM
3532
3533 for_each_sched_entity(se) {
62fb1851 3534 if (se->on_rq)
bf0f6f24
IM
3535 break;
3536 cfs_rq = cfs_rq_of(se);
88ec22d3 3537 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
3538
3539 /*
3540 * end evaluation on encountering a throttled cfs_rq
3541 *
3542 * note: in the case of encountering a throttled cfs_rq we will
3543 * post the final h_nr_running increment below.
3544 */
3545 if (cfs_rq_throttled(cfs_rq))
3546 break;
953bfcd1 3547 cfs_rq->h_nr_running++;
85dac906 3548
88ec22d3 3549 flags = ENQUEUE_WAKEUP;
bf0f6f24 3550 }
8f4d37ec 3551
2069dd75 3552 for_each_sched_entity(se) {
0f317143 3553 cfs_rq = cfs_rq_of(se);
953bfcd1 3554 cfs_rq->h_nr_running++;
2069dd75 3555
85dac906
PT
3556 if (cfs_rq_throttled(cfs_rq))
3557 break;
3558
17bc14b7 3559 update_cfs_shares(cfs_rq);
9ee474f5 3560 update_entity_load_avg(se, 1);
2069dd75
PZ
3561 }
3562
18bf2805
BS
3563 if (!se) {
3564 update_rq_runnable_avg(rq, rq->nr_running);
85dac906 3565 inc_nr_running(rq);
18bf2805 3566 }
a4c2f00f 3567 hrtick_update(rq);
bf0f6f24
IM
3568}
3569
2f36825b
VP
3570static void set_next_buddy(struct sched_entity *se);
3571
bf0f6f24
IM
3572/*
3573 * The dequeue_task method is called before nr_running is
3574 * decreased. We remove the task from the rbtree and
3575 * update the fair scheduling stats:
3576 */
371fd7e7 3577static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3578{
3579 struct cfs_rq *cfs_rq;
62fb1851 3580 struct sched_entity *se = &p->se;
2f36825b 3581 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
3582
3583 for_each_sched_entity(se) {
3584 cfs_rq = cfs_rq_of(se);
371fd7e7 3585 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
3586
3587 /*
3588 * end evaluation on encountering a throttled cfs_rq
3589 *
3590 * note: in the case of encountering a throttled cfs_rq we will
3591 * post the final h_nr_running decrement below.
3592 */
3593 if (cfs_rq_throttled(cfs_rq))
3594 break;
953bfcd1 3595 cfs_rq->h_nr_running--;
2069dd75 3596
bf0f6f24 3597 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
3598 if (cfs_rq->load.weight) {
3599 /*
3600 * Bias pick_next to pick a task from this cfs_rq, as
3601 * p is sleeping when it is within its sched_slice.
3602 */
3603 if (task_sleep && parent_entity(se))
3604 set_next_buddy(parent_entity(se));
9598c82d
PT
3605
3606 /* avoid re-evaluating load for this entity */
3607 se = parent_entity(se);
bf0f6f24 3608 break;
2f36825b 3609 }
371fd7e7 3610 flags |= DEQUEUE_SLEEP;
bf0f6f24 3611 }
8f4d37ec 3612
2069dd75 3613 for_each_sched_entity(se) {
0f317143 3614 cfs_rq = cfs_rq_of(se);
953bfcd1 3615 cfs_rq->h_nr_running--;
2069dd75 3616
85dac906
PT
3617 if (cfs_rq_throttled(cfs_rq))
3618 break;
3619
17bc14b7 3620 update_cfs_shares(cfs_rq);
9ee474f5 3621 update_entity_load_avg(se, 1);
2069dd75
PZ
3622 }
3623
18bf2805 3624 if (!se) {
85dac906 3625 dec_nr_running(rq);
18bf2805
BS
3626 update_rq_runnable_avg(rq, 1);
3627 }
a4c2f00f 3628 hrtick_update(rq);
bf0f6f24
IM
3629}
3630
e7693a36 3631#ifdef CONFIG_SMP
029632fb
PZ
3632/* Used instead of source_load when we know the type == 0 */
3633static unsigned long weighted_cpuload(const int cpu)
3634{
b92486cb 3635 return cpu_rq(cpu)->cfs.runnable_load_avg;
029632fb
PZ
3636}
3637
3638/*
3639 * Return a low guess at the load of a migration-source cpu weighted
3640 * according to the scheduling class and "nice" value.
3641 *
3642 * We want to under-estimate the load of migration sources, to
3643 * balance conservatively.
3644 */
3645static unsigned long source_load(int cpu, int type)
3646{
3647 struct rq *rq = cpu_rq(cpu);
3648 unsigned long total = weighted_cpuload(cpu);
3649
3650 if (type == 0 || !sched_feat(LB_BIAS))
3651 return total;
3652
3653 return min(rq->cpu_load[type-1], total);
3654}
3655
3656/*
3657 * Return a high guess at the load of a migration-target cpu weighted
3658 * according to the scheduling class and "nice" value.
3659 */
3660static unsigned long target_load(int cpu, int type)
3661{
3662 struct rq *rq = cpu_rq(cpu);
3663 unsigned long total = weighted_cpuload(cpu);
3664
3665 if (type == 0 || !sched_feat(LB_BIAS))
3666 return total;
3667
3668 return max(rq->cpu_load[type-1], total);
3669}
3670
3671static unsigned long power_of(int cpu)
3672{
3673 return cpu_rq(cpu)->cpu_power;
3674}
3675
3676static unsigned long cpu_avg_load_per_task(int cpu)
3677{
3678 struct rq *rq = cpu_rq(cpu);
3679 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
b92486cb 3680 unsigned long load_avg = rq->cfs.runnable_load_avg;
029632fb
PZ
3681
3682 if (nr_running)
b92486cb 3683 return load_avg / nr_running;
029632fb
PZ
3684
3685 return 0;
3686}
3687
62470419
MW
3688static void record_wakee(struct task_struct *p)
3689{
3690 /*
3691 * Rough decay (wiping) for cost saving, don't worry
3692 * about the boundary, really active task won't care
3693 * about the loss.
3694 */
3695 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3696 current->wakee_flips = 0;
3697 current->wakee_flip_decay_ts = jiffies;
3698 }
3699
3700 if (current->last_wakee != p) {
3701 current->last_wakee = p;
3702 current->wakee_flips++;
3703 }
3704}
098fb9db 3705
74f8e4b2 3706static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
3707{
3708 struct sched_entity *se = &p->se;
3709 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
3710 u64 min_vruntime;
3711
3712#ifndef CONFIG_64BIT
3713 u64 min_vruntime_copy;
88ec22d3 3714
3fe1698b
PZ
3715 do {
3716 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3717 smp_rmb();
3718 min_vruntime = cfs_rq->min_vruntime;
3719 } while (min_vruntime != min_vruntime_copy);
3720#else
3721 min_vruntime = cfs_rq->min_vruntime;
3722#endif
88ec22d3 3723
3fe1698b 3724 se->vruntime -= min_vruntime;
62470419 3725 record_wakee(p);
88ec22d3
PZ
3726}
3727
bb3469ac 3728#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
3729/*
3730 * effective_load() calculates the load change as seen from the root_task_group
3731 *
3732 * Adding load to a group doesn't make a group heavier, but can cause movement
3733 * of group shares between cpus. Assuming the shares were perfectly aligned one
3734 * can calculate the shift in shares.
cf5f0acf
PZ
3735 *
3736 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3737 * on this @cpu and results in a total addition (subtraction) of @wg to the
3738 * total group weight.
3739 *
3740 * Given a runqueue weight distribution (rw_i) we can compute a shares
3741 * distribution (s_i) using:
3742 *
3743 * s_i = rw_i / \Sum rw_j (1)
3744 *
3745 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3746 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3747 * shares distribution (s_i):
3748 *
3749 * rw_i = { 2, 4, 1, 0 }
3750 * s_i = { 2/7, 4/7, 1/7, 0 }
3751 *
3752 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3753 * task used to run on and the CPU the waker is running on), we need to
3754 * compute the effect of waking a task on either CPU and, in case of a sync
3755 * wakeup, compute the effect of the current task going to sleep.
3756 *
3757 * So for a change of @wl to the local @cpu with an overall group weight change
3758 * of @wl we can compute the new shares distribution (s'_i) using:
3759 *
3760 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3761 *
3762 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3763 * differences in waking a task to CPU 0. The additional task changes the
3764 * weight and shares distributions like:
3765 *
3766 * rw'_i = { 3, 4, 1, 0 }
3767 * s'_i = { 3/8, 4/8, 1/8, 0 }
3768 *
3769 * We can then compute the difference in effective weight by using:
3770 *
3771 * dw_i = S * (s'_i - s_i) (3)
3772 *
3773 * Where 'S' is the group weight as seen by its parent.
3774 *
3775 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3776 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3777 * 4/7) times the weight of the group.
f5bfb7d9 3778 */
2069dd75 3779static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 3780{
4be9daaa 3781 struct sched_entity *se = tg->se[cpu];
f1d239f7 3782
58d081b5 3783 if (!tg->parent || !wl) /* the trivial, non-cgroup case */
f1d239f7
PZ
3784 return wl;
3785
4be9daaa 3786 for_each_sched_entity(se) {
cf5f0acf 3787 long w, W;
4be9daaa 3788
977dda7c 3789 tg = se->my_q->tg;
bb3469ac 3790
cf5f0acf
PZ
3791 /*
3792 * W = @wg + \Sum rw_j
3793 */
3794 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 3795
cf5f0acf
PZ
3796 /*
3797 * w = rw_i + @wl
3798 */
3799 w = se->my_q->load.weight + wl;
940959e9 3800
cf5f0acf
PZ
3801 /*
3802 * wl = S * s'_i; see (2)
3803 */
3804 if (W > 0 && w < W)
3805 wl = (w * tg->shares) / W;
977dda7c
PT
3806 else
3807 wl = tg->shares;
940959e9 3808
cf5f0acf
PZ
3809 /*
3810 * Per the above, wl is the new se->load.weight value; since
3811 * those are clipped to [MIN_SHARES, ...) do so now. See
3812 * calc_cfs_shares().
3813 */
977dda7c
PT
3814 if (wl < MIN_SHARES)
3815 wl = MIN_SHARES;
cf5f0acf
PZ
3816
3817 /*
3818 * wl = dw_i = S * (s'_i - s_i); see (3)
3819 */
977dda7c 3820 wl -= se->load.weight;
cf5f0acf
PZ
3821
3822 /*
3823 * Recursively apply this logic to all parent groups to compute
3824 * the final effective load change on the root group. Since
3825 * only the @tg group gets extra weight, all parent groups can
3826 * only redistribute existing shares. @wl is the shift in shares
3827 * resulting from this level per the above.
3828 */
4be9daaa 3829 wg = 0;
4be9daaa 3830 }
bb3469ac 3831
4be9daaa 3832 return wl;
bb3469ac
PZ
3833}
3834#else
4be9daaa 3835
58d081b5 3836static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4be9daaa 3837{
83378269 3838 return wl;
bb3469ac 3839}
4be9daaa 3840
bb3469ac
PZ
3841#endif
3842
62470419
MW
3843static int wake_wide(struct task_struct *p)
3844{
7d9ffa89 3845 int factor = this_cpu_read(sd_llc_size);
62470419
MW
3846
3847 /*
3848 * Yeah, it's the switching-frequency, could means many wakee or
3849 * rapidly switch, use factor here will just help to automatically
3850 * adjust the loose-degree, so bigger node will lead to more pull.
3851 */
3852 if (p->wakee_flips > factor) {
3853 /*
3854 * wakee is somewhat hot, it needs certain amount of cpu
3855 * resource, so if waker is far more hot, prefer to leave
3856 * it alone.
3857 */
3858 if (current->wakee_flips > (factor * p->wakee_flips))
3859 return 1;
3860 }
3861
3862 return 0;
3863}
3864
c88d5910 3865static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 3866{
e37b6a7b 3867 s64 this_load, load;
c88d5910 3868 int idx, this_cpu, prev_cpu;
098fb9db 3869 unsigned long tl_per_task;
c88d5910 3870 struct task_group *tg;
83378269 3871 unsigned long weight;
b3137bc8 3872 int balanced;
098fb9db 3873
62470419
MW
3874 /*
3875 * If we wake multiple tasks be careful to not bounce
3876 * ourselves around too much.
3877 */
3878 if (wake_wide(p))
3879 return 0;
3880
c88d5910
PZ
3881 idx = sd->wake_idx;
3882 this_cpu = smp_processor_id();
3883 prev_cpu = task_cpu(p);
3884 load = source_load(prev_cpu, idx);
3885 this_load = target_load(this_cpu, idx);
098fb9db 3886
b3137bc8
MG
3887 /*
3888 * If sync wakeup then subtract the (maximum possible)
3889 * effect of the currently running task from the load
3890 * of the current CPU:
3891 */
83378269
PZ
3892 if (sync) {
3893 tg = task_group(current);
3894 weight = current->se.load.weight;
3895
c88d5910 3896 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
3897 load += effective_load(tg, prev_cpu, 0, -weight);
3898 }
b3137bc8 3899
83378269
PZ
3900 tg = task_group(p);
3901 weight = p->se.load.weight;
b3137bc8 3902
71a29aa7
PZ
3903 /*
3904 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
3905 * due to the sync cause above having dropped this_load to 0, we'll
3906 * always have an imbalance, but there's really nothing you can do
3907 * about that, so that's good too.
71a29aa7
PZ
3908 *
3909 * Otherwise check if either cpus are near enough in load to allow this
3910 * task to be woken on this_cpu.
3911 */
e37b6a7b
PT
3912 if (this_load > 0) {
3913 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
3914
3915 this_eff_load = 100;
3916 this_eff_load *= power_of(prev_cpu);
3917 this_eff_load *= this_load +
3918 effective_load(tg, this_cpu, weight, weight);
3919
3920 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3921 prev_eff_load *= power_of(this_cpu);
3922 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3923
3924 balanced = this_eff_load <= prev_eff_load;
3925 } else
3926 balanced = true;
b3137bc8 3927
098fb9db 3928 /*
4ae7d5ce
IM
3929 * If the currently running task will sleep within
3930 * a reasonable amount of time then attract this newly
3931 * woken task:
098fb9db 3932 */
2fb7635c
PZ
3933 if (sync && balanced)
3934 return 1;
098fb9db 3935
41acab88 3936 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
3937 tl_per_task = cpu_avg_load_per_task(this_cpu);
3938
c88d5910
PZ
3939 if (balanced ||
3940 (this_load <= load &&
3941 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
3942 /*
3943 * This domain has SD_WAKE_AFFINE and
3944 * p is cache cold in this domain, and
3945 * there is no bad imbalance.
3946 */
c88d5910 3947 schedstat_inc(sd, ttwu_move_affine);
41acab88 3948 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
3949
3950 return 1;
3951 }
3952 return 0;
3953}
3954
aaee1203
PZ
3955/*
3956 * find_idlest_group finds and returns the least busy CPU group within the
3957 * domain.
3958 */
3959static struct sched_group *
78e7ed53 3960find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 3961 int this_cpu, int load_idx)
e7693a36 3962{
b3bd3de6 3963 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 3964 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 3965 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 3966
aaee1203
PZ
3967 do {
3968 unsigned long load, avg_load;
3969 int local_group;
3970 int i;
e7693a36 3971
aaee1203
PZ
3972 /* Skip over this group if it has no CPUs allowed */
3973 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 3974 tsk_cpus_allowed(p)))
aaee1203
PZ
3975 continue;
3976
3977 local_group = cpumask_test_cpu(this_cpu,
3978 sched_group_cpus(group));
3979
3980 /* Tally up the load of all CPUs in the group */
3981 avg_load = 0;
3982
3983 for_each_cpu(i, sched_group_cpus(group)) {
3984 /* Bias balancing toward cpus of our domain */
3985 if (local_group)
3986 load = source_load(i, load_idx);
3987 else
3988 load = target_load(i, load_idx);
3989
3990 avg_load += load;
3991 }
3992
3993 /* Adjust by relative CPU power of the group */
9c3f75cb 3994 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
3995
3996 if (local_group) {
3997 this_load = avg_load;
aaee1203
PZ
3998 } else if (avg_load < min_load) {
3999 min_load = avg_load;
4000 idlest = group;
4001 }
4002 } while (group = group->next, group != sd->groups);
4003
4004 if (!idlest || 100*this_load < imbalance*min_load)
4005 return NULL;
4006 return idlest;
4007}
4008
4009/*
4010 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4011 */
4012static int
4013find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4014{
4015 unsigned long load, min_load = ULONG_MAX;
4016 int idlest = -1;
4017 int i;
4018
4019 /* Traverse only the allowed CPUs */
fa17b507 4020 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
4021 load = weighted_cpuload(i);
4022
4023 if (load < min_load || (load == min_load && i == this_cpu)) {
4024 min_load = load;
4025 idlest = i;
e7693a36
GH
4026 }
4027 }
4028
aaee1203
PZ
4029 return idlest;
4030}
e7693a36 4031
a50bde51
PZ
4032/*
4033 * Try and locate an idle CPU in the sched_domain.
4034 */
99bd5e2f 4035static int select_idle_sibling(struct task_struct *p, int target)
a50bde51 4036{
99bd5e2f 4037 struct sched_domain *sd;
37407ea7 4038 struct sched_group *sg;
e0a79f52 4039 int i = task_cpu(p);
a50bde51 4040
e0a79f52
MG
4041 if (idle_cpu(target))
4042 return target;
99bd5e2f
SS
4043
4044 /*
e0a79f52 4045 * If the prevous cpu is cache affine and idle, don't be stupid.
99bd5e2f 4046 */
e0a79f52
MG
4047 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4048 return i;
a50bde51
PZ
4049
4050 /*
37407ea7 4051 * Otherwise, iterate the domains and find an elegible idle cpu.
a50bde51 4052 */
518cd623 4053 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 4054 for_each_lower_domain(sd) {
37407ea7
LT
4055 sg = sd->groups;
4056 do {
4057 if (!cpumask_intersects(sched_group_cpus(sg),
4058 tsk_cpus_allowed(p)))
4059 goto next;
4060
4061 for_each_cpu(i, sched_group_cpus(sg)) {
e0a79f52 4062 if (i == target || !idle_cpu(i))
37407ea7
LT
4063 goto next;
4064 }
970e1789 4065
37407ea7
LT
4066 target = cpumask_first_and(sched_group_cpus(sg),
4067 tsk_cpus_allowed(p));
4068 goto done;
4069next:
4070 sg = sg->next;
4071 } while (sg != sd->groups);
4072 }
4073done:
a50bde51
PZ
4074 return target;
4075}
4076
aaee1203
PZ
4077/*
4078 * sched_balance_self: balance the current task (running on cpu) in domains
4079 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
4080 * SD_BALANCE_EXEC.
4081 *
4082 * Balance, ie. select the least loaded group.
4083 *
4084 * Returns the target CPU number, or the same CPU if no balancing is needed.
4085 *
4086 * preempt must be disabled.
4087 */
0017d735 4088static int
ac66f547 4089select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
aaee1203 4090{
29cd8bae 4091 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910 4092 int cpu = smp_processor_id();
c88d5910 4093 int new_cpu = cpu;
99bd5e2f 4094 int want_affine = 0;
5158f4e4 4095 int sync = wake_flags & WF_SYNC;
c88d5910 4096
29baa747 4097 if (p->nr_cpus_allowed == 1)
76854c7e
MG
4098 return prev_cpu;
4099
0763a660 4100 if (sd_flag & SD_BALANCE_WAKE) {
fa17b507 4101 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
c88d5910
PZ
4102 want_affine = 1;
4103 new_cpu = prev_cpu;
4104 }
aaee1203 4105
dce840a0 4106 rcu_read_lock();
aaee1203 4107 for_each_domain(cpu, tmp) {
e4f42888
PZ
4108 if (!(tmp->flags & SD_LOAD_BALANCE))
4109 continue;
4110
fe3bcfe1 4111 /*
99bd5e2f
SS
4112 * If both cpu and prev_cpu are part of this domain,
4113 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 4114 */
99bd5e2f
SS
4115 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4116 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4117 affine_sd = tmp;
29cd8bae 4118 break;
f03542a7 4119 }
29cd8bae 4120
f03542a7 4121 if (tmp->flags & sd_flag)
29cd8bae
PZ
4122 sd = tmp;
4123 }
4124
8b911acd 4125 if (affine_sd) {
f03542a7 4126 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
dce840a0
PZ
4127 prev_cpu = cpu;
4128
4129 new_cpu = select_idle_sibling(p, prev_cpu);
4130 goto unlock;
8b911acd 4131 }
e7693a36 4132
aaee1203 4133 while (sd) {
5158f4e4 4134 int load_idx = sd->forkexec_idx;
aaee1203 4135 struct sched_group *group;
c88d5910 4136 int weight;
098fb9db 4137
0763a660 4138 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
4139 sd = sd->child;
4140 continue;
4141 }
098fb9db 4142
5158f4e4
PZ
4143 if (sd_flag & SD_BALANCE_WAKE)
4144 load_idx = sd->wake_idx;
098fb9db 4145
5158f4e4 4146 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
4147 if (!group) {
4148 sd = sd->child;
4149 continue;
4150 }
4ae7d5ce 4151
d7c33c49 4152 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
4153 if (new_cpu == -1 || new_cpu == cpu) {
4154 /* Now try balancing at a lower domain level of cpu */
4155 sd = sd->child;
4156 continue;
e7693a36 4157 }
aaee1203
PZ
4158
4159 /* Now try balancing at a lower domain level of new_cpu */
4160 cpu = new_cpu;
669c55e9 4161 weight = sd->span_weight;
aaee1203
PZ
4162 sd = NULL;
4163 for_each_domain(cpu, tmp) {
669c55e9 4164 if (weight <= tmp->span_weight)
aaee1203 4165 break;
0763a660 4166 if (tmp->flags & sd_flag)
aaee1203
PZ
4167 sd = tmp;
4168 }
4169 /* while loop will break here if sd == NULL */
e7693a36 4170 }
dce840a0
PZ
4171unlock:
4172 rcu_read_unlock();
e7693a36 4173
c88d5910 4174 return new_cpu;
e7693a36 4175}
0a74bef8
PT
4176
4177/*
4178 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4179 * cfs_rq_of(p) references at time of call are still valid and identify the
4180 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4181 * other assumptions, including the state of rq->lock, should be made.
4182 */
4183static void
4184migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4185{
aff3e498
PT
4186 struct sched_entity *se = &p->se;
4187 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4188
4189 /*
4190 * Load tracking: accumulate removed load so that it can be processed
4191 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4192 * to blocked load iff they have a positive decay-count. It can never
4193 * be negative here since on-rq tasks have decay-count == 0.
4194 */
4195 if (se->avg.decay_count) {
4196 se->avg.decay_count = -__synchronize_entity_decay(se);
2509940f
AS
4197 atomic_long_add(se->avg.load_avg_contrib,
4198 &cfs_rq->removed_load);
aff3e498 4199 }
0a74bef8 4200}
e7693a36
GH
4201#endif /* CONFIG_SMP */
4202
e52fb7c0
PZ
4203static unsigned long
4204wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
4205{
4206 unsigned long gran = sysctl_sched_wakeup_granularity;
4207
4208 /*
e52fb7c0
PZ
4209 * Since its curr running now, convert the gran from real-time
4210 * to virtual-time in his units.
13814d42
MG
4211 *
4212 * By using 'se' instead of 'curr' we penalize light tasks, so
4213 * they get preempted easier. That is, if 'se' < 'curr' then
4214 * the resulting gran will be larger, therefore penalizing the
4215 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4216 * be smaller, again penalizing the lighter task.
4217 *
4218 * This is especially important for buddies when the leftmost
4219 * task is higher priority than the buddy.
0bbd3336 4220 */
f4ad9bd2 4221 return calc_delta_fair(gran, se);
0bbd3336
PZ
4222}
4223
464b7527
PZ
4224/*
4225 * Should 'se' preempt 'curr'.
4226 *
4227 * |s1
4228 * |s2
4229 * |s3
4230 * g
4231 * |<--->|c
4232 *
4233 * w(c, s1) = -1
4234 * w(c, s2) = 0
4235 * w(c, s3) = 1
4236 *
4237 */
4238static int
4239wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4240{
4241 s64 gran, vdiff = curr->vruntime - se->vruntime;
4242
4243 if (vdiff <= 0)
4244 return -1;
4245
e52fb7c0 4246 gran = wakeup_gran(curr, se);
464b7527
PZ
4247 if (vdiff > gran)
4248 return 1;
4249
4250 return 0;
4251}
4252
02479099
PZ
4253static void set_last_buddy(struct sched_entity *se)
4254{
69c80f3e
VP
4255 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4256 return;
4257
4258 for_each_sched_entity(se)
4259 cfs_rq_of(se)->last = se;
02479099
PZ
4260}
4261
4262static void set_next_buddy(struct sched_entity *se)
4263{
69c80f3e
VP
4264 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4265 return;
4266
4267 for_each_sched_entity(se)
4268 cfs_rq_of(se)->next = se;
02479099
PZ
4269}
4270
ac53db59
RR
4271static void set_skip_buddy(struct sched_entity *se)
4272{
69c80f3e
VP
4273 for_each_sched_entity(se)
4274 cfs_rq_of(se)->skip = se;
ac53db59
RR
4275}
4276
bf0f6f24
IM
4277/*
4278 * Preempt the current task with a newly woken task if needed:
4279 */
5a9b86f6 4280static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
4281{
4282 struct task_struct *curr = rq->curr;
8651a86c 4283 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 4284 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 4285 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 4286 int next_buddy_marked = 0;
bf0f6f24 4287
4ae7d5ce
IM
4288 if (unlikely(se == pse))
4289 return;
4290
5238cdd3 4291 /*
ddcdf6e7 4292 * This is possible from callers such as move_task(), in which we
5238cdd3
PT
4293 * unconditionally check_prempt_curr() after an enqueue (which may have
4294 * lead to a throttle). This both saves work and prevents false
4295 * next-buddy nomination below.
4296 */
4297 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4298 return;
4299
2f36825b 4300 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 4301 set_next_buddy(pse);
2f36825b
VP
4302 next_buddy_marked = 1;
4303 }
57fdc26d 4304
aec0a514
BR
4305 /*
4306 * We can come here with TIF_NEED_RESCHED already set from new task
4307 * wake up path.
5238cdd3
PT
4308 *
4309 * Note: this also catches the edge-case of curr being in a throttled
4310 * group (e.g. via set_curr_task), since update_curr() (in the
4311 * enqueue of curr) will have resulted in resched being set. This
4312 * prevents us from potentially nominating it as a false LAST_BUDDY
4313 * below.
aec0a514
BR
4314 */
4315 if (test_tsk_need_resched(curr))
4316 return;
4317
a2f5c9ab
DH
4318 /* Idle tasks are by definition preempted by non-idle tasks. */
4319 if (unlikely(curr->policy == SCHED_IDLE) &&
4320 likely(p->policy != SCHED_IDLE))
4321 goto preempt;
4322
91c234b4 4323 /*
a2f5c9ab
DH
4324 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4325 * is driven by the tick):
91c234b4 4326 */
8ed92e51 4327 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 4328 return;
bf0f6f24 4329
464b7527 4330 find_matching_se(&se, &pse);
9bbd7374 4331 update_curr(cfs_rq_of(se));
002f128b 4332 BUG_ON(!pse);
2f36825b
VP
4333 if (wakeup_preempt_entity(se, pse) == 1) {
4334 /*
4335 * Bias pick_next to pick the sched entity that is
4336 * triggering this preemption.
4337 */
4338 if (!next_buddy_marked)
4339 set_next_buddy(pse);
3a7e73a2 4340 goto preempt;
2f36825b 4341 }
464b7527 4342
3a7e73a2 4343 return;
a65ac745 4344
3a7e73a2
PZ
4345preempt:
4346 resched_task(curr);
4347 /*
4348 * Only set the backward buddy when the current task is still
4349 * on the rq. This can happen when a wakeup gets interleaved
4350 * with schedule on the ->pre_schedule() or idle_balance()
4351 * point, either of which can * drop the rq lock.
4352 *
4353 * Also, during early boot the idle thread is in the fair class,
4354 * for obvious reasons its a bad idea to schedule back to it.
4355 */
4356 if (unlikely(!se->on_rq || curr == rq->idle))
4357 return;
4358
4359 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4360 set_last_buddy(se);
bf0f6f24
IM
4361}
4362
fb8d4724 4363static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 4364{
8f4d37ec 4365 struct task_struct *p;
bf0f6f24
IM
4366 struct cfs_rq *cfs_rq = &rq->cfs;
4367 struct sched_entity *se;
4368
36ace27e 4369 if (!cfs_rq->nr_running)
bf0f6f24
IM
4370 return NULL;
4371
4372 do {
9948f4b2 4373 se = pick_next_entity(cfs_rq);
f4b6755f 4374 set_next_entity(cfs_rq, se);
bf0f6f24
IM
4375 cfs_rq = group_cfs_rq(se);
4376 } while (cfs_rq);
4377
8f4d37ec 4378 p = task_of(se);
b39e66ea
MG
4379 if (hrtick_enabled(rq))
4380 hrtick_start_fair(rq, p);
8f4d37ec
PZ
4381
4382 return p;
bf0f6f24
IM
4383}
4384
4385/*
4386 * Account for a descheduled task:
4387 */
31ee529c 4388static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
4389{
4390 struct sched_entity *se = &prev->se;
4391 struct cfs_rq *cfs_rq;
4392
4393 for_each_sched_entity(se) {
4394 cfs_rq = cfs_rq_of(se);
ab6cde26 4395 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
4396 }
4397}
4398
ac53db59
RR
4399/*
4400 * sched_yield() is very simple
4401 *
4402 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4403 */
4404static void yield_task_fair(struct rq *rq)
4405{
4406 struct task_struct *curr = rq->curr;
4407 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4408 struct sched_entity *se = &curr->se;
4409
4410 /*
4411 * Are we the only task in the tree?
4412 */
4413 if (unlikely(rq->nr_running == 1))
4414 return;
4415
4416 clear_buddies(cfs_rq, se);
4417
4418 if (curr->policy != SCHED_BATCH) {
4419 update_rq_clock(rq);
4420 /*
4421 * Update run-time statistics of the 'current'.
4422 */
4423 update_curr(cfs_rq);
916671c0
MG
4424 /*
4425 * Tell update_rq_clock() that we've just updated,
4426 * so we don't do microscopic update in schedule()
4427 * and double the fastpath cost.
4428 */
4429 rq->skip_clock_update = 1;
ac53db59
RR
4430 }
4431
4432 set_skip_buddy(se);
4433}
4434
d95f4122
MG
4435static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4436{
4437 struct sched_entity *se = &p->se;
4438
5238cdd3
PT
4439 /* throttled hierarchies are not runnable */
4440 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
4441 return false;
4442
4443 /* Tell the scheduler that we'd really like pse to run next. */
4444 set_next_buddy(se);
4445
d95f4122
MG
4446 yield_task_fair(rq);
4447
4448 return true;
4449}
4450
681f3e68 4451#ifdef CONFIG_SMP
bf0f6f24 4452/**************************************************
e9c84cb8
PZ
4453 * Fair scheduling class load-balancing methods.
4454 *
4455 * BASICS
4456 *
4457 * The purpose of load-balancing is to achieve the same basic fairness the
4458 * per-cpu scheduler provides, namely provide a proportional amount of compute
4459 * time to each task. This is expressed in the following equation:
4460 *
4461 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4462 *
4463 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4464 * W_i,0 is defined as:
4465 *
4466 * W_i,0 = \Sum_j w_i,j (2)
4467 *
4468 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4469 * is derived from the nice value as per prio_to_weight[].
4470 *
4471 * The weight average is an exponential decay average of the instantaneous
4472 * weight:
4473 *
4474 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4475 *
4476 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4477 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4478 * can also include other factors [XXX].
4479 *
4480 * To achieve this balance we define a measure of imbalance which follows
4481 * directly from (1):
4482 *
4483 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
4484 *
4485 * We them move tasks around to minimize the imbalance. In the continuous
4486 * function space it is obvious this converges, in the discrete case we get
4487 * a few fun cases generally called infeasible weight scenarios.
4488 *
4489 * [XXX expand on:
4490 * - infeasible weights;
4491 * - local vs global optima in the discrete case. ]
4492 *
4493 *
4494 * SCHED DOMAINS
4495 *
4496 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4497 * for all i,j solution, we create a tree of cpus that follows the hardware
4498 * topology where each level pairs two lower groups (or better). This results
4499 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4500 * tree to only the first of the previous level and we decrease the frequency
4501 * of load-balance at each level inv. proportional to the number of cpus in
4502 * the groups.
4503 *
4504 * This yields:
4505 *
4506 * log_2 n 1 n
4507 * \Sum { --- * --- * 2^i } = O(n) (5)
4508 * i = 0 2^i 2^i
4509 * `- size of each group
4510 * | | `- number of cpus doing load-balance
4511 * | `- freq
4512 * `- sum over all levels
4513 *
4514 * Coupled with a limit on how many tasks we can migrate every balance pass,
4515 * this makes (5) the runtime complexity of the balancer.
4516 *
4517 * An important property here is that each CPU is still (indirectly) connected
4518 * to every other cpu in at most O(log n) steps:
4519 *
4520 * The adjacency matrix of the resulting graph is given by:
4521 *
4522 * log_2 n
4523 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4524 * k = 0
4525 *
4526 * And you'll find that:
4527 *
4528 * A^(log_2 n)_i,j != 0 for all i,j (7)
4529 *
4530 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4531 * The task movement gives a factor of O(m), giving a convergence complexity
4532 * of:
4533 *
4534 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4535 *
4536 *
4537 * WORK CONSERVING
4538 *
4539 * In order to avoid CPUs going idle while there's still work to do, new idle
4540 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4541 * tree itself instead of relying on other CPUs to bring it work.
4542 *
4543 * This adds some complexity to both (5) and (8) but it reduces the total idle
4544 * time.
4545 *
4546 * [XXX more?]
4547 *
4548 *
4549 * CGROUPS
4550 *
4551 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4552 *
4553 * s_k,i
4554 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
4555 * S_k
4556 *
4557 * Where
4558 *
4559 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4560 *
4561 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4562 *
4563 * The big problem is S_k, its a global sum needed to compute a local (W_i)
4564 * property.
4565 *
4566 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4567 * rewrite all of this once again.]
4568 */
bf0f6f24 4569
ed387b78
HS
4570static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4571
ddcdf6e7 4572#define LBF_ALL_PINNED 0x01
367456c7 4573#define LBF_NEED_BREAK 0x02
6263322c
PZ
4574#define LBF_DST_PINNED 0x04
4575#define LBF_SOME_PINNED 0x08
ddcdf6e7
PZ
4576
4577struct lb_env {
4578 struct sched_domain *sd;
4579
ddcdf6e7 4580 struct rq *src_rq;
85c1e7da 4581 int src_cpu;
ddcdf6e7
PZ
4582
4583 int dst_cpu;
4584 struct rq *dst_rq;
4585
88b8dac0
SV
4586 struct cpumask *dst_grpmask;
4587 int new_dst_cpu;
ddcdf6e7 4588 enum cpu_idle_type idle;
bd939f45 4589 long imbalance;
b9403130
MW
4590 /* The set of CPUs under consideration for load-balancing */
4591 struct cpumask *cpus;
4592
ddcdf6e7 4593 unsigned int flags;
367456c7
PZ
4594
4595 unsigned int loop;
4596 unsigned int loop_break;
4597 unsigned int loop_max;
ddcdf6e7
PZ
4598};
4599
1e3c88bd 4600/*
ddcdf6e7 4601 * move_task - move a task from one runqueue to another runqueue.
1e3c88bd
PZ
4602 * Both runqueues must be locked.
4603 */
ddcdf6e7 4604static void move_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 4605{
ddcdf6e7
PZ
4606 deactivate_task(env->src_rq, p, 0);
4607 set_task_cpu(p, env->dst_cpu);
4608 activate_task(env->dst_rq, p, 0);
4609 check_preempt_curr(env->dst_rq, p, 0);
6fe6b2d6
RR
4610#ifdef CONFIG_NUMA_BALANCING
4611 if (p->numa_preferred_nid != -1) {
4612 int src_nid = cpu_to_node(env->src_cpu);
4613 int dst_nid = cpu_to_node(env->dst_cpu);
4614
4615 /*
4616 * If the load balancer has moved the task then limit
4617 * migrations from taking place in the short term in
4618 * case this is a short-lived migration.
4619 */
4620 if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
4621 p->numa_migrate_seq = 0;
4622 }
4623#endif
1e3c88bd
PZ
4624}
4625
029632fb
PZ
4626/*
4627 * Is this task likely cache-hot:
4628 */
4629static int
4630task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4631{
4632 s64 delta;
4633
4634 if (p->sched_class != &fair_sched_class)
4635 return 0;
4636
4637 if (unlikely(p->policy == SCHED_IDLE))
4638 return 0;
4639
4640 /*
4641 * Buddy candidates are cache hot:
4642 */
4643 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4644 (&p->se == cfs_rq_of(&p->se)->next ||
4645 &p->se == cfs_rq_of(&p->se)->last))
4646 return 1;
4647
4648 if (sysctl_sched_migration_cost == -1)
4649 return 1;
4650 if (sysctl_sched_migration_cost == 0)
4651 return 0;
4652
4653 delta = now - p->se.exec_start;
4654
4655 return delta < (s64)sysctl_sched_migration_cost;
4656}
4657
3a7053b3
MG
4658#ifdef CONFIG_NUMA_BALANCING
4659/* Returns true if the destination node has incurred more faults */
4660static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4661{
4662 int src_nid, dst_nid;
4663
4664 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4665 !(env->sd->flags & SD_NUMA)) {
4666 return false;
4667 }
4668
4669 src_nid = cpu_to_node(env->src_cpu);
4670 dst_nid = cpu_to_node(env->dst_cpu);
4671
83e1d2cd 4672 if (src_nid == dst_nid)
3a7053b3
MG
4673 return false;
4674
83e1d2cd
MG
4675 /* Always encourage migration to the preferred node. */
4676 if (dst_nid == p->numa_preferred_nid)
4677 return true;
4678
4679 /* After the task has settled, check if the new node is better. */
4680 if (p->numa_migrate_seq >= sysctl_numa_balancing_settle_count &&
4681 task_weight(p, dst_nid) + group_weight(p, dst_nid) >
4682 task_weight(p, src_nid) + group_weight(p, src_nid))
3a7053b3
MG
4683 return true;
4684
4685 return false;
4686}
7a0f3083
MG
4687
4688
4689static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4690{
4691 int src_nid, dst_nid;
4692
4693 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4694 return false;
4695
4696 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4697 return false;
4698
4699 src_nid = cpu_to_node(env->src_cpu);
4700 dst_nid = cpu_to_node(env->dst_cpu);
4701
83e1d2cd 4702 if (src_nid == dst_nid)
7a0f3083
MG
4703 return false;
4704
83e1d2cd
MG
4705 /* Migrating away from the preferred node is always bad. */
4706 if (src_nid == p->numa_preferred_nid)
4707 return true;
4708
4709 /* After the task has settled, check if the new node is worse. */
4710 if (p->numa_migrate_seq >= sysctl_numa_balancing_settle_count &&
4711 task_weight(p, dst_nid) + group_weight(p, dst_nid) <
4712 task_weight(p, src_nid) + group_weight(p, src_nid))
7a0f3083
MG
4713 return true;
4714
4715 return false;
4716}
4717
3a7053b3
MG
4718#else
4719static inline bool migrate_improves_locality(struct task_struct *p,
4720 struct lb_env *env)
4721{
4722 return false;
4723}
7a0f3083
MG
4724
4725static inline bool migrate_degrades_locality(struct task_struct *p,
4726 struct lb_env *env)
4727{
4728 return false;
4729}
3a7053b3
MG
4730#endif
4731
1e3c88bd
PZ
4732/*
4733 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4734 */
4735static
8e45cb54 4736int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd
PZ
4737{
4738 int tsk_cache_hot = 0;
4739 /*
4740 * We do not migrate tasks that are:
d3198084 4741 * 1) throttled_lb_pair, or
1e3c88bd 4742 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
4743 * 3) running (obviously), or
4744 * 4) are cache-hot on their current CPU.
1e3c88bd 4745 */
d3198084
JK
4746 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4747 return 0;
4748
ddcdf6e7 4749 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
e02e60c1 4750 int cpu;
88b8dac0 4751
41acab88 4752 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
88b8dac0 4753
6263322c
PZ
4754 env->flags |= LBF_SOME_PINNED;
4755
88b8dac0
SV
4756 /*
4757 * Remember if this task can be migrated to any other cpu in
4758 * our sched_group. We may want to revisit it if we couldn't
4759 * meet load balance goals by pulling other tasks on src_cpu.
4760 *
4761 * Also avoid computing new_dst_cpu if we have already computed
4762 * one in current iteration.
4763 */
6263322c 4764 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
4765 return 0;
4766
e02e60c1
JK
4767 /* Prevent to re-select dst_cpu via env's cpus */
4768 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4769 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6263322c 4770 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
4771 env->new_dst_cpu = cpu;
4772 break;
4773 }
88b8dac0 4774 }
e02e60c1 4775
1e3c88bd
PZ
4776 return 0;
4777 }
88b8dac0
SV
4778
4779 /* Record that we found atleast one task that could run on dst_cpu */
8e45cb54 4780 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 4781
ddcdf6e7 4782 if (task_running(env->src_rq, p)) {
41acab88 4783 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
4784 return 0;
4785 }
4786
4787 /*
4788 * Aggressive migration if:
3a7053b3
MG
4789 * 1) destination numa is preferred
4790 * 2) task is cache cold, or
4791 * 3) too many balance attempts have failed.
1e3c88bd 4792 */
78becc27 4793 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
7a0f3083
MG
4794 if (!tsk_cache_hot)
4795 tsk_cache_hot = migrate_degrades_locality(p, env);
3a7053b3
MG
4796
4797 if (migrate_improves_locality(p, env)) {
4798#ifdef CONFIG_SCHEDSTATS
4799 if (tsk_cache_hot) {
4800 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4801 schedstat_inc(p, se.statistics.nr_forced_migrations);
4802 }
4803#endif
4804 return 1;
4805 }
4806
1e3c88bd 4807 if (!tsk_cache_hot ||
8e45cb54 4808 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
4e2dcb73 4809
1e3c88bd 4810 if (tsk_cache_hot) {
8e45cb54 4811 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
41acab88 4812 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd 4813 }
4e2dcb73 4814
1e3c88bd
PZ
4815 return 1;
4816 }
4817
4e2dcb73
ZH
4818 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4819 return 0;
1e3c88bd
PZ
4820}
4821
897c395f
PZ
4822/*
4823 * move_one_task tries to move exactly one task from busiest to this_rq, as
4824 * part of active balancing operations within "domain".
4825 * Returns 1 if successful and 0 otherwise.
4826 *
4827 * Called with both runqueues locked.
4828 */
8e45cb54 4829static int move_one_task(struct lb_env *env)
897c395f
PZ
4830{
4831 struct task_struct *p, *n;
897c395f 4832
367456c7 4833 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
4834 if (!can_migrate_task(p, env))
4835 continue;
897c395f 4836
367456c7
PZ
4837 move_task(p, env);
4838 /*
4839 * Right now, this is only the second place move_task()
4840 * is called, so we can safely collect move_task()
4841 * stats here rather than inside move_task().
4842 */
4843 schedstat_inc(env->sd, lb_gained[env->idle]);
4844 return 1;
897c395f 4845 }
897c395f
PZ
4846 return 0;
4847}
4848
eb95308e
PZ
4849static const unsigned int sched_nr_migrate_break = 32;
4850
5d6523eb 4851/*
bd939f45 4852 * move_tasks tries to move up to imbalance weighted load from busiest to
5d6523eb
PZ
4853 * this_rq, as part of a balancing operation within domain "sd".
4854 * Returns 1 if successful and 0 otherwise.
4855 *
4856 * Called with both runqueues locked.
4857 */
4858static int move_tasks(struct lb_env *env)
1e3c88bd 4859{
5d6523eb
PZ
4860 struct list_head *tasks = &env->src_rq->cfs_tasks;
4861 struct task_struct *p;
367456c7
PZ
4862 unsigned long load;
4863 int pulled = 0;
1e3c88bd 4864
bd939f45 4865 if (env->imbalance <= 0)
5d6523eb 4866 return 0;
1e3c88bd 4867
5d6523eb
PZ
4868 while (!list_empty(tasks)) {
4869 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 4870
367456c7
PZ
4871 env->loop++;
4872 /* We've more or less seen every task there is, call it quits */
5d6523eb 4873 if (env->loop > env->loop_max)
367456c7 4874 break;
5d6523eb
PZ
4875
4876 /* take a breather every nr_migrate tasks */
367456c7 4877 if (env->loop > env->loop_break) {
eb95308e 4878 env->loop_break += sched_nr_migrate_break;
8e45cb54 4879 env->flags |= LBF_NEED_BREAK;
ee00e66f 4880 break;
a195f004 4881 }
1e3c88bd 4882
d3198084 4883 if (!can_migrate_task(p, env))
367456c7
PZ
4884 goto next;
4885
4886 load = task_h_load(p);
5d6523eb 4887
eb95308e 4888 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
4889 goto next;
4890
bd939f45 4891 if ((load / 2) > env->imbalance)
367456c7 4892 goto next;
1e3c88bd 4893
ddcdf6e7 4894 move_task(p, env);
ee00e66f 4895 pulled++;
bd939f45 4896 env->imbalance -= load;
1e3c88bd
PZ
4897
4898#ifdef CONFIG_PREEMPT
ee00e66f
PZ
4899 /*
4900 * NEWIDLE balancing is a source of latency, so preemptible
4901 * kernels will stop after the first task is pulled to minimize
4902 * the critical section.
4903 */
5d6523eb 4904 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 4905 break;
1e3c88bd
PZ
4906#endif
4907
ee00e66f
PZ
4908 /*
4909 * We only want to steal up to the prescribed amount of
4910 * weighted load.
4911 */
bd939f45 4912 if (env->imbalance <= 0)
ee00e66f 4913 break;
367456c7
PZ
4914
4915 continue;
4916next:
5d6523eb 4917 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 4918 }
5d6523eb 4919
1e3c88bd 4920 /*
ddcdf6e7
PZ
4921 * Right now, this is one of only two places move_task() is called,
4922 * so we can safely collect move_task() stats here rather than
4923 * inside move_task().
1e3c88bd 4924 */
8e45cb54 4925 schedstat_add(env->sd, lb_gained[env->idle], pulled);
1e3c88bd 4926
5d6523eb 4927 return pulled;
1e3c88bd
PZ
4928}
4929
230059de 4930#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
4931/*
4932 * update tg->load_weight by folding this cpu's load_avg
4933 */
48a16753 4934static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
9e3081ca 4935{
48a16753
PT
4936 struct sched_entity *se = tg->se[cpu];
4937 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
9e3081ca 4938
48a16753
PT
4939 /* throttled entities do not contribute to load */
4940 if (throttled_hierarchy(cfs_rq))
4941 return;
9e3081ca 4942
aff3e498 4943 update_cfs_rq_blocked_load(cfs_rq, 1);
9e3081ca 4944
82958366
PT
4945 if (se) {
4946 update_entity_load_avg(se, 1);
4947 /*
4948 * We pivot on our runnable average having decayed to zero for
4949 * list removal. This generally implies that all our children
4950 * have also been removed (modulo rounding error or bandwidth
4951 * control); however, such cases are rare and we can fix these
4952 * at enqueue.
4953 *
4954 * TODO: fix up out-of-order children on enqueue.
4955 */
4956 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4957 list_del_leaf_cfs_rq(cfs_rq);
4958 } else {
48a16753 4959 struct rq *rq = rq_of(cfs_rq);
82958366
PT
4960 update_rq_runnable_avg(rq, rq->nr_running);
4961 }
9e3081ca
PZ
4962}
4963
48a16753 4964static void update_blocked_averages(int cpu)
9e3081ca 4965{
9e3081ca 4966 struct rq *rq = cpu_rq(cpu);
48a16753
PT
4967 struct cfs_rq *cfs_rq;
4968 unsigned long flags;
9e3081ca 4969
48a16753
PT
4970 raw_spin_lock_irqsave(&rq->lock, flags);
4971 update_rq_clock(rq);
9763b67f
PZ
4972 /*
4973 * Iterates the task_group tree in a bottom up fashion, see
4974 * list_add_leaf_cfs_rq() for details.
4975 */
64660c86 4976 for_each_leaf_cfs_rq(rq, cfs_rq) {
48a16753
PT
4977 /*
4978 * Note: We may want to consider periodically releasing
4979 * rq->lock about these updates so that creating many task
4980 * groups does not result in continually extending hold time.
4981 */
4982 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
64660c86 4983 }
48a16753
PT
4984
4985 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
4986}
4987
9763b67f 4988/*
68520796 4989 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
9763b67f
PZ
4990 * This needs to be done in a top-down fashion because the load of a child
4991 * group is a fraction of its parents load.
4992 */
68520796 4993static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 4994{
68520796
VD
4995 struct rq *rq = rq_of(cfs_rq);
4996 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 4997 unsigned long now = jiffies;
68520796 4998 unsigned long load;
a35b6466 4999
68520796 5000 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
5001 return;
5002
68520796
VD
5003 cfs_rq->h_load_next = NULL;
5004 for_each_sched_entity(se) {
5005 cfs_rq = cfs_rq_of(se);
5006 cfs_rq->h_load_next = se;
5007 if (cfs_rq->last_h_load_update == now)
5008 break;
5009 }
a35b6466 5010
68520796 5011 if (!se) {
7e3115ef 5012 cfs_rq->h_load = cfs_rq->runnable_load_avg;
68520796
VD
5013 cfs_rq->last_h_load_update = now;
5014 }
5015
5016 while ((se = cfs_rq->h_load_next) != NULL) {
5017 load = cfs_rq->h_load;
5018 load = div64_ul(load * se->avg.load_avg_contrib,
5019 cfs_rq->runnable_load_avg + 1);
5020 cfs_rq = group_cfs_rq(se);
5021 cfs_rq->h_load = load;
5022 cfs_rq->last_h_load_update = now;
5023 }
9763b67f
PZ
5024}
5025
367456c7 5026static unsigned long task_h_load(struct task_struct *p)
230059de 5027{
367456c7 5028 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 5029
68520796 5030 update_cfs_rq_h_load(cfs_rq);
a003a25b
AS
5031 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5032 cfs_rq->runnable_load_avg + 1);
230059de
PZ
5033}
5034#else
48a16753 5035static inline void update_blocked_averages(int cpu)
9e3081ca
PZ
5036{
5037}
5038
367456c7 5039static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 5040{
a003a25b 5041 return p->se.avg.load_avg_contrib;
1e3c88bd 5042}
230059de 5043#endif
1e3c88bd 5044
1e3c88bd 5045/********** Helpers for find_busiest_group ************************/
1e3c88bd
PZ
5046/*
5047 * sg_lb_stats - stats of a sched_group required for load_balancing
5048 */
5049struct sg_lb_stats {
5050 unsigned long avg_load; /*Avg load across the CPUs of the group */
5051 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 5052 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 5053 unsigned long load_per_task;
3ae11c90 5054 unsigned long group_power;
147c5fc2
PZ
5055 unsigned int sum_nr_running; /* Nr tasks running in the group */
5056 unsigned int group_capacity;
5057 unsigned int idle_cpus;
5058 unsigned int group_weight;
1e3c88bd 5059 int group_imb; /* Is there an imbalance in the group ? */
fab47622 5060 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
5061};
5062
56cf515b
JK
5063/*
5064 * sd_lb_stats - Structure to store the statistics of a sched_domain
5065 * during load balancing.
5066 */
5067struct sd_lb_stats {
5068 struct sched_group *busiest; /* Busiest group in this sd */
5069 struct sched_group *local; /* Local group in this sd */
5070 unsigned long total_load; /* Total load of all groups in sd */
5071 unsigned long total_pwr; /* Total power of all groups in sd */
5072 unsigned long avg_load; /* Average load across all groups in sd */
5073
56cf515b 5074 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 5075 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
5076};
5077
147c5fc2
PZ
5078static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5079{
5080 /*
5081 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5082 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5083 * We must however clear busiest_stat::avg_load because
5084 * update_sd_pick_busiest() reads this before assignment.
5085 */
5086 *sds = (struct sd_lb_stats){
5087 .busiest = NULL,
5088 .local = NULL,
5089 .total_load = 0UL,
5090 .total_pwr = 0UL,
5091 .busiest_stat = {
5092 .avg_load = 0UL,
5093 },
5094 };
5095}
5096
1e3c88bd
PZ
5097/**
5098 * get_sd_load_idx - Obtain the load index for a given sched domain.
5099 * @sd: The sched_domain whose load_idx is to be obtained.
5100 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
e69f6186
YB
5101 *
5102 * Return: The load index.
1e3c88bd
PZ
5103 */
5104static inline int get_sd_load_idx(struct sched_domain *sd,
5105 enum cpu_idle_type idle)
5106{
5107 int load_idx;
5108
5109 switch (idle) {
5110 case CPU_NOT_IDLE:
5111 load_idx = sd->busy_idx;
5112 break;
5113
5114 case CPU_NEWLY_IDLE:
5115 load_idx = sd->newidle_idx;
5116 break;
5117 default:
5118 load_idx = sd->idle_idx;
5119 break;
5120 }
5121
5122 return load_idx;
5123}
5124
15f803c9 5125static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
1e3c88bd 5126{
1399fa78 5127 return SCHED_POWER_SCALE;
1e3c88bd
PZ
5128}
5129
5130unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5131{
5132 return default_scale_freq_power(sd, cpu);
5133}
5134
15f803c9 5135static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
1e3c88bd 5136{
669c55e9 5137 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
5138 unsigned long smt_gain = sd->smt_gain;
5139
5140 smt_gain /= weight;
5141
5142 return smt_gain;
5143}
5144
5145unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5146{
5147 return default_scale_smt_power(sd, cpu);
5148}
5149
15f803c9 5150static unsigned long scale_rt_power(int cpu)
1e3c88bd
PZ
5151{
5152 struct rq *rq = cpu_rq(cpu);
b654f7de 5153 u64 total, available, age_stamp, avg;
1e3c88bd 5154
b654f7de
PZ
5155 /*
5156 * Since we're reading these variables without serialization make sure
5157 * we read them once before doing sanity checks on them.
5158 */
5159 age_stamp = ACCESS_ONCE(rq->age_stamp);
5160 avg = ACCESS_ONCE(rq->rt_avg);
5161
78becc27 5162 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
aa483808 5163
b654f7de 5164 if (unlikely(total < avg)) {
aa483808
VP
5165 /* Ensures that power won't end up being negative */
5166 available = 0;
5167 } else {
b654f7de 5168 available = total - avg;
aa483808 5169 }
1e3c88bd 5170
1399fa78
NR
5171 if (unlikely((s64)total < SCHED_POWER_SCALE))
5172 total = SCHED_POWER_SCALE;
1e3c88bd 5173
1399fa78 5174 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
5175
5176 return div_u64(available, total);
5177}
5178
5179static void update_cpu_power(struct sched_domain *sd, int cpu)
5180{
669c55e9 5181 unsigned long weight = sd->span_weight;
1399fa78 5182 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
5183 struct sched_group *sdg = sd->groups;
5184
1e3c88bd
PZ
5185 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5186 if (sched_feat(ARCH_POWER))
5187 power *= arch_scale_smt_power(sd, cpu);
5188 else
5189 power *= default_scale_smt_power(sd, cpu);
5190
1399fa78 5191 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
5192 }
5193
9c3f75cb 5194 sdg->sgp->power_orig = power;
9d5efe05
SV
5195
5196 if (sched_feat(ARCH_POWER))
5197 power *= arch_scale_freq_power(sd, cpu);
5198 else
5199 power *= default_scale_freq_power(sd, cpu);
5200
1399fa78 5201 power >>= SCHED_POWER_SHIFT;
9d5efe05 5202
1e3c88bd 5203 power *= scale_rt_power(cpu);
1399fa78 5204 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
5205
5206 if (!power)
5207 power = 1;
5208
e51fd5e2 5209 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 5210 sdg->sgp->power = power;
1e3c88bd
PZ
5211}
5212
029632fb 5213void update_group_power(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
5214{
5215 struct sched_domain *child = sd->child;
5216 struct sched_group *group, *sdg = sd->groups;
863bffc8 5217 unsigned long power, power_orig;
4ec4412e
VG
5218 unsigned long interval;
5219
5220 interval = msecs_to_jiffies(sd->balance_interval);
5221 interval = clamp(interval, 1UL, max_load_balance_interval);
5222 sdg->sgp->next_update = jiffies + interval;
1e3c88bd
PZ
5223
5224 if (!child) {
5225 update_cpu_power(sd, cpu);
5226 return;
5227 }
5228
863bffc8 5229 power_orig = power = 0;
1e3c88bd 5230
74a5ce20
PZ
5231 if (child->flags & SD_OVERLAP) {
5232 /*
5233 * SD_OVERLAP domains cannot assume that child groups
5234 * span the current group.
5235 */
5236
863bffc8
PZ
5237 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5238 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
5239
5240 power_orig += sg->sgp->power_orig;
5241 power += sg->sgp->power;
5242 }
74a5ce20
PZ
5243 } else {
5244 /*
5245 * !SD_OVERLAP domains can assume that child groups
5246 * span the current group.
5247 */
5248
5249 group = child->groups;
5250 do {
863bffc8 5251 power_orig += group->sgp->power_orig;
74a5ce20
PZ
5252 power += group->sgp->power;
5253 group = group->next;
5254 } while (group != child->groups);
5255 }
1e3c88bd 5256
863bffc8
PZ
5257 sdg->sgp->power_orig = power_orig;
5258 sdg->sgp->power = power;
1e3c88bd
PZ
5259}
5260
9d5efe05
SV
5261/*
5262 * Try and fix up capacity for tiny siblings, this is needed when
5263 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5264 * which on its own isn't powerful enough.
5265 *
5266 * See update_sd_pick_busiest() and check_asym_packing().
5267 */
5268static inline int
5269fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5270{
5271 /*
1399fa78 5272 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 5273 */
a6c75f2f 5274 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
5275 return 0;
5276
5277 /*
5278 * If ~90% of the cpu_power is still there, we're good.
5279 */
9c3f75cb 5280 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
5281 return 1;
5282
5283 return 0;
5284}
5285
30ce5dab
PZ
5286/*
5287 * Group imbalance indicates (and tries to solve) the problem where balancing
5288 * groups is inadequate due to tsk_cpus_allowed() constraints.
5289 *
5290 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5291 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5292 * Something like:
5293 *
5294 * { 0 1 2 3 } { 4 5 6 7 }
5295 * * * * *
5296 *
5297 * If we were to balance group-wise we'd place two tasks in the first group and
5298 * two tasks in the second group. Clearly this is undesired as it will overload
5299 * cpu 3 and leave one of the cpus in the second group unused.
5300 *
5301 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
5302 * by noticing the lower domain failed to reach balance and had difficulty
5303 * moving tasks due to affinity constraints.
30ce5dab
PZ
5304 *
5305 * When this is so detected; this group becomes a candidate for busiest; see
5306 * update_sd_pick_busiest(). And calculcate_imbalance() and
6263322c 5307 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
5308 * to create an effective group imbalance.
5309 *
5310 * This is a somewhat tricky proposition since the next run might not find the
5311 * group imbalance and decide the groups need to be balanced again. A most
5312 * subtle and fragile situation.
5313 */
5314
6263322c 5315static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 5316{
6263322c 5317 return group->sgp->imbalance;
30ce5dab
PZ
5318}
5319
b37d9316
PZ
5320/*
5321 * Compute the group capacity.
5322 *
c61037e9
PZ
5323 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5324 * first dividing out the smt factor and computing the actual number of cores
5325 * and limit power unit capacity with that.
b37d9316
PZ
5326 */
5327static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5328{
c61037e9
PZ
5329 unsigned int capacity, smt, cpus;
5330 unsigned int power, power_orig;
5331
5332 power = group->sgp->power;
5333 power_orig = group->sgp->power_orig;
5334 cpus = group->group_weight;
b37d9316 5335
c61037e9
PZ
5336 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5337 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5338 capacity = cpus / smt; /* cores */
b37d9316 5339
c61037e9 5340 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
b37d9316
PZ
5341 if (!capacity)
5342 capacity = fix_small_capacity(env->sd, group);
5343
5344 return capacity;
5345}
5346
1e3c88bd
PZ
5347/**
5348 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 5349 * @env: The load balancing environment.
1e3c88bd 5350 * @group: sched_group whose statistics are to be updated.
1e3c88bd 5351 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 5352 * @local_group: Does group contain this_cpu.
1e3c88bd
PZ
5353 * @sgs: variable to hold the statistics for this group.
5354 */
bd939f45
PZ
5355static inline void update_sg_lb_stats(struct lb_env *env,
5356 struct sched_group *group, int load_idx,
23f0d209 5357 int local_group, struct sg_lb_stats *sgs)
1e3c88bd 5358{
30ce5dab
PZ
5359 unsigned long nr_running;
5360 unsigned long load;
bd939f45 5361 int i;
1e3c88bd 5362
b72ff13c
PZ
5363 memset(sgs, 0, sizeof(*sgs));
5364
b9403130 5365 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
5366 struct rq *rq = cpu_rq(i);
5367
e44bc5c5
PZ
5368 nr_running = rq->nr_running;
5369
1e3c88bd 5370 /* Bias balancing toward cpus of our domain */
6263322c 5371 if (local_group)
04f733b4 5372 load = target_load(i, load_idx);
6263322c 5373 else
1e3c88bd 5374 load = source_load(i, load_idx);
1e3c88bd
PZ
5375
5376 sgs->group_load += load;
e44bc5c5 5377 sgs->sum_nr_running += nr_running;
1e3c88bd 5378 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
5379 if (idle_cpu(i))
5380 sgs->idle_cpus++;
1e3c88bd
PZ
5381 }
5382
1e3c88bd 5383 /* Adjust by relative CPU power of the group */
3ae11c90
PZ
5384 sgs->group_power = group->sgp->power;
5385 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
1e3c88bd 5386
dd5feea1 5387 if (sgs->sum_nr_running)
38d0f770 5388 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 5389
aae6d3dd 5390 sgs->group_weight = group->group_weight;
fab47622 5391
b37d9316
PZ
5392 sgs->group_imb = sg_imbalanced(group);
5393 sgs->group_capacity = sg_capacity(env, group);
5394
fab47622
NR
5395 if (sgs->group_capacity > sgs->sum_nr_running)
5396 sgs->group_has_capacity = 1;
1e3c88bd
PZ
5397}
5398
532cb4c4
MN
5399/**
5400 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 5401 * @env: The load balancing environment.
532cb4c4
MN
5402 * @sds: sched_domain statistics
5403 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 5404 * @sgs: sched_group statistics
532cb4c4
MN
5405 *
5406 * Determine if @sg is a busier group than the previously selected
5407 * busiest group.
e69f6186
YB
5408 *
5409 * Return: %true if @sg is a busier group than the previously selected
5410 * busiest group. %false otherwise.
532cb4c4 5411 */
bd939f45 5412static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
5413 struct sd_lb_stats *sds,
5414 struct sched_group *sg,
bd939f45 5415 struct sg_lb_stats *sgs)
532cb4c4 5416{
56cf515b 5417 if (sgs->avg_load <= sds->busiest_stat.avg_load)
532cb4c4
MN
5418 return false;
5419
5420 if (sgs->sum_nr_running > sgs->group_capacity)
5421 return true;
5422
5423 if (sgs->group_imb)
5424 return true;
5425
5426 /*
5427 * ASYM_PACKING needs to move all the work to the lowest
5428 * numbered CPUs in the group, therefore mark all groups
5429 * higher than ourself as busy.
5430 */
bd939f45
PZ
5431 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5432 env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
5433 if (!sds->busiest)
5434 return true;
5435
5436 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5437 return true;
5438 }
5439
5440 return false;
5441}
5442
1e3c88bd 5443/**
461819ac 5444 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 5445 * @env: The load balancing environment.
1e3c88bd
PZ
5446 * @balance: Should we balance.
5447 * @sds: variable to hold the statistics for this sched_domain.
5448 */
bd939f45 5449static inline void update_sd_lb_stats(struct lb_env *env,
23f0d209 5450 struct sd_lb_stats *sds)
1e3c88bd 5451{
bd939f45
PZ
5452 struct sched_domain *child = env->sd->child;
5453 struct sched_group *sg = env->sd->groups;
56cf515b 5454 struct sg_lb_stats tmp_sgs;
1e3c88bd
PZ
5455 int load_idx, prefer_sibling = 0;
5456
5457 if (child && child->flags & SD_PREFER_SIBLING)
5458 prefer_sibling = 1;
5459
bd939f45 5460 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
5461
5462 do {
56cf515b 5463 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
5464 int local_group;
5465
bd939f45 5466 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
56cf515b
JK
5467 if (local_group) {
5468 sds->local = sg;
5469 sgs = &sds->local_stat;
b72ff13c
PZ
5470
5471 if (env->idle != CPU_NEWLY_IDLE ||
5472 time_after_eq(jiffies, sg->sgp->next_update))
5473 update_group_power(env->sd, env->dst_cpu);
56cf515b 5474 }
1e3c88bd 5475
56cf515b 5476 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
1e3c88bd 5477
b72ff13c
PZ
5478 if (local_group)
5479 goto next_group;
5480
1e3c88bd
PZ
5481 /*
5482 * In case the child domain prefers tasks go to siblings
532cb4c4 5483 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
5484 * and move all the excess tasks away. We lower the capacity
5485 * of a group only if the local group has the capacity to fit
5486 * these excess tasks, i.e. nr_running < group_capacity. The
5487 * extra check prevents the case where you always pull from the
5488 * heaviest group when it is already under-utilized (possible
5489 * with a large weight task outweighs the tasks on the system).
1e3c88bd 5490 */
b72ff13c
PZ
5491 if (prefer_sibling && sds->local &&
5492 sds->local_stat.group_has_capacity)
147c5fc2 5493 sgs->group_capacity = min(sgs->group_capacity, 1U);
1e3c88bd 5494
b72ff13c 5495 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 5496 sds->busiest = sg;
56cf515b 5497 sds->busiest_stat = *sgs;
1e3c88bd
PZ
5498 }
5499
b72ff13c
PZ
5500next_group:
5501 /* Now, start updating sd_lb_stats */
5502 sds->total_load += sgs->group_load;
5503 sds->total_pwr += sgs->group_power;
5504
532cb4c4 5505 sg = sg->next;
bd939f45 5506 } while (sg != env->sd->groups);
532cb4c4
MN
5507}
5508
532cb4c4
MN
5509/**
5510 * check_asym_packing - Check to see if the group is packed into the
5511 * sched doman.
5512 *
5513 * This is primarily intended to used at the sibling level. Some
5514 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5515 * case of POWER7, it can move to lower SMT modes only when higher
5516 * threads are idle. When in lower SMT modes, the threads will
5517 * perform better since they share less core resources. Hence when we
5518 * have idle threads, we want them to be the higher ones.
5519 *
5520 * This packing function is run on idle threads. It checks to see if
5521 * the busiest CPU in this domain (core in the P7 case) has a higher
5522 * CPU number than the packing function is being run on. Here we are
5523 * assuming lower CPU number will be equivalent to lower a SMT thread
5524 * number.
5525 *
e69f6186 5526 * Return: 1 when packing is required and a task should be moved to
b6b12294
MN
5527 * this CPU. The amount of the imbalance is returned in *imbalance.
5528 *
cd96891d 5529 * @env: The load balancing environment.
532cb4c4 5530 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 5531 */
bd939f45 5532static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
5533{
5534 int busiest_cpu;
5535
bd939f45 5536 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
5537 return 0;
5538
5539 if (!sds->busiest)
5540 return 0;
5541
5542 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 5543 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
5544 return 0;
5545
bd939f45 5546 env->imbalance = DIV_ROUND_CLOSEST(
3ae11c90
PZ
5547 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5548 SCHED_POWER_SCALE);
bd939f45 5549
532cb4c4 5550 return 1;
1e3c88bd
PZ
5551}
5552
5553/**
5554 * fix_small_imbalance - Calculate the minor imbalance that exists
5555 * amongst the groups of a sched_domain, during
5556 * load balancing.
cd96891d 5557 * @env: The load balancing environment.
1e3c88bd 5558 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 5559 */
bd939f45
PZ
5560static inline
5561void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd
PZ
5562{
5563 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5564 unsigned int imbn = 2;
dd5feea1 5565 unsigned long scaled_busy_load_per_task;
56cf515b 5566 struct sg_lb_stats *local, *busiest;
1e3c88bd 5567
56cf515b
JK
5568 local = &sds->local_stat;
5569 busiest = &sds->busiest_stat;
1e3c88bd 5570
56cf515b
JK
5571 if (!local->sum_nr_running)
5572 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5573 else if (busiest->load_per_task > local->load_per_task)
5574 imbn = 1;
dd5feea1 5575
56cf515b
JK
5576 scaled_busy_load_per_task =
5577 (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5578 busiest->group_power;
56cf515b 5579
3029ede3
VD
5580 if (busiest->avg_load + scaled_busy_load_per_task >=
5581 local->avg_load + (scaled_busy_load_per_task * imbn)) {
56cf515b 5582 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
5583 return;
5584 }
5585
5586 /*
5587 * OK, we don't have enough imbalance to justify moving tasks,
5588 * however we may be able to increase total CPU power used by
5589 * moving them.
5590 */
5591
3ae11c90 5592 pwr_now += busiest->group_power *
56cf515b 5593 min(busiest->load_per_task, busiest->avg_load);
3ae11c90 5594 pwr_now += local->group_power *
56cf515b 5595 min(local->load_per_task, local->avg_load);
1399fa78 5596 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
5597
5598 /* Amount of load we'd subtract */
56cf515b 5599 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5600 busiest->group_power;
56cf515b 5601 if (busiest->avg_load > tmp) {
3ae11c90 5602 pwr_move += busiest->group_power *
56cf515b
JK
5603 min(busiest->load_per_task,
5604 busiest->avg_load - tmp);
5605 }
1e3c88bd
PZ
5606
5607 /* Amount of load we'd add */
3ae11c90 5608 if (busiest->avg_load * busiest->group_power <
56cf515b 5609 busiest->load_per_task * SCHED_POWER_SCALE) {
3ae11c90
PZ
5610 tmp = (busiest->avg_load * busiest->group_power) /
5611 local->group_power;
56cf515b
JK
5612 } else {
5613 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 5614 local->group_power;
56cf515b 5615 }
3ae11c90
PZ
5616 pwr_move += local->group_power *
5617 min(local->load_per_task, local->avg_load + tmp);
1399fa78 5618 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
5619
5620 /* Move if we gain throughput */
5621 if (pwr_move > pwr_now)
56cf515b 5622 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
5623}
5624
5625/**
5626 * calculate_imbalance - Calculate the amount of imbalance present within the
5627 * groups of a given sched_domain during load balance.
bd939f45 5628 * @env: load balance environment
1e3c88bd 5629 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 5630 */
bd939f45 5631static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 5632{
dd5feea1 5633 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
5634 struct sg_lb_stats *local, *busiest;
5635
5636 local = &sds->local_stat;
56cf515b 5637 busiest = &sds->busiest_stat;
dd5feea1 5638
56cf515b 5639 if (busiest->group_imb) {
30ce5dab
PZ
5640 /*
5641 * In the group_imb case we cannot rely on group-wide averages
5642 * to ensure cpu-load equilibrium, look at wider averages. XXX
5643 */
56cf515b
JK
5644 busiest->load_per_task =
5645 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
5646 }
5647
1e3c88bd
PZ
5648 /*
5649 * In the presence of smp nice balancing, certain scenarios can have
5650 * max load less than avg load(as we skip the groups at or below
5651 * its cpu_power, while calculating max_load..)
5652 */
b1885550
VD
5653 if (busiest->avg_load <= sds->avg_load ||
5654 local->avg_load >= sds->avg_load) {
bd939f45
PZ
5655 env->imbalance = 0;
5656 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
5657 }
5658
56cf515b 5659 if (!busiest->group_imb) {
dd5feea1
SS
5660 /*
5661 * Don't want to pull so many tasks that a group would go idle.
30ce5dab
PZ
5662 * Except of course for the group_imb case, since then we might
5663 * have to drop below capacity to reach cpu-load equilibrium.
dd5feea1 5664 */
56cf515b
JK
5665 load_above_capacity =
5666 (busiest->sum_nr_running - busiest->group_capacity);
dd5feea1 5667
1399fa78 5668 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
3ae11c90 5669 load_above_capacity /= busiest->group_power;
dd5feea1
SS
5670 }
5671
5672 /*
5673 * We're trying to get all the cpus to the average_load, so we don't
5674 * want to push ourselves above the average load, nor do we wish to
5675 * reduce the max loaded cpu below the average load. At the same time,
5676 * we also don't want to reduce the group load below the group capacity
5677 * (so that we can implement power-savings policies etc). Thus we look
5678 * for the minimum possible imbalance.
dd5feea1 5679 */
30ce5dab 5680 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
5681
5682 /* How much load to actually move to equalise the imbalance */
56cf515b 5683 env->imbalance = min(
3ae11c90
PZ
5684 max_pull * busiest->group_power,
5685 (sds->avg_load - local->avg_load) * local->group_power
56cf515b 5686 ) / SCHED_POWER_SCALE;
1e3c88bd
PZ
5687
5688 /*
5689 * if *imbalance is less than the average load per runnable task
25985edc 5690 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
5691 * a think about bumping its value to force at least one task to be
5692 * moved
5693 */
56cf515b 5694 if (env->imbalance < busiest->load_per_task)
bd939f45 5695 return fix_small_imbalance(env, sds);
1e3c88bd 5696}
fab47622 5697
1e3c88bd
PZ
5698/******* find_busiest_group() helpers end here *********************/
5699
5700/**
5701 * find_busiest_group - Returns the busiest group within the sched_domain
5702 * if there is an imbalance. If there isn't an imbalance, and
5703 * the user has opted for power-savings, it returns a group whose
5704 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5705 * such a group exists.
5706 *
5707 * Also calculates the amount of weighted load which should be moved
5708 * to restore balance.
5709 *
cd96891d 5710 * @env: The load balancing environment.
1e3c88bd 5711 *
e69f6186 5712 * Return: - The busiest group if imbalance exists.
1e3c88bd
PZ
5713 * - If no imbalance and user has opted for power-savings balance,
5714 * return the least loaded group whose CPUs can be
5715 * put to idle by rebalancing its tasks onto our group.
5716 */
56cf515b 5717static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 5718{
56cf515b 5719 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
5720 struct sd_lb_stats sds;
5721
147c5fc2 5722 init_sd_lb_stats(&sds);
1e3c88bd
PZ
5723
5724 /*
5725 * Compute the various statistics relavent for load balancing at
5726 * this level.
5727 */
23f0d209 5728 update_sd_lb_stats(env, &sds);
56cf515b
JK
5729 local = &sds.local_stat;
5730 busiest = &sds.busiest_stat;
1e3c88bd 5731
bd939f45
PZ
5732 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5733 check_asym_packing(env, &sds))
532cb4c4
MN
5734 return sds.busiest;
5735
cc57aa8f 5736 /* There is no busy sibling group to pull tasks from */
56cf515b 5737 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
5738 goto out_balanced;
5739
1399fa78 5740 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 5741
866ab43e
PZ
5742 /*
5743 * If the busiest group is imbalanced the below checks don't
30ce5dab 5744 * work because they assume all things are equal, which typically
866ab43e
PZ
5745 * isn't true due to cpus_allowed constraints and the like.
5746 */
56cf515b 5747 if (busiest->group_imb)
866ab43e
PZ
5748 goto force_balance;
5749
cc57aa8f 5750 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
56cf515b
JK
5751 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5752 !busiest->group_has_capacity)
fab47622
NR
5753 goto force_balance;
5754
cc57aa8f
PZ
5755 /*
5756 * If the local group is more busy than the selected busiest group
5757 * don't try and pull any tasks.
5758 */
56cf515b 5759 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
5760 goto out_balanced;
5761
cc57aa8f
PZ
5762 /*
5763 * Don't pull any tasks if this group is already above the domain
5764 * average load.
5765 */
56cf515b 5766 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
5767 goto out_balanced;
5768
bd939f45 5769 if (env->idle == CPU_IDLE) {
aae6d3dd
SS
5770 /*
5771 * This cpu is idle. If the busiest group load doesn't
5772 * have more tasks than the number of available cpu's and
5773 * there is no imbalance between this and busiest group
5774 * wrt to idle cpu's, it is balanced.
5775 */
56cf515b
JK
5776 if ((local->idle_cpus < busiest->idle_cpus) &&
5777 busiest->sum_nr_running <= busiest->group_weight)
aae6d3dd 5778 goto out_balanced;
c186fafe
PZ
5779 } else {
5780 /*
5781 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5782 * imbalance_pct to be conservative.
5783 */
56cf515b
JK
5784 if (100 * busiest->avg_load <=
5785 env->sd->imbalance_pct * local->avg_load)
c186fafe 5786 goto out_balanced;
aae6d3dd 5787 }
1e3c88bd 5788
fab47622 5789force_balance:
1e3c88bd 5790 /* Looks like there is an imbalance. Compute it */
bd939f45 5791 calculate_imbalance(env, &sds);
1e3c88bd
PZ
5792 return sds.busiest;
5793
5794out_balanced:
bd939f45 5795 env->imbalance = 0;
1e3c88bd
PZ
5796 return NULL;
5797}
5798
5799/*
5800 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5801 */
bd939f45 5802static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 5803 struct sched_group *group)
1e3c88bd
PZ
5804{
5805 struct rq *busiest = NULL, *rq;
95a79b80 5806 unsigned long busiest_load = 0, busiest_power = 1;
1e3c88bd
PZ
5807 int i;
5808
6906a408 5809 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd 5810 unsigned long power = power_of(i);
1399fa78
NR
5811 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5812 SCHED_POWER_SCALE);
1e3c88bd
PZ
5813 unsigned long wl;
5814
9d5efe05 5815 if (!capacity)
bd939f45 5816 capacity = fix_small_capacity(env->sd, group);
9d5efe05 5817
1e3c88bd 5818 rq = cpu_rq(i);
6e40f5bb 5819 wl = weighted_cpuload(i);
1e3c88bd 5820
6e40f5bb
TG
5821 /*
5822 * When comparing with imbalance, use weighted_cpuload()
5823 * which is not scaled with the cpu power.
5824 */
bd939f45 5825 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
1e3c88bd
PZ
5826 continue;
5827
6e40f5bb
TG
5828 /*
5829 * For the load comparisons with the other cpu's, consider
5830 * the weighted_cpuload() scaled with the cpu power, so that
5831 * the load can be moved away from the cpu that is potentially
5832 * running at a lower capacity.
95a79b80
JK
5833 *
5834 * Thus we're looking for max(wl_i / power_i), crosswise
5835 * multiplication to rid ourselves of the division works out
5836 * to: wl_i * power_j > wl_j * power_i; where j is our
5837 * previous maximum.
6e40f5bb 5838 */
95a79b80
JK
5839 if (wl * busiest_power > busiest_load * power) {
5840 busiest_load = wl;
5841 busiest_power = power;
1e3c88bd
PZ
5842 busiest = rq;
5843 }
5844 }
5845
5846 return busiest;
5847}
5848
5849/*
5850 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5851 * so long as it is large enough.
5852 */
5853#define MAX_PINNED_INTERVAL 512
5854
5855/* Working cpumask for load_balance and load_balance_newidle. */
e6252c3e 5856DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
1e3c88bd 5857
bd939f45 5858static int need_active_balance(struct lb_env *env)
1af3ed3d 5859{
bd939f45
PZ
5860 struct sched_domain *sd = env->sd;
5861
5862 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
5863
5864 /*
5865 * ASYM_PACKING needs to force migrate tasks from busy but
5866 * higher numbered CPUs in order to pack all tasks in the
5867 * lowest numbered CPUs.
5868 */
bd939f45 5869 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 5870 return 1;
1af3ed3d
PZ
5871 }
5872
5873 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5874}
5875
969c7921
TH
5876static int active_load_balance_cpu_stop(void *data);
5877
23f0d209
JK
5878static int should_we_balance(struct lb_env *env)
5879{
5880 struct sched_group *sg = env->sd->groups;
5881 struct cpumask *sg_cpus, *sg_mask;
5882 int cpu, balance_cpu = -1;
5883
5884 /*
5885 * In the newly idle case, we will allow all the cpu's
5886 * to do the newly idle load balance.
5887 */
5888 if (env->idle == CPU_NEWLY_IDLE)
5889 return 1;
5890
5891 sg_cpus = sched_group_cpus(sg);
5892 sg_mask = sched_group_mask(sg);
5893 /* Try to find first idle cpu */
5894 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5895 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5896 continue;
5897
5898 balance_cpu = cpu;
5899 break;
5900 }
5901
5902 if (balance_cpu == -1)
5903 balance_cpu = group_balance_cpu(sg);
5904
5905 /*
5906 * First idle cpu or the first cpu(busiest) in this sched group
5907 * is eligible for doing load balancing at this and above domains.
5908 */
b0cff9d8 5909 return balance_cpu == env->dst_cpu;
23f0d209
JK
5910}
5911
1e3c88bd
PZ
5912/*
5913 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5914 * tasks if there is an imbalance.
5915 */
5916static int load_balance(int this_cpu, struct rq *this_rq,
5917 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 5918 int *continue_balancing)
1e3c88bd 5919{
88b8dac0 5920 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 5921 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 5922 struct sched_group *group;
1e3c88bd
PZ
5923 struct rq *busiest;
5924 unsigned long flags;
e6252c3e 5925 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
1e3c88bd 5926
8e45cb54
PZ
5927 struct lb_env env = {
5928 .sd = sd,
ddcdf6e7
PZ
5929 .dst_cpu = this_cpu,
5930 .dst_rq = this_rq,
88b8dac0 5931 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 5932 .idle = idle,
eb95308e 5933 .loop_break = sched_nr_migrate_break,
b9403130 5934 .cpus = cpus,
8e45cb54
PZ
5935 };
5936
cfc03118
JK
5937 /*
5938 * For NEWLY_IDLE load_balancing, we don't need to consider
5939 * other cpus in our group
5940 */
e02e60c1 5941 if (idle == CPU_NEWLY_IDLE)
cfc03118 5942 env.dst_grpmask = NULL;
cfc03118 5943
1e3c88bd
PZ
5944 cpumask_copy(cpus, cpu_active_mask);
5945
1e3c88bd
PZ
5946 schedstat_inc(sd, lb_count[idle]);
5947
5948redo:
23f0d209
JK
5949 if (!should_we_balance(&env)) {
5950 *continue_balancing = 0;
1e3c88bd 5951 goto out_balanced;
23f0d209 5952 }
1e3c88bd 5953
23f0d209 5954 group = find_busiest_group(&env);
1e3c88bd
PZ
5955 if (!group) {
5956 schedstat_inc(sd, lb_nobusyg[idle]);
5957 goto out_balanced;
5958 }
5959
b9403130 5960 busiest = find_busiest_queue(&env, group);
1e3c88bd
PZ
5961 if (!busiest) {
5962 schedstat_inc(sd, lb_nobusyq[idle]);
5963 goto out_balanced;
5964 }
5965
78feefc5 5966 BUG_ON(busiest == env.dst_rq);
1e3c88bd 5967
bd939f45 5968 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
1e3c88bd
PZ
5969
5970 ld_moved = 0;
5971 if (busiest->nr_running > 1) {
5972 /*
5973 * Attempt to move tasks. If find_busiest_group has found
5974 * an imbalance but busiest->nr_running <= 1, the group is
5975 * still unbalanced. ld_moved simply stays zero, so it is
5976 * correctly treated as an imbalance.
5977 */
8e45cb54 5978 env.flags |= LBF_ALL_PINNED;
c82513e5
PZ
5979 env.src_cpu = busiest->cpu;
5980 env.src_rq = busiest;
5981 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 5982
5d6523eb 5983more_balance:
1e3c88bd 5984 local_irq_save(flags);
78feefc5 5985 double_rq_lock(env.dst_rq, busiest);
88b8dac0
SV
5986
5987 /*
5988 * cur_ld_moved - load moved in current iteration
5989 * ld_moved - cumulative load moved across iterations
5990 */
5991 cur_ld_moved = move_tasks(&env);
5992 ld_moved += cur_ld_moved;
78feefc5 5993 double_rq_unlock(env.dst_rq, busiest);
1e3c88bd
PZ
5994 local_irq_restore(flags);
5995
5996 /*
5997 * some other cpu did the load balance for us.
5998 */
88b8dac0
SV
5999 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6000 resched_cpu(env.dst_cpu);
6001
f1cd0858
JK
6002 if (env.flags & LBF_NEED_BREAK) {
6003 env.flags &= ~LBF_NEED_BREAK;
6004 goto more_balance;
6005 }
6006
88b8dac0
SV
6007 /*
6008 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6009 * us and move them to an alternate dst_cpu in our sched_group
6010 * where they can run. The upper limit on how many times we
6011 * iterate on same src_cpu is dependent on number of cpus in our
6012 * sched_group.
6013 *
6014 * This changes load balance semantics a bit on who can move
6015 * load to a given_cpu. In addition to the given_cpu itself
6016 * (or a ilb_cpu acting on its behalf where given_cpu is
6017 * nohz-idle), we now have balance_cpu in a position to move
6018 * load to given_cpu. In rare situations, this may cause
6019 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6020 * _independently_ and at _same_ time to move some load to
6021 * given_cpu) causing exceess load to be moved to given_cpu.
6022 * This however should not happen so much in practice and
6023 * moreover subsequent load balance cycles should correct the
6024 * excess load moved.
6025 */
6263322c 6026 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 6027
7aff2e3a
VD
6028 /* Prevent to re-select dst_cpu via env's cpus */
6029 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6030
78feefc5 6031 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 6032 env.dst_cpu = env.new_dst_cpu;
6263322c 6033 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
6034 env.loop = 0;
6035 env.loop_break = sched_nr_migrate_break;
e02e60c1 6036
88b8dac0
SV
6037 /*
6038 * Go back to "more_balance" rather than "redo" since we
6039 * need to continue with same src_cpu.
6040 */
6041 goto more_balance;
6042 }
1e3c88bd 6043
6263322c
PZ
6044 /*
6045 * We failed to reach balance because of affinity.
6046 */
6047 if (sd_parent) {
6048 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6049
6050 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6051 *group_imbalance = 1;
6052 } else if (*group_imbalance)
6053 *group_imbalance = 0;
6054 }
6055
1e3c88bd 6056 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 6057 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 6058 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
6059 if (!cpumask_empty(cpus)) {
6060 env.loop = 0;
6061 env.loop_break = sched_nr_migrate_break;
1e3c88bd 6062 goto redo;
bbf18b19 6063 }
1e3c88bd
PZ
6064 goto out_balanced;
6065 }
6066 }
6067
6068 if (!ld_moved) {
6069 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
6070 /*
6071 * Increment the failure counter only on periodic balance.
6072 * We do not want newidle balance, which can be very
6073 * frequent, pollute the failure counter causing
6074 * excessive cache_hot migrations and active balances.
6075 */
6076 if (idle != CPU_NEWLY_IDLE)
6077 sd->nr_balance_failed++;
1e3c88bd 6078
bd939f45 6079 if (need_active_balance(&env)) {
1e3c88bd
PZ
6080 raw_spin_lock_irqsave(&busiest->lock, flags);
6081
969c7921
TH
6082 /* don't kick the active_load_balance_cpu_stop,
6083 * if the curr task on busiest cpu can't be
6084 * moved to this_cpu
1e3c88bd
PZ
6085 */
6086 if (!cpumask_test_cpu(this_cpu,
fa17b507 6087 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
6088 raw_spin_unlock_irqrestore(&busiest->lock,
6089 flags);
8e45cb54 6090 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
6091 goto out_one_pinned;
6092 }
6093
969c7921
TH
6094 /*
6095 * ->active_balance synchronizes accesses to
6096 * ->active_balance_work. Once set, it's cleared
6097 * only after active load balance is finished.
6098 */
1e3c88bd
PZ
6099 if (!busiest->active_balance) {
6100 busiest->active_balance = 1;
6101 busiest->push_cpu = this_cpu;
6102 active_balance = 1;
6103 }
6104 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 6105
bd939f45 6106 if (active_balance) {
969c7921
TH
6107 stop_one_cpu_nowait(cpu_of(busiest),
6108 active_load_balance_cpu_stop, busiest,
6109 &busiest->active_balance_work);
bd939f45 6110 }
1e3c88bd
PZ
6111
6112 /*
6113 * We've kicked active balancing, reset the failure
6114 * counter.
6115 */
6116 sd->nr_balance_failed = sd->cache_nice_tries+1;
6117 }
6118 } else
6119 sd->nr_balance_failed = 0;
6120
6121 if (likely(!active_balance)) {
6122 /* We were unbalanced, so reset the balancing interval */
6123 sd->balance_interval = sd->min_interval;
6124 } else {
6125 /*
6126 * If we've begun active balancing, start to back off. This
6127 * case may not be covered by the all_pinned logic if there
6128 * is only 1 task on the busy runqueue (because we don't call
6129 * move_tasks).
6130 */
6131 if (sd->balance_interval < sd->max_interval)
6132 sd->balance_interval *= 2;
6133 }
6134
1e3c88bd
PZ
6135 goto out;
6136
6137out_balanced:
6138 schedstat_inc(sd, lb_balanced[idle]);
6139
6140 sd->nr_balance_failed = 0;
6141
6142out_one_pinned:
6143 /* tune up the balancing interval */
8e45cb54 6144 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 6145 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
6146 (sd->balance_interval < sd->max_interval))
6147 sd->balance_interval *= 2;
6148
46e49b38 6149 ld_moved = 0;
1e3c88bd 6150out:
1e3c88bd
PZ
6151 return ld_moved;
6152}
6153
1e3c88bd
PZ
6154/*
6155 * idle_balance is called by schedule() if this_cpu is about to become
6156 * idle. Attempts to pull tasks from other CPUs.
6157 */
029632fb 6158void idle_balance(int this_cpu, struct rq *this_rq)
1e3c88bd
PZ
6159{
6160 struct sched_domain *sd;
6161 int pulled_task = 0;
6162 unsigned long next_balance = jiffies + HZ;
9bd721c5 6163 u64 curr_cost = 0;
1e3c88bd 6164
78becc27 6165 this_rq->idle_stamp = rq_clock(this_rq);
1e3c88bd
PZ
6166
6167 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6168 return;
6169
f492e12e
PZ
6170 /*
6171 * Drop the rq->lock, but keep IRQ/preempt disabled.
6172 */
6173 raw_spin_unlock(&this_rq->lock);
6174
48a16753 6175 update_blocked_averages(this_cpu);
dce840a0 6176 rcu_read_lock();
1e3c88bd
PZ
6177 for_each_domain(this_cpu, sd) {
6178 unsigned long interval;
23f0d209 6179 int continue_balancing = 1;
9bd721c5 6180 u64 t0, domain_cost;
1e3c88bd
PZ
6181
6182 if (!(sd->flags & SD_LOAD_BALANCE))
6183 continue;
6184
9bd721c5
JL
6185 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6186 break;
6187
f492e12e 6188 if (sd->flags & SD_BALANCE_NEWIDLE) {
9bd721c5
JL
6189 t0 = sched_clock_cpu(this_cpu);
6190
1e3c88bd 6191 /* If we've pulled tasks over stop searching: */
f492e12e 6192 pulled_task = load_balance(this_cpu, this_rq,
23f0d209
JK
6193 sd, CPU_NEWLY_IDLE,
6194 &continue_balancing);
9bd721c5
JL
6195
6196 domain_cost = sched_clock_cpu(this_cpu) - t0;
6197 if (domain_cost > sd->max_newidle_lb_cost)
6198 sd->max_newidle_lb_cost = domain_cost;
6199
6200 curr_cost += domain_cost;
f492e12e 6201 }
1e3c88bd
PZ
6202
6203 interval = msecs_to_jiffies(sd->balance_interval);
6204 if (time_after(next_balance, sd->last_balance + interval))
6205 next_balance = sd->last_balance + interval;
d5ad140b
NR
6206 if (pulled_task) {
6207 this_rq->idle_stamp = 0;
1e3c88bd 6208 break;
d5ad140b 6209 }
1e3c88bd 6210 }
dce840a0 6211 rcu_read_unlock();
f492e12e
PZ
6212
6213 raw_spin_lock(&this_rq->lock);
6214
1e3c88bd
PZ
6215 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6216 /*
6217 * We are going idle. next_balance may be set based on
6218 * a busy processor. So reset next_balance.
6219 */
6220 this_rq->next_balance = next_balance;
6221 }
9bd721c5
JL
6222
6223 if (curr_cost > this_rq->max_idle_balance_cost)
6224 this_rq->max_idle_balance_cost = curr_cost;
1e3c88bd
PZ
6225}
6226
6227/*
969c7921
TH
6228 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6229 * running tasks off the busiest CPU onto idle CPUs. It requires at
6230 * least 1 task to be running on each physical CPU where possible, and
6231 * avoids physical / logical imbalances.
1e3c88bd 6232 */
969c7921 6233static int active_load_balance_cpu_stop(void *data)
1e3c88bd 6234{
969c7921
TH
6235 struct rq *busiest_rq = data;
6236 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 6237 int target_cpu = busiest_rq->push_cpu;
969c7921 6238 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 6239 struct sched_domain *sd;
969c7921
TH
6240
6241 raw_spin_lock_irq(&busiest_rq->lock);
6242
6243 /* make sure the requested cpu hasn't gone down in the meantime */
6244 if (unlikely(busiest_cpu != smp_processor_id() ||
6245 !busiest_rq->active_balance))
6246 goto out_unlock;
1e3c88bd
PZ
6247
6248 /* Is there any task to move? */
6249 if (busiest_rq->nr_running <= 1)
969c7921 6250 goto out_unlock;
1e3c88bd
PZ
6251
6252 /*
6253 * This condition is "impossible", if it occurs
6254 * we need to fix it. Originally reported by
6255 * Bjorn Helgaas on a 128-cpu setup.
6256 */
6257 BUG_ON(busiest_rq == target_rq);
6258
6259 /* move a task from busiest_rq to target_rq */
6260 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
6261
6262 /* Search for an sd spanning us and the target CPU. */
dce840a0 6263 rcu_read_lock();
1e3c88bd
PZ
6264 for_each_domain(target_cpu, sd) {
6265 if ((sd->flags & SD_LOAD_BALANCE) &&
6266 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6267 break;
6268 }
6269
6270 if (likely(sd)) {
8e45cb54
PZ
6271 struct lb_env env = {
6272 .sd = sd,
ddcdf6e7
PZ
6273 .dst_cpu = target_cpu,
6274 .dst_rq = target_rq,
6275 .src_cpu = busiest_rq->cpu,
6276 .src_rq = busiest_rq,
8e45cb54
PZ
6277 .idle = CPU_IDLE,
6278 };
6279
1e3c88bd
PZ
6280 schedstat_inc(sd, alb_count);
6281
8e45cb54 6282 if (move_one_task(&env))
1e3c88bd
PZ
6283 schedstat_inc(sd, alb_pushed);
6284 else
6285 schedstat_inc(sd, alb_failed);
6286 }
dce840a0 6287 rcu_read_unlock();
1e3c88bd 6288 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
6289out_unlock:
6290 busiest_rq->active_balance = 0;
6291 raw_spin_unlock_irq(&busiest_rq->lock);
6292 return 0;
1e3c88bd
PZ
6293}
6294
3451d024 6295#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
6296/*
6297 * idle load balancing details
83cd4fe2
VP
6298 * - When one of the busy CPUs notice that there may be an idle rebalancing
6299 * needed, they will kick the idle load balancer, which then does idle
6300 * load balancing for all the idle CPUs.
6301 */
1e3c88bd 6302static struct {
83cd4fe2 6303 cpumask_var_t idle_cpus_mask;
0b005cf5 6304 atomic_t nr_cpus;
83cd4fe2
VP
6305 unsigned long next_balance; /* in jiffy units */
6306} nohz ____cacheline_aligned;
1e3c88bd 6307
8e7fbcbc 6308static inline int find_new_ilb(int call_cpu)
1e3c88bd 6309{
0b005cf5 6310 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 6311
786d6dc7
SS
6312 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6313 return ilb;
6314
6315 return nr_cpu_ids;
1e3c88bd 6316}
1e3c88bd 6317
83cd4fe2
VP
6318/*
6319 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6320 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
6321 * CPU (if there is one).
6322 */
6323static void nohz_balancer_kick(int cpu)
6324{
6325 int ilb_cpu;
6326
6327 nohz.next_balance++;
6328
0b005cf5 6329 ilb_cpu = find_new_ilb(cpu);
83cd4fe2 6330
0b005cf5
SS
6331 if (ilb_cpu >= nr_cpu_ids)
6332 return;
83cd4fe2 6333
cd490c5b 6334 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
6335 return;
6336 /*
6337 * Use smp_send_reschedule() instead of resched_cpu().
6338 * This way we generate a sched IPI on the target cpu which
6339 * is idle. And the softirq performing nohz idle load balance
6340 * will be run before returning from the IPI.
6341 */
6342 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
6343 return;
6344}
6345
c1cc017c 6346static inline void nohz_balance_exit_idle(int cpu)
71325960
SS
6347{
6348 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6349 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6350 atomic_dec(&nohz.nr_cpus);
6351 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6352 }
6353}
6354
69e1e811
SS
6355static inline void set_cpu_sd_state_busy(void)
6356{
6357 struct sched_domain *sd;
69e1e811 6358
69e1e811 6359 rcu_read_lock();
424c93fe 6360 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
6361
6362 if (!sd || !sd->nohz_idle)
6363 goto unlock;
6364 sd->nohz_idle = 0;
6365
6366 for (; sd; sd = sd->parent)
69e1e811 6367 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 6368unlock:
69e1e811
SS
6369 rcu_read_unlock();
6370}
6371
6372void set_cpu_sd_state_idle(void)
6373{
6374 struct sched_domain *sd;
69e1e811 6375
69e1e811 6376 rcu_read_lock();
424c93fe 6377 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
6378
6379 if (!sd || sd->nohz_idle)
6380 goto unlock;
6381 sd->nohz_idle = 1;
6382
6383 for (; sd; sd = sd->parent)
69e1e811 6384 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 6385unlock:
69e1e811
SS
6386 rcu_read_unlock();
6387}
6388
1e3c88bd 6389/*
c1cc017c 6390 * This routine will record that the cpu is going idle with tick stopped.
0b005cf5 6391 * This info will be used in performing idle load balancing in the future.
1e3c88bd 6392 */
c1cc017c 6393void nohz_balance_enter_idle(int cpu)
1e3c88bd 6394{
71325960
SS
6395 /*
6396 * If this cpu is going down, then nothing needs to be done.
6397 */
6398 if (!cpu_active(cpu))
6399 return;
6400
c1cc017c
AS
6401 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6402 return;
1e3c88bd 6403
c1cc017c
AS
6404 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6405 atomic_inc(&nohz.nr_cpus);
6406 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd 6407}
71325960 6408
0db0628d 6409static int sched_ilb_notifier(struct notifier_block *nfb,
71325960
SS
6410 unsigned long action, void *hcpu)
6411{
6412 switch (action & ~CPU_TASKS_FROZEN) {
6413 case CPU_DYING:
c1cc017c 6414 nohz_balance_exit_idle(smp_processor_id());
71325960
SS
6415 return NOTIFY_OK;
6416 default:
6417 return NOTIFY_DONE;
6418 }
6419}
1e3c88bd
PZ
6420#endif
6421
6422static DEFINE_SPINLOCK(balancing);
6423
49c022e6
PZ
6424/*
6425 * Scale the max load_balance interval with the number of CPUs in the system.
6426 * This trades load-balance latency on larger machines for less cross talk.
6427 */
029632fb 6428void update_max_interval(void)
49c022e6
PZ
6429{
6430 max_load_balance_interval = HZ*num_online_cpus()/10;
6431}
6432
1e3c88bd
PZ
6433/*
6434 * It checks each scheduling domain to see if it is due to be balanced,
6435 * and initiates a balancing operation if so.
6436 *
b9b0853a 6437 * Balancing parameters are set up in init_sched_domains.
1e3c88bd
PZ
6438 */
6439static void rebalance_domains(int cpu, enum cpu_idle_type idle)
6440{
23f0d209 6441 int continue_balancing = 1;
1e3c88bd
PZ
6442 struct rq *rq = cpu_rq(cpu);
6443 unsigned long interval;
04f733b4 6444 struct sched_domain *sd;
1e3c88bd
PZ
6445 /* Earliest time when we have to do rebalance again */
6446 unsigned long next_balance = jiffies + 60*HZ;
6447 int update_next_balance = 0;
f48627e6
JL
6448 int need_serialize, need_decay = 0;
6449 u64 max_cost = 0;
1e3c88bd 6450
48a16753 6451 update_blocked_averages(cpu);
2069dd75 6452
dce840a0 6453 rcu_read_lock();
1e3c88bd 6454 for_each_domain(cpu, sd) {
f48627e6
JL
6455 /*
6456 * Decay the newidle max times here because this is a regular
6457 * visit to all the domains. Decay ~1% per second.
6458 */
6459 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6460 sd->max_newidle_lb_cost =
6461 (sd->max_newidle_lb_cost * 253) / 256;
6462 sd->next_decay_max_lb_cost = jiffies + HZ;
6463 need_decay = 1;
6464 }
6465 max_cost += sd->max_newidle_lb_cost;
6466
1e3c88bd
PZ
6467 if (!(sd->flags & SD_LOAD_BALANCE))
6468 continue;
6469
f48627e6
JL
6470 /*
6471 * Stop the load balance at this level. There is another
6472 * CPU in our sched group which is doing load balancing more
6473 * actively.
6474 */
6475 if (!continue_balancing) {
6476 if (need_decay)
6477 continue;
6478 break;
6479 }
6480
1e3c88bd
PZ
6481 interval = sd->balance_interval;
6482 if (idle != CPU_IDLE)
6483 interval *= sd->busy_factor;
6484
6485 /* scale ms to jiffies */
6486 interval = msecs_to_jiffies(interval);
49c022e6 6487 interval = clamp(interval, 1UL, max_load_balance_interval);
1e3c88bd
PZ
6488
6489 need_serialize = sd->flags & SD_SERIALIZE;
6490
6491 if (need_serialize) {
6492 if (!spin_trylock(&balancing))
6493 goto out;
6494 }
6495
6496 if (time_after_eq(jiffies, sd->last_balance + interval)) {
23f0d209 6497 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
1e3c88bd 6498 /*
6263322c 6499 * The LBF_DST_PINNED logic could have changed
de5eb2dd
JK
6500 * env->dst_cpu, so we can't know our idle
6501 * state even if we migrated tasks. Update it.
1e3c88bd 6502 */
de5eb2dd 6503 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
1e3c88bd
PZ
6504 }
6505 sd->last_balance = jiffies;
6506 }
6507 if (need_serialize)
6508 spin_unlock(&balancing);
6509out:
6510 if (time_after(next_balance, sd->last_balance + interval)) {
6511 next_balance = sd->last_balance + interval;
6512 update_next_balance = 1;
6513 }
f48627e6
JL
6514 }
6515 if (need_decay) {
1e3c88bd 6516 /*
f48627e6
JL
6517 * Ensure the rq-wide value also decays but keep it at a
6518 * reasonable floor to avoid funnies with rq->avg_idle.
1e3c88bd 6519 */
f48627e6
JL
6520 rq->max_idle_balance_cost =
6521 max((u64)sysctl_sched_migration_cost, max_cost);
1e3c88bd 6522 }
dce840a0 6523 rcu_read_unlock();
1e3c88bd
PZ
6524
6525 /*
6526 * next_balance will be updated only when there is a need.
6527 * When the cpu is attached to null domain for ex, it will not be
6528 * updated.
6529 */
6530 if (likely(update_next_balance))
6531 rq->next_balance = next_balance;
6532}
6533
3451d024 6534#ifdef CONFIG_NO_HZ_COMMON
1e3c88bd 6535/*
3451d024 6536 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
1e3c88bd
PZ
6537 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6538 */
83cd4fe2
VP
6539static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6540{
6541 struct rq *this_rq = cpu_rq(this_cpu);
6542 struct rq *rq;
6543 int balance_cpu;
6544
1c792db7
SS
6545 if (idle != CPU_IDLE ||
6546 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6547 goto end;
83cd4fe2
VP
6548
6549 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 6550 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
6551 continue;
6552
6553 /*
6554 * If this cpu gets work to do, stop the load balancing
6555 * work being done for other cpus. Next load
6556 * balancing owner will pick it up.
6557 */
1c792db7 6558 if (need_resched())
83cd4fe2 6559 break;
83cd4fe2 6560
5ed4f1d9
VG
6561 rq = cpu_rq(balance_cpu);
6562
6563 raw_spin_lock_irq(&rq->lock);
6564 update_rq_clock(rq);
6565 update_idle_cpu_load(rq);
6566 raw_spin_unlock_irq(&rq->lock);
83cd4fe2
VP
6567
6568 rebalance_domains(balance_cpu, CPU_IDLE);
6569
83cd4fe2
VP
6570 if (time_after(this_rq->next_balance, rq->next_balance))
6571 this_rq->next_balance = rq->next_balance;
6572 }
6573 nohz.next_balance = this_rq->next_balance;
1c792db7
SS
6574end:
6575 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
6576}
6577
6578/*
0b005cf5
SS
6579 * Current heuristic for kicking the idle load balancer in the presence
6580 * of an idle cpu is the system.
6581 * - This rq has more than one task.
6582 * - At any scheduler domain level, this cpu's scheduler group has multiple
6583 * busy cpu's exceeding the group's power.
6584 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
6585 * domain span are idle.
83cd4fe2
VP
6586 */
6587static inline int nohz_kick_needed(struct rq *rq, int cpu)
6588{
6589 unsigned long now = jiffies;
0b005cf5 6590 struct sched_domain *sd;
83cd4fe2 6591
1c792db7 6592 if (unlikely(idle_cpu(cpu)))
83cd4fe2
VP
6593 return 0;
6594
1c792db7
SS
6595 /*
6596 * We may be recently in ticked or tickless idle mode. At the first
6597 * busy tick after returning from idle, we will update the busy stats.
6598 */
69e1e811 6599 set_cpu_sd_state_busy();
c1cc017c 6600 nohz_balance_exit_idle(cpu);
0b005cf5
SS
6601
6602 /*
6603 * None are in tickless mode and hence no need for NOHZ idle load
6604 * balancing.
6605 */
6606 if (likely(!atomic_read(&nohz.nr_cpus)))
6607 return 0;
1c792db7
SS
6608
6609 if (time_before(now, nohz.next_balance))
83cd4fe2
VP
6610 return 0;
6611
0b005cf5
SS
6612 if (rq->nr_running >= 2)
6613 goto need_kick;
83cd4fe2 6614
067491b7 6615 rcu_read_lock();
0b005cf5
SS
6616 for_each_domain(cpu, sd) {
6617 struct sched_group *sg = sd->groups;
6618 struct sched_group_power *sgp = sg->sgp;
6619 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
83cd4fe2 6620
0b005cf5 6621 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
067491b7 6622 goto need_kick_unlock;
0b005cf5
SS
6623
6624 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
6625 && (cpumask_first_and(nohz.idle_cpus_mask,
6626 sched_domain_span(sd)) < cpu))
067491b7 6627 goto need_kick_unlock;
0b005cf5
SS
6628
6629 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
6630 break;
83cd4fe2 6631 }
067491b7 6632 rcu_read_unlock();
83cd4fe2 6633 return 0;
067491b7
PZ
6634
6635need_kick_unlock:
6636 rcu_read_unlock();
0b005cf5
SS
6637need_kick:
6638 return 1;
83cd4fe2
VP
6639}
6640#else
6641static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6642#endif
6643
6644/*
6645 * run_rebalance_domains is triggered when needed from the scheduler tick.
6646 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
6647 */
1e3c88bd
PZ
6648static void run_rebalance_domains(struct softirq_action *h)
6649{
6650 int this_cpu = smp_processor_id();
6651 struct rq *this_rq = cpu_rq(this_cpu);
6eb57e0d 6652 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
6653 CPU_IDLE : CPU_NOT_IDLE;
6654
6655 rebalance_domains(this_cpu, idle);
6656
1e3c88bd 6657 /*
83cd4fe2 6658 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
6659 * balancing on behalf of the other idle cpus whose ticks are
6660 * stopped.
6661 */
83cd4fe2 6662 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
6663}
6664
6665static inline int on_null_domain(int cpu)
6666{
90a6501f 6667 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
6668}
6669
6670/*
6671 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 6672 */
029632fb 6673void trigger_load_balance(struct rq *rq, int cpu)
1e3c88bd 6674{
1e3c88bd
PZ
6675 /* Don't need to rebalance while attached to NULL domain */
6676 if (time_after_eq(jiffies, rq->next_balance) &&
6677 likely(!on_null_domain(cpu)))
6678 raise_softirq(SCHED_SOFTIRQ);
3451d024 6679#ifdef CONFIG_NO_HZ_COMMON
1c792db7 6680 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
83cd4fe2
VP
6681 nohz_balancer_kick(cpu);
6682#endif
1e3c88bd
PZ
6683}
6684
0bcdcf28
CE
6685static void rq_online_fair(struct rq *rq)
6686{
6687 update_sysctl();
6688}
6689
6690static void rq_offline_fair(struct rq *rq)
6691{
6692 update_sysctl();
a4c96ae3
PB
6693
6694 /* Ensure any throttled groups are reachable by pick_next_task */
6695 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
6696}
6697
55e12e5e 6698#endif /* CONFIG_SMP */
e1d1484f 6699
bf0f6f24
IM
6700/*
6701 * scheduler tick hitting a task of our scheduling class:
6702 */
8f4d37ec 6703static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
6704{
6705 struct cfs_rq *cfs_rq;
6706 struct sched_entity *se = &curr->se;
6707
6708 for_each_sched_entity(se) {
6709 cfs_rq = cfs_rq_of(se);
8f4d37ec 6710 entity_tick(cfs_rq, se, queued);
bf0f6f24 6711 }
18bf2805 6712
10e84b97 6713 if (numabalancing_enabled)
cbee9f88 6714 task_tick_numa(rq, curr);
3d59eebc 6715
18bf2805 6716 update_rq_runnable_avg(rq, 1);
bf0f6f24
IM
6717}
6718
6719/*
cd29fe6f
PZ
6720 * called on fork with the child task as argument from the parent's context
6721 * - child not yet on the tasklist
6722 * - preemption disabled
bf0f6f24 6723 */
cd29fe6f 6724static void task_fork_fair(struct task_struct *p)
bf0f6f24 6725{
4fc420c9
DN
6726 struct cfs_rq *cfs_rq;
6727 struct sched_entity *se = &p->se, *curr;
00bf7bfc 6728 int this_cpu = smp_processor_id();
cd29fe6f
PZ
6729 struct rq *rq = this_rq();
6730 unsigned long flags;
6731
05fa785c 6732 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 6733
861d034e
PZ
6734 update_rq_clock(rq);
6735
4fc420c9
DN
6736 cfs_rq = task_cfs_rq(current);
6737 curr = cfs_rq->curr;
6738
6c9a27f5
DN
6739 /*
6740 * Not only the cpu but also the task_group of the parent might have
6741 * been changed after parent->se.parent,cfs_rq were copied to
6742 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
6743 * of child point to valid ones.
6744 */
6745 rcu_read_lock();
6746 __set_task_cpu(p, this_cpu);
6747 rcu_read_unlock();
bf0f6f24 6748
7109c442 6749 update_curr(cfs_rq);
cd29fe6f 6750
b5d9d734
MG
6751 if (curr)
6752 se->vruntime = curr->vruntime;
aeb73b04 6753 place_entity(cfs_rq, se, 1);
4d78e7b6 6754
cd29fe6f 6755 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 6756 /*
edcb60a3
IM
6757 * Upon rescheduling, sched_class::put_prev_task() will place
6758 * 'current' within the tree based on its new key value.
6759 */
4d78e7b6 6760 swap(curr->vruntime, se->vruntime);
aec0a514 6761 resched_task(rq->curr);
4d78e7b6 6762 }
bf0f6f24 6763
88ec22d3
PZ
6764 se->vruntime -= cfs_rq->min_vruntime;
6765
05fa785c 6766 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
6767}
6768
cb469845
SR
6769/*
6770 * Priority of the task has changed. Check to see if we preempt
6771 * the current task.
6772 */
da7a735e
PZ
6773static void
6774prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 6775{
da7a735e
PZ
6776 if (!p->se.on_rq)
6777 return;
6778
cb469845
SR
6779 /*
6780 * Reschedule if we are currently running on this runqueue and
6781 * our priority decreased, or if we are not currently running on
6782 * this runqueue and our priority is higher than the current's
6783 */
da7a735e 6784 if (rq->curr == p) {
cb469845
SR
6785 if (p->prio > oldprio)
6786 resched_task(rq->curr);
6787 } else
15afe09b 6788 check_preempt_curr(rq, p, 0);
cb469845
SR
6789}
6790
da7a735e
PZ
6791static void switched_from_fair(struct rq *rq, struct task_struct *p)
6792{
6793 struct sched_entity *se = &p->se;
6794 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6795
6796 /*
6797 * Ensure the task's vruntime is normalized, so that when its
6798 * switched back to the fair class the enqueue_entity(.flags=0) will
6799 * do the right thing.
6800 *
6801 * If it was on_rq, then the dequeue_entity(.flags=0) will already
6802 * have normalized the vruntime, if it was !on_rq, then only when
6803 * the task is sleeping will it still have non-normalized vruntime.
6804 */
6805 if (!se->on_rq && p->state != TASK_RUNNING) {
6806 /*
6807 * Fix up our vruntime so that the current sleep doesn't
6808 * cause 'unlimited' sleep bonus.
6809 */
6810 place_entity(cfs_rq, se, 0);
6811 se->vruntime -= cfs_rq->min_vruntime;
6812 }
9ee474f5 6813
141965c7 6814#ifdef CONFIG_SMP
9ee474f5
PT
6815 /*
6816 * Remove our load from contribution when we leave sched_fair
6817 * and ensure we don't carry in an old decay_count if we
6818 * switch back.
6819 */
87e3c8ae
KT
6820 if (se->avg.decay_count) {
6821 __synchronize_entity_decay(se);
6822 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
9ee474f5
PT
6823 }
6824#endif
da7a735e
PZ
6825}
6826
cb469845
SR
6827/*
6828 * We switched to the sched_fair class.
6829 */
da7a735e 6830static void switched_to_fair(struct rq *rq, struct task_struct *p)
cb469845 6831{
da7a735e
PZ
6832 if (!p->se.on_rq)
6833 return;
6834
cb469845
SR
6835 /*
6836 * We were most likely switched from sched_rt, so
6837 * kick off the schedule if running, otherwise just see
6838 * if we can still preempt the current task.
6839 */
da7a735e 6840 if (rq->curr == p)
cb469845
SR
6841 resched_task(rq->curr);
6842 else
15afe09b 6843 check_preempt_curr(rq, p, 0);
cb469845
SR
6844}
6845
83b699ed
SV
6846/* Account for a task changing its policy or group.
6847 *
6848 * This routine is mostly called to set cfs_rq->curr field when a task
6849 * migrates between groups/classes.
6850 */
6851static void set_curr_task_fair(struct rq *rq)
6852{
6853 struct sched_entity *se = &rq->curr->se;
6854
ec12cb7f
PT
6855 for_each_sched_entity(se) {
6856 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6857
6858 set_next_entity(cfs_rq, se);
6859 /* ensure bandwidth has been allocated on our new cfs_rq */
6860 account_cfs_rq_runtime(cfs_rq, 0);
6861 }
83b699ed
SV
6862}
6863
029632fb
PZ
6864void init_cfs_rq(struct cfs_rq *cfs_rq)
6865{
6866 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
6867 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
6868#ifndef CONFIG_64BIT
6869 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
6870#endif
141965c7 6871#ifdef CONFIG_SMP
9ee474f5 6872 atomic64_set(&cfs_rq->decay_counter, 1);
2509940f 6873 atomic_long_set(&cfs_rq->removed_load, 0);
9ee474f5 6874#endif
029632fb
PZ
6875}
6876
810b3817 6877#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6878static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 6879{
aff3e498 6880 struct cfs_rq *cfs_rq;
b2b5ce02
PZ
6881 /*
6882 * If the task was not on the rq at the time of this cgroup movement
6883 * it must have been asleep, sleeping tasks keep their ->vruntime
6884 * absolute on their old rq until wakeup (needed for the fair sleeper
6885 * bonus in place_entity()).
6886 *
6887 * If it was on the rq, we've just 'preempted' it, which does convert
6888 * ->vruntime to a relative base.
6889 *
6890 * Make sure both cases convert their relative position when migrating
6891 * to another cgroup's rq. This does somewhat interfere with the
6892 * fair sleeper stuff for the first placement, but who cares.
6893 */
7ceff013
DN
6894 /*
6895 * When !on_rq, vruntime of the task has usually NOT been normalized.
6896 * But there are some cases where it has already been normalized:
6897 *
6898 * - Moving a forked child which is waiting for being woken up by
6899 * wake_up_new_task().
62af3783
DN
6900 * - Moving a task which has been woken up by try_to_wake_up() and
6901 * waiting for actually being woken up by sched_ttwu_pending().
7ceff013
DN
6902 *
6903 * To prevent boost or penalty in the new cfs_rq caused by delta
6904 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6905 */
62af3783 6906 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7ceff013
DN
6907 on_rq = 1;
6908
b2b5ce02
PZ
6909 if (!on_rq)
6910 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6911 set_task_rq(p, task_cpu(p));
aff3e498
PT
6912 if (!on_rq) {
6913 cfs_rq = cfs_rq_of(&p->se);
6914 p->se.vruntime += cfs_rq->min_vruntime;
6915#ifdef CONFIG_SMP
6916 /*
6917 * migrate_task_rq_fair() will have removed our previous
6918 * contribution, but we must synchronize for ongoing future
6919 * decay.
6920 */
6921 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6922 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6923#endif
6924 }
810b3817 6925}
029632fb
PZ
6926
6927void free_fair_sched_group(struct task_group *tg)
6928{
6929 int i;
6930
6931 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6932
6933 for_each_possible_cpu(i) {
6934 if (tg->cfs_rq)
6935 kfree(tg->cfs_rq[i]);
6936 if (tg->se)
6937 kfree(tg->se[i]);
6938 }
6939
6940 kfree(tg->cfs_rq);
6941 kfree(tg->se);
6942}
6943
6944int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6945{
6946 struct cfs_rq *cfs_rq;
6947 struct sched_entity *se;
6948 int i;
6949
6950 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6951 if (!tg->cfs_rq)
6952 goto err;
6953 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6954 if (!tg->se)
6955 goto err;
6956
6957 tg->shares = NICE_0_LOAD;
6958
6959 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6960
6961 for_each_possible_cpu(i) {
6962 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6963 GFP_KERNEL, cpu_to_node(i));
6964 if (!cfs_rq)
6965 goto err;
6966
6967 se = kzalloc_node(sizeof(struct sched_entity),
6968 GFP_KERNEL, cpu_to_node(i));
6969 if (!se)
6970 goto err_free_rq;
6971
6972 init_cfs_rq(cfs_rq);
6973 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6974 }
6975
6976 return 1;
6977
6978err_free_rq:
6979 kfree(cfs_rq);
6980err:
6981 return 0;
6982}
6983
6984void unregister_fair_sched_group(struct task_group *tg, int cpu)
6985{
6986 struct rq *rq = cpu_rq(cpu);
6987 unsigned long flags;
6988
6989 /*
6990 * Only empty task groups can be destroyed; so we can speculatively
6991 * check on_list without danger of it being re-added.
6992 */
6993 if (!tg->cfs_rq[cpu]->on_list)
6994 return;
6995
6996 raw_spin_lock_irqsave(&rq->lock, flags);
6997 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6998 raw_spin_unlock_irqrestore(&rq->lock, flags);
6999}
7000
7001void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7002 struct sched_entity *se, int cpu,
7003 struct sched_entity *parent)
7004{
7005 struct rq *rq = cpu_rq(cpu);
7006
7007 cfs_rq->tg = tg;
7008 cfs_rq->rq = rq;
029632fb
PZ
7009 init_cfs_rq_runtime(cfs_rq);
7010
7011 tg->cfs_rq[cpu] = cfs_rq;
7012 tg->se[cpu] = se;
7013
7014 /* se could be NULL for root_task_group */
7015 if (!se)
7016 return;
7017
7018 if (!parent)
7019 se->cfs_rq = &rq->cfs;
7020 else
7021 se->cfs_rq = parent->my_q;
7022
7023 se->my_q = cfs_rq;
7024 update_load_set(&se->load, 0);
7025 se->parent = parent;
7026}
7027
7028static DEFINE_MUTEX(shares_mutex);
7029
7030int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7031{
7032 int i;
7033 unsigned long flags;
7034
7035 /*
7036 * We can't change the weight of the root cgroup.
7037 */
7038 if (!tg->se[0])
7039 return -EINVAL;
7040
7041 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7042
7043 mutex_lock(&shares_mutex);
7044 if (tg->shares == shares)
7045 goto done;
7046
7047 tg->shares = shares;
7048 for_each_possible_cpu(i) {
7049 struct rq *rq = cpu_rq(i);
7050 struct sched_entity *se;
7051
7052 se = tg->se[i];
7053 /* Propagate contribution to hierarchy */
7054 raw_spin_lock_irqsave(&rq->lock, flags);
71b1da46
FW
7055
7056 /* Possible calls to update_curr() need rq clock */
7057 update_rq_clock(rq);
17bc14b7 7058 for_each_sched_entity(se)
029632fb
PZ
7059 update_cfs_shares(group_cfs_rq(se));
7060 raw_spin_unlock_irqrestore(&rq->lock, flags);
7061 }
7062
7063done:
7064 mutex_unlock(&shares_mutex);
7065 return 0;
7066}
7067#else /* CONFIG_FAIR_GROUP_SCHED */
7068
7069void free_fair_sched_group(struct task_group *tg) { }
7070
7071int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7072{
7073 return 1;
7074}
7075
7076void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7077
7078#endif /* CONFIG_FAIR_GROUP_SCHED */
7079
810b3817 7080
6d686f45 7081static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
7082{
7083 struct sched_entity *se = &task->se;
0d721cea
PW
7084 unsigned int rr_interval = 0;
7085
7086 /*
7087 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7088 * idle runqueue:
7089 */
0d721cea 7090 if (rq->cfs.load.weight)
a59f4e07 7091 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
7092
7093 return rr_interval;
7094}
7095
bf0f6f24
IM
7096/*
7097 * All the scheduling class methods:
7098 */
029632fb 7099const struct sched_class fair_sched_class = {
5522d5d5 7100 .next = &idle_sched_class,
bf0f6f24
IM
7101 .enqueue_task = enqueue_task_fair,
7102 .dequeue_task = dequeue_task_fair,
7103 .yield_task = yield_task_fair,
d95f4122 7104 .yield_to_task = yield_to_task_fair,
bf0f6f24 7105
2e09bf55 7106 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
7107
7108 .pick_next_task = pick_next_task_fair,
7109 .put_prev_task = put_prev_task_fair,
7110
681f3e68 7111#ifdef CONFIG_SMP
4ce72a2c 7112 .select_task_rq = select_task_rq_fair,
0a74bef8 7113 .migrate_task_rq = migrate_task_rq_fair,
141965c7 7114
0bcdcf28
CE
7115 .rq_online = rq_online_fair,
7116 .rq_offline = rq_offline_fair,
88ec22d3
PZ
7117
7118 .task_waking = task_waking_fair,
681f3e68 7119#endif
bf0f6f24 7120
83b699ed 7121 .set_curr_task = set_curr_task_fair,
bf0f6f24 7122 .task_tick = task_tick_fair,
cd29fe6f 7123 .task_fork = task_fork_fair,
cb469845
SR
7124
7125 .prio_changed = prio_changed_fair,
da7a735e 7126 .switched_from = switched_from_fair,
cb469845 7127 .switched_to = switched_to_fair,
810b3817 7128
0d721cea
PW
7129 .get_rr_interval = get_rr_interval_fair,
7130
810b3817 7131#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 7132 .task_move_group = task_move_group_fair,
810b3817 7133#endif
bf0f6f24
IM
7134};
7135
7136#ifdef CONFIG_SCHED_DEBUG
029632fb 7137void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 7138{
bf0f6f24
IM
7139 struct cfs_rq *cfs_rq;
7140
5973e5b9 7141 rcu_read_lock();
c3b64f1e 7142 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 7143 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 7144 rcu_read_unlock();
bf0f6f24
IM
7145}
7146#endif
029632fb
PZ
7147
7148__init void init_sched_fair_class(void)
7149{
7150#ifdef CONFIG_SMP
7151 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7152
3451d024 7153#ifdef CONFIG_NO_HZ_COMMON
554cecaf 7154 nohz.next_balance = jiffies;
029632fb 7155 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
71325960 7156 cpu_notifier(sched_ilb_notifier, 0);
029632fb
PZ
7157#endif
7158#endif /* SMP */
7159
7160}