/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}

static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}
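
/*
 * Illustrative sketch, not part of the original source: a standalone
 * userspace demo of the SCHED_TUNABLESCALING_LOG scaling implemented
 * above. The demo_ilog2() helper and the values are hypothetical; the
 * demo is kept under #if 0 so it never builds into the kernel. With
 * the default normalized latency of 6ms, 8 online CPUs give
 * factor = 1 + ilog2(8) = 4, i.e. an effective latency of 24ms.
 */
#if 0
#include <stdio.h>

static unsigned int demo_ilog2(unsigned int x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int normalized_latency = 6000000; /* 6ms, in ns */
        unsigned int cpus;

        for (cpus = 1; cpus <= 8; cpus <<= 1) {
                unsigned int factor = 1 + demo_ilog2(cpus);

                printf("%u cpus -> factor %u -> latency %u ns\n",
                       cpus, factor, factor * normalized_latency);
        }
        return 0;
}
#endif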

#define WMULT_CONST     (~0U)
#define WMULT_SHIFT     32

static void __update_inv_weight(struct load_weight *lw)
{
        unsigned long w;

        if (likely(lw->inv_weight))
                return;

        w = scale_load_down(lw->weight);

        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                lw->inv_weight = 1;
        else if (unlikely(!w))
                lw->inv_weight = WMULT_CONST;
        else
                lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
        u64 fact = scale_load_down(weight);
        int shift = WMULT_SHIFT;

        __update_inv_weight(lw);

        if (unlikely(fact >> 32)) {
                while (fact >> 32) {
                        fact >>= 1;
                        shift--;
                }
        }

        /* hint to use a 32x32->64 bit multiply */
        fact = (u64)(u32)fact * lw->inv_weight;

        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, fact, shift);
}
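
/*
 * Illustrative sketch, not part of the original source: a userspace
 * rendition of the reciprocal divide above, kept under #if 0. The
 * division delta * weight / lw->weight is replaced by a multiply with
 * a precomputed 32-bit reciprocal, inv_weight ~= 2^32 / lw->weight,
 * followed by a right shift. demo_calc_delta() is hypothetical and
 * assumes a compiler with __int128 (GCC/Clang); for a nice-0 entity
 * (weight 1024) on a runqueue of weight 2048 it maps a 4ms delta to
 * ~2ms (prints 1999999 due to reciprocal rounding).
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_WMULT_CONST        (~0U)
#define DEMO_WMULT_SHIFT        32

static uint64_t demo_calc_delta(uint64_t delta, uint32_t weight,
                                uint32_t lw_weight)
{
        uint32_t inv_weight = DEMO_WMULT_CONST / lw_weight;
        uint64_t fact = (uint64_t)weight * inv_weight;
        int shift = DEMO_WMULT_SHIFT;

        /* keep 'fact' within 32 bits, adjusting the shift to compensate */
        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        /* like mul_u64_u32_shr(): widen, multiply, then shift down */
        return (uint64_t)(((unsigned __int128)delta * fact) >> shift);
}

int main(void)
{
        printf("%llu\n",
               (unsigned long long)demo_calc_delta(4000000ULL, 1024, 2048));
        return 0;
}
#endif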

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
                /* We should have no load, but we need to update last_decay. */
                update_cfs_rq_blocked_load(cfs_rq, 0);
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return se->cfs_rq;

        return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * A preemption test can only be made between sibling entities that
         * sit on the same cfs_rq, i.e. that have a common parent. Walk up
         * the hierarchy of both tasks until we find ancestors that are
         * siblings under a common parent.
         */

        /* First walk up until both entities are at the same depth */
        se_depth = (*se)->depth;
        pse_depth = (*pse)->depth;

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}
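
/*
 * Illustrative sketch, not part of the original source: the signed
 * subtractions above keep the ordering correct even when the u64
 * vruntime counter wraps around, which a plain 'a < b' comparison
 * would get wrong. Standalone userspace demo, kept under #if 0:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t a = UINT64_MAX - 5;    /* just before the wrap */
        uint64_t b = a + 10;            /* wraps around to 4 */

        /* plain compare: prints 0, i.e. it thinks 'a' is not before 'b' */
        printf("plain: %d\n", a < b);
        /* wrap-safe compare as in entity_before(): prints 1, correct */
        printf("wrap-safe: %d\n", (int64_t)(a - b) < 0);
        return 0;
}
#endif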

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
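
/*
 * Illustrative sketch, not part of the original source: with the
 * defaults above (6ms latency, 0.75ms min granularity, nr_latency 8)
 * the period stays at 6ms up to 8 runnable tasks and then grows
 * linearly, e.g. 16 tasks -> 12ms. Userspace demo, kept under #if 0;
 * demo_sched_period() is hypothetical:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_sched_period(unsigned long nr_running)
{
        uint64_t period = 6000000ULL;   /* sysctl_sched_latency */
        unsigned long nr_latency = 8;   /* sched_nr_latency */

        if (nr_running > nr_latency)
                period = 750000ULL * nr_running; /* min granularity * nr */

        return period;
}

int main(void)
{
        unsigned long nr;

        for (nr = 1; nr <= 16; nr *= 2)
                printf("%lu tasks -> %llu ns\n", nr,
                       (unsigned long long)demo_sched_period(nr));
        return 0;
}
#endif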

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
        return slice;
}
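
/*
 * Illustrative sketch, not part of the original source: for a single
 * hierarchy level the slice is s = p * w / rw. Two nice-0 tasks
 * (weight 1024 each) in a 6ms period get 3ms each; if one task's
 * weight is doubled it gets 4ms and the other 2ms. Userspace demo,
 * kept under #if 0; demo_sched_slice() is hypothetical:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_sched_slice(uint64_t period, uint32_t weight,
                                 uint32_t rq_weight)
{
        return period * weight / rq_weight;     /* s = p * w / rw */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)
               demo_sched_slice(6000000ULL, 1024, 2048));  /* 3000000 */
        printf("%llu\n", (unsigned long long)
               demo_sched_slice(6000000ULL, 2048, 3072));  /* 4000000 */
        return 0;
}
#endif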

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);

static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task initial runnable-average values so it carries full load during its infancy */
void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        u64 delta_exec;

        if (unlikely(!curr))
                return;

        delta_exec = now - curr->exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        curr->exec_start = now;

        schedstat_set(curr->statistics.exec_max,
                      max(delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);

        curr->vruntime += calc_delta_fair(delta_exec, curr);
        update_min_vruntime(cfs_rq);

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

805
598f0ec0
MG
806static unsigned int task_nr_scan_windows(struct task_struct *p)
807{
808 unsigned long rss = 0;
809 unsigned long nr_scan_pages;
810
811 /*
812 * Calculations based on RSS as non-present and empty pages are skipped
813 * by the PTE scanner and NUMA hinting faults should be trapped based
814 * on resident pages
815 */
816 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
817 rss = get_mm_rss(p->mm);
818 if (!rss)
819 rss = nr_scan_pages;
820
821 rss = round_up(rss, nr_scan_pages);
822 return rss / nr_scan_pages;
823}
824
825/* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
826#define MAX_SCAN_WINDOW 2560
827
828static unsigned int task_scan_min(struct task_struct *p)
829{
830 unsigned int scan, floor;
831 unsigned int windows = 1;
832
833 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
834 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
835 floor = 1000 / windows;
836
837 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
838 return max_t(unsigned int, floor, scan);
839}
840
841static unsigned int task_scan_max(struct task_struct *p)
842{
843 unsigned int smin = task_scan_min(p);
844 unsigned int smax;
845
846 /* Watch for min being lower than max due to floor calculations */
847 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
848 return max(smin, smax);
849}
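
/*
 * Illustrative sketch, not part of the original source: with the
 * defaults above (scan_size 256MB, scan_period_min 1000ms), a task
 * with a 1GB RSS needs 4 windows to cover its address space, so
 * task_scan_min() yields 1000/4 = 250ms per window, bounded below by
 * the MAX_SCAN_WINDOW floor of 1000/(2560/256) = 100ms. Userspace
 * demo, kept under #if 0; the RSS value is hypothetical:
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned int scan_size_mb = 256;        /* sysctl default */
        unsigned int period_min_ms = 1000;      /* sysctl default */
        unsigned int rss_mb = 1024;             /* hypothetical 1GB RSS */
        unsigned int windows = (rss_mb + scan_size_mb - 1) / scan_size_mb;
        unsigned int floor_ms = 1000 / (2560 / scan_size_mb);
        unsigned int scan = period_min_ms / windows;

        printf("scan period: %u ms\n", scan > floor_ms ? scan : floor_ms);
        return 0;
}
#endif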

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running -= (p->numa_preferred_nid != -1);
        rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
        atomic_t refcount;

        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
        pid_t gid;
        struct list_head task_list;

        struct rcu_head rcu;
        nodemask_t active_nodes;
        unsigned long total_faults;
        /*
         * Faults_cpu is used to decide whether memory should move
         * towards the CPU. As a consequence, these stats are weighted
         * more by CPU use than by memory faults.
         */
        unsigned long *faults_cpu;
        unsigned long faults[0];
};

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
        return p->numa_group ? p->numa_group->gid : 0;
}

static inline int task_faults_idx(int nid, int priv)
{
        return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
        if (!p->numa_faults_memory)
                return 0;

        return p->numa_faults_memory[task_faults_idx(nid, 0)] +
                p->numa_faults_memory[task_faults_idx(nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
        if (!p->numa_group)
                return 0;

        return p->numa_group->faults[task_faults_idx(nid, 0)] +
                p->numa_group->faults[task_faults_idx(nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
        return group->faults_cpu[task_faults_idx(nid, 0)] +
                group->faults_cpu[task_faults_idx(nid, 1)];
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid)
{
        unsigned long total_faults;

        if (!p->numa_faults_memory)
                return 0;

        total_faults = p->total_numa_faults;

        if (!total_faults)
                return 0;

        return 1000 * task_faults(p, nid) / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid)
{
        if (!p->numa_group || !p->numa_group->total_faults)
                return 0;

        return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
}
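
/*
 * Illustrative sketch, not part of the original source: the fault
 * statistics are flat arrays indexed by (nid, priv) pairs, so with
 * two fault types task_faults_idx(2, 1) = 2*2 + 1 = 5. Userspace
 * demo of the indexing and the per-mille weights computed above,
 * kept under #if 0; the fault counts are hypothetical:
 */
#if 0
#include <stdio.h>

#define DEMO_FAULT_TYPES 2      /* stands in for NR_NUMA_HINT_FAULT_TYPES */

static int demo_faults_idx(int nid, int priv)
{
        return DEMO_FAULT_TYPES * nid + priv;
}

int main(void)
{
        /* private/shared fault counts for node 0 and node 1 */
        unsigned long faults[] = { 30, 10, 50, 10 };
        unsigned long total = 100;
        int nid;

        for (nid = 0; nid < 2; nid++) {
                unsigned long f = faults[demo_faults_idx(nid, 0)] +
                                  faults[demo_faults_idx(nid, 1)];

                printf("node %d weight: %lu/1000\n", nid, 1000 * f / total);
        }
        return 0;
}
#endif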

bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
                                int src_nid, int dst_cpu)
{
        struct numa_group *ng = p->numa_group;
        int dst_nid = cpu_to_node(dst_cpu);
        int last_cpupid, this_cpupid;

        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

        /*
         * Multi-stage node selection is used in conjunction with a periodic
         * migration fault to build a temporal task<->page relation. By using
         * a two-stage filter we remove short/unlikely relations.
         *
         * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
         * a task's usage of a particular page (n_p) per total usage of this
         * page (n_t) (in a given time-span) to a probability.
         *
         * Our periodic faults will sample this probability and getting the
         * same result twice in a row, given these samples are fully
         * independent, is then given by P(n)^2, provided our sample period
         * is sufficiently short compared to the usage pattern.
         *
         * This quadratic squishes small probabilities, making it less likely
         * we act on an unlikely task<->page relation.
         */
        last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
                        cpupid_to_nid(last_cpupid) != dst_nid)
                return false;

        /* Always allow migrate on private faults */
        if (cpupid_match_pid(p, last_cpupid))
                return true;

        /* A shared fault, but p->numa_group has not been set up yet. */
        if (!ng)
                return true;

        /*
         * Do not migrate if the destination is not a node that
         * is actively used by this numa group.
         */
        if (!node_isset(dst_nid, ng->active_nodes))
                return false;

        /*
         * Source is a node that is not actively used by this
         * numa group, while the destination is. Migrate.
         */
        if (!node_isset(src_nid, ng->active_nodes))
                return true;

        /*
         * Both source and destination are nodes in active
         * use by this numa group. Maximize memory bandwidth
         * by migrating from more heavily used groups, to less
         * heavily used ones, spreading the load around.
         * Use a 1/4 hysteresis to avoid spurious page movement.
         */
        return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
}
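
/*
 * Illustrative sketch, not part of the original source: the two-stage
 * filter only acts when two consecutive fault samples point at the
 * same node, which for a node touched with probability P happens with
 * probability ~P^2, suppressing weak task<->page relations. Userspace
 * demo of the squashing, kept under #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
        double p;

        for (p = 0.1; p < 1.0; p += 0.2)
                printf("P = %.1f -> P^2 = %.2f\n", p, p * p);
        return 0;
}
#endif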

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
        unsigned long nr_running;
        unsigned long load;

        /* Total compute capacity of CPUs on a node */
        unsigned long compute_capacity;

        /* Approximate capacity in terms of runnable tasks on a node */
        unsigned long task_capacity;
        int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
        int smt, cpu, cpus = 0;
        unsigned long capacity;

        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
                struct rq *rq = cpu_rq(cpu);

                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(cpu);
                ns->compute_capacity += capacity_of(cpu);

                cpus++;
        }

        /*
         * If we raced with hotplug and there are no CPUs left in our mask
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
         * We'll either bail at !has_free_capacity, or we'll detect a huge
         * imbalance and bail there.
         */
        if (!cpus)
                return;

        /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
        smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
        capacity = cpus / smt; /* cores */

        ns->task_capacity = min_t(unsigned, capacity,
                DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
        ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}

struct task_numa_env {
        struct task_struct *p;

        int src_cpu, src_nid;
        int dst_cpu, dst_nid;

        struct numa_stats src_stats, dst_stats;

        int imbalance_pct;

        struct task_struct *best_task;
        long best_imp;
        int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
{
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
                get_task_struct(p);

        env->best_task = p;
        env->best_imp = imp;
        env->best_cpu = env->dst_cpu;
}

static bool load_too_imbalanced(long src_load, long dst_load,
                                struct task_numa_env *env)
{
        long imb, old_imb;
        long orig_src_load, orig_dst_load;
        long src_capacity, dst_capacity;

        /*
         * The load is corrected for the CPU capacity available on each node.
         *
         *      src_load        dst_load
         *      ------------ vs ---------
         *      src_capacity    dst_capacity
         */
        src_capacity = env->src_stats.compute_capacity;
        dst_capacity = env->dst_stats.compute_capacity;

        /* We care about the slope of the imbalance, not the direction. */
        if (dst_load < src_load)
                swap(dst_load, src_load);

        /* Is the difference below the threshold? */
        imb = dst_load * src_capacity * 100 -
              src_load * dst_capacity * env->imbalance_pct;
        if (imb <= 0)
                return false;

        /*
         * The imbalance is above the allowed threshold.
         * Compare it with the old imbalance.
         */
        orig_src_load = env->src_stats.load;
        orig_dst_load = env->dst_stats.load;

        if (orig_dst_load < orig_src_load)
                swap(orig_dst_load, orig_src_load);

        old_imb = orig_dst_load * src_capacity * 100 -
                  orig_src_load * dst_capacity * env->imbalance_pct;

        /* Would this change make things worse? */
        return (imb > old_imb);
}
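
/*
 * Illustrative sketch, not part of the original source: the threshold
 * part of the check above cross-multiplies dst_load/dst_capacity
 * against src_load/src_capacity with an imbalance_pct margin (the
 * comparison against the pre-existing imbalance is omitted here).
 * Userspace demo, kept under #if 0; demo_too_imbalanced() is
 * hypothetical:
 */
#if 0
#include <stdio.h>

static int demo_too_imbalanced(long src_load, long dst_load,
                               long src_cap, long dst_cap, long pct)
{
        if (dst_load < src_load) {
                long tmp = dst_load;

                dst_load = src_load;
                src_load = tmp;
        }

        return dst_load * src_cap * 100 - src_load * dst_cap * pct > 0;
}

int main(void)
{
        /* 10% apart: within the 12% margin -> prints 0 (balanced enough) */
        printf("%d\n", demo_too_imbalanced(1000, 1100, 1024, 1024, 112));
        /* 20% apart: beyond the margin -> prints 1 (too imbalanced) */
        printf("%d\n", demo_too_imbalanced(1000, 1200, 1024, 1024, 112));
        return 0;
}
#endif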

/*
 * This checks whether the overall compute and NUMA accesses of the system
 * would be improved if the source task were migrated to the target dst_cpu,
 * taking into account that the task running on dst_cpu might best be
 * exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
                              long taskimp, long groupimp)
{
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
        long src_load, dst_load;
        long load;
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;

        rcu_read_lock();
        cur = ACCESS_ONCE(dst_rq->curr);
        if (cur->pid == 0) /* idle */
                cur = NULL;

        /*
         * "imp" is the fault differential for the source task between the
         * source and destination node. Calculate the total differential for
         * the source task and potential destination task. The more negative
         * the value is, the more remote accesses would be expected to
         * be incurred if the tasks were swapped.
         */
        if (cur) {
                /* Skip this swap candidate if it cannot move to the source cpu */
                if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
                        goto unlock;

                /*
                 * If dst and source tasks are in the same NUMA group, or not
                 * in any group, then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
                        imp = taskimp + task_weight(cur, env->src_nid) -
                              task_weight(cur, env->dst_nid);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
                         */
                        if (cur->numa_group)
                                imp -= imp/16;
                } else {
                        /*
                         * Compare the group weights. If a task is all by
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid) -
                                       group_weight(cur, env->dst_nid);
                        else
                                imp += task_weight(cur, env->src_nid) -
                                       task_weight(cur, env->dst_nid);
                }
        }

        if (imp <= env->best_imp && moveimp <= env->best_imp)
                goto unlock;

        if (!cur) {
                /* Is there capacity at our destination? */
                if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
                    !env->dst_stats.has_free_capacity)
                        goto unlock;

                goto balance;
        }

        /* Balance doesn't matter much if we're running a task per cpu */
        if (imp > env->best_imp && src_rq->nr_running == 1 &&
            dst_rq->nr_running == 1)
                goto assign;

        /*
         * In the overloaded case, try and keep the load balanced.
         */
balance:
        load = task_h_load(env->p);
        dst_load = env->dst_stats.load + load;
        src_load = env->src_stats.load - load;

        if (moveimp > imp && moveimp > env->best_imp) {
                /*
                 * If the improvement from just moving env->p is better than
                 * swapping tasks around, check if a move is possible.
                 * Store a slightly smaller score than moveimp, so an
                 * actually idle CPU will win.
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
                        cur = NULL;
                        goto assign;
                }
        }

        if (imp <= env->best_imp)
                goto unlock;

        if (cur) {
                load = task_h_load(cur);
                dst_load -= load;
                src_load += load;
        }

        if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;

        /*
         * One idle CPU per node is evaluated for a task numa move.
         * Call select_idle_sibling to maybe find a better one.
         */
        if (!cur)
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);

assign:
        task_numa_assign(env, cur, imp);
unlock:
        rcu_read_unlock();
}

static void task_numa_find_cpu(struct task_numa_env *env,
                                long taskimp, long groupimp)
{
        int cpu;

        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                /* Skip this CPU if the source task cannot migrate */
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
                        continue;

                env->dst_cpu = cpu;
                task_numa_compare(env, taskimp, groupimp);
        }
}

static int task_numa_migrate(struct task_struct *p)
{
        struct task_numa_env env = {
                .p = p,

                .src_cpu = task_cpu(p),
                .src_nid = task_node(p),

                .imbalance_pct = 112,

                .best_task = NULL,
                .best_imp = 0,
                .best_cpu = -1
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
        int nid, ret;
        long taskimp, groupimp;

        /*
         * Pick the lowest SD_NUMA domain, as that would have the smallest
         * imbalance and would be the first to start moving tasks about.
         *
         * And we want to avoid any moving of tasks about, as that would create
         * random movement of tasks -- counter the numa conditions we're trying
         * to satisfy here.
         */
        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
        if (sd)
                env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
        rcu_read_unlock();

        /*
         * Cpusets can break the scheduler domain tree into smaller
         * balance domains, some of which do not cross NUMA boundaries.
         * Tasks that are "trapped" in such domains cannot be migrated
         * elsewhere, so there is no point in (re)trying.
         */
        if (unlikely(!sd)) {
                p->numa_preferred_nid = task_node(p);
                return -EINVAL;
        }

        taskweight = task_weight(p, env.src_nid);
        groupweight = group_weight(p, env.src_nid);
        update_numa_stats(&env.src_stats, env.src_nid);
        env.dst_nid = p->numa_preferred_nid;
        taskimp = task_weight(p, env.dst_nid) - taskweight;
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);

        /* Try to find a spot on the preferred nid. */
        task_numa_find_cpu(&env, taskimp, groupimp);

        /* No space available on the preferred nid. Look elsewhere. */
        if (env.best_cpu == -1) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;

                        /* Only consider nodes where both the task and the group benefit */
                        taskimp = task_weight(p, nid) - taskweight;
                        groupimp = group_weight(p, nid) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;

                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        task_numa_find_cpu(&env, taskimp, groupimp);
                }
        }

        /*
         * If the task is part of a workload that spans multiple NUMA nodes,
         * and is migrating into one of the workload's active nodes, remember
         * this node as the task's preferred numa node, so the workload can
         * settle down.
         * A task that migrated to a second choice node will be better off
         * trying for a better one later. Do not set the preferred node here.
         */
        if (p->numa_group) {
                if (env.best_cpu == -1)
                        nid = env.src_nid;
                else
                        nid = env.dst_nid;

                if (node_isset(nid, p->numa_group->active_nodes))
                        sched_setnuma(p, env.dst_nid);
        }

        /* No better CPU than the current one was found. */
        if (env.best_cpu == -1)
                return -EAGAIN;

        /*
         * Reset the scan period if the task is being rescheduled on an
         * alternative node to recheck if the task is now properly placed.
         */
        p->numa_scan_period = task_scan_min(p);

        if (env.best_task == NULL) {
                ret = migrate_task_to(p, env.best_cpu);
                if (ret != 0)
                        trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }

        ret = migrate_swap(p, env.best_task);
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
        put_task_struct(env.best_task);
        return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
        unsigned long interval = HZ;

        /* This task has no NUMA fault statistics yet */
        if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
                return;

        /* Periodically retry migrating the task to the preferred node */
        interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
        p->numa_migrate_retry = jiffies + interval;

        /* Success if task is already running on preferred CPU */
        if (task_node(p) == p->numa_preferred_nid)
                return;

        /* Otherwise, try migrate to a CPU on the preferred node */
        task_numa_migrate(p);
}

/*
 * Find the nodes on which the workload is actively running. We do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 *
 * The bitmask is used to make smarter decisions on when to do NUMA page
 * migrations. To prevent flip-flopping, and excessive page migrations, nodes
 * are added when they cause over 6/16 of the maximum number of faults, but
 * only removed when they drop below 3/16.
 */
static void update_numa_active_node_mask(struct numa_group *numa_group)
{
        unsigned long faults, max_faults = 0;
        int nid;

        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
                if (faults > max_faults)
                        max_faults = faults;
        }

        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
                if (!node_isset(nid, numa_group->active_nodes)) {
                        if (faults > max_faults * 6 / 16)
                                node_set(nid, numa_group->active_nodes);
                } else if (faults < max_faults * 3 / 16)
                        node_clear(nid, numa_group->active_nodes);
        }
}
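
/*
 * Illustrative sketch, not part of the original source: the 6/16 and
 * 3/16 thresholds form a hysteresis band. With max_faults = 1600 a
 * node must exceed 600 faults to join the active mask but only drops
 * out below 300; anything in between keeps its current state.
 * Userspace demo, kept under #if 0; the fault counts are hypothetical:
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long max_faults = 1600;
        unsigned long faults[] = { 700, 450, 100 };
        int active[] = { 0, 1, 1 };     /* current active_nodes state */
        int i;

        for (i = 0; i < 3; i++) {
                if (!active[i] && faults[i] > max_faults * 6 / 16)
                        active[i] = 1;  /* joins the active mask */
                else if (active[i] && faults[i] < max_faults * 3 / 16)
                        active[i] = 0;  /* drops out */
                printf("node %d: %d\n", i, active[i]);
        }
        return 0;
}
#endif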

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If the local/(local+remote) ratio
 * is below NUMA_PERIOD_THRESHOLD (where the range of the ratio is
 * 1..NUMA_PERIOD_SLOTS) the scan period will decrease. Aim for 70% local
 * accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
                        unsigned long shared, unsigned long private)
{
        unsigned int period_slot;
        int ratio;
        int diff;

        unsigned long remote = p->numa_faults_locality[0];
        unsigned long local = p->numa_faults_locality[1];

        /*
         * If there were no record hinting faults then either the task is
         * completely idle or all activity is in areas that are not of interest
         * to automatic numa balancing. Scan slower.
         */
        if (local + shared == 0) {
                p->numa_scan_period = min(p->numa_scan_period_max,
                        p->numa_scan_period << 1);

                p->mm->numa_next_scan = jiffies +
                        msecs_to_jiffies(p->numa_scan_period);

                return;
        }

        /*
         * Prepare to scale scan period relative to the current period.
         *       == NUMA_PERIOD_THRESHOLD scan period stays the same
         *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
         *       >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
         */
        period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
        ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
        if (ratio >= NUMA_PERIOD_THRESHOLD) {
                int slot = ratio - NUMA_PERIOD_THRESHOLD;
                if (!slot)
                        slot = 1;
                diff = slot * period_slot;
        } else {
                diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;

                /*
                 * Scale scan rate increases based on sharing. There is an
                 * inverse relationship between the degree of sharing and
                 * the adjustment made to the scanning period. Broadly
                 * speaking the intent is that there is little point
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly.
                 */
                ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }

        p->numa_scan_period = clamp(p->numa_scan_period + diff,
                        task_scan_min(p), task_scan_max(p));
        memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
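
/*
 * Illustrative sketch, not part of the original source: with
 * NUMA_PERIOD_SLOTS = 10 and NUMA_PERIOD_THRESHOLD = 7, a 1000ms scan
 * period and 90% local faults give ratio = 9, so the period grows by
 * (9 - 7) * 100ms = +200ms; 40% local gives ratio = 4 and -300ms
 * (before the sharing-based scaling, omitted here). Userspace demo,
 * kept under #if 0; the fault ratios are hypothetical:
 */
#if 0
#include <stdio.h>

int main(void)
{
        int slots = 10, threshold = 7, period = 1000;
        int locals[] = { 90, 40 };      /* percent of local faults */
        int i;

        for (i = 0; i < 2; i++) {
                int ratio = locals[i] * slots / 100;
                int period_slot = (period + slots - 1) / slots;
                int diff;

                if (ratio >= threshold) {
                        int slot = ratio - threshold;

                        if (!slot)
                                slot = 1;
                        diff = slot * period_slot;
                } else {
                        diff = -(threshold - ratio) * period_slot;
                }

                printf("%d%% local -> diff %+d ms\n", locals[i], diff);
        }
        return 0;
}
#endif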

/*
 * Get the fraction of time the task has been running since the last
 * NUMA placement cycle. The scheduler keeps similar statistics, but
 * decays those on a 32ms period, which is orders of magnitude off
 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
 * stats only if the task is so new there are no NUMA statistics yet.
 */
static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
{
        u64 runtime, delta, now;
        /* Use the start of this time slice to avoid calculations. */
        now = p->se.exec_start;
        runtime = p->se.sum_exec_runtime;

        if (p->last_task_numa_placement) {
                delta = runtime - p->last_sum_exec_runtime;
                *period = now - p->last_task_numa_placement;
        } else {
                delta = p->se.avg.runnable_avg_sum;
                *period = p->se.avg.runnable_avg_period;
        }

        p->last_sum_exec_runtime = runtime;
        p->last_task_numa_placement = now;

        return delta;
}

static void task_numa_placement(struct task_struct *p)
{
        int seq, nid, max_nid = -1, max_group_nid = -1;
        unsigned long max_faults = 0, max_group_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
        unsigned long total_faults;
        u64 runtime, period;
        spinlock_t *group_lock = NULL;

        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;
        p->numa_scan_period_max = task_scan_max(p);

        total_faults = p->numa_faults_locality[0] +
                       p->numa_faults_locality[1];
        runtime = numa_get_avg_runtime(p, &period);

        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
                spin_lock_irq(group_lock);
        }

        /* Find the node with the highest number of faults */
        for_each_online_node(nid) {
                unsigned long faults = 0, group_faults = 0;
                int priv, i;

                for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
                        long diff, f_diff, f_weight;

                        i = task_faults_idx(nid, priv);

                        /* Decay existing window, copy faults since last scan */
                        diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
                        fault_types[priv] += p->numa_faults_buffer_memory[i];
                        p->numa_faults_buffer_memory[i] = 0;

                        /*
                         * Normalize the faults_from, so all tasks in a group
                         * count according to CPU use, instead of by the raw
                         * number of faults. Tasks with little runtime have
                         * little over-all impact on throughput, and thus their
                         * faults are less important.
                         */
                        f_weight = div64_u64(runtime << 16, period + 1);
                        f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
                                   (total_faults + 1);
                        f_diff = f_weight - p->numa_faults_cpu[i] / 2;
                        p->numa_faults_buffer_cpu[i] = 0;

                        p->numa_faults_memory[i] += diff;
                        p->numa_faults_cpu[i] += f_diff;
                        faults += p->numa_faults_memory[i];
                        p->total_numa_faults += diff;
                        if (p->numa_group) {
                                /* safe because we can only change our own group */
                                p->numa_group->faults[i] += diff;
                                p->numa_group->faults_cpu[i] += f_diff;
                                p->numa_group->total_faults += diff;
                                group_faults += p->numa_group->faults[i];
                        }
                }

                if (faults > max_faults) {
                        max_faults = faults;
                        max_nid = nid;
                }

                if (group_faults > max_group_faults) {
                        max_group_faults = group_faults;
                        max_group_nid = nid;
                }
        }

        update_task_scan_period(p, fault_types[0], fault_types[1]);

        if (p->numa_group) {
                update_numa_active_node_mask(p->numa_group);
                spin_unlock_irq(group_lock);
                max_nid = max_group_nid;
        }

        if (max_faults) {
                /* Set the new preferred node */
                if (max_nid != p->numa_preferred_nid)
                        sched_setnuma(p, max_nid);

                if (task_node(p) != p->numa_preferred_nid)
                        numa_migrate_preferred(p);
        }
}

static inline int get_numa_group(struct numa_group *grp)
{
        return atomic_inc_not_zero(&grp->refcount);
}

static inline void put_numa_group(struct numa_group *grp)
{
        if (atomic_dec_and_test(&grp->refcount))
                kfree_rcu(grp, rcu);
}
1664
3e6a9418
MG
1665static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1666 int *priv)
8c8a743c
PZ
1667{
1668 struct numa_group *grp, *my_grp;
1669 struct task_struct *tsk;
1670 bool join = false;
1671 int cpu = cpupid_to_cpu(cpupid);
1672 int i;
1673
1674 if (unlikely(!p->numa_group)) {
1675 unsigned int size = sizeof(struct numa_group) +
50ec8a40 1676 4*nr_node_ids*sizeof(unsigned long);
8c8a743c
PZ
1677
1678 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1679 if (!grp)
1680 return;
1681
1682 atomic_set(&grp->refcount, 1);
1683 spin_lock_init(&grp->lock);
1684 INIT_LIST_HEAD(&grp->task_list);
e29cf08b 1685 grp->gid = p->pid;
50ec8a40 1686 /* Second half of the array tracks nids where faults happen */
be1e4e76
RR
1687 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1688 nr_node_ids;
8c8a743c 1689
20e07dea
RR
1690 node_set(task_node(current), grp->active_nodes);
1691
be1e4e76 1692 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
ff1df896 1693 grp->faults[i] = p->numa_faults_memory[i];
8c8a743c 1694
989348b5 1695 grp->total_faults = p->total_numa_faults;
83e1d2cd 1696
8c8a743c
PZ
1697 list_add(&p->numa_entry, &grp->task_list);
1698 grp->nr_tasks++;
1699 rcu_assign_pointer(p->numa_group, grp);
1700 }
1701
1702 rcu_read_lock();
1703 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1704
1705 if (!cpupid_match_pid(tsk, cpupid))
3354781a 1706 goto no_join;
8c8a743c
PZ
1707
1708 grp = rcu_dereference(tsk->numa_group);
1709 if (!grp)
3354781a 1710 goto no_join;
8c8a743c
PZ
1711
1712 my_grp = p->numa_group;
1713 if (grp == my_grp)
3354781a 1714 goto no_join;
8c8a743c
PZ
1715
1716 /*
1717 * Only join the other group if its bigger; if we're the bigger group,
1718 * the other task will join us.
1719 */
1720 if (my_grp->nr_tasks > grp->nr_tasks)
3354781a 1721 goto no_join;
8c8a743c
PZ
1722
1723 /*
1724 * Tie-break on the grp address.
1725 */
1726 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3354781a 1727 goto no_join;
8c8a743c 1728
dabe1d99
RR
1729 /* Always join threads in the same process. */
1730 if (tsk->mm == current->mm)
1731 join = true;
1732
1733 /* Simple filter to avoid false positives due to PID collisions */
1734 if (flags & TNF_SHARED)
1735 join = true;
8c8a743c 1736
3e6a9418
MG
1737 /* Update priv based on whether false sharing was detected */
1738 *priv = !join;
1739
dabe1d99 1740 if (join && !get_numa_group(grp))
3354781a 1741 goto no_join;
8c8a743c 1742
8c8a743c
PZ
1743 rcu_read_unlock();
1744
1745 if (!join)
1746 return;
1747
60e69eed
MG
1748 BUG_ON(irqs_disabled());
1749 double_lock_irq(&my_grp->lock, &grp->lock);
989348b5 1750
be1e4e76 1751 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
ff1df896
RR
1752 my_grp->faults[i] -= p->numa_faults_memory[i];
1753 grp->faults[i] += p->numa_faults_memory[i];
8c8a743c 1754 }
989348b5
MG
1755 my_grp->total_faults -= p->total_numa_faults;
1756 grp->total_faults += p->total_numa_faults;
8c8a743c
PZ
1757
1758 list_move(&p->numa_entry, &grp->task_list);
1759 my_grp->nr_tasks--;
1760 grp->nr_tasks++;
1761
1762 spin_unlock(&my_grp->lock);
60e69eed 1763 spin_unlock_irq(&grp->lock);
8c8a743c
PZ
1764
1765 rcu_assign_pointer(p->numa_group, grp);
1766
1767 put_numa_group(my_grp);
3354781a
PZ
1768 return;
1769
1770no_join:
1771 rcu_read_unlock();
1772 return;
8c8a743c
PZ
1773}
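
The join rules above reduce to a small predicate: yield to the bigger group, break size ties on the group address so that exactly one side yields, and only group same-process threads or genuinely shared faults. A minimal userspace sketch, using a stand-in struct rather than the kernel's struct numa_group:

#include <stdbool.h>
#include <stdio.h>

struct toy_group { int nr_tasks; };	/* stand-in, not the kernel type */

static bool should_join(struct toy_group *mine, struct toy_group *other,
			bool same_mm, bool shared_fault)
{
	/* If we are the bigger group, the other task joins us instead. */
	if (mine->nr_tasks > other->nr_tasks)
		return false;
	/* Equal size: tie-break on address so only one side yields. */
	if (mine->nr_tasks == other->nr_tasks && mine > other)
		return false;
	/* Same process always groups; otherwise require a shared fault. */
	return same_mm || shared_fault;
}

int main(void)
{
	struct toy_group a = { 4 }, b = { 7 };

	printf("%d\n", should_join(&a, &b, true, false)); /* 1: b is bigger */
	return 0;
}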
1774
1775void task_numa_free(struct task_struct *p)
1776{
1777 struct numa_group *grp = p->numa_group;
ff1df896 1778 void *numa_faults = p->numa_faults_memory;
e9dd685c
SR
1779 unsigned long flags;
1780 int i;
8c8a743c
PZ
1781
1782 if (grp) {
e9dd685c 1783 spin_lock_irqsave(&grp->lock, flags);
be1e4e76 1784 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
ff1df896 1785 grp->faults[i] -= p->numa_faults_memory[i];
989348b5 1786 grp->total_faults -= p->total_numa_faults;
83e1d2cd 1787
8c8a743c
PZ
1788 list_del(&p->numa_entry);
1789 grp->nr_tasks--;
e9dd685c 1790 spin_unlock_irqrestore(&grp->lock, flags);
35b123e2 1791 RCU_INIT_POINTER(p->numa_group, NULL);
8c8a743c
PZ
1792 put_numa_group(grp);
1793 }
1794
ff1df896
RR
1795 p->numa_faults_memory = NULL;
1796 p->numa_faults_buffer_memory = NULL;
50ec8a40
RR
1797 p->numa_faults_cpu = NULL;
1798 p->numa_faults_buffer_cpu = NULL;
82727018 1799 kfree(numa_faults);
8c8a743c
PZ
1800}
1801
cbee9f88
PZ
1802/*
1803 * Got a PROT_NONE fault for a page on @node.
1804 */
58b46da3 1805void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
cbee9f88
PZ
1806{
1807 struct task_struct *p = current;
6688cc05 1808 bool migrated = flags & TNF_MIGRATED;
58b46da3 1809 int cpu_node = task_node(current);
792568ec 1810 int local = !!(flags & TNF_FAULT_LOCAL);
ac8e895b 1811 int priv;
cbee9f88 1812
10e84b97 1813 if (!numabalancing_enabled)
1a687c2e
MG
1814 return;
1815
9ff1d9ff
MG
1816 /* for example, ksmd faulting in a user's mm */
1817 if (!p->mm)
1818 return;
1819
82727018
RR
1820 /* Do not worry about placement if exiting */
1821 if (p->state == TASK_DEAD)
1822 return;
1823
f809ca9a 1824 /* Allocate buffer to track faults on a per-node basis */
ff1df896 1825 if (unlikely(!p->numa_faults_memory)) {
be1e4e76
RR
1826 int size = sizeof(*p->numa_faults_memory) *
1827 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
f809ca9a 1828
be1e4e76 1829 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
ff1df896 1830 if (!p->numa_faults_memory)
f809ca9a 1831 return;
745d6147 1832
ff1df896 1833 BUG_ON(p->numa_faults_buffer_memory);
be1e4e76
RR
1834 /*
1835 * The averaged statistics, shared & private, memory & cpu,
1836 * occupy the first half of the array. The second half of the
1837 * array is for current counters, which are averaged into the
1838 * first set by task_numa_placement.
1839 */
50ec8a40
RR
1840 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
1841 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
1842 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
83e1d2cd 1843 p->total_numa_faults = 0;
04bb2f94 1844 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
f809ca9a 1845 }
cbee9f88 1846
8c8a743c
PZ
1847 /*
1848 * First accesses are treated as private; otherwise consider accesses
1849 * to be private if the accessing pid has not changed
1850 */
1851 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1852 priv = 1;
1853 } else {
1854 priv = cpupid_match_pid(p, last_cpupid);
6688cc05 1855 if (!priv && !(flags & TNF_NO_GROUP))
3e6a9418 1856 task_numa_group(p, last_cpupid, flags, &priv);
8c8a743c
PZ
1857 }
1858
792568ec
RR
1859 /*
1860 * If a workload spans multiple NUMA nodes, a shared fault that
1861 * occurs wholly within the set of nodes that the workload is
1862 * actively using should be counted as local. This allows the
1863 * scan rate to slow down when a workload has settled down.
1864 */
1865 if (!priv && !local && p->numa_group &&
1866 node_isset(cpu_node, p->numa_group->active_nodes) &&
1867 node_isset(mem_node, p->numa_group->active_nodes))
1868 local = 1;
1869
cbee9f88 1870 task_numa_placement(p);
f809ca9a 1871
2739d3ee
RR
1872 /*
1873 * Retry to migrate the task to its preferred node periodically, in
1874 * case it previously failed, or the scheduler moved us.
1875 */
1876 if (time_after(jiffies, p->numa_migrate_retry))
6b9a7460
MG
1877 numa_migrate_preferred(p);
1878
b32e86b4
IM
1879 if (migrated)
1880 p->numa_pages_migrated += pages;
1881
58b46da3
RR
1882 p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
1883 p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
792568ec 1884 p->numa_faults_locality[local] += pages;
cbee9f88
PZ
1885}
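
For orientation, the pointer setup in task_numa_fault() above carves one flat allocation into four equal regions of two counters (private/shared) per node. A userspace sketch of that layout; faults_idx() is a guess at what task_faults_idx() computes, not copied from the kernel:

#include <stdio.h>

#define NR_TYPES 2	/* private, shared */

static int faults_idx(int nid, int priv)
{
	return NR_TYPES * nid + priv;	/* assumed indexing scheme */
}

int main(void)
{
	int nr_node_ids = 4;

	/* One allocation, four regions of NR_TYPES * nr_node_ids counters: */
	printf("memory, averaged: [0..%d)\n", 2 * nr_node_ids);
	printf("cpu, averaged: [%d..%d)\n", 2 * nr_node_ids, 4 * nr_node_ids);
	printf("memory, buffer: [%d..%d)\n", 4 * nr_node_ids, 6 * nr_node_ids);
	printf("cpu, buffer: [%d..%d)\n", 6 * nr_node_ids, 8 * nr_node_ids);
	printf("idx(nid=2, priv=1) = %d\n", faults_idx(2, 1)); /* 5 */
	return 0;
}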
1886
6e5fb223
PZ
1887static void reset_ptenuma_scan(struct task_struct *p)
1888{
1889 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1890 p->mm->numa_scan_offset = 0;
1891}
1892
cbee9f88
PZ
1893/*
1894 * The expensive part of numa migration is done from task_work context.
1895 * Triggered from task_tick_numa().
1896 */
1897void task_numa_work(struct callback_head *work)
1898{
1899 unsigned long migrate, next_scan, now = jiffies;
1900 struct task_struct *p = current;
1901 struct mm_struct *mm = p->mm;
6e5fb223 1902 struct vm_area_struct *vma;
9f40604c 1903 unsigned long start, end;
598f0ec0 1904 unsigned long nr_pte_updates = 0;
9f40604c 1905 long pages;
cbee9f88
PZ
1906
1907 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1908
1909 work->next = work; /* protect against double add */
1910 /*
1911 * Who cares about NUMA placement when they're dying.
1912 *
1913 * NOTE: make sure not to dereference p->mm before this check,
1914 * exit_task_work() happens _after_ exit_mm() so we could be called
1915 * without p->mm even though we still had it when we enqueued this
1916 * work.
1917 */
1918 if (p->flags & PF_EXITING)
1919 return;
1920
930aa174 1921 if (!mm->numa_next_scan) {
7e8d16b6
MG
1922 mm->numa_next_scan = now +
1923 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
b8593bfd
MG
1924 }
1925
cbee9f88
PZ
1926 /*
1927 * Enforce maximal scan/migration frequency..
1928 */
1929 migrate = mm->numa_next_scan;
1930 if (time_before(now, migrate))
1931 return;
1932
598f0ec0
MG
1933 if (p->numa_scan_period == 0) {
1934 p->numa_scan_period_max = task_scan_max(p);
1935 p->numa_scan_period = task_scan_min(p);
1936 }
cbee9f88 1937
fb003b80 1938 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
1939 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1940 return;
1941
19a78d11
PZ
1942 /*
1943 * Delay this task enough that another task of this mm will likely win
1944 * the next time around.
1945 */
1946 p->node_stamp += 2 * TICK_NSEC;
1947
9f40604c
MG
1948 start = mm->numa_scan_offset;
1949 pages = sysctl_numa_balancing_scan_size;
1950 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1951 if (!pages)
1952 return;
cbee9f88 1953
6e5fb223 1954 down_read(&mm->mmap_sem);
9f40604c 1955 vma = find_vma(mm, start);
6e5fb223
PZ
1956 if (!vma) {
1957 reset_ptenuma_scan(p);
9f40604c 1958 start = 0;
6e5fb223
PZ
1959 vma = mm->mmap;
1960 }
9f40604c 1961 for (; vma; vma = vma->vm_next) {
fc314724 1962 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
6e5fb223
PZ
1963 continue;
1964
4591ce4f
MG
1965 /*
1966 * Shared library pages mapped by multiple processes are not
1967 * migrated as it is expected they are cache replicated. Avoid
1968 * hinting faults in read-only file-backed mappings or the vdso
1969 * as migrating the pages will be of marginal benefit.
1970 */
1971 if (!vma->vm_mm ||
1972 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1973 continue;
1974
3c67f474
MG
1975 /*
1976 * Skip inaccessible VMAs to avoid any confusion between
1977 * PROT_NONE and NUMA hinting ptes
1978 */
1979 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1980 continue;
4591ce4f 1981
9f40604c
MG
1982 do {
1983 start = max(start, vma->vm_start);
1984 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1985 end = min(end, vma->vm_end);
598f0ec0
MG
1986 nr_pte_updates += change_prot_numa(vma, start, end);
1987
1988 /*
1989 * Scan sysctl_numa_balancing_scan_size but ensure that
1990 * at least one PTE is updated so that unused virtual
1991 * address space is quickly skipped.
1992 */
1993 if (nr_pte_updates)
1994 pages -= (end - start) >> PAGE_SHIFT;
6e5fb223 1995
9f40604c
MG
1996 start = end;
1997 if (pages <= 0)
1998 goto out;
3cf1962c
RR
1999
2000 cond_resched();
9f40604c 2001 } while (end != vma->vm_end);
cbee9f88 2002 }
6e5fb223 2003
9f40604c 2004out:
6e5fb223 2005 /*
c69307d5
PZ
2006 * It is possible to reach the end of the VMA list but the last few
2007 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2008 * would find the !migratable VMA on the next scan but not reset the
2009 * scanner to the start so check it now.
6e5fb223
PZ
2010 */
2011 if (vma)
9f40604c 2012 mm->numa_scan_offset = start;
6e5fb223
PZ
2013 else
2014 reset_ptenuma_scan(p);
2015 up_read(&mm->mmap_sem);
cbee9f88
PZ
2016}
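
The scan-size setup in task_numa_work() is plain shift arithmetic: the sysctl is in megabytes, and shifting by 20 - PAGE_SHIFT converts megabytes to pages. A standalone check with illustrative values (256 MB, 4 KiB pages):

#include <stdio.h>

int main(void)
{
	unsigned long scan_size_mb = 256;	/* sysctl_numa_balancing_scan_size */
	unsigned int page_shift = 12;		/* 4 KiB pages */
	unsigned long pages = scan_size_mb << (20 - page_shift);

	printf("%lu MB => %lu pages\n", scan_size_mb, pages);	/* 65536 */
	return 0;
}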
2017
2018/*
2019 * Drive the periodic memory faults..
2020 */
2021void task_tick_numa(struct rq *rq, struct task_struct *curr)
2022{
2023 struct callback_head *work = &curr->numa_work;
2024 u64 period, now;
2025
2026 /*
2027 * We don't care about NUMA placement if we don't have memory.
2028 */
2029 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2030 return;
2031
2032 /*
2033 * Using runtime rather than walltime has the dual advantage that
2034 * we (mostly) drive the selection from busy threads and that the
2035 * task needs to have done some actual work before we bother with
2036 * NUMA placement.
2037 */
2038 now = curr->se.sum_exec_runtime;
2039 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2040
2041 if (now - curr->node_stamp > period) {
4b96a29b 2042 if (!curr->node_stamp)
598f0ec0 2043 curr->numa_scan_period = task_scan_min(curr);
19a78d11 2044 curr->node_stamp += period;
cbee9f88
PZ
2045
2046 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2047 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2048 task_work_add(curr, work, true);
2049 }
2050 }
2051}
2052#else
2053static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2054{
2055}
0ec8aa00
PZ
2056
2057static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2058{
2059}
2060
2061static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2062{
2063}
cbee9f88
PZ
2064#endif /* CONFIG_NUMA_BALANCING */
2065
30cfdcfc
DA
2066static void
2067account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2068{
2069 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 2070 if (!parent_entity(se))
029632fb 2071 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 2072#ifdef CONFIG_SMP
0ec8aa00
PZ
2073 if (entity_is_task(se)) {
2074 struct rq *rq = rq_of(cfs_rq);
2075
2076 account_numa_enqueue(rq, task_of(se));
2077 list_add(&se->group_node, &rq->cfs_tasks);
2078 }
367456c7 2079#endif
30cfdcfc 2080 cfs_rq->nr_running++;
30cfdcfc
DA
2081}
2082
2083static void
2084account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2085{
2086 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 2087 if (!parent_entity(se))
029632fb 2088 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
0ec8aa00
PZ
2089 if (entity_is_task(se)) {
2090 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
b87f1724 2091 list_del_init(&se->group_node);
0ec8aa00 2092 }
30cfdcfc 2093 cfs_rq->nr_running--;
30cfdcfc
DA
2094}
2095
3ff6dcac
YZ
2096#ifdef CONFIG_FAIR_GROUP_SCHED
2097# ifdef CONFIG_SMP
cf5f0acf
PZ
2098static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2099{
2100 long tg_weight;
2101
2102 /*
2103 * Use this CPU's actual weight instead of the last load_contribution
2104 * to gain a more accurate current total weight. See
2105 * update_cfs_rq_load_contribution().
2106 */
bf5b986e 2107 tg_weight = atomic_long_read(&tg->load_avg);
82958366 2108 tg_weight -= cfs_rq->tg_load_contrib;
cf5f0acf
PZ
2109 tg_weight += cfs_rq->load.weight;
2110
2111 return tg_weight;
2112}
2113
6d5ab293 2114static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac 2115{
cf5f0acf 2116 long tg_weight, load, shares;
3ff6dcac 2117
cf5f0acf 2118 tg_weight = calc_tg_weight(tg, cfs_rq);
6d5ab293 2119 load = cfs_rq->load.weight;
3ff6dcac 2120
3ff6dcac 2121 shares = (tg->shares * load);
cf5f0acf
PZ
2122 if (tg_weight)
2123 shares /= tg_weight;
3ff6dcac
YZ
2124
2125 if (shares < MIN_SHARES)
2126 shares = MIN_SHARES;
2127 if (shares > tg->shares)
2128 shares = tg->shares;
2129
2130 return shares;
2131}
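
calc_cfs_shares() is a straight ratio with clamping: this cpu gets a slice of tg->shares proportional to its share of the group's total weight. The same computation standalone, with example numbers; the MIN_SHARES value here is illustrative:

#include <stdio.h>

#define MIN_SHARES 2	/* illustrative floor */

static long shares_for(long tg_shares, long cfs_load, long tg_weight)
{
	long shares = tg_shares * cfs_load;

	if (tg_weight)
		shares /= tg_weight;
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* A 1024-share group whose weight splits 3:1 across two cpus: */
	printf("%ld\n", shares_for(1024, 768, 1024));	/* 768 */
	printf("%ld\n", shares_for(1024, 256, 1024));	/* 256 */
	return 0;
}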
3ff6dcac 2132# else /* CONFIG_SMP */
6d5ab293 2133static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
2134{
2135 return tg->shares;
2136}
3ff6dcac 2137# endif /* CONFIG_SMP */
2069dd75
PZ
2138static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2139 unsigned long weight)
2140{
19e5eebb
PT
2141 if (se->on_rq) {
2142 /* commit outstanding execution time */
2143 if (cfs_rq->curr == se)
2144 update_curr(cfs_rq);
2069dd75 2145 account_entity_dequeue(cfs_rq, se);
19e5eebb 2146 }
2069dd75
PZ
2147
2148 update_load_set(&se->load, weight);
2149
2150 if (se->on_rq)
2151 account_entity_enqueue(cfs_rq, se);
2152}
2153
82958366
PT
2154static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2155
6d5ab293 2156static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
2157{
2158 struct task_group *tg;
2159 struct sched_entity *se;
3ff6dcac 2160 long shares;
2069dd75 2161
2069dd75
PZ
2162 tg = cfs_rq->tg;
2163 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 2164 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 2165 return;
3ff6dcac
YZ
2166#ifndef CONFIG_SMP
2167 if (likely(se->load.weight == tg->shares))
2168 return;
2169#endif
6d5ab293 2170 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
2171
2172 reweight_entity(cfs_rq_of(se), se, shares);
2173}
2174#else /* CONFIG_FAIR_GROUP_SCHED */
6d5ab293 2175static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
2176{
2177}
2178#endif /* CONFIG_FAIR_GROUP_SCHED */
2179
141965c7 2180#ifdef CONFIG_SMP
5b51f2f8
PT
2181/*
2182 * We choose a half-life close to 1 scheduling period.
2183 * Note: The tables below are dependent on this value.
2184 */
2185#define LOAD_AVG_PERIOD 32
2186#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2187#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
2188
2189/* Precomputed fixed inverse multiplies for multiplication by y^n */
2190static const u32 runnable_avg_yN_inv[] = {
2191 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2192 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2193 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2194 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2195 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2196 0x85aac367, 0x82cd8698,
2197};
2198
2199/*
2200 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2201 * over-estimates when re-combining.
2202 */
2203static const u32 runnable_avg_yN_sum[] = {
2204 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2205 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2206 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2207};
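
Both tables follow from y = 0.5^(1/32): runnable_avg_yN_inv[n] is floor(2^32 * y^n) clamped to a u32, and runnable_avg_yN_sum[n] accumulates floor(1024 * y^k) for k = 1..n. A sketch of how they could be regenerated offline (compile with -lm); the odd entry may differ by one due to floating-point rounding:

#include <stdio.h>
#include <math.h>
#include <stdint.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);
	uint32_t sum = 0;

	for (int n = 0; n < 32; n++) {
		uint64_t inv = (uint64_t)(4294967296.0 * pow(y, n));

		if (inv > 0xffffffffULL)	/* clamp inv[0] into a u32 */
			inv = 0xffffffffULL;
		printf("inv[%2d] = 0x%08x\n", n, (unsigned int)inv);
	}
	for (int n = 1; n <= 32; n++) {
		sum += (uint32_t)(1024.0 * pow(y, n));	/* floor per period */
		printf("sum[%2d] = %u\n", n, sum);
	}
	return 0;
}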
2208
9d85f21c
PT
2209/*
2210 * Approximate:
2211 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2212 */
2213static __always_inline u64 decay_load(u64 val, u64 n)
2214{
5b51f2f8
PT
2215 unsigned int local_n;
2216
2217 if (!n)
2218 return val;
2219 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2220 return 0;
2221
2222 /* after bounds checking we can collapse to 32-bit */
2223 local_n = n;
2224
2225 /*
2226 * As y^PERIOD = 1/2, we can combine
2227 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
2228 * With a look-up table which covers k^n (n<PERIOD)
2229 *
2230 * To achieve constant time decay_load.
2231 */
2232 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2233 val >>= local_n / LOAD_AVG_PERIOD;
2234 local_n %= LOAD_AVG_PERIOD;
9d85f21c
PT
2235 }
2236
5b51f2f8
PT
2237 val *= runnable_avg_yN_inv[local_n];
2238 /* We don't use SRR here since we always want to round down. */
2239 return val >> 32;
2240}
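
The constant-time split above factors y^n into whole half-lives (one shift) plus a single fixed-point multiply for the remainder. A userspace mirror that recomputes the inverse factor with pow() instead of the table (compile with -lm):

#include <stdio.h>
#include <math.h>
#include <stdint.h>

static uint64_t decay(uint64_t val, unsigned int n)
{
	uint64_t inv;

	val >>= n / 32;			/* y^32 = 1/2: shift per full period */
	n %= 32;
	inv = (uint64_t)(4294967296.0 * pow(0.5, n / 32.0));
	if (inv > 0xffffffffULL)	/* n == 0: multiply by ~1 */
		inv = 0xffffffffULL;
	return (val * inv) >> 32;	/* fixed-point multiply by y^n */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)decay(1024, 32)); /* 511: one half-life */
	printf("%llu\n", (unsigned long long)decay(1024, 16)); /* 724: ~1024/sqrt(2) */
	return 0;
}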
2241
2242/*
2243 * For updates fully spanning n periods, the contribution to runnable
2244 * average will be: \Sum 1024*y^n
2245 *
2246 * We can compute this reasonably efficiently by combining:
2247 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2248 */
2249static u32 __compute_runnable_contrib(u64 n)
2250{
2251 u32 contrib = 0;
2252
2253 if (likely(n <= LOAD_AVG_PERIOD))
2254 return runnable_avg_yN_sum[n];
2255 else if (unlikely(n >= LOAD_AVG_MAX_N))
2256 return LOAD_AVG_MAX;
2257
2258 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
2259 do {
2260 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2261 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2262
2263 n -= LOAD_AVG_PERIOD;
2264 } while (n > LOAD_AVG_PERIOD);
2265
2266 contrib = decay_load(contrib, n);
2267 return contrib + runnable_avg_yN_sum[n];
9d85f21c
PT
2268}
2269
2270/*
2271 * We can represent the historical contribution to runnable average as the
2272 * coefficients of a geometric series. To do this we sub-divide our runnable
2273 * history into segments of approximately 1ms (1024us); label the segment that
2274 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2275 *
2276 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2277 * p0 p1 p2
2278 * (now) (~1ms ago) (~2ms ago)
2279 *
2280 * Let u_i denote the fraction of p_i that the entity was runnable.
2281 *
2282 * We then designate the fractions u_i as our co-efficients, yielding the
2283 * following representation of historical load:
2284 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2285 *
2286 * We choose y based on the width of a reasonable scheduling period, fixing:
2287 * y^32 = 0.5
2288 *
2289 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2290 * approximately half as much as the contribution to load within the last ms
2291 * (u_0).
2292 *
2293 * When a period "rolls over" and we have new u_0`, multiplying the previous
2294 * sum again by y is sufficient to update:
2295 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2296 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2297 */
2298static __always_inline int __update_entity_runnable_avg(u64 now,
2299 struct sched_avg *sa,
2300 int runnable)
2301{
5b51f2f8
PT
2302 u64 delta, periods;
2303 u32 runnable_contrib;
9d85f21c
PT
2304 int delta_w, decayed = 0;
2305
2306 delta = now - sa->last_runnable_update;
2307 /*
2308 * This should only happen when time goes backwards, which it
2309 * unfortunately does during sched clock init when we swap over to TSC.
2310 */
2311 if ((s64)delta < 0) {
2312 sa->last_runnable_update = now;
2313 return 0;
2314 }
2315
2316 /*
2317 * Use 1024ns as the unit of measurement since it's a reasonable
2318 * approximation of 1us and fast to compute.
2319 */
2320 delta >>= 10;
2321 if (!delta)
2322 return 0;
2323 sa->last_runnable_update = now;
2324
2325 /* delta_w is the amount already accumulated against our next period */
2326 delta_w = sa->runnable_avg_period % 1024;
2327 if (delta + delta_w >= 1024) {
2328 /* period roll-over */
2329 decayed = 1;
2330
2331 /*
2332 * Now that we know we're crossing a period boundary, figure
2333 * out how much from delta we need to complete the current
2334 * period and accrue it.
2335 */
2336 delta_w = 1024 - delta_w;
5b51f2f8
PT
2337 if (runnable)
2338 sa->runnable_avg_sum += delta_w;
2339 sa->runnable_avg_period += delta_w;
2340
2341 delta -= delta_w;
2342
2343 /* Figure out how many additional periods this update spans */
2344 periods = delta / 1024;
2345 delta %= 1024;
2346
2347 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2348 periods + 1);
2349 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2350 periods + 1);
2351
2352 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2353 runnable_contrib = __compute_runnable_contrib(periods);
2354 if (runnable)
2355 sa->runnable_avg_sum += runnable_contrib;
2356 sa->runnable_avg_period += runnable_contrib;
9d85f21c
PT
2357 }
2358
2359 /* Remainder of delta accrued against u_0` */
2360 if (runnable)
2361 sa->runnable_avg_sum += delta;
2362 sa->runnable_avg_period += delta;
2363
2364 return decayed;
2365}
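
A quick numeric walk through the rollover bookkeeping above, with made-up numbers: 200 units already accrued in the open 1024-unit period, and 2500 new units to account. Only the segment arithmetic is shown; the real function also decays both sums and adds the precomputed contribution:

#include <stdio.h>

int main(void)
{
	unsigned int delta = 2500;	/* new time, in 1024ns units */
	unsigned int delta_w = 200;	/* already accrued in the open period */
	unsigned int periods;

	delta_w = 1024 - delta_w;	/* close the open period: 824 */
	delta -= delta_w;		/* 1676 left */
	periods = delta / 1024;		/* 1 full period */
	delta %= 1024;			/* 652 spills into the new period */

	printf("close=%u full=%u remainder=%u\n", delta_w, periods, delta);
	/* both sums are then decayed by periods + 1 = 2, exactly as above */
	return 0;
}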
2366
9ee474f5 2367/* Synchronize an entity's decay with its parenting cfs_rq. */
aff3e498 2368static inline u64 __synchronize_entity_decay(struct sched_entity *se)
9ee474f5
PT
2369{
2370 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2371 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2372
2373 decays -= se->avg.decay_count;
2374 if (!decays)
aff3e498 2375 return 0;
9ee474f5
PT
2376
2377 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2378 se->avg.decay_count = 0;
aff3e498
PT
2379
2380 return decays;
9ee474f5
PT
2381}
2382
c566e8e9
PT
2383#ifdef CONFIG_FAIR_GROUP_SCHED
2384static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2385 int force_update)
2386{
2387 struct task_group *tg = cfs_rq->tg;
bf5b986e 2388 long tg_contrib;
c566e8e9
PT
2389
2390 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2391 tg_contrib -= cfs_rq->tg_load_contrib;
2392
8236d907
JL
2393 if (!tg_contrib)
2394 return;
2395
bf5b986e
AS
2396 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2397 atomic_long_add(tg_contrib, &tg->load_avg);
c566e8e9
PT
2398 cfs_rq->tg_load_contrib += tg_contrib;
2399 }
2400}
8165e145 2401
bb17f655
PT
2402/*
2403 * Aggregate cfs_rq runnable averages into an equivalent task_group
2404 * representation for computing load contributions.
2405 */
2406static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2407 struct cfs_rq *cfs_rq)
2408{
2409 struct task_group *tg = cfs_rq->tg;
2410 long contrib;
2411
2412 /* The fraction of a cpu used by this cfs_rq */
85b088e9 2413 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
bb17f655
PT
2414 sa->runnable_avg_period + 1);
2415 contrib -= cfs_rq->tg_runnable_contrib;
2416
2417 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2418 atomic_add(contrib, &tg->runnable_avg);
2419 cfs_rq->tg_runnable_contrib += contrib;
2420 }
2421}
2422
8165e145
PT
2423static inline void __update_group_entity_contrib(struct sched_entity *se)
2424{
2425 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2426 struct task_group *tg = cfs_rq->tg;
bb17f655
PT
2427 int runnable_avg;
2428
8165e145
PT
2429 u64 contrib;
2430
2431 contrib = cfs_rq->tg_load_contrib * tg->shares;
bf5b986e
AS
2432 se->avg.load_avg_contrib = div_u64(contrib,
2433 atomic_long_read(&tg->load_avg) + 1);
bb17f655
PT
2434
2435 /*
2436 * For group entities we need to compute a correction term in the case
2437 * that they are consuming <1 cpu so that we would contribute the same
2438 * load as a task of equal weight.
2439 *
2440 * Explicitly co-ordinating this measurement would be expensive, but
2441 * fortunately the sum of each cpus contribution forms a usable
2442 * lower-bound on the true value.
2443 *
2444 * Consider the aggregate of 2 contributions. Either they are disjoint
2445 * (and the sum represents the true value) or they overlap and we are
2446 * understating by the aggregate of their overlap.
2447 *
2448 * Extending this to N cpus, for a given overlap, the maximum amount we
2449 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2450 * cpus that overlap for this interval and w_i is the interval width.
2451 *
2452 * On a small machine, the first term is well-bounded, which bounds the
2453 * total error since w_i is a subset of the period. Whereas on a
2454 * larger machine, while this first term can be larger, a w_i of
2455 * consequential size is guaranteed to see n_i*w_i quickly converge to
2456 * our upper bound of 1-cpu.
2457 */
2458 runnable_avg = atomic_read(&tg->runnable_avg);
2459 if (runnable_avg < NICE_0_LOAD) {
2460 se->avg.load_avg_contrib *= runnable_avg;
2461 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2462 }
8165e145 2463}
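
The correction term at the end is a plain proportional scaling once the group is using less than one cpu of runnable time. Standalone, assuming the common NICE_0_SHIFT of 10 (so NICE_0_LOAD == 1024):

#include <stdio.h>

#define NICE_0_SHIFT 10
#define NICE_0_LOAD (1UL << NICE_0_SHIFT)

int main(void)
{
	unsigned long contrib = 800;		/* uncorrected group contribution */
	unsigned long runnable_avg = 512;	/* group uses ~half a cpu */

	if (runnable_avg < NICE_0_LOAD) {
		contrib *= runnable_avg;
		contrib >>= NICE_0_SHIFT;
	}
	printf("%lu\n", contrib);		/* 400 */
	return 0;
}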
f5f9739d
DE
2464
2465static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2466{
2467 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2468 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2469}
6e83125c 2470#else /* CONFIG_FAIR_GROUP_SCHED */
c566e8e9
PT
2471static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2472 int force_update) {}
bb17f655
PT
2473static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2474 struct cfs_rq *cfs_rq) {}
8165e145 2475static inline void __update_group_entity_contrib(struct sched_entity *se) {}
f5f9739d 2476static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
6e83125c 2477#endif /* CONFIG_FAIR_GROUP_SCHED */
c566e8e9 2478
8165e145
PT
2479static inline void __update_task_entity_contrib(struct sched_entity *se)
2480{
2481 u32 contrib;
2482
2483 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2484 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2485 contrib /= (se->avg.runnable_avg_period + 1);
2486 se->avg.load_avg_contrib = scale_load(contrib);
2487}
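
The same ratio with example numbers: a nice-0 task (weight 1024) that has been runnable for half of its tracked history contributes roughly half its weight:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t avg_sum = 23871;	/* ~half of LOAD_AVG_MAX (47742) */
	uint32_t avg_period = 47742;
	uint32_t weight = 1024;		/* nice-0 */
	uint64_t contrib = (uint64_t)avg_sum * weight / (avg_period + 1);

	printf("%llu\n", (unsigned long long)contrib);	/* 511 */
	return 0;
}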
2488
2dac754e
PT
2489/* Compute the current contribution to load_avg by se, return any delta */
2490static long __update_entity_load_avg_contrib(struct sched_entity *se)
2491{
2492 long old_contrib = se->avg.load_avg_contrib;
2493
8165e145
PT
2494 if (entity_is_task(se)) {
2495 __update_task_entity_contrib(se);
2496 } else {
bb17f655 2497 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
8165e145
PT
2498 __update_group_entity_contrib(se);
2499 }
2dac754e
PT
2500
2501 return se->avg.load_avg_contrib - old_contrib;
2502}
2503
9ee474f5
PT
2504static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2505 long load_contrib)
2506{
2507 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2508 cfs_rq->blocked_load_avg -= load_contrib;
2509 else
2510 cfs_rq->blocked_load_avg = 0;
2511}
2512
f1b17280
PT
2513static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2514
9d85f21c 2515/* Update a sched_entity's runnable average */
9ee474f5
PT
2516static inline void update_entity_load_avg(struct sched_entity *se,
2517 int update_cfs_rq)
9d85f21c 2518{
2dac754e
PT
2519 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2520 long contrib_delta;
f1b17280 2521 u64 now;
2dac754e 2522
f1b17280
PT
2523 /*
2524 * For a group entity we need to use their owned cfs_rq_clock_task() in
2525 * case they are the parent of a throttled hierarchy.
2526 */
2527 if (entity_is_task(se))
2528 now = cfs_rq_clock_task(cfs_rq);
2529 else
2530 now = cfs_rq_clock_task(group_cfs_rq(se));
2531
2532 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2dac754e
PT
2533 return;
2534
2535 contrib_delta = __update_entity_load_avg_contrib(se);
9ee474f5
PT
2536
2537 if (!update_cfs_rq)
2538 return;
2539
2dac754e
PT
2540 if (se->on_rq)
2541 cfs_rq->runnable_load_avg += contrib_delta;
9ee474f5
PT
2542 else
2543 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2544}
2545
2546/*
2547 * Decay the load contributed by all blocked children and account this so that
2548 * their contribution may be appropriately discounted when they wake up.
2549 */
aff3e498 2550static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
9ee474f5 2551{
f1b17280 2552 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
9ee474f5
PT
2553 u64 decays;
2554
2555 decays = now - cfs_rq->last_decay;
aff3e498 2556 if (!decays && !force_update)
9ee474f5
PT
2557 return;
2558
2509940f
AS
2559 if (atomic_long_read(&cfs_rq->removed_load)) {
2560 unsigned long removed_load;
2561 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
aff3e498
PT
2562 subtract_blocked_load_contrib(cfs_rq, removed_load);
2563 }
9ee474f5 2564
aff3e498
PT
2565 if (decays) {
2566 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2567 decays);
2568 atomic64_add(decays, &cfs_rq->decay_counter);
2569 cfs_rq->last_decay = now;
2570 }
c566e8e9
PT
2571
2572 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
9d85f21c 2573}
18bf2805 2574
2dac754e
PT
2575/* Add the load generated by se into cfs_rq's child load-average */
2576static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2577 struct sched_entity *se,
2578 int wakeup)
2dac754e 2579{
aff3e498
PT
2580 /*
2581 * We track migrations using entity decay_count <= 0, on a wake-up
2582 * migration we use a negative decay count to track the remote decays
2583 * accumulated while sleeping.
a75cdaa9
AS
2584 *
2585 * Newly forked tasks are enqueued with se->avg.decay_count == 0; they
2586 * are seen by enqueue_entity_load_avg() as a migration with an already
2587 * constructed load_avg_contrib.
aff3e498
PT
2588 */
2589 if (unlikely(se->avg.decay_count <= 0)) {
78becc27 2590 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
aff3e498
PT
2591 if (se->avg.decay_count) {
2592 /*
2593 * In a wake-up migration we have to approximate the
2594 * time sleeping. This is because we can't synchronize
2595 * clock_task between the two cpus, and it is not
2596 * guaranteed to be read-safe. Instead, we can
2597 * approximate this using our carried decays, which are
2598 * explicitly atomically readable.
2599 */
2600 se->avg.last_runnable_update -= (-se->avg.decay_count)
2601 << 20;
2602 update_entity_load_avg(se, 0);
2603 /* Indicate that we're now synchronized and on-rq */
2604 se->avg.decay_count = 0;
2605 }
9ee474f5
PT
2606 wakeup = 0;
2607 } else {
9390675a 2608 __synchronize_entity_decay(se);
9ee474f5
PT
2609 }
2610
aff3e498
PT
2611 /* migrated tasks did not contribute to our blocked load */
2612 if (wakeup) {
9ee474f5 2613 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
aff3e498
PT
2614 update_entity_load_avg(se, 0);
2615 }
9ee474f5 2616
2dac754e 2617 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
aff3e498
PT
2618 /* we force update consideration on load-balancer moves */
2619 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2dac754e
PT
2620}
2621
9ee474f5
PT
2622/*
2623 * Remove se's load from this cfs_rq child load-average; if the entity is
2624 * transitioning to a blocked state we track its projected decay using
2625 * blocked_load_avg.
2626 */
2dac754e 2627static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2628 struct sched_entity *se,
2629 int sleep)
2dac754e 2630{
9ee474f5 2631 update_entity_load_avg(se, 1);
aff3e498
PT
2632 /* we force update consideration on load-balancer moves */
2633 update_cfs_rq_blocked_load(cfs_rq, !sleep);
9ee474f5 2634
2dac754e 2635 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
9ee474f5
PT
2636 if (sleep) {
2637 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2638 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2639 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2dac754e 2640}
642dbc39
VG
2641
2642/*
2643 * Update the rq's load with the elapsed running time before entering
2644 * idle. if the last scheduled task is not a CFS task, idle_enter will
2645 * be the only way to update the runnable statistic.
2646 */
2647void idle_enter_fair(struct rq *this_rq)
2648{
2649 update_rq_runnable_avg(this_rq, 1);
2650}
2651
2652/*
2653 * Update the rq's load with the elapsed idle time before a task is
2654 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2655 * be the only way to update the runnable statistic.
2656 */
2657void idle_exit_fair(struct rq *this_rq)
2658{
2659 update_rq_runnable_avg(this_rq, 0);
2660}
2661
6e83125c
PZ
2662static int idle_balance(struct rq *this_rq);
2663
38033c37
PZ
2664#else /* CONFIG_SMP */
2665
9ee474f5
PT
2666static inline void update_entity_load_avg(struct sched_entity *se,
2667 int update_cfs_rq) {}
18bf2805 2668static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2dac754e 2669static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2670 struct sched_entity *se,
2671 int wakeup) {}
2dac754e 2672static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
2673 struct sched_entity *se,
2674 int sleep) {}
aff3e498
PT
2675static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2676 int force_update) {}
6e83125c
PZ
2677
2678static inline int idle_balance(struct rq *rq)
2679{
2680 return 0;
2681}
2682
38033c37 2683#endif /* CONFIG_SMP */
9d85f21c 2684
2396af69 2685static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 2686{
bf0f6f24 2687#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
2688 struct task_struct *tsk = NULL;
2689
2690 if (entity_is_task(se))
2691 tsk = task_of(se);
2692
41acab88 2693 if (se->statistics.sleep_start) {
78becc27 2694 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
bf0f6f24
IM
2695
2696 if ((s64)delta < 0)
2697 delta = 0;
2698
41acab88
LDM
2699 if (unlikely(delta > se->statistics.sleep_max))
2700 se->statistics.sleep_max = delta;
bf0f6f24 2701
8c79a045 2702 se->statistics.sleep_start = 0;
41acab88 2703 se->statistics.sum_sleep_runtime += delta;
9745512c 2704
768d0c27 2705 if (tsk) {
e414314c 2706 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
2707 trace_sched_stat_sleep(tsk, delta);
2708 }
bf0f6f24 2709 }
41acab88 2710 if (se->statistics.block_start) {
78becc27 2711 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
bf0f6f24
IM
2712
2713 if ((s64)delta < 0)
2714 delta = 0;
2715
41acab88
LDM
2716 if (unlikely(delta > se->statistics.block_max))
2717 se->statistics.block_max = delta;
bf0f6f24 2718
8c79a045 2719 se->statistics.block_start = 0;
41acab88 2720 se->statistics.sum_sleep_runtime += delta;
30084fbd 2721
e414314c 2722 if (tsk) {
8f0dfc34 2723 if (tsk->in_iowait) {
41acab88
LDM
2724 se->statistics.iowait_sum += delta;
2725 se->statistics.iowait_count++;
768d0c27 2726 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
2727 }
2728
b781a602
AV
2729 trace_sched_stat_blocked(tsk, delta);
2730
e414314c
PZ
2731 /*
2732 * Blocking time is in units of nanosecs, so shift by
2733 * 20 to get a milliseconds-range estimation of the
2734 * amount of time that the task spent sleeping:
2735 */
2736 if (unlikely(prof_on == SLEEP_PROFILING)) {
2737 profile_hits(SLEEP_PROFILING,
2738 (void *)get_wchan(tsk),
2739 delta >> 20);
2740 }
2741 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 2742 }
bf0f6f24
IM
2743 }
2744#endif
2745}
2746
ddc97297
PZ
2747static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2748{
2749#ifdef CONFIG_SCHED_DEBUG
2750 s64 d = se->vruntime - cfs_rq->min_vruntime;
2751
2752 if (d < 0)
2753 d = -d;
2754
2755 if (d > 3*sysctl_sched_latency)
2756 schedstat_inc(cfs_rq, nr_spread_over);
2757#endif
2758}
2759
aeb73b04
PZ
2760static void
2761place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2762{
1af5f730 2763 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 2764
2cb8600e
PZ
2765 /*
2766 * The 'current' period is already promised to the current tasks,
2767 * however the extra weight of the new task will slow them down a
2768 * little, place the new task so that it fits in the slot that
2769 * stays open at the end.
2770 */
94dfb5e7 2771 if (initial && sched_feat(START_DEBIT))
f9c0b095 2772 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 2773
a2e7a7eb 2774 /* sleeps up to a single latency don't count. */
5ca9880c 2775 if (!initial) {
a2e7a7eb 2776 unsigned long thresh = sysctl_sched_latency;
a7be37ac 2777
a2e7a7eb
MG
2778 /*
2779 * Halve their sleep time's effect, to allow
2780 * for a gentler effect of sleepers:
2781 */
2782 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2783 thresh >>= 1;
51e0304c 2784
a2e7a7eb 2785 vruntime -= thresh;
aeb73b04
PZ
2786 }
2787
b5d9d734 2788 /* ensure we never gain time by being placed backwards. */
16c8f1c7 2789 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
2790}
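
Reduced to its arithmetic, the placement above is: start from min_vruntime, debit new tasks one vslice, credit wakers with up to half a latency period, and never move an entity backwards. A simplified sketch using the 6 ms default latency and ignoring u64 underflow near zero:

#include <stdio.h>
#include <stdint.h>

static uint64_t place(uint64_t se_vruntime, uint64_t min_vruntime,
		      int initial, uint64_t vslice)
{
	uint64_t vruntime = min_vruntime;
	uint64_t thresh = 6000000ULL;	/* sysctl_sched_latency, 6ms default */

	if (initial)
		vruntime += vslice;	 /* START_DEBIT: queue behind others */
	else
		vruntime -= thresh >> 1; /* GENTLE_FAIR_SLEEPERS: half credit */

	/* ensure we never gain time by being placed backwards */
	return se_vruntime > vruntime ? se_vruntime : vruntime;
}

int main(void)
{
	/* A waker at min_vruntime = 10ms gets a 3ms credit: */
	printf("%llu\n", (unsigned long long)place(0, 10000000ULL, 0, 0)); /* 7000000 */
	return 0;
}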
2791
d3d9dc33
PT
2792static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2793
bf0f6f24 2794static void
88ec22d3 2795enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 2796{
88ec22d3
PZ
2797 /*
2798 * Update the normalized vruntime before updating min_vruntime
0fc576d5 2799 * through calling update_curr().
88ec22d3 2800 */
371fd7e7 2801 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
2802 se->vruntime += cfs_rq->min_vruntime;
2803
bf0f6f24 2804 /*
a2a2d680 2805 * Update run-time statistics of the 'current'.
bf0f6f24 2806 */
b7cc0896 2807 update_curr(cfs_rq);
f269ae04 2808 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
17bc14b7
LT
2809 account_entity_enqueue(cfs_rq, se);
2810 update_cfs_shares(cfs_rq);
bf0f6f24 2811
88ec22d3 2812 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 2813 place_entity(cfs_rq, se, 0);
2396af69 2814 enqueue_sleeper(cfs_rq, se);
e9acbff6 2815 }
bf0f6f24 2816
d2417e5a 2817 update_stats_enqueue(cfs_rq, se);
ddc97297 2818 check_spread(cfs_rq, se);
83b699ed
SV
2819 if (se != cfs_rq->curr)
2820 __enqueue_entity(cfs_rq, se);
2069dd75 2821 se->on_rq = 1;
3d4b47b4 2822
d3d9dc33 2823 if (cfs_rq->nr_running == 1) {
3d4b47b4 2824 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
2825 check_enqueue_throttle(cfs_rq);
2826 }
bf0f6f24
IM
2827}
2828
2c13c919 2829static void __clear_buddies_last(struct sched_entity *se)
2002c695 2830{
2c13c919
RR
2831 for_each_sched_entity(se) {
2832 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 2833 if (cfs_rq->last != se)
2c13c919 2834 break;
f1044799
PZ
2835
2836 cfs_rq->last = NULL;
2c13c919
RR
2837 }
2838}
2002c695 2839
2c13c919
RR
2840static void __clear_buddies_next(struct sched_entity *se)
2841{
2842 for_each_sched_entity(se) {
2843 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 2844 if (cfs_rq->next != se)
2c13c919 2845 break;
f1044799
PZ
2846
2847 cfs_rq->next = NULL;
2c13c919 2848 }
2002c695
PZ
2849}
2850
ac53db59
RR
2851static void __clear_buddies_skip(struct sched_entity *se)
2852{
2853 for_each_sched_entity(se) {
2854 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 2855 if (cfs_rq->skip != se)
ac53db59 2856 break;
f1044799
PZ
2857
2858 cfs_rq->skip = NULL;
ac53db59
RR
2859 }
2860}
2861
a571bbea
PZ
2862static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2863{
2c13c919
RR
2864 if (cfs_rq->last == se)
2865 __clear_buddies_last(se);
2866
2867 if (cfs_rq->next == se)
2868 __clear_buddies_next(se);
ac53db59
RR
2869
2870 if (cfs_rq->skip == se)
2871 __clear_buddies_skip(se);
a571bbea
PZ
2872}
2873
6c16a6dc 2874static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 2875
bf0f6f24 2876static void
371fd7e7 2877dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 2878{
a2a2d680
DA
2879 /*
2880 * Update run-time statistics of the 'current'.
2881 */
2882 update_curr(cfs_rq);
17bc14b7 2883 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 2884
19b6a2e3 2885 update_stats_dequeue(cfs_rq, se);
371fd7e7 2886 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 2887#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
2888 if (entity_is_task(se)) {
2889 struct task_struct *tsk = task_of(se);
2890
2891 if (tsk->state & TASK_INTERRUPTIBLE)
78becc27 2892 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2893 if (tsk->state & TASK_UNINTERRUPTIBLE)
78becc27 2894 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 2895 }
db36cc7d 2896#endif
67e9fb2a
PZ
2897 }
2898
2002c695 2899 clear_buddies(cfs_rq, se);
4793241b 2900
83b699ed 2901 if (se != cfs_rq->curr)
30cfdcfc 2902 __dequeue_entity(cfs_rq, se);
17bc14b7 2903 se->on_rq = 0;
30cfdcfc 2904 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
2905
2906 /*
2907 * Normalize the entity after updating the min_vruntime because the
2908 * update can refer to the ->curr item and we need to reflect this
2909 * movement in our normalized position.
2910 */
371fd7e7 2911 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 2912 se->vruntime -= cfs_rq->min_vruntime;
1e876231 2913
d8b4986d
PT
2914 /* return excess runtime on last dequeue */
2915 return_cfs_rq_runtime(cfs_rq);
2916
1e876231 2917 update_min_vruntime(cfs_rq);
17bc14b7 2918 update_cfs_shares(cfs_rq);
bf0f6f24
IM
2919}
2920
2921/*
2922 * Preempt the current task with a newly woken task if needed:
2923 */
7c92e54f 2924static void
2e09bf55 2925check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 2926{
11697830 2927 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
2928 struct sched_entity *se;
2929 s64 delta;
11697830 2930
6d0f0ebd 2931 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 2932 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 2933 if (delta_exec > ideal_runtime) {
8875125e 2934 resched_curr(rq_of(cfs_rq));
a9f3e2b5
MG
2935 /*
2936 * The current task ran long enough, ensure it doesn't get
2937 * re-elected due to buddy favours.
2938 */
2939 clear_buddies(cfs_rq, curr);
f685ceac
MG
2940 return;
2941 }
2942
2943 /*
2944 * Ensure that a task that missed wakeup preemption by a
2945 * narrow margin doesn't have to wait for a full slice.
2946 * This also mitigates buddy induced latencies under load.
2947 */
f685ceac
MG
2948 if (delta_exec < sysctl_sched_min_granularity)
2949 return;
2950
f4cfb33e
WX
2951 se = __pick_first_entity(cfs_rq);
2952 delta = curr->vruntime - se->vruntime;
f685ceac 2953
f4cfb33e
WX
2954 if (delta < 0)
2955 return;
d7d82944 2956
f4cfb33e 2957 if (delta > ideal_runtime)
8875125e 2958 resched_curr(rq_of(cfs_rq));
bf0f6f24
IM
2959}
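
The two reschedule triggers above read as one predicate: preempt when the slice is used up; otherwise only once the task has run at least a minimum granularity and has drifted more than a full slice ahead of the leftmost entity in vruntime. A sketch with illustrative values:

#include <stdbool.h>
#include <stdio.h>

static bool should_resched(long long delta_exec, long long ideal_runtime,
			   long long vdelta, long long min_gran)
{
	if (delta_exec > ideal_runtime)
		return true;		/* slice used up */
	if (delta_exec < min_gran)
		return false;		/* too soon to matter */
	return vdelta > ideal_runtime;	/* curr a full slice ahead in vruntime */
}

int main(void)
{
	/* 5ms of runtime against a 4ms slice => preempt: */
	printf("%d\n", should_resched(5000000LL, 4000000LL, 0LL, 750000LL));
	return 0;
}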
2960
83b699ed 2961static void
8494f412 2962set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 2963{
83b699ed
SV
2964 /* 'current' is not kept within the tree. */
2965 if (se->on_rq) {
2966 /*
2967 * Any task has to be enqueued before it gets to execute on
2968 * a CPU. So account for the time it spent waiting on the
2969 * runqueue.
2970 */
2971 update_stats_wait_end(cfs_rq, se);
2972 __dequeue_entity(cfs_rq, se);
2973 }
2974
79303e9e 2975 update_stats_curr_start(cfs_rq, se);
429d43bc 2976 cfs_rq->curr = se;
eba1ed4b
IM
2977#ifdef CONFIG_SCHEDSTATS
2978 /*
2979 * Track our maximum slice length, if the CPU's load is at
2980 * least twice that of our own weight (i.e. don't track it
2981 * when there are only lesser-weight tasks around):
2982 */
495eca49 2983 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 2984 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
2985 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2986 }
2987#endif
4a55b450 2988 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
2989}
2990
3f3a4904
PZ
2991static int
2992wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2993
ac53db59
RR
2994/*
2995 * Pick the next process, keeping these things in mind, in this order:
2996 * 1) keep things fair between processes/task groups
2997 * 2) pick the "next" process, since someone really wants that to run
2998 * 3) pick the "last" process, for cache locality
2999 * 4) do not run the "skip" process, if something else is available
3000 */
678d5718
PZ
3001static struct sched_entity *
3002pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
aa2ac252 3003{
678d5718
PZ
3004 struct sched_entity *left = __pick_first_entity(cfs_rq);
3005 struct sched_entity *se;
3006
3007 /*
3008 * If curr is set we have to see if it's left of the leftmost entity
3009 * still in the tree, provided there was anything in the tree at all.
3010 */
3011 if (!left || (curr && entity_before(curr, left)))
3012 left = curr;
3013
3014 se = left; /* ideally we run the leftmost entity */
f4b6755f 3015
ac53db59
RR
3016 /*
3017 * Avoid running the skip buddy, if running something else can
3018 * be done without getting too unfair.
3019 */
3020 if (cfs_rq->skip == se) {
678d5718
PZ
3021 struct sched_entity *second;
3022
3023 if (se == curr) {
3024 second = __pick_first_entity(cfs_rq);
3025 } else {
3026 second = __pick_next_entity(se);
3027 if (!second || (curr && entity_before(curr, second)))
3028 second = curr;
3029 }
3030
ac53db59
RR
3031 if (second && wakeup_preempt_entity(second, left) < 1)
3032 se = second;
3033 }
aa2ac252 3034
f685ceac
MG
3035 /*
3036 * Prefer last buddy, try to return the CPU to a preempted task.
3037 */
3038 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3039 se = cfs_rq->last;
3040
ac53db59
RR
3041 /*
3042 * Someone really wants this to run. If it's not unfair, run it.
3043 */
3044 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3045 se = cfs_rq->next;
3046
f685ceac 3047 clear_buddies(cfs_rq, se);
4793241b
PZ
3048
3049 return se;
aa2ac252
PZ
3050}
3051
678d5718 3052static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d3d9dc33 3053
ab6cde26 3054static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
3055{
3056 /*
3057 * If still on the runqueue then deactivate_task()
3058 * was not called and update_curr() has to be done:
3059 */
3060 if (prev->on_rq)
b7cc0896 3061 update_curr(cfs_rq);
bf0f6f24 3062
d3d9dc33
PT
3063 /* throttle cfs_rqs exceeding runtime */
3064 check_cfs_rq_runtime(cfs_rq);
3065
ddc97297 3066 check_spread(cfs_rq, prev);
30cfdcfc 3067 if (prev->on_rq) {
5870db5b 3068 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
3069 /* Put 'current' back into the tree. */
3070 __enqueue_entity(cfs_rq, prev);
9d85f21c 3071 /* in !on_rq case, update occurred at dequeue */
9ee474f5 3072 update_entity_load_avg(prev, 1);
30cfdcfc 3073 }
429d43bc 3074 cfs_rq->curr = NULL;
bf0f6f24
IM
3075}
3076
8f4d37ec
PZ
3077static void
3078entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 3079{
bf0f6f24 3080 /*
30cfdcfc 3081 * Update run-time statistics of the 'current'.
bf0f6f24 3082 */
30cfdcfc 3083 update_curr(cfs_rq);
bf0f6f24 3084
9d85f21c
PT
3085 /*
3086 * Ensure that runnable average is periodically updated.
3087 */
9ee474f5 3088 update_entity_load_avg(curr, 1);
aff3e498 3089 update_cfs_rq_blocked_load(cfs_rq, 1);
bf0bd948 3090 update_cfs_shares(cfs_rq);
9d85f21c 3091
8f4d37ec
PZ
3092#ifdef CONFIG_SCHED_HRTICK
3093 /*
3094 * queued ticks are scheduled to match the slice, so don't bother
3095 * validating it and just reschedule.
3096 */
983ed7a6 3097 if (queued) {
8875125e 3098 resched_curr(rq_of(cfs_rq));
983ed7a6
HH
3099 return;
3100 }
8f4d37ec
PZ
3101 /*
3102 * don't let the period tick interfere with the hrtick preemption
3103 */
3104 if (!sched_feat(DOUBLE_TICK) &&
3105 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3106 return;
3107#endif
3108
2c2efaed 3109 if (cfs_rq->nr_running > 1)
2e09bf55 3110 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
3111}
3112
ab84d31e
PT
3113
3114/**************************************************
3115 * CFS bandwidth control machinery
3116 */
3117
3118#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
3119
3120#ifdef HAVE_JUMP_LABEL
c5905afb 3121static struct static_key __cfs_bandwidth_used;
029632fb
PZ
3122
3123static inline bool cfs_bandwidth_used(void)
3124{
c5905afb 3125 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
3126}
3127
1ee14e6c 3128void cfs_bandwidth_usage_inc(void)
029632fb 3129{
1ee14e6c
BS
3130 static_key_slow_inc(&__cfs_bandwidth_used);
3131}
3132
3133void cfs_bandwidth_usage_dec(void)
3134{
3135 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
3136}
3137#else /* HAVE_JUMP_LABEL */
3138static bool cfs_bandwidth_used(void)
3139{
3140 return true;
3141}
3142
1ee14e6c
BS
3143void cfs_bandwidth_usage_inc(void) {}
3144void cfs_bandwidth_usage_dec(void) {}
029632fb
PZ
3145#endif /* HAVE_JUMP_LABEL */
3146
ab84d31e
PT
3147/*
3148 * default period for cfs group bandwidth.
3149 * default: 0.1s, units: nanoseconds
3150 */
3151static inline u64 default_cfs_period(void)
3152{
3153 return 100000000ULL;
3154}
ec12cb7f
PT
3155
3156static inline u64 sched_cfs_bandwidth_slice(void)
3157{
3158 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3159}
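
Unit bookkeeping for the two helpers above: the period and quota live in nanoseconds while the slice sysctl is in microseconds. With the 0.1 s default period, a 5 ms slice (the usual sysctl default, assumed here) and a 50 ms quota, a group is capped at half a cpu and can refill its local pool at most ten times per period:

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	unsigned long long period = 100000000ULL;	/* 0.1s default */
	unsigned long long quota = 50000000ULL;		/* 50ms => 0.5 cpu */
	unsigned long long slice_us = 5000;		/* sysctl value, in us */
	unsigned long long slice = slice_us * NSEC_PER_USEC;

	printf("refills per period: %llu\n", quota / slice);	/* 10 */
	printf("cpu share: %.2f\n", (double)quota / period);	/* 0.50 */
	return 0;
}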
3160
a9cf55b2
PT
3161/*
3162 * Replenish runtime according to assigned quota and update expiration time.
3163 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3164 * additional synchronization around rq->lock.
3165 *
3166 * requires cfs_b->lock
3167 */
029632fb 3168void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
3169{
3170 u64 now;
3171
3172 if (cfs_b->quota == RUNTIME_INF)
3173 return;
3174
3175 now = sched_clock_cpu(smp_processor_id());
3176 cfs_b->runtime = cfs_b->quota;
3177 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3178}
3179
029632fb
PZ
3180static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3181{
3182 return &tg->cfs_bandwidth;
3183}
3184
f1b17280
PT
3185 /* rq_clock_task() normalized against any time this cfs_rq has spent throttled */
3186static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3187{
3188 if (unlikely(cfs_rq->throttle_count))
3189 return cfs_rq->throttled_clock_task;
3190
78becc27 3191 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
3192}
3193
85dac906
PT
3194/* returns 0 on failure to allocate runtime */
3195static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
3196{
3197 struct task_group *tg = cfs_rq->tg;
3198 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 3199 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
3200
3201 /* note: this is a positive sum as runtime_remaining <= 0 */
3202 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3203
3204 raw_spin_lock(&cfs_b->lock);
3205 if (cfs_b->quota == RUNTIME_INF)
3206 amount = min_amount;
58088ad0 3207 else {
a9cf55b2
PT
3208 /*
3209 * If the bandwidth pool has become inactive, then at least one
3210 * period must have elapsed since the last consumption.
3211 * Refresh the global state and ensure bandwidth timer becomes
3212 * active.
3213 */
3214 if (!cfs_b->timer_active) {
3215 __refill_cfs_bandwidth_runtime(cfs_b);
09dc4ab0 3216 __start_cfs_bandwidth(cfs_b, false);
a9cf55b2 3217 }
58088ad0
PT
3218
3219 if (cfs_b->runtime > 0) {
3220 amount = min(cfs_b->runtime, min_amount);
3221 cfs_b->runtime -= amount;
3222 cfs_b->idle = 0;
3223 }
ec12cb7f 3224 }
a9cf55b2 3225 expires = cfs_b->runtime_expires;
ec12cb7f
PT
3226 raw_spin_unlock(&cfs_b->lock);
3227
3228 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
3229 /*
3230 * we may have advanced our local expiration to account for allowed
3231 * spread between our sched_clock and the one on which runtime was
3232 * issued.
3233 */
3234 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3235 cfs_rq->runtime_expires = expires;
85dac906
PT
3236
3237 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
3238}
3239
a9cf55b2
PT
3240/*
3241 * Note: This depends on the synchronization provided by sched_clock and the
3242 * fact that rq->clock snapshots this value.
3243 */
3244static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 3245{
a9cf55b2 3246 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
3247
3248 /* if the deadline is ahead of our clock, nothing to do */
78becc27 3249 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
3250 return;
3251
a9cf55b2
PT
3252 if (cfs_rq->runtime_remaining < 0)
3253 return;
3254
3255 /*
3256 * If the local deadline has passed we have to consider the
3257 * possibility that our sched_clock is 'fast' and the global deadline
3258 * has not truly expired.
3259 *
3260 * Fortunately we can determine whether this is the case by checking
51f2176d
BS
3261 * whether the global deadline has advanced. It is valid to compare
3262 * cfs_b->runtime_expires without any locks since we only care about
3263 * exact equality, so a partial write will still work.
a9cf55b2
PT
3264 */
3265
51f2176d 3266 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
a9cf55b2
PT
3267 /* extend local deadline, drift is bounded above by 2 ticks */
3268 cfs_rq->runtime_expires += TICK_NSEC;
3269 } else {
3270 /* global deadline is ahead, expiration has passed */
3271 cfs_rq->runtime_remaining = 0;
3272 }
3273}
3274
9dbdb155 3275static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
a9cf55b2
PT
3276{
3277 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 3278 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
3279 expire_cfs_rq_runtime(cfs_rq);
3280
3281 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
3282 return;
3283
85dac906
PT
3284 /*
3285 * if we're unable to extend our runtime we resched so that the active
3286 * hierarchy can be throttled
3287 */
3288 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
8875125e 3289 resched_curr(rq_of(cfs_rq));
ec12cb7f
PT
3290}
3291
6c16a6dc 3292static __always_inline
9dbdb155 3293void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
ec12cb7f 3294{
56f570e5 3295 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
3296 return;
3297
3298 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3299}
3300
85dac906
PT
3301static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3302{
56f570e5 3303 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
3304}
3305
64660c86
PT
3306/* check whether cfs_rq, or any parent, is throttled */
3307static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3308{
56f570e5 3309 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
3310}
3311
3312/*
3313 * Ensure that neither of the group entities corresponding to src_cpu or
3314 * dest_cpu are members of a throttled hierarchy when performing group
3315 * load-balance operations.
3316 */
3317static inline int throttled_lb_pair(struct task_group *tg,
3318 int src_cpu, int dest_cpu)
3319{
3320 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3321
3322 src_cfs_rq = tg->cfs_rq[src_cpu];
3323 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3324
3325 return throttled_hierarchy(src_cfs_rq) ||
3326 throttled_hierarchy(dest_cfs_rq);
3327}
3328
3329/* updated child weight may affect parent so we have to do this bottom up */
3330static int tg_unthrottle_up(struct task_group *tg, void *data)
3331{
3332 struct rq *rq = data;
3333 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3334
3335 cfs_rq->throttle_count--;
3336#ifdef CONFIG_SMP
3337 if (!cfs_rq->throttle_count) {
f1b17280 3338 /* adjust cfs_rq_clock_task() */
78becc27 3339 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 3340 cfs_rq->throttled_clock_task;
64660c86
PT
3341 }
3342#endif
3343
3344 return 0;
3345}
3346
3347static int tg_throttle_down(struct task_group *tg, void *data)
3348{
3349 struct rq *rq = data;
3350 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3351
82958366
PT
3352 /* group is entering throttled state, stop time */
3353 if (!cfs_rq->throttle_count)
78becc27 3354 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
3355 cfs_rq->throttle_count++;
3356
3357 return 0;
3358}
3359
d3d9dc33 3360static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
3361{
3362 struct rq *rq = rq_of(cfs_rq);
3363 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3364 struct sched_entity *se;
3365 long task_delta, dequeue = 1;
3366
3367 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3368
f1b17280 3369 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
3370 rcu_read_lock();
3371 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3372 rcu_read_unlock();
85dac906
PT
3373
3374 task_delta = cfs_rq->h_nr_running;
3375 for_each_sched_entity(se) {
3376 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3377 /* throttled entity or throttle-on-deactivate */
3378 if (!se->on_rq)
3379 break;
3380
3381 if (dequeue)
3382 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3383 qcfs_rq->h_nr_running -= task_delta;
3384
3385 if (qcfs_rq->load.weight)
3386 dequeue = 0;
3387 }
3388
3389 if (!se)
72465447 3390 sub_nr_running(rq, task_delta);
85dac906
PT
3391
3392 cfs_rq->throttled = 1;
78becc27 3393 cfs_rq->throttled_clock = rq_clock(rq);
85dac906 3394 raw_spin_lock(&cfs_b->lock);
c06f04c7
BS
3395 /*
3396 * Add to the _head_ of the list, so that an already-started
3397 * distribute_cfs_runtime will not see us
3398 */
3399 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
f9f9ffc2 3400 if (!cfs_b->timer_active)
09dc4ab0 3401 __start_cfs_bandwidth(cfs_b, false);
85dac906
PT
3402 raw_spin_unlock(&cfs_b->lock);
3403}
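
The dequeue walk above can be illustrated with a standalone sketch (the hierarchy depth, weights, and counts are invented): entities are dequeued bottom-up until an ancestor still has other runnable weight, while h_nr_running is reduced at every level.

#include <stdio.h>

struct level { long other_weight; long h_nr_running; };

int main(void)
{
	/* leaf..root; level 1 still has other runnable weight */
	struct level path[3] = { {0, 3}, {40, 5}, {40, 9} };
	long task_delta = 3;        /* tasks under the throttled cfs_rq */
	int dequeue = 1, i;

	for (i = 0; i < 3; i++) {
		if (dequeue)
			printf("dequeue group entity at level %d\n", i);
		path[i].h_nr_running -= task_delta;
		if (path[i].other_weight)   /* others remain runnable */
			dequeue = 0;
	}
	for (i = 0; i < 3; i++)
		printf("level %d: h_nr_running=%ld\n", i, path[i].h_nr_running);
	return 0;
}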
3404
029632fb 3405void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
3406{
3407 struct rq *rq = rq_of(cfs_rq);
3408 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3409 struct sched_entity *se;
3410 int enqueue = 1;
3411 long task_delta;
3412
22b958d8 3413 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
3414
3415 cfs_rq->throttled = 0;
1a55af2e
FW
3416
3417 update_rq_clock(rq);
3418
671fd9da 3419 raw_spin_lock(&cfs_b->lock);
78becc27 3420 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
3421 list_del_rcu(&cfs_rq->throttled_list);
3422 raw_spin_unlock(&cfs_b->lock);
3423
64660c86
PT
3424 /* update hierarchical throttle state */
3425 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3426
671fd9da
PT
3427 if (!cfs_rq->load.weight)
3428 return;
3429
3430 task_delta = cfs_rq->h_nr_running;
3431 for_each_sched_entity(se) {
3432 if (se->on_rq)
3433 enqueue = 0;
3434
3435 cfs_rq = cfs_rq_of(se);
3436 if (enqueue)
3437 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3438 cfs_rq->h_nr_running += task_delta;
3439
3440 if (cfs_rq_throttled(cfs_rq))
3441 break;
3442 }
3443
3444 if (!se)
72465447 3445 add_nr_running(rq, task_delta);
671fd9da
PT
3446
3447 /* determine whether we need to wake up potentially idle cpu */
3448 if (rq->curr == rq->idle && rq->cfs.nr_running)
8875125e 3449 resched_curr(rq);
671fd9da
PT
3450}
3451
3452static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3453 u64 remaining, u64 expires)
3454{
3455 struct cfs_rq *cfs_rq;
c06f04c7
BS
3456 u64 runtime;
3457 u64 starting_runtime = remaining;
671fd9da
PT
3458
3459 rcu_read_lock();
3460 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3461 throttled_list) {
3462 struct rq *rq = rq_of(cfs_rq);
3463
3464 raw_spin_lock(&rq->lock);
3465 if (!cfs_rq_throttled(cfs_rq))
3466 goto next;
3467
3468 runtime = -cfs_rq->runtime_remaining + 1;
3469 if (runtime > remaining)
3470 runtime = remaining;
3471 remaining -= runtime;
3472
3473 cfs_rq->runtime_remaining += runtime;
3474 cfs_rq->runtime_expires = expires;
3475
3476 /* we check whether we're throttled above */
3477 if (cfs_rq->runtime_remaining > 0)
3478 unthrottle_cfs_rq(cfs_rq);
3479
3480next:
3481 raw_spin_unlock(&rq->lock);
3482
3483 if (!remaining)
3484 break;
3485 }
3486 rcu_read_unlock();
3487
c06f04c7 3488 return starting_runtime - remaining;
671fd9da
PT
3489}
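
The distribution rule above tops each throttled queue up to exactly +1 ns, just enough to let it unthrottle, until the refill budget runs out. A standalone model (deficits and budget invented):

#include <stdio.h>

int main(void)
{
	long long deficit[3] = { -200, -500, -50 }; /* runtime_remaining */
	long long remaining = 600, rt;              /* refill budget */
	int i;

	for (i = 0; i < 3 && remaining; i++) {
		rt = -deficit[i] + 1;       /* bring it to exactly +1 */
		if (rt > remaining)
			rt = remaining;
		remaining -= rt;
		deficit[i] += rt;
		printf("cfs_rq %d: remaining=%lld %s\n", i, deficit[i],
		       deficit[i] > 0 ? "(unthrottle)" : "(still throttled)");
	}
	return 0;
}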
3490
58088ad0
PT
3491/*
3492 * Responsible for refilling a task_group's bandwidth and unthrottling its
3493 * cfs_rqs as appropriate. If there has been no activity within the last
3494 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3495 * used to track this state.
3496 */
3497static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3498{
671fd9da 3499 u64 runtime, runtime_expires;
51f2176d 3500 int throttled;
58088ad0 3501
58088ad0
PT
3502 /* no need to continue the timer with no bandwidth constraint */
3503 if (cfs_b->quota == RUNTIME_INF)
51f2176d 3504 goto out_deactivate;
58088ad0 3505
671fd9da 3506 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
e8da1b18 3507 cfs_b->nr_periods += overrun;
671fd9da 3508
51f2176d
BS
3509 /*
3510 * idle depends on !throttled (for the case of a large deficit), and if
3511 * we're going inactive then everything else can be deferred
3512 */
3513 if (cfs_b->idle && !throttled)
3514 goto out_deactivate;
a9cf55b2 3515
927b54fc
BS
3516 /*
3517 * if we have relooped after returning idle once, we need to update our
3518 * status as actually running, so that other cpus doing
3519 * __start_cfs_bandwidth will stop trying to cancel us.
3520 */
3521 cfs_b->timer_active = 1;
3522
a9cf55b2
PT
3523 __refill_cfs_bandwidth_runtime(cfs_b);
3524
671fd9da
PT
3525 if (!throttled) {
3526 /* mark as potentially idle for the upcoming period */
3527 cfs_b->idle = 1;
51f2176d 3528 return 0;
671fd9da
PT
3529 }
3530
e8da1b18
NR
3531 /* account preceding periods in which throttling occurred */
3532 cfs_b->nr_throttled += overrun;
3533
671fd9da 3534 runtime_expires = cfs_b->runtime_expires;
671fd9da
PT
3535
3536 /*
c06f04c7
BS
3537 * This check is repeated as we are holding onto the new bandwidth while
3538 * we unthrottle. This can potentially race with an unthrottled group
3539 * trying to acquire new bandwidth from the global pool. This can result
3540 * in us over-using our runtime if it is all used during this loop, but
3541 * only by limited amounts in that extreme case.
671fd9da 3542 */
c06f04c7
BS
3543 while (throttled && cfs_b->runtime > 0) {
3544 runtime = cfs_b->runtime;
671fd9da
PT
3545 raw_spin_unlock(&cfs_b->lock);
3546 /* we can't nest cfs_b->lock while distributing bandwidth */
3547 runtime = distribute_cfs_runtime(cfs_b, runtime,
3548 runtime_expires);
3549 raw_spin_lock(&cfs_b->lock);
3550
3551 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
c06f04c7
BS
3552
3553 cfs_b->runtime -= min(runtime, cfs_b->runtime);
671fd9da 3554 }
58088ad0 3555
671fd9da
PT
3556 /*
3557 * While we are ensured activity in the period following an
3558 * unthrottle, this also covers the case in which the new bandwidth is
3559 * insufficient to cover the existing bandwidth deficit. (Forcing the
3560 * timer to remain active while there are any throttled entities.)
3561 */
3562 cfs_b->idle = 0;
58088ad0 3563
51f2176d
BS
3564 return 0;
3565
3566out_deactivate:
3567 cfs_b->timer_active = 0;
3568 return 1;
58088ad0 3569}
d3d9dc33 3570
d8b4986d
PT
3571/* a cfs_rq won't donate quota below this amount */
3572static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3573/* minimum remaining period time to redistribute slack quota */
3574static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3575/* how long we wait to gather additional slack before distributing */
3576static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3577
db06e78c
BS
3578/*
3579 * Are we near the end of the current quota period?
3580 *
3581 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3582 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3583 * migrate_hrtimers, base is never cleared, so we are fine.
3584 */
d8b4986d
PT
3585static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3586{
3587 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3588 u64 remaining;
3589
3590 /* if the call-back is running a quota refresh is already occurring */
3591 if (hrtimer_callback_running(refresh_timer))
3592 return 1;
3593
3594 /* is a quota refresh about to occur? */
3595 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3596 if (remaining < min_expire)
3597 return 1;
3598
3599 return 0;
3600}
3601
3602static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3603{
3604 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3605
3606 /* if there's a quota refresh soon don't bother with slack */
3607 if (runtime_refresh_within(cfs_b, min_left))
3608 return;
3609
3610 start_bandwidth_timer(&cfs_b->slack_timer,
3611 ns_to_ktime(cfs_bandwidth_slack_period));
3612}
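
The hysteresis above can be checked with a small model (the constants match those defined above; the time until the next refresh is invented): slack is only gathered when the period timer will not fire within cfs_bandwidth_slack_period + min_bandwidth_expiration anyway.

#include <stdio.h>

#define SLACK_PERIOD             5000000LL  /* 5ms */
#define MIN_BANDWIDTH_EXPIRATION 2000000LL  /* 2ms */

int main(void)
{
	long long until_refresh = 6000000;  /* 6ms to the period timer */

	if (until_refresh < SLACK_PERIOD + MIN_BANDWIDTH_EXPIRATION)
		printf("refresh soon: don't bother with slack\n");
	else
		printf("arm the slack timer for %lld ns\n", SLACK_PERIOD);
	return 0;
}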
3613
3614/* we know any runtime found here is valid as update_curr() precedes return */
3615static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3616{
3617 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3618 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3619
3620 if (slack_runtime <= 0)
3621 return;
3622
3623 raw_spin_lock(&cfs_b->lock);
3624 if (cfs_b->quota != RUNTIME_INF &&
3625 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3626 cfs_b->runtime += slack_runtime;
3627
3628 /* we are under rq->lock, defer unthrottling using a timer */
3629 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3630 !list_empty(&cfs_b->throttled_cfs_rq))
3631 start_cfs_slack_bandwidth(cfs_b);
3632 }
3633 raw_spin_unlock(&cfs_b->lock);
3634
3635 /* even if it's not valid for return we don't want to try again */
3636 cfs_rq->runtime_remaining -= slack_runtime;
3637}
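
A one-shot numeric sketch of the donation rule above (values assumed): a queue holding 3ms of unused runtime keeps min_cfs_rq_runtime (1ms) and donates the remaining 2ms back to the global pool.

#include <stdio.h>

int main(void)
{
	long long remaining = 3000000;  /* unused local runtime: 3ms */
	long long min_keep = 1000000;   /* min_cfs_rq_runtime: 1ms */
	long long slack = remaining - min_keep;

	if (slack > 0) {
		printf("donate %lld ns to the global pool\n", slack);
		remaining -= slack;
	}
	printf("kept %lld ns locally\n", remaining);
	return 0;
}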
3638
3639static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3640{
56f570e5
PT
3641 if (!cfs_bandwidth_used())
3642 return;
3643
fccfdc6f 3644 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
3645 return;
3646
3647 __return_cfs_rq_runtime(cfs_rq);
3648}
3649
3650/*
3651 * This is done with a timer (instead of inline with bandwidth return) since
3652 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3653 */
3654static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3655{
3656 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3657 u64 expires;
3658
3659 /* confirm we're still not at a refresh boundary */
db06e78c
BS
3660 raw_spin_lock(&cfs_b->lock);
3661 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3662 raw_spin_unlock(&cfs_b->lock);
d8b4986d 3663 return;
db06e78c 3664 }
d8b4986d 3665
c06f04c7 3666 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
d8b4986d 3667 runtime = cfs_b->runtime;
c06f04c7 3668
d8b4986d
PT
3669 expires = cfs_b->runtime_expires;
3670 raw_spin_unlock(&cfs_b->lock);
3671
3672 if (!runtime)
3673 return;
3674
3675 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3676
3677 raw_spin_lock(&cfs_b->lock);
3678 if (expires == cfs_b->runtime_expires)
c06f04c7 3679 cfs_b->runtime -= min(runtime, cfs_b->runtime);
d8b4986d
PT
3680 raw_spin_unlock(&cfs_b->lock);
3681}
3682
d3d9dc33
PT
3683/*
3684 * When a group wakes up we want to make sure that its quota is not already
3685 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 3686 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
3687 */
3688static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3689{
56f570e5
PT
3690 if (!cfs_bandwidth_used())
3691 return;
3692
d3d9dc33
PT
3693 /* an active group must be handled by the update_curr()->put() path */
3694 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3695 return;
3696
3697 /* ensure the group is not already throttled */
3698 if (cfs_rq_throttled(cfs_rq))
3699 return;
3700
3701 /* update runtime allocation */
3702 account_cfs_rq_runtime(cfs_rq, 0);
3703 if (cfs_rq->runtime_remaining <= 0)
3704 throttle_cfs_rq(cfs_rq);
3705}
3706
3707/* conditionally throttle active cfs_rq's from put_prev_entity() */
678d5718 3708static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
d3d9dc33 3709{
56f570e5 3710 if (!cfs_bandwidth_used())
678d5718 3711 return false;
56f570e5 3712
d3d9dc33 3713 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
678d5718 3714 return false;
d3d9dc33
PT
3715
3716 /*
3717 * it's possible for a throttled entity to be forced into a running
3718 * state (e.g. set_curr_task), in this case we're finished.
3719 */
3720 if (cfs_rq_throttled(cfs_rq))
678d5718 3721 return true;
d3d9dc33
PT
3722
3723 throttle_cfs_rq(cfs_rq);
678d5718 3724 return true;
d3d9dc33 3725}
029632fb 3726
029632fb
PZ
3727static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3728{
3729 struct cfs_bandwidth *cfs_b =
3730 container_of(timer, struct cfs_bandwidth, slack_timer);
3731 do_sched_cfs_slack_timer(cfs_b);
3732
3733 return HRTIMER_NORESTART;
3734}
3735
3736static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3737{
3738 struct cfs_bandwidth *cfs_b =
3739 container_of(timer, struct cfs_bandwidth, period_timer);
3740 ktime_t now;
3741 int overrun;
3742 int idle = 0;
3743
51f2176d 3744 raw_spin_lock(&cfs_b->lock);
029632fb
PZ
3745 for (;;) {
3746 now = hrtimer_cb_get_time(timer);
3747 overrun = hrtimer_forward(timer, now, cfs_b->period);
3748
3749 if (!overrun)
3750 break;
3751
3752 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3753 }
51f2176d 3754 raw_spin_unlock(&cfs_b->lock);
029632fb
PZ
3755
3756 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3757}
3758
3759void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3760{
3761 raw_spin_lock_init(&cfs_b->lock);
3762 cfs_b->runtime = 0;
3763 cfs_b->quota = RUNTIME_INF;
3764 cfs_b->period = ns_to_ktime(default_cfs_period());
3765
3766 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3767 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3768 cfs_b->period_timer.function = sched_cfs_period_timer;
3769 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3770 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3771}
3772
3773static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3774{
3775 cfs_rq->runtime_enabled = 0;
3776 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3777}
3778
3779/* requires cfs_b->lock, may release to reprogram timer */
09dc4ab0 3780void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
029632fb
PZ
3781{
3782 /*
3783 * The timer may be active because we're trying to set a new bandwidth
3784 * period or because we're racing with the tear-down path
3785 * (timer_active==0 becomes visible before the hrtimer call-back
3786 * terminates). In either case we ensure that it's re-programmed
3787 */
927b54fc
BS
3788 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3789 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3790 /* bounce the lock to allow do_sched_cfs_period_timer to run */
029632fb 3791 raw_spin_unlock(&cfs_b->lock);
927b54fc 3792 cpu_relax();
029632fb
PZ
3793 raw_spin_lock(&cfs_b->lock);
3794 /* if someone else restarted the timer then we're done */
09dc4ab0 3795 if (!force && cfs_b->timer_active)
029632fb
PZ
3796 return;
3797 }
3798
3799 cfs_b->timer_active = 1;
3800 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3801}
3802
3803static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3804{
3805 hrtimer_cancel(&cfs_b->period_timer);
3806 hrtimer_cancel(&cfs_b->slack_timer);
3807}
3808
0e59bdae
KT
3809static void __maybe_unused update_runtime_enabled(struct rq *rq)
3810{
3811 struct cfs_rq *cfs_rq;
3812
3813 for_each_leaf_cfs_rq(rq, cfs_rq) {
3814 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
3815
3816 raw_spin_lock(&cfs_b->lock);
3817 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
3818 raw_spin_unlock(&cfs_b->lock);
3819 }
3820}
3821
38dc3348 3822static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
3823{
3824 struct cfs_rq *cfs_rq;
3825
3826 for_each_leaf_cfs_rq(rq, cfs_rq) {
029632fb
PZ
3827 if (!cfs_rq->runtime_enabled)
3828 continue;
3829
3830 /*
3831 * clock_task is not advancing so we just need to make sure
3832 * there's some valid quota amount
3833 */
51f2176d 3834 cfs_rq->runtime_remaining = 1;
0e59bdae
KT
3835 /*
3836 * Offline rq is schedulable till cpu is completely disabled
3837 * in take_cpu_down(), so we prevent new cfs throttling here.
3838 */
3839 cfs_rq->runtime_enabled = 0;
3840
029632fb
PZ
3841 if (cfs_rq_throttled(cfs_rq))
3842 unthrottle_cfs_rq(cfs_rq);
3843 }
3844}
3845
3846#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
3847static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3848{
78becc27 3849 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
3850}
3851
9dbdb155 3852static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
678d5718 3853static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
d3d9dc33 3854static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 3855static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
3856
3857static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3858{
3859 return 0;
3860}
64660c86
PT
3861
3862static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3863{
3864 return 0;
3865}
3866
3867static inline int throttled_lb_pair(struct task_group *tg,
3868 int src_cpu, int dest_cpu)
3869{
3870 return 0;
3871}
029632fb
PZ
3872
3873void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3874
3875#ifdef CONFIG_FAIR_GROUP_SCHED
3876static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
3877#endif
3878
029632fb
PZ
3879static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3880{
3881 return NULL;
3882}
3883static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
0e59bdae 3884static inline void update_runtime_enabled(struct rq *rq) {}
a4c96ae3 3885static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
3886
3887#endif /* CONFIG_CFS_BANDWIDTH */
3888
bf0f6f24
IM
3889/**************************************************
3890 * CFS operations on tasks:
3891 */
3892
8f4d37ec
PZ
3893#ifdef CONFIG_SCHED_HRTICK
3894static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3895{
8f4d37ec
PZ
3896 struct sched_entity *se = &p->se;
3897 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3898
3899 WARN_ON(task_rq(p) != rq);
3900
b39e66ea 3901 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
3902 u64 slice = sched_slice(cfs_rq, se);
3903 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3904 s64 delta = slice - ran;
3905
3906 if (delta < 0) {
3907 if (rq->curr == p)
8875125e 3908 resched_curr(rq);
8f4d37ec
PZ
3909 return;
3910 }
31656519 3911 hrtick_start(rq, delta);
8f4d37ec
PZ
3912 }
3913}
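
The arithmetic above reduces to "program a tick at the end of the current slice"; a standalone sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	long long slice = 4000000;      /* this entity's slice: 4ms */
	long long ran = 1500000;        /* already ran this slice: 1.5ms */
	long long delta = slice - ran;

	if (delta < 0)
		printf("slice exhausted: resched now\n");
	else
		printf("arm the hrtimer to fire in %lld ns\n", delta);
	return 0;
}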
a4c2f00f
PZ
3914
3915/*
3916 * called from enqueue/dequeue and updates the hrtick when the
3917 * current task is from our class and nr_running is low enough
3918 * to matter.
3919 */
3920static void hrtick_update(struct rq *rq)
3921{
3922 struct task_struct *curr = rq->curr;
3923
b39e66ea 3924 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
3925 return;
3926
3927 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3928 hrtick_start_fair(rq, curr);
3929}
55e12e5e 3930#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
3931static inline void
3932hrtick_start_fair(struct rq *rq, struct task_struct *p)
3933{
3934}
a4c2f00f
PZ
3935
3936static inline void hrtick_update(struct rq *rq)
3937{
3938}
8f4d37ec
PZ
3939#endif
3940
bf0f6f24
IM
3941/*
3942 * The enqueue_task method is called before nr_running is
3943 * increased. Here we update the fair scheduling stats and
3944 * then put the task into the rbtree:
3945 */
ea87bb78 3946static void
371fd7e7 3947enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3948{
3949 struct cfs_rq *cfs_rq;
62fb1851 3950 struct sched_entity *se = &p->se;
bf0f6f24
IM
3951
3952 for_each_sched_entity(se) {
62fb1851 3953 if (se->on_rq)
bf0f6f24
IM
3954 break;
3955 cfs_rq = cfs_rq_of(se);
88ec22d3 3956 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
3957
3958 /*
3959 * end evaluation on encountering a throttled cfs_rq
3960 *
3961 * note: in the case of encountering a throttled cfs_rq we will
3962 * post the final h_nr_running increment below.
3963 */
3964 if (cfs_rq_throttled(cfs_rq))
3965 break;
953bfcd1 3966 cfs_rq->h_nr_running++;
85dac906 3967
88ec22d3 3968 flags = ENQUEUE_WAKEUP;
bf0f6f24 3969 }
8f4d37ec 3970
2069dd75 3971 for_each_sched_entity(se) {
0f317143 3972 cfs_rq = cfs_rq_of(se);
953bfcd1 3973 cfs_rq->h_nr_running++;
2069dd75 3974
85dac906
PT
3975 if (cfs_rq_throttled(cfs_rq))
3976 break;
3977
17bc14b7 3978 update_cfs_shares(cfs_rq);
9ee474f5 3979 update_entity_load_avg(se, 1);
2069dd75
PZ
3980 }
3981
18bf2805
BS
3982 if (!se) {
3983 update_rq_runnable_avg(rq, rq->nr_running);
72465447 3984 add_nr_running(rq, 1);
18bf2805 3985 }
a4c2f00f 3986 hrtick_update(rq);
bf0f6f24
IM
3987}
3988
2f36825b
VP
3989static void set_next_buddy(struct sched_entity *se);
3990
bf0f6f24
IM
3991/*
3992 * The dequeue_task method is called before nr_running is
3993 * decreased. We remove the task from the rbtree and
3994 * update the fair scheduling stats:
3995 */
371fd7e7 3996static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
3997{
3998 struct cfs_rq *cfs_rq;
62fb1851 3999 struct sched_entity *se = &p->se;
2f36825b 4000 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
4001
4002 for_each_sched_entity(se) {
4003 cfs_rq = cfs_rq_of(se);
371fd7e7 4004 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
4005
4006 /*
4007 * end evaluation on encountering a throttled cfs_rq
4008 *
4009 * note: in the case of encountering a throttled cfs_rq we will
4010 * post the final h_nr_running decrement below.
4011 */
4012 if (cfs_rq_throttled(cfs_rq))
4013 break;
953bfcd1 4014 cfs_rq->h_nr_running--;
2069dd75 4015
bf0f6f24 4016 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
4017 if (cfs_rq->load.weight) {
4018 /*
4019 * Bias pick_next to pick a task from this cfs_rq, as
4020 * p is sleeping when it is within its sched_slice.
4021 */
4022 if (task_sleep && parent_entity(se))
4023 set_next_buddy(parent_entity(se));
9598c82d
PT
4024
4025 /* avoid re-evaluating load for this entity */
4026 se = parent_entity(se);
bf0f6f24 4027 break;
2f36825b 4028 }
371fd7e7 4029 flags |= DEQUEUE_SLEEP;
bf0f6f24 4030 }
8f4d37ec 4031
2069dd75 4032 for_each_sched_entity(se) {
0f317143 4033 cfs_rq = cfs_rq_of(se);
953bfcd1 4034 cfs_rq->h_nr_running--;
2069dd75 4035
85dac906
PT
4036 if (cfs_rq_throttled(cfs_rq))
4037 break;
4038
17bc14b7 4039 update_cfs_shares(cfs_rq);
9ee474f5 4040 update_entity_load_avg(se, 1);
2069dd75
PZ
4041 }
4042
18bf2805 4043 if (!se) {
72465447 4044 sub_nr_running(rq, 1);
18bf2805
BS
4045 update_rq_runnable_avg(rq, 1);
4046 }
a4c2f00f 4047 hrtick_update(rq);
bf0f6f24
IM
4048}
4049
e7693a36 4050#ifdef CONFIG_SMP
029632fb
PZ
4051/* Used instead of source_load when we know the type == 0 */
4052static unsigned long weighted_cpuload(const int cpu)
4053{
b92486cb 4054 return cpu_rq(cpu)->cfs.runnable_load_avg;
029632fb
PZ
4055}
4056
4057/*
4058 * Return a low guess at the load of a migration-source cpu weighted
4059 * according to the scheduling class and "nice" value.
4060 *
4061 * We want to under-estimate the load of migration sources, to
4062 * balance conservatively.
4063 */
4064static unsigned long source_load(int cpu, int type)
4065{
4066 struct rq *rq = cpu_rq(cpu);
4067 unsigned long total = weighted_cpuload(cpu);
4068
4069 if (type == 0 || !sched_feat(LB_BIAS))
4070 return total;
4071
4072 return min(rq->cpu_load[type-1], total);
4073}
4074
4075/*
4076 * Return a high guess at the load of a migration-target cpu weighted
4077 * according to the scheduling class and "nice" value.
4078 */
4079static unsigned long target_load(int cpu, int type)
4080{
4081 struct rq *rq = cpu_rq(cpu);
4082 unsigned long total = weighted_cpuload(cpu);
4083
4084 if (type == 0 || !sched_feat(LB_BIAS))
4085 return total;
4086
4087 return max(rq->cpu_load[type-1], total);
4088}
4089
ced549fa 4090static unsigned long capacity_of(int cpu)
029632fb 4091{
ced549fa 4092 return cpu_rq(cpu)->cpu_capacity;
029632fb
PZ
4093}
4094
4095static unsigned long cpu_avg_load_per_task(int cpu)
4096{
4097 struct rq *rq = cpu_rq(cpu);
4098 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
b92486cb 4099 unsigned long load_avg = rq->cfs.runnable_load_avg;
029632fb
PZ
4100
4101 if (nr_running)
b92486cb 4102 return load_avg / nr_running;
029632fb
PZ
4103
4104 return 0;
4105}
4106
62470419
MW
4107static void record_wakee(struct task_struct *p)
4108{
4109 /*
 4110 * Rough decay (wiping) for cost saving; don't worry
 4111 * about the boundary, a really active task won't care
 4112 * about the loss.
4113 */
2538d960 4114 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
096aa338 4115 current->wakee_flips >>= 1;
62470419
MW
4116 current->wakee_flip_decay_ts = jiffies;
4117 }
4118
4119 if (current->last_wakee != p) {
4120 current->last_wakee = p;
4121 current->wakee_flips++;
4122 }
4123}
098fb9db 4124
74f8e4b2 4125static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
4126{
4127 struct sched_entity *se = &p->se;
4128 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
4129 u64 min_vruntime;
4130
4131#ifndef CONFIG_64BIT
4132 u64 min_vruntime_copy;
88ec22d3 4133
3fe1698b
PZ
4134 do {
4135 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4136 smp_rmb();
4137 min_vruntime = cfs_rq->min_vruntime;
4138 } while (min_vruntime != min_vruntime_copy);
4139#else
4140 min_vruntime = cfs_rq->min_vruntime;
4141#endif
88ec22d3 4142
3fe1698b 4143 se->vruntime -= min_vruntime;
62470419 4144 record_wakee(p);
88ec22d3
PZ
4145}
4146
bb3469ac 4147#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
4148/*
4149 * effective_load() calculates the load change as seen from the root_task_group
4150 *
4151 * Adding load to a group doesn't make a group heavier, but can cause movement
4152 * of group shares between cpus. Assuming the shares were perfectly aligned one
4153 * can calculate the shift in shares.
cf5f0acf
PZ
4154 *
4155 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4156 * on this @cpu and results in a total addition (subtraction) of @wg to the
4157 * total group weight.
4158 *
4159 * Given a runqueue weight distribution (rw_i) we can compute a shares
4160 * distribution (s_i) using:
4161 *
4162 * s_i = rw_i / \Sum rw_j (1)
4163 *
4164 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4165 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4166 * shares distribution (s_i):
4167 *
4168 * rw_i = { 2, 4, 1, 0 }
4169 * s_i = { 2/7, 4/7, 1/7, 0 }
4170 *
4171 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4172 * task used to run on and the CPU the waker is running on), we need to
4173 * compute the effect of waking a task on either CPU and, in case of a sync
4174 * wakeup, compute the effect of the current task going to sleep.
4175 *
4176 * So for a change of @wl to the local @cpu with an overall group weight change
 4177 * of @wg we can compute the new shares distribution (s'_i) using:
4178 *
4179 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4180 *
4181 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4182 * differences in waking a task to CPU 0. The additional task changes the
4183 * weight and shares distributions like:
4184 *
4185 * rw'_i = { 3, 4, 1, 0 }
4186 * s'_i = { 3/8, 4/8, 1/8, 0 }
4187 *
4188 * We can then compute the difference in effective weight by using:
4189 *
4190 * dw_i = S * (s'_i - s_i) (3)
4191 *
4192 * Where 'S' is the group weight as seen by its parent.
4193 *
4194 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4195 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4196 * 4/7) times the weight of the group.
f5bfb7d9 4197 */
2069dd75 4198static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 4199{
4be9daaa 4200 struct sched_entity *se = tg->se[cpu];
f1d239f7 4201
9722c2da 4202 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
4203 return wl;
4204
4be9daaa 4205 for_each_sched_entity(se) {
cf5f0acf 4206 long w, W;
4be9daaa 4207
977dda7c 4208 tg = se->my_q->tg;
bb3469ac 4209
cf5f0acf
PZ
4210 /*
4211 * W = @wg + \Sum rw_j
4212 */
4213 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 4214
cf5f0acf
PZ
4215 /*
4216 * w = rw_i + @wl
4217 */
4218 w = se->my_q->load.weight + wl;
940959e9 4219
cf5f0acf
PZ
4220 /*
4221 * wl = S * s'_i; see (2)
4222 */
4223 if (W > 0 && w < W)
4224 wl = (w * tg->shares) / W;
977dda7c
PT
4225 else
4226 wl = tg->shares;
940959e9 4227
cf5f0acf
PZ
4228 /*
4229 * Per the above, wl is the new se->load.weight value; since
4230 * those are clipped to [MIN_SHARES, ...) do so now. See
4231 * calc_cfs_shares().
4232 */
977dda7c
PT
4233 if (wl < MIN_SHARES)
4234 wl = MIN_SHARES;
cf5f0acf
PZ
4235
4236 /*
4237 * wl = dw_i = S * (s'_i - s_i); see (3)
4238 */
977dda7c 4239 wl -= se->load.weight;
cf5f0acf
PZ
4240
4241 /*
4242 * Recursively apply this logic to all parent groups to compute
4243 * the final effective load change on the root group. Since
4244 * only the @tg group gets extra weight, all parent groups can
4245 * only redistribute existing shares. @wl is the shift in shares
4246 * resulting from this level per the above.
4247 */
4be9daaa 4248 wg = 0;
4be9daaa 4249 }
bb3469ac 4250
4be9daaa 4251 return wl;
bb3469ac
PZ
4252}
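
The worked example in the comment above can be reproduced with a few lines of standalone C (S = 56 is chosen so the deltas come out integral; this is a model of equations (1)-(3), not kernel code):

#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };  /* runqueue weights from the comment */
	double S = 56;                  /* group weight as seen by the parent */
	double wl = 1, wg = 1;          /* add one task of weight 1 on cpu 0 */
	double sum = rw[0] + rw[1] + rw[2] + rw[3];  /* \Sum rw_j = 7 */
	int i;

	for (i = 0; i < 4; i++) {
		double s_old = rw[i] / sum;                              /* (1) */
		double s_new = (rw[i] + (i == 0 ? wl : 0)) / (sum + wg); /* (2) */
		printf("cpu%d: dw = %g\n", i, S * (s_new - s_old));      /* (3) */
	}
	return 0;
}

This prints dw = 5 for cpu 0 and dw = -4 for cpu 1, matching the 5/56 and -4/56 fractions of the group weight given in the comment.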
4253#else
4be9daaa 4254
58d081b5 4255static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4be9daaa 4256{
83378269 4257 return wl;
bb3469ac 4258}
4be9daaa 4259
bb3469ac
PZ
4260#endif
4261
62470419
MW
4262static int wake_wide(struct task_struct *p)
4263{
7d9ffa89 4264 int factor = this_cpu_read(sd_llc_size);
62470419
MW
4265
4266 /*
 4267 * This is the wakee switching frequency: it can mean many wakees or
 4268 * rapid switching. Using the factor here helps to automatically
 4269 * adjust the degree of looseness, so a bigger node will lead to more pulling.
4270 */
4271 if (p->wakee_flips > factor) {
4272 /*
 4273 * the wakee is somewhat hot and needs a certain amount of cpu
 4274 * resource, so if the waker is far hotter, prefer to leave
 4275 * the wakee alone.
4276 */
4277 if (current->wakee_flips > (factor * p->wakee_flips))
4278 return 1;
4279 }
4280
4281 return 0;
4282}
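
A standalone sketch of the heuristic above (the factor and flip counts are invented): the wakeup goes wide only when the wakee flips more often than the LLC-size factor and the waker flips far more often still.

#include <stdio.h>

int main(void)
{
	int factor = 8;             /* stand-in for sd_llc_size */
	int wakee_flips = 20, waker_flips = 200;

	if (wakee_flips > factor && waker_flips > factor * wakee_flips)
		printf("wake wide: leave the wakee where it is\n");
	else
		printf("affine wakeup allowed\n");
	return 0;
}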
4283
c88d5910 4284static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 4285{
e37b6a7b 4286 s64 this_load, load;
c88d5910 4287 int idx, this_cpu, prev_cpu;
098fb9db 4288 unsigned long tl_per_task;
c88d5910 4289 struct task_group *tg;
83378269 4290 unsigned long weight;
b3137bc8 4291 int balanced;
098fb9db 4292
62470419
MW
4293 /*
4294 * If we wake multiple tasks be careful to not bounce
4295 * ourselves around too much.
4296 */
4297 if (wake_wide(p))
4298 return 0;
4299
c88d5910
PZ
4300 idx = sd->wake_idx;
4301 this_cpu = smp_processor_id();
4302 prev_cpu = task_cpu(p);
4303 load = source_load(prev_cpu, idx);
4304 this_load = target_load(this_cpu, idx);
098fb9db 4305
b3137bc8
MG
4306 /*
4307 * If sync wakeup then subtract the (maximum possible)
4308 * effect of the currently running task from the load
4309 * of the current CPU:
4310 */
83378269
PZ
4311 if (sync) {
4312 tg = task_group(current);
4313 weight = current->se.load.weight;
4314
c88d5910 4315 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
4316 load += effective_load(tg, prev_cpu, 0, -weight);
4317 }
b3137bc8 4318
83378269
PZ
4319 tg = task_group(p);
4320 weight = p->se.load.weight;
b3137bc8 4321
71a29aa7
PZ
4322 /*
4323 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
4324 * due to the sync cause above having dropped this_load to 0, we'll
4325 * always have an imbalance, but there's really nothing you can do
4326 * about that, so that's good too.
71a29aa7
PZ
4327 *
4328 * Otherwise check if either cpus are near enough in load to allow this
4329 * task to be woken on this_cpu.
4330 */
e37b6a7b
PT
4331 if (this_load > 0) {
4332 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
4333
4334 this_eff_load = 100;
ced549fa 4335 this_eff_load *= capacity_of(prev_cpu);
e51fd5e2
PZ
4336 this_eff_load *= this_load +
4337 effective_load(tg, this_cpu, weight, weight);
4338
4339 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
ced549fa 4340 prev_eff_load *= capacity_of(this_cpu);
e51fd5e2
PZ
4341 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4342
4343 balanced = this_eff_load <= prev_eff_load;
4344 } else
4345 balanced = true;
b3137bc8 4346
098fb9db 4347 /*
4ae7d5ce
IM
4348 * If the currently running task will sleep within
4349 * a reasonable amount of time then attract this newly
4350 * woken task:
098fb9db 4351 */
2fb7635c
PZ
4352 if (sync && balanced)
4353 return 1;
098fb9db 4354
41acab88 4355 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
4356 tl_per_task = cpu_avg_load_per_task(this_cpu);
4357
c88d5910
PZ
4358 if (balanced ||
4359 (this_load <= load &&
4360 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
4361 /*
4362 * This domain has SD_WAKE_AFFINE and
4363 * p is cache cold in this domain, and
4364 * there is no bad imbalance.
4365 */
c88d5910 4366 schedstat_inc(sd, ttwu_move_affine);
41acab88 4367 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
4368
4369 return 1;
4370 }
4371 return 0;
4372}
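
Dropping the effective_load() terms for brevity, the core balance test above reduces to a cross-multiplied comparison; a toy model with invented loads and capacities (a sketch, not the full kernel computation):

#include <stdio.h>

int main(void)
{
	long long this_load = 1024, prev_load = 900;
	long long cap_this = 1024, cap_prev = 1024;
	int imbalance_pct = 125;

	/* each side is scaled by the *other* cpu's capacity, and the
	 * previous cpu gets half the imbalance percentage as a margin */
	long long this_eff = 100 * cap_prev * this_load;
	long long prev_eff = (100 + (imbalance_pct - 100) / 2)
			     * cap_this * prev_load;

	printf("balanced: %s\n", this_eff <= prev_eff ? "yes" : "no");
	return 0;
}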
4373
aaee1203
PZ
4374/*
4375 * find_idlest_group finds and returns the least busy CPU group within the
4376 * domain.
4377 */
4378static struct sched_group *
78e7ed53 4379find_idlest_group(struct sched_domain *sd, struct task_struct *p,
c44f2a02 4380 int this_cpu, int sd_flag)
e7693a36 4381{
b3bd3de6 4382 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 4383 unsigned long min_load = ULONG_MAX, this_load = 0;
c44f2a02 4384 int load_idx = sd->forkexec_idx;
aaee1203 4385 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 4386
c44f2a02
VG
4387 if (sd_flag & SD_BALANCE_WAKE)
4388 load_idx = sd->wake_idx;
4389
aaee1203
PZ
4390 do {
4391 unsigned long load, avg_load;
4392 int local_group;
4393 int i;
e7693a36 4394
aaee1203
PZ
4395 /* Skip over this group if it has no CPUs allowed */
4396 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 4397 tsk_cpus_allowed(p)))
aaee1203
PZ
4398 continue;
4399
4400 local_group = cpumask_test_cpu(this_cpu,
4401 sched_group_cpus(group));
4402
4403 /* Tally up the load of all CPUs in the group */
4404 avg_load = 0;
4405
4406 for_each_cpu(i, sched_group_cpus(group)) {
4407 /* Bias balancing toward cpus of our domain */
4408 if (local_group)
4409 load = source_load(i, load_idx);
4410 else
4411 load = target_load(i, load_idx);
4412
4413 avg_load += load;
4414 }
4415
63b2ca30 4416 /* Adjust by relative CPU capacity of the group */
ca8ce3d0 4417 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
aaee1203
PZ
4418
4419 if (local_group) {
4420 this_load = avg_load;
aaee1203
PZ
4421 } else if (avg_load < min_load) {
4422 min_load = avg_load;
4423 idlest = group;
4424 }
4425 } while (group = group->next, group != sd->groups);
4426
4427 if (!idlest || 100*this_load < imbalance*min_load)
4428 return NULL;
4429 return idlest;
4430}
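
The group comparison above normalizes raw load by group capacity and then applies the imbalance margin; a standalone model with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long raw_load = 2048, capacity = 2048; /* a 2-cpu group */
	unsigned long scale = 1024;     /* SCHED_CAPACITY_SCALE */
	unsigned long this_load = 500;  /* local group's normalized load */
	unsigned long min_load;
	int imbalance = 100 + (125 - 100) / 2;  /* imbalance_pct = 125 */

	min_load = raw_load * scale / capacity; /* idlest group: 1024 */
	if (100 * this_load < (unsigned long)imbalance * min_load)
		printf("local group is close enough: stay\n");
	else
		printf("migrate toward the idlest group\n");
	return 0;
}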
4431
4432/*
4433 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4434 */
4435static int
4436find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4437{
4438 unsigned long load, min_load = ULONG_MAX;
4439 int idlest = -1;
4440 int i;
4441
4442 /* Traverse only the allowed CPUs */
fa17b507 4443 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
4444 load = weighted_cpuload(i);
4445
4446 if (load < min_load || (load == min_load && i == this_cpu)) {
4447 min_load = load;
4448 idlest = i;
e7693a36
GH
4449 }
4450 }
4451
aaee1203
PZ
4452 return idlest;
4453}
e7693a36 4454
a50bde51
PZ
4455/*
4456 * Try and locate an idle CPU in the sched_domain.
4457 */
99bd5e2f 4458static int select_idle_sibling(struct task_struct *p, int target)
a50bde51 4459{
99bd5e2f 4460 struct sched_domain *sd;
37407ea7 4461 struct sched_group *sg;
e0a79f52 4462 int i = task_cpu(p);
a50bde51 4463
e0a79f52
MG
4464 if (idle_cpu(target))
4465 return target;
99bd5e2f
SS
4466
4467 /*
e0a79f52 4468 * If the prevous cpu is cache affine and idle, don't be stupid.
99bd5e2f 4469 */
e0a79f52
MG
4470 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4471 return i;
a50bde51
PZ
4472
4473 /*
37407ea7 4474 * Otherwise, iterate the domains and find an elegible idle cpu.
a50bde51 4475 */
518cd623 4476 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 4477 for_each_lower_domain(sd) {
37407ea7
LT
4478 sg = sd->groups;
4479 do {
4480 if (!cpumask_intersects(sched_group_cpus(sg),
4481 tsk_cpus_allowed(p)))
4482 goto next;
4483
4484 for_each_cpu(i, sched_group_cpus(sg)) {
e0a79f52 4485 if (i == target || !idle_cpu(i))
37407ea7
LT
4486 goto next;
4487 }
970e1789 4488
37407ea7
LT
4489 target = cpumask_first_and(sched_group_cpus(sg),
4490 tsk_cpus_allowed(p));
4491 goto done;
4492next:
4493 sg = sg->next;
4494 } while (sg != sd->groups);
4495 }
4496done:
a50bde51
PZ
4497 return target;
4498}
4499
aaee1203 4500/*
de91b9cb
MR
4501 * select_task_rq_fair: Select target runqueue for the waking task in domains
4502 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4503 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
aaee1203 4504 *
de91b9cb
MR
4505 * Balances load by selecting the idlest cpu in the idlest group, or under
4506 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
aaee1203 4507 *
de91b9cb 4508 * Returns the target cpu number.
aaee1203
PZ
4509 *
4510 * preempt must be disabled.
4511 */
0017d735 4512static int
ac66f547 4513select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
aaee1203 4514{
29cd8bae 4515 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910 4516 int cpu = smp_processor_id();
c88d5910 4517 int new_cpu = cpu;
99bd5e2f 4518 int want_affine = 0;
5158f4e4 4519 int sync = wake_flags & WF_SYNC;
c88d5910 4520
29baa747 4521 if (p->nr_cpus_allowed == 1)
76854c7e
MG
4522 return prev_cpu;
4523
a8edd075
KT
4524 if (sd_flag & SD_BALANCE_WAKE)
4525 want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
aaee1203 4526
dce840a0 4527 rcu_read_lock();
aaee1203 4528 for_each_domain(cpu, tmp) {
e4f42888
PZ
4529 if (!(tmp->flags & SD_LOAD_BALANCE))
4530 continue;
4531
fe3bcfe1 4532 /*
99bd5e2f
SS
4533 * If both cpu and prev_cpu are part of this domain,
4534 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 4535 */
99bd5e2f
SS
4536 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4537 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4538 affine_sd = tmp;
29cd8bae 4539 break;
f03542a7 4540 }
29cd8bae 4541
f03542a7 4542 if (tmp->flags & sd_flag)
29cd8bae
PZ
4543 sd = tmp;
4544 }
4545
8bf21433
RR
4546 if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4547 prev_cpu = cpu;
dce840a0 4548
8bf21433 4549 if (sd_flag & SD_BALANCE_WAKE) {
dce840a0
PZ
4550 new_cpu = select_idle_sibling(p, prev_cpu);
4551 goto unlock;
8b911acd 4552 }
e7693a36 4553
aaee1203
PZ
4554 while (sd) {
4555 struct sched_group *group;
c88d5910 4556 int weight;
098fb9db 4557
0763a660 4558 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
4559 sd = sd->child;
4560 continue;
4561 }
098fb9db 4562
c44f2a02 4563 group = find_idlest_group(sd, p, cpu, sd_flag);
aaee1203
PZ
4564 if (!group) {
4565 sd = sd->child;
4566 continue;
4567 }
4ae7d5ce 4568
d7c33c49 4569 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
4570 if (new_cpu == -1 || new_cpu == cpu) {
4571 /* Now try balancing at a lower domain level of cpu */
4572 sd = sd->child;
4573 continue;
e7693a36 4574 }
aaee1203
PZ
4575
4576 /* Now try balancing at a lower domain level of new_cpu */
4577 cpu = new_cpu;
669c55e9 4578 weight = sd->span_weight;
aaee1203
PZ
4579 sd = NULL;
4580 for_each_domain(cpu, tmp) {
669c55e9 4581 if (weight <= tmp->span_weight)
aaee1203 4582 break;
0763a660 4583 if (tmp->flags & sd_flag)
aaee1203
PZ
4584 sd = tmp;
4585 }
4586 /* while loop will break here if sd == NULL */
e7693a36 4587 }
dce840a0
PZ
4588unlock:
4589 rcu_read_unlock();
e7693a36 4590
c88d5910 4591 return new_cpu;
e7693a36 4592}
0a74bef8
PT
4593
4594/*
4595 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4596 * cfs_rq_of(p) references at time of call are still valid and identify the
4597 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4598 * other assumptions, including the state of rq->lock, should be made.
4599 */
4600static void
4601migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4602{
aff3e498
PT
4603 struct sched_entity *se = &p->se;
4604 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4605
4606 /*
4607 * Load tracking: accumulate removed load so that it can be processed
4608 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4609 * to blocked load iff they have a positive decay-count. It can never
4610 * be negative here since on-rq tasks have decay-count == 0.
4611 */
4612 if (se->avg.decay_count) {
4613 se->avg.decay_count = -__synchronize_entity_decay(se);
2509940f
AS
4614 atomic_long_add(se->avg.load_avg_contrib,
4615 &cfs_rq->removed_load);
aff3e498 4616 }
3944a927
BS
4617
4618 /* We have migrated, no longer consider this task hot */
4619 se->exec_start = 0;
0a74bef8 4620}
e7693a36
GH
4621#endif /* CONFIG_SMP */
4622
e52fb7c0
PZ
4623static unsigned long
4624wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
4625{
4626 unsigned long gran = sysctl_sched_wakeup_granularity;
4627
4628 /*
e52fb7c0
PZ
 4629 * Since it's curr that is running now, convert the gran from real-time
 4630 * to virtual-time in its units.
13814d42
MG
4631 *
4632 * By using 'se' instead of 'curr' we penalize light tasks, so
4633 * they get preempted easier. That is, if 'se' < 'curr' then
4634 * the resulting gran will be larger, therefore penalizing the
4635 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4636 * be smaller, again penalizing the lighter task.
4637 *
4638 * This is especially important for buddies when the leftmost
4639 * task is higher priority than the buddy.
0bbd3336 4640 */
f4ad9bd2 4641 return calc_delta_fair(gran, se);
0bbd3336
PZ
4642}
4643
464b7527
PZ
4644/*
4645 * Should 'se' preempt 'curr'.
4646 *
4647 * |s1
4648 * |s2
4649 * |s3
4650 * g
4651 * |<--->|c
4652 *
4653 * w(c, s1) = -1
4654 * w(c, s2) = 0
4655 * w(c, s3) = 1
4656 *
4657 */
4658static int
4659wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4660{
4661 s64 gran, vdiff = curr->vruntime - se->vruntime;
4662
4663 if (vdiff <= 0)
4664 return -1;
4665
e52fb7c0 4666 gran = wakeup_gran(curr, se);
464b7527
PZ
4667 if (vdiff > gran)
4668 return 1;
4669
4670 return 0;
4671}
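
The three cases in the diagram above can be exercised directly (the granularity and vruntimes are invented):

#include <stdio.h>

static int should_preempt(long long curr_vrt, long long se_vrt,
			  long long gran)
{
	long long vdiff = curr_vrt - se_vrt;

	if (vdiff <= 0)
		return -1;      /* s1: wakee not behind curr */
	if (vdiff > gran)
		return 1;       /* s3: far enough behind to preempt */
	return 0;               /* s2: inside the granularity band */
}

int main(void)
{
	long long gran = 1000000;   /* 1ms, invented */

	printf("s1=%d s2=%d s3=%d\n",
	       should_preempt(5000000, 6000000, gran),
	       should_preempt(5000000, 4500000, gran),
	       should_preempt(5000000, 3000000, gran));
	return 0;
}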
4672
02479099
PZ
4673static void set_last_buddy(struct sched_entity *se)
4674{
69c80f3e
VP
4675 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4676 return;
4677
4678 for_each_sched_entity(se)
4679 cfs_rq_of(se)->last = se;
02479099
PZ
4680}
4681
4682static void set_next_buddy(struct sched_entity *se)
4683{
69c80f3e
VP
4684 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4685 return;
4686
4687 for_each_sched_entity(se)
4688 cfs_rq_of(se)->next = se;
02479099
PZ
4689}
4690
ac53db59
RR
4691static void set_skip_buddy(struct sched_entity *se)
4692{
69c80f3e
VP
4693 for_each_sched_entity(se)
4694 cfs_rq_of(se)->skip = se;
ac53db59
RR
4695}
4696
bf0f6f24
IM
4697/*
4698 * Preempt the current task with a newly woken task if needed:
4699 */
5a9b86f6 4700static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
4701{
4702 struct task_struct *curr = rq->curr;
8651a86c 4703 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 4704 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 4705 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 4706 int next_buddy_marked = 0;
bf0f6f24 4707
4ae7d5ce
IM
4708 if (unlikely(se == pse))
4709 return;
4710
5238cdd3 4711 /*
163122b7 4712 * This is possible from callers such as attach_tasks(), in which we
5238cdd3
PT
 4713 * unconditionally check_preempt_curr() after an enqueue (which may have
 4714 * led to a throttle). This both saves work and prevents false
4715 * next-buddy nomination below.
4716 */
4717 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4718 return;
4719
2f36825b 4720 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 4721 set_next_buddy(pse);
2f36825b
VP
4722 next_buddy_marked = 1;
4723 }
57fdc26d 4724
aec0a514
BR
4725 /*
4726 * We can come here with TIF_NEED_RESCHED already set from new task
4727 * wake up path.
5238cdd3
PT
4728 *
4729 * Note: this also catches the edge-case of curr being in a throttled
4730 * group (e.g. via set_curr_task), since update_curr() (in the
4731 * enqueue of curr) will have resulted in resched being set. This
4732 * prevents us from potentially nominating it as a false LAST_BUDDY
4733 * below.
aec0a514
BR
4734 */
4735 if (test_tsk_need_resched(curr))
4736 return;
4737
a2f5c9ab
DH
4738 /* Idle tasks are by definition preempted by non-idle tasks. */
4739 if (unlikely(curr->policy == SCHED_IDLE) &&
4740 likely(p->policy != SCHED_IDLE))
4741 goto preempt;
4742
91c234b4 4743 /*
a2f5c9ab
DH
4744 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4745 * is driven by the tick):
91c234b4 4746 */
8ed92e51 4747 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 4748 return;
bf0f6f24 4749
464b7527 4750 find_matching_se(&se, &pse);
9bbd7374 4751 update_curr(cfs_rq_of(se));
002f128b 4752 BUG_ON(!pse);
2f36825b
VP
4753 if (wakeup_preempt_entity(se, pse) == 1) {
4754 /*
4755 * Bias pick_next to pick the sched entity that is
4756 * triggering this preemption.
4757 */
4758 if (!next_buddy_marked)
4759 set_next_buddy(pse);
3a7e73a2 4760 goto preempt;
2f36825b 4761 }
464b7527 4762
3a7e73a2 4763 return;
a65ac745 4764
3a7e73a2 4765preempt:
8875125e 4766 resched_curr(rq);
3a7e73a2
PZ
4767 /*
4768 * Only set the backward buddy when the current task is still
4769 * on the rq. This can happen when a wakeup gets interleaved
4770 * with schedule on the ->pre_schedule() or idle_balance()
 4771 * point, either of which can drop the rq lock.
4772 *
4773 * Also, during early boot the idle thread is in the fair class,
 4774 * for obvious reasons it's a bad idea to schedule back to it.
4775 */
4776 if (unlikely(!se->on_rq || curr == rq->idle))
4777 return;
4778
4779 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4780 set_last_buddy(se);
bf0f6f24
IM
4781}
4782
606dba2e
PZ
4783static struct task_struct *
4784pick_next_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
4785{
4786 struct cfs_rq *cfs_rq = &rq->cfs;
4787 struct sched_entity *se;
678d5718 4788 struct task_struct *p;
37e117c0 4789 int new_tasks;
678d5718 4790
6e83125c 4791again:
678d5718
PZ
4792#ifdef CONFIG_FAIR_GROUP_SCHED
4793 if (!cfs_rq->nr_running)
38033c37 4794 goto idle;
678d5718 4795
3f1d2a31 4796 if (prev->sched_class != &fair_sched_class)
678d5718
PZ
4797 goto simple;
4798
4799 /*
4800 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
4801 * likely that a next task is from the same cgroup as the current.
4802 *
4803 * Therefore attempt to avoid putting and setting the entire cgroup
4804 * hierarchy, only change the part that actually changes.
4805 */
4806
4807 do {
4808 struct sched_entity *curr = cfs_rq->curr;
4809
4810 /*
4811 * Since we got here without doing put_prev_entity() we also
4812 * have to consider cfs_rq->curr. If it is still a runnable
4813 * entity, update_curr() will update its vruntime, otherwise
4814 * forget we've ever seen it.
4815 */
4816 if (curr && curr->on_rq)
4817 update_curr(cfs_rq);
4818 else
4819 curr = NULL;
4820
4821 /*
4822 * This call to check_cfs_rq_runtime() will do the throttle and
4823 * dequeue its entity in the parent(s). Therefore the 'simple'
4824 * nr_running test will indeed be correct.
4825 */
4826 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
4827 goto simple;
4828
4829 se = pick_next_entity(cfs_rq, curr);
4830 cfs_rq = group_cfs_rq(se);
4831 } while (cfs_rq);
4832
4833 p = task_of(se);
4834
4835 /*
4836 * Since we haven't yet done put_prev_entity and if the selected task
4837 * is a different task than we started out with, try and touch the
4838 * least amount of cfs_rqs.
4839 */
4840 if (prev != p) {
4841 struct sched_entity *pse = &prev->se;
4842
4843 while (!(cfs_rq = is_same_group(se, pse))) {
4844 int se_depth = se->depth;
4845 int pse_depth = pse->depth;
4846
4847 if (se_depth <= pse_depth) {
4848 put_prev_entity(cfs_rq_of(pse), pse);
4849 pse = parent_entity(pse);
4850 }
4851 if (se_depth >= pse_depth) {
4852 set_next_entity(cfs_rq_of(se), se);
4853 se = parent_entity(se);
4854 }
4855 }
4856
4857 put_prev_entity(cfs_rq, pse);
4858 set_next_entity(cfs_rq, se);
4859 }
4860
4861 if (hrtick_enabled(rq))
4862 hrtick_start_fair(rq, p);
4863
4864 return p;
4865simple:
4866 cfs_rq = &rq->cfs;
4867#endif
bf0f6f24 4868
36ace27e 4869 if (!cfs_rq->nr_running)
38033c37 4870 goto idle;
bf0f6f24 4871
3f1d2a31 4872 put_prev_task(rq, prev);
606dba2e 4873
bf0f6f24 4874 do {
678d5718 4875 se = pick_next_entity(cfs_rq, NULL);
f4b6755f 4876 set_next_entity(cfs_rq, se);
bf0f6f24
IM
4877 cfs_rq = group_cfs_rq(se);
4878 } while (cfs_rq);
4879
8f4d37ec 4880 p = task_of(se);
678d5718 4881
b39e66ea
MG
4882 if (hrtick_enabled(rq))
4883 hrtick_start_fair(rq, p);
8f4d37ec
PZ
4884
4885 return p;
38033c37
PZ
4886
4887idle:
e4aa358b 4888 new_tasks = idle_balance(rq);
37e117c0
PZ
4889 /*
4890 * Because idle_balance() releases (and re-acquires) rq->lock, it is
4891 * possible for any higher priority task to appear. In that case we
4892 * must re-start the pick_next_entity() loop.
4893 */
e4aa358b 4894 if (new_tasks < 0)
37e117c0
PZ
4895 return RETRY_TASK;
4896
e4aa358b 4897 if (new_tasks > 0)
38033c37 4898 goto again;
38033c37
PZ
4899
4900 return NULL;
bf0f6f24
IM
4901}
4902
4903/*
4904 * Account for a descheduled task:
4905 */
31ee529c 4906static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
4907{
4908 struct sched_entity *se = &prev->se;
4909 struct cfs_rq *cfs_rq;
4910
4911 for_each_sched_entity(se) {
4912 cfs_rq = cfs_rq_of(se);
ab6cde26 4913 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
4914 }
4915}
4916
ac53db59
RR
4917/*
4918 * sched_yield() is very simple
4919 *
4920 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4921 */
4922static void yield_task_fair(struct rq *rq)
4923{
4924 struct task_struct *curr = rq->curr;
4925 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4926 struct sched_entity *se = &curr->se;
4927
4928 /*
4929 * Are we the only task in the tree?
4930 */
4931 if (unlikely(rq->nr_running == 1))
4932 return;
4933
4934 clear_buddies(cfs_rq, se);
4935
4936 if (curr->policy != SCHED_BATCH) {
4937 update_rq_clock(rq);
4938 /*
4939 * Update run-time statistics of the 'current'.
4940 */
4941 update_curr(cfs_rq);
916671c0
MG
4942 /*
4943 * Tell update_rq_clock() that we've just updated,
 4944 * so we don't do a microscopic update in schedule()
4945 * and double the fastpath cost.
4946 */
4947 rq->skip_clock_update = 1;
ac53db59
RR
4948 }
4949
4950 set_skip_buddy(se);
4951}
4952
d95f4122
MG
4953static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4954{
4955 struct sched_entity *se = &p->se;
4956
5238cdd3
PT
4957 /* throttled hierarchies are not runnable */
4958 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
4959 return false;
4960
4961 /* Tell the scheduler that we'd really like pse to run next. */
4962 set_next_buddy(se);
4963
d95f4122
MG
4964 yield_task_fair(rq);
4965
4966 return true;
4967}
4968
681f3e68 4969#ifdef CONFIG_SMP
bf0f6f24 4970/**************************************************
e9c84cb8
PZ
4971 * Fair scheduling class load-balancing methods.
4972 *
4973 * BASICS
4974 *
4975 * The purpose of load-balancing is to achieve the same basic fairness the
4976 * per-cpu scheduler provides, namely provide a proportional amount of compute
4977 * time to each task. This is expressed in the following equation:
4978 *
4979 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4980 *
4981 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4982 * W_i,0 is defined as:
4983 *
4984 * W_i,0 = \Sum_j w_i,j (2)
4985 *
4986 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4987 * is derived from the nice value as per prio_to_weight[].
4988 *
4989 * The weight average is an exponential decay average of the instantaneous
4990 * weight:
4991 *
4992 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4993 *
ced549fa 4994 * C_i is the compute capacity of cpu i, typically it is the
e9c84cb8
PZ
4995 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4996 * can also include other factors [XXX].
4997 *
4998 * To achieve this balance we define a measure of imbalance which follows
4999 * directly from (1):
5000 *
ced549fa 5001 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
e9c84cb8
PZ
5002 *
 5003 * We then move tasks around to minimize the imbalance. In the continuous
5004 * function space it is obvious this converges, in the discrete case we get
5005 * a few fun cases generally called infeasible weight scenarios.
5006 *
5007 * [XXX expand on:
5008 * - infeasible weights;
5009 * - local vs global optima in the discrete case. ]
5010 *
5011 *
5012 * SCHED DOMAINS
5013 *
5014 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5015 * for all i,j solution, we create a tree of cpus that follows the hardware
5016 * topology where each level pairs two lower groups (or better). This results
5017 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5018 * tree to only the first of the previous level and we decrease the frequency
5019 * of load-balance at each level inv. proportional to the number of cpus in
5020 * the groups.
5021 *
5022 * This yields:
5023 *
5024 * log_2 n 1 n
5025 * \Sum { --- * --- * 2^i } = O(n) (5)
5026 * i = 0 2^i 2^i
5027 * `- size of each group
5028 * | | `- number of cpus doing load-balance
5029 * | `- freq
5030 * `- sum over all levels
5031 *
5032 * Coupled with a limit on how many tasks we can migrate every balance pass,
5033 * this makes (5) the runtime complexity of the balancer.
5034 *
5035 * An important property here is that each CPU is still (indirectly) connected
5036 * to every other cpu in at most O(log n) steps:
5037 *
5038 * The adjacency matrix of the resulting graph is given by:
5039 *
5040 * log_2 n
5041 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5042 * k = 0
5043 *
5044 * And you'll find that:
5045 *
5046 * A^(log_2 n)_i,j != 0 for all i,j (7)
5047 *
5048 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5049 * The task movement gives a factor of O(m), giving a convergence complexity
5050 * of:
5051 *
5052 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5053 *
5054 *
5055 * WORK CONSERVING
5056 *
5057 * In order to avoid CPUs going idle while there's still work to do, new idle
5058 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5059 * tree itself instead of relying on other CPUs to bring it work.
5060 *
5061 * This adds some complexity to both (5) and (8) but it reduces the total idle
5062 * time.
5063 *
5064 * [XXX more?]
5065 *
5066 *
5067 * CGROUPS
5068 *
5069 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5070 *
5071 * s_k,i
5072 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5073 * S_k
5074 *
5075 * Where
5076 *
5077 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5078 *
5079 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5080 *
 5081 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
5082 * property.
5083 *
5084 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5085 * rewrite all of this once again.]
5086 */
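
Equation (5) above can be checked numerically: the per-level work n/2^i sums to just under 2n, i.e. O(n). A standalone sketch:

#include <stdio.h>

int main(void)
{
	int n = 64, i;
	double total = 0;

	/* freq (1/2^i) * cpus balancing (n/2^i) * group size (2^i) */
	for (i = 0; (1 << i) <= n; i++)
		total += (double)n / (1 << i);
	printf("n=%d: total work = %g (< 2n = %d)\n", n, total, 2 * n);
	return 0;
}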
bf0f6f24 5087
ed387b78
HS
5088static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5089
0ec8aa00
PZ
5090enum fbq_type { regular, remote, all };
5091
ddcdf6e7 5092#define LBF_ALL_PINNED 0x01
367456c7 5093#define LBF_NEED_BREAK 0x02
6263322c
PZ
5094#define LBF_DST_PINNED 0x04
5095#define LBF_SOME_PINNED 0x08
ddcdf6e7
PZ
5096
5097struct lb_env {
5098 struct sched_domain *sd;
5099
ddcdf6e7 5100 struct rq *src_rq;
85c1e7da 5101 int src_cpu;
ddcdf6e7
PZ
5102
5103 int dst_cpu;
5104 struct rq *dst_rq;
5105
88b8dac0
SV
5106 struct cpumask *dst_grpmask;
5107 int new_dst_cpu;
ddcdf6e7 5108 enum cpu_idle_type idle;
bd939f45 5109 long imbalance;
b9403130
MW
5110 /* The set of CPUs under consideration for load-balancing */
5111 struct cpumask *cpus;
5112
ddcdf6e7 5113 unsigned int flags;
367456c7
PZ
5114
5115 unsigned int loop;
5116 unsigned int loop_break;
5117 unsigned int loop_max;
0ec8aa00
PZ
5118
5119 enum fbq_type fbq_type;
163122b7 5120 struct list_head tasks;
ddcdf6e7
PZ
5121};
5122
029632fb
PZ
5123/*
5124 * Is this task likely cache-hot:
5125 */
5d5e2b1b 5126static int task_hot(struct task_struct *p, struct lb_env *env)
029632fb
PZ
5127{
5128 s64 delta;
5129
e5673f28
KT
5130 lockdep_assert_held(&env->src_rq->lock);
5131
029632fb
PZ
5132 if (p->sched_class != &fair_sched_class)
5133 return 0;
5134
5135 if (unlikely(p->policy == SCHED_IDLE))
5136 return 0;
5137
5138 /*
5139 * Buddy candidates are cache hot:
5140 */
5d5e2b1b 5141 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
029632fb
PZ
5142 (&p->se == cfs_rq_of(&p->se)->next ||
5143 &p->se == cfs_rq_of(&p->se)->last))
5144 return 1;
5145
5146 if (sysctl_sched_migration_cost == -1)
5147 return 1;
5148 if (sysctl_sched_migration_cost == 0)
5149 return 0;
5150
5d5e2b1b 5151 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
029632fb
PZ
5152
5153 return delta < (s64)sysctl_sched_migration_cost;
5154}
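/*
 * Example: with the default sysctl_sched_migration_cost of 0.5 ms, a task
 * that last began executing 0.2 ms ago on src_rq is considered cache-hot
 * and will only be moved under the more aggressive conditions checked in
 * can_migrate_task() below.
 */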

#ifdef CONFIG_NUMA_BALANCING
/* Returns true if the destination node has incurred more faults */
static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
{
	struct numa_group *numa_group = rcu_dereference(p->numa_group);
	int src_nid, dst_nid;

	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
	    !(env->sd->flags & SD_NUMA)) {
		return false;
	}

	src_nid = cpu_to_node(env->src_cpu);
	dst_nid = cpu_to_node(env->dst_cpu);

	if (src_nid == dst_nid)
		return false;

	if (numa_group) {
		/* Task is already in the group's interleave set. */
		if (node_isset(src_nid, numa_group->active_nodes))
			return false;

		/* Task is moving into the group's interleave set. */
		if (node_isset(dst_nid, numa_group->active_nodes))
			return true;

		return group_faults(p, dst_nid) > group_faults(p, src_nid);
	}

	/* Encourage migration to the preferred node. */
	if (dst_nid == p->numa_preferred_nid)
		return true;

	return task_faults(p, dst_nid) > task_faults(p, src_nid);
}

static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
	struct numa_group *numa_group = rcu_dereference(p->numa_group);
	int src_nid, dst_nid;

	if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
		return false;

	if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
		return false;

	src_nid = cpu_to_node(env->src_cpu);
	dst_nid = cpu_to_node(env->dst_cpu);

	if (src_nid == dst_nid)
		return false;

	if (numa_group) {
		/* Task is moving within/into the group's interleave set. */
		if (node_isset(dst_nid, numa_group->active_nodes))
			return false;

		/* Task is moving out of the group's interleave set. */
		if (node_isset(src_nid, numa_group->active_nodes))
			return true;

		return group_faults(p, dst_nid) < group_faults(p, src_nid);
	}

	/* Migrating away from the preferred node is always bad. */
	if (src_nid == p->numa_preferred_nid)
		return true;

	return task_faults(p, dst_nid) < task_faults(p, src_nid);
}

#else
static inline bool migrate_improves_locality(struct task_struct *p,
					     struct lb_env *env)
{
	return false;
}

static inline bool migrate_degrades_locality(struct task_struct *p,
					     struct lb_env *env)
{
	return false;
}
#endif

/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
	int tsk_cache_hot = 0;

	lockdep_assert_held(&env->src_rq->lock);

	/*
	 * We do not migrate tasks that are:
	 * 1) throttled_lb_pair, or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) running (obviously), or
	 * 4) are cache-hot on their current CPU.
	 */
	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
		return 0;

	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
		int cpu;

		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);

		env->flags |= LBF_SOME_PINNED;

		/*
		 * Remember if this task can be migrated to any other cpu in
		 * our sched_group. We may want to revisit it if we couldn't
		 * meet load balance goals by pulling other tasks on src_cpu.
		 *
		 * Also avoid computing new_dst_cpu if we have already computed
		 * one in the current iteration.
		 */
		if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
			return 0;

		/* Prevent re-selecting dst_cpu via env's cpus */
		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
				env->flags |= LBF_DST_PINNED;
				env->new_dst_cpu = cpu;
				break;
			}
		}

		return 0;
	}

	/* Record that we found at least one task that could run on dst_cpu */
	env->flags &= ~LBF_ALL_PINNED;

	if (task_running(env->src_rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) destination numa is preferred
	 * 2) task is cache cold, or
	 * 3) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, env);
	if (!tsk_cache_hot)
		tsk_cache_hot = migrate_degrades_locality(p, env);

	if (migrate_improves_locality(p, env)) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (!tsk_cache_hot ||
		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {

		if (tsk_cache_hot) {
			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}

		return 1;
	}

	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
	return 0;
}

/*
 * detach_task() -- detach the task for the migration specified in env
 */
static void detach_task(struct task_struct *p, struct lb_env *env)
{
	lockdep_assert_held(&env->src_rq->lock);

	deactivate_task(env->src_rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(p, env->dst_cpu);
}

/*
 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
 * part of active balancing operations within "domain".
 *
 * Returns a task if successful and NULL otherwise.
 */
static struct task_struct *detach_one_task(struct lb_env *env)
{
	struct task_struct *p, *n;

	lockdep_assert_held(&env->src_rq->lock);

	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
		if (!can_migrate_task(p, env))
			continue;

		detach_task(p, env);

		/*
		 * Right now, this is only the second place where
		 * lb_gained[env->idle] is updated (other is detach_tasks)
		 * so we can safely collect stats here rather than
		 * inside detach_tasks().
		 */
		schedstat_inc(env->sd, lb_gained[env->idle]);
		return p;
	}
	return NULL;
}

static const unsigned int sched_nr_migrate_break = 32;

/*
 * detach_tasks() -- tries to detach up to imbalance weighted load from
 * busiest_rq, as part of a balancing operation within domain "sd".
 *
 * Returns number of detached tasks if successful and 0 otherwise.
 */
static int detach_tasks(struct lb_env *env)
{
	struct list_head *tasks = &env->src_rq->cfs_tasks;
	struct task_struct *p;
	unsigned long load;
	int detached = 0;

	lockdep_assert_held(&env->src_rq->lock);

	if (env->imbalance <= 0)
		return 0;

	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct, se.group_node);

		env->loop++;
		/* We've more or less seen every task there is, call it quits */
		if (env->loop > env->loop_max)
			break;

		/* take a breather every nr_migrate tasks */
		if (env->loop > env->loop_break) {
			env->loop_break += sched_nr_migrate_break;
			env->flags |= LBF_NEED_BREAK;
			break;
		}

		if (!can_migrate_task(p, env))
			goto next;

		load = task_h_load(p);

		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
			goto next;

		if ((load / 2) > env->imbalance)
			goto next;

		detach_task(p, env);
		list_add(&p->se.group_node, &env->tasks);

		detached++;
		env->imbalance -= load;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is detached to minimize
		 * the critical section.
		 */
		if (env->idle == CPU_NEWLY_IDLE)
			break;
#endif

		/*
		 * We only want to steal up to the prescribed amount of
		 * weighted load.
		 */
		if (env->imbalance <= 0)
			break;

		continue;
next:
		list_move_tail(&p->se.group_node, tasks);
	}

	/*
	 * Right now, this is one of only two places we collect this stat
	 * so we can safely collect detach_one_task() stats here rather
	 * than inside detach_one_task().
	 */
	schedstat_add(env->sd, lb_gained[env->idle], detached);

	return detached;
}

/*
 * attach_task() -- attach the task detached by detach_task() to its new rq.
 */
static void attach_task(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_held(&rq->lock);

	BUG_ON(task_rq(p) != rq);
	p->on_rq = TASK_ON_RQ_QUEUED;
	activate_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);
}

/*
 * attach_one_task() -- attaches the task returned from detach_one_task() to
 * its new rq.
 */
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
	raw_spin_lock(&rq->lock);
	attach_task(rq, p);
	raw_spin_unlock(&rq->lock);
}

/*
 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
 * new rq.
 */
static void attach_tasks(struct lb_env *env)
{
	struct list_head *tasks = &env->tasks;
	struct task_struct *p;

	raw_spin_lock(&env->dst_rq->lock);

	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct, se.group_node);
		list_del_init(&p->se.group_node);

		attach_task(env->dst_rq, p);
	}

	raw_spin_unlock(&env->dst_rq->lock);
}
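
/*
 * Taken together, the detach/attach pairs above implement the migration
 * protocol: a task is dequeued and marked TASK_ON_RQ_MIGRATING under
 * src_rq->lock, re-targeted with set_task_cpu(), and only re-queued under
 * dst_rq->lock -- both runqueue locks are never held at the same time.
 */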

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
{
	struct sched_entity *se = tg->se[cpu];
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];

	/* throttled entities do not contribute to load */
	if (throttled_hierarchy(cfs_rq))
		return;

	update_cfs_rq_blocked_load(cfs_rq, 1);

	if (se) {
		update_entity_load_avg(se, 1);
		/*
		 * We pivot on our runnable average having decayed to zero for
		 * list removal. This generally implies that all our children
		 * have also been removed (modulo rounding error or bandwidth
		 * control); however, such cases are rare and we can fix these
		 * at enqueue.
		 *
		 * TODO: fix up out-of-order children on enqueue.
		 */
		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
			list_del_leaf_cfs_rq(cfs_rq);
	} else {
		struct rq *rq = rq_of(cfs_rq);
		update_rq_runnable_avg(rq, rq->nr_running);
	}
}

static void update_blocked_averages(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct cfs_rq *cfs_rq;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	update_rq_clock(rq);
	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	for_each_leaf_cfs_rq(rq, cfs_rq) {
		/*
		 * Note: We may want to consider periodically releasing
		 * rq->lock around these updates so that creating many task
		 * groups does not result in continually extending hold time.
		 */
		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
	}

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
	unsigned long now = jiffies;
	unsigned long load;

	if (cfs_rq->last_h_load_update == now)
		return;

	cfs_rq->h_load_next = NULL;
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_load_next = se;
		if (cfs_rq->last_h_load_update == now)
			break;
	}

	if (!se) {
		cfs_rq->h_load = cfs_rq->runnable_load_avg;
		cfs_rq->last_h_load_update = now;
	}

	while ((se = cfs_rq->h_load_next) != NULL) {
		load = cfs_rq->h_load;
		load = div64_ul(load * se->avg.load_avg_contrib,
				cfs_rq->runnable_load_avg + 1);
		cfs_rq = group_cfs_rq(se);
		cfs_rq->h_load = load;
		cfs_rq->last_h_load_update = now;
	}
}

static unsigned long task_h_load(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_cfs_rq_h_load(cfs_rq);
	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
			cfs_rq->runnable_load_avg + 1);
}
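/*
 * Example: if the root cfs_rq carries a runnable load of 2048 of which a
 * group entity contributes 1024, and a task contributes 512 of that
 * group's 1024, its hierarchical load is 512 * 1024 / 1024 = 512 -- a
 * quarter of the rq-wide load. (The +1 in the divisors only guards
 * against division by zero.)
 */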
#else
static inline void update_blocked_averages(int cpu)
{
}

static unsigned long task_h_load(struct task_struct *p)
{
	return p->se.avg.load_avg_contrib;
}
#endif

/********** Helpers for find_busiest_group ************************/

enum group_type {
	group_other = 0,
	group_imbalanced,
	group_overloaded,
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long load_per_task;
	unsigned long group_capacity;
	unsigned int sum_nr_running; /* Nr tasks running in the group */
	unsigned int group_capacity_factor;
	unsigned int idle_cpus;
	unsigned int group_weight;
	enum group_type group_type;
	int group_has_free_capacity;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
};

/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		 during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest;	/* Busiest group in this sd */
	struct sched_group *local;	/* Local group in this sd */
	unsigned long total_load;	/* Total load of all groups in sd */
	unsigned long total_capacity;	/* Total capacity of all groups in sd */
	unsigned long avg_load;	/* Average load across all groups in sd */

	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
	struct sg_lb_stats local_stat;	/* Statistics of the local group */
};

static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
	/*
	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
	 * We must however clear busiest_stat::avg_load because
	 * update_sd_pick_busiest() reads this before assignment.
	 */
	*sds = (struct sd_lb_stats){
		.busiest = NULL,
		.local = NULL,
		.total_load = 0UL,
		.total_capacity = 0UL,
		.busiest_stat = {
			.avg_load = 0UL,
			.sum_nr_running = 0,
			.group_type = group_other,
		},
	};
}

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
 *
 * Return: The load index.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
					enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}

static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}

unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return default_scale_capacity(sd, cpu);
}

static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
{
	return default_scale_smt_capacity(sd, cpu);
}

static unsigned long scale_rt_capacity(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 total, available, age_stamp, avg;
	s64 delta;

	/*
	 * Since we're reading these variables without serialization make sure
	 * we read them once before doing sanity checks on them.
	 */
	age_stamp = ACCESS_ONCE(rq->age_stamp);
	avg = ACCESS_ONCE(rq->rt_avg);

	delta = rq_clock(rq) - age_stamp;
	if (unlikely(delta < 0))
		delta = 0;

	total = sched_avg_period() + delta;

	if (unlikely(total < avg)) {
		/* Ensures that capacity won't end up being negative */
		available = 0;
	} else {
		available = total - avg;
	}

	if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
		total = SCHED_CAPACITY_SCALE;

	total >>= SCHED_CAPACITY_SHIFT;

	return div_u64(available, total);
}
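
/*
 * Example: if rt_avg shows RT/irq time consuming a quarter of the
 * averaging period, available/total works out to roughly
 * 3/4 * SCHED_CAPACITY_SCALE = 768 capacity units left over for CFS.
 */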

static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long capacity = SCHED_CAPACITY_SCALE;
	struct sched_group *sdg = sd->groups;

	if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
		if (sched_feat(ARCH_CAPACITY))
			capacity *= arch_scale_smt_capacity(sd, cpu);
		else
			capacity *= default_scale_smt_capacity(sd, cpu);

		capacity >>= SCHED_CAPACITY_SHIFT;
	}

	sdg->sgc->capacity_orig = capacity;

	if (sched_feat(ARCH_CAPACITY))
		capacity *= arch_scale_freq_capacity(sd, cpu);
	else
		capacity *= default_scale_capacity(sd, cpu);

	capacity >>= SCHED_CAPACITY_SHIFT;

	capacity *= scale_rt_capacity(cpu);
	capacity >>= SCHED_CAPACITY_SHIFT;

	if (!capacity)
		capacity = 1;

	cpu_rq(cpu)->cpu_capacity = capacity;
	sdg->sgc->capacity = capacity;
}

void update_group_capacity(struct sched_domain *sd, int cpu)
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long capacity, capacity_orig;
	unsigned long interval;

	interval = msecs_to_jiffies(sd->balance_interval);
	interval = clamp(interval, 1UL, max_load_balance_interval);
	sdg->sgc->next_update = jiffies + interval;

	if (!child) {
		update_cpu_capacity(sd, cpu);
		return;
	}

	capacity_orig = capacity = 0;

	if (child->flags & SD_OVERLAP) {
		/*
		 * SD_OVERLAP domains cannot assume that child groups
		 * span the current group.
		 */

		for_each_cpu(cpu, sched_group_cpus(sdg)) {
			struct sched_group_capacity *sgc;
			struct rq *rq = cpu_rq(cpu);

			/*
			 * build_sched_domains() -> init_sched_groups_capacity()
			 * gets here before we've attached the domains to the
			 * runqueues.
			 *
			 * Use capacity_of(), which is set irrespective of domains
			 * in update_cpu_capacity().
			 *
			 * This avoids capacity/capacity_orig from being 0 and
			 * causing divide-by-zero issues on boot.
			 *
			 * Runtime updates will correct capacity_orig.
			 */
			if (unlikely(!rq->sd)) {
				capacity_orig += capacity_of(cpu);
				capacity += capacity_of(cpu);
				continue;
			}

			sgc = rq->sd->groups->sgc;
			capacity_orig += sgc->capacity_orig;
			capacity += sgc->capacity;
		}
	} else {
		/*
		 * !SD_OVERLAP domains can assume that child groups
		 * span the current group.
		 */

		group = child->groups;
		do {
			capacity_orig += group->sgc->capacity_orig;
			capacity += group->sgc->capacity;
			group = group->next;
		} while (group != child->groups);
	}

	sdg->sgc->capacity_orig = capacity_orig;
	sdg->sgc->capacity = capacity;
}

/*
 * Try and fix up capacity for tiny siblings, this is needed when
 * things like SD_ASYM_PACKING need f_b_g to select another sibling
 * which on its own isn't powerful enough.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
	 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
	 */
	if (!(sd->flags & SD_SHARE_CPUCAPACITY))
		return 0;

	/*
	 * If ~90% of the cpu_capacity is still there, we're good.
	 */
	if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
		return 1;

	return 0;
}

/*
 * Group imbalance indicates (and tries to solve) the problem where balancing
 * groups is inadequate due to tsk_cpus_allowed() constraints.
 *
 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
 * Something like:
 *
 *	{ 0 1 2 3 } { 4 5 6 7 }
 *	        *     * * *
 *
 * If we were to balance group-wise we'd place two tasks in the first group and
 * two tasks in the second group. Clearly this is undesired as it will overload
 * cpu 3 and leave one of the cpus in the second group unused.
 *
 * The current solution to this issue is detecting the skew in the first group
 * by noticing the lower domain failed to reach balance and had difficulty
 * moving tasks due to affinity constraints.
 *
 * When this is so detected, this group becomes a candidate for busiest; see
 * update_sd_pick_busiest(). And calculate_imbalance() and
 * find_busiest_group() avoid some of the usual balance conditions to allow it
 * to create an effective group imbalance.
 *
 * This is a somewhat tricky proposition since the next run might not find the
 * group imbalance and decide the groups need to be balanced again. A most
 * subtle and fragile situation.
 */

static inline int sg_imbalanced(struct sched_group *group)
{
	return group->sgc->imbalance;
}

/*
 * Compute the group capacity factor.
 *
 * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
 * first dividing out the smt factor and computing the actual number of cores
 * and limit the unit capacity with that.
 */
static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
{
	unsigned int capacity_factor, smt, cpus;
	unsigned int capacity, capacity_orig;

	capacity = group->sgc->capacity;
	capacity_orig = group->sgc->capacity_orig;
	cpus = group->group_weight;

	/* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
	capacity_factor = cpus / smt; /* cores */

	capacity_factor = min_t(unsigned,
		capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
	if (!capacity_factor)
		capacity_factor = fix_small_capacity(env->sd, group);

	return capacity_factor;
}
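
/*
 * Example (assuming the default smt_gain of 1178): an SMT-2 group with
 * group_weight 2 has capacity_orig = 1178, so smt = DIV_ROUND_UP(2048, 1178)
 * = 2 and capacity_factor = 2/2 = 1 -- one real core, not the two "phantom"
 * cores that naively rounding 1178/1024 upward might suggest.
 */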

static enum group_type
group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running > sgs->group_capacity_factor)
		return group_overloaded;

	if (sg_imbalanced(group))
		return group_imbalanced;

	return group_other;
}

/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @sgs: variable to hold the statistics for this group.
 * @overload: Indicate more than one runnable task for any CPU.
 */
static inline void update_sg_lb_stats(struct lb_env *env,
			struct sched_group *group, int load_idx,
			int local_group, struct sg_lb_stats *sgs,
			bool *overload)
{
	unsigned long load;
	int i;

	memset(sgs, 0, sizeof(*sgs));

	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
		struct rq *rq = cpu_rq(i);

		/* Bias balancing toward cpus of our domain */
		if (local_group)
			load = target_load(i, load_idx);
		else
			load = source_load(i, load_idx);

		sgs->group_load += load;
		sgs->sum_nr_running += rq->nr_running;

		if (rq->nr_running > 1)
			*overload = true;

#ifdef CONFIG_NUMA_BALANCING
		sgs->nr_numa_running += rq->nr_numa_running;
		sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
		sgs->sum_weighted_load += weighted_cpuload(i);
		if (idle_cpu(i))
			sgs->idle_cpus++;
	}

	/* Adjust by relative CPU capacity of the group */
	sgs->group_capacity = group->sgc->capacity;
	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;

	if (sgs->sum_nr_running)
		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;

	sgs->group_weight = group->group_weight;
	sgs->group_capacity_factor = sg_capacity_factor(env, group);
	sgs->group_type = group_classify(group, sgs);

	if (sgs->group_capacity_factor > sgs->sum_nr_running)
		sgs->group_has_free_capacity = 1;
}

/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @env: The load balancing environment.
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 *
 * Return: %true if @sg is a busier group than the previously selected
 * busiest group. %false otherwise.
 */
static bool update_sd_pick_busiest(struct lb_env *env,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs)
{
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	if (sgs->group_type > busiest->group_type)
		return true;

	if (sgs->group_type < busiest->group_type)
		return false;

	if (sgs->avg_load <= busiest->avg_load)
		return false;

	/* This is the busiest node in its class. */
	if (!(env->sd->flags & SD_ASYM_PACKING))
		return true;

	/*
	 * ASYM_PACKING needs to move all the work to the lowest
	 * numbered CPUs in the group, therefore mark all groups
	 * higher than ourselves as busy.
	 */
	if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
		if (!sds->busiest)
			return true;

		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
			return true;
	}

	return false;
}

#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running > sgs->nr_numa_running)
		return regular;
	if (sgs->sum_nr_running > sgs->nr_preferred_running)
		return remote;
	return all;
}

static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
	if (rq->nr_running > rq->nr_numa_running)
		return regular;
	if (rq->nr_running > rq->nr_preferred_running)
		return remote;
	return all;
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
	return all;
}

static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
	return regular;
}
#endif /* CONFIG_NUMA_BALANCING */

/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @env: The load balancing environment.
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
	struct sched_domain *child = env->sd->child;
	struct sched_group *sg = env->sd->groups;
	struct sg_lb_stats tmp_sgs;
	int load_idx, prefer_sibling = 0;
	bool overload = false;

	if (child && child->flags & SD_PREFER_SIBLING)
		prefer_sibling = 1;

	load_idx = get_sd_load_idx(env->sd, env->idle);

	do {
		struct sg_lb_stats *sgs = &tmp_sgs;
		int local_group;

		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
		if (local_group) {
			sds->local = sg;
			sgs = &sds->local_stat;

			if (env->idle != CPU_NEWLY_IDLE ||
			    time_after_eq(jiffies, sg->sgc->next_update))
				update_group_capacity(env->sd, env->dst_cpu);
		}

		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
						&overload);

		if (local_group)
			goto next_group;

		/*
		 * In case the child domain prefers tasks go to siblings
		 * first, lower the sg capacity factor to one so that we'll try
		 * and move all the excess tasks away. We lower the capacity
		 * of a group only if the local group has the capacity to fit
		 * these excess tasks, i.e. nr_running < group_capacity_factor. The
		 * extra check prevents the case where you always pull from the
		 * heaviest group when it is already under-utilized (possible
		 * when a single large-weight task outweighs the tasks on the
		 * system).
		 */
		if (prefer_sibling && sds->local &&
		    sds->local_stat.group_has_free_capacity)
			sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);

		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
			sds->busiest = sg;
			sds->busiest_stat = *sgs;
		}

next_group:
		/* Now, start updating sd_lb_stats */
		sds->total_load += sgs->group_load;
		sds->total_capacity += sgs->group_capacity;

		sg = sg->next;
	} while (sg != env->sd->groups);

	if (env->sd->flags & SD_NUMA)
		env->fbq_type = fbq_classify_group(&sds->busiest_stat);

	if (!env->sd->parent) {
		/* update overload indicator if we are at root domain */
		if (env->dst_rq->rd->overload != overload)
			env->dst_rq->rd->overload = overload;
	}

}

/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched domain.
 *
 * This is primarily intended to be used at the sibling level. Some
 * cores like POWER7 prefer to use lower numbered SMT threads. In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle. When in lower SMT modes, the threads will
 * perform better since they share less core resources. Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads. It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on. Here we are
 * assuming a lower CPU number will be equivalent to a lower SMT thread
 * number.
 *
 * Return: 1 when packing is required and a task should be moved to
 * this CPU. The amount of the imbalance is returned in *imbalance.
 *
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed
 */
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
{
	int busiest_cpu;

	if (!(env->sd->flags & SD_ASYM_PACKING))
		return 0;

	if (!sds->busiest)
		return 0;

	busiest_cpu = group_first_cpu(sds->busiest);
	if (env->dst_cpu > busiest_cpu)
		return 0;

	env->imbalance = DIV_ROUND_CLOSEST(
		sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
		SCHED_CAPACITY_SCALE);

	return 1;
}
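
/*
 * Example: on an SMT-4 POWER7 core with SD_ASYM_PACKING, if thread 0 goes
 * idle while the busiest group starts at cpu 2, dst_cpu (0) is below
 * busiest_cpu (2), so we report an imbalance of the busiest group's
 * capacity-weighted load and pull the work down to the lower thread.
 */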

/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 *			amongst the groups of a sched_domain, during
 *			load balancing.
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 */
static inline
void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
	unsigned long tmp, capa_now = 0, capa_move = 0;
	unsigned int imbn = 2;
	unsigned long scaled_busy_load_per_task;
	struct sg_lb_stats *local, *busiest;

	local = &sds->local_stat;
	busiest = &sds->busiest_stat;

	if (!local->sum_nr_running)
		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
	else if (busiest->load_per_task > local->load_per_task)
		imbn = 1;

	scaled_busy_load_per_task =
		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
		busiest->group_capacity;

	if (busiest->avg_load + scaled_busy_load_per_task >=
	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
		env->imbalance = busiest->load_per_task;
		return;
	}

	/*
	 * OK, we don't have enough imbalance to justify moving tasks,
	 * however we may be able to increase total CPU capacity used by
	 * moving them.
	 */

	capa_now += busiest->group_capacity *
			min(busiest->load_per_task, busiest->avg_load);
	capa_now += local->group_capacity *
			min(local->load_per_task, local->avg_load);
	capa_now /= SCHED_CAPACITY_SCALE;

	/* Amount of load we'd subtract */
	if (busiest->avg_load > scaled_busy_load_per_task) {
		capa_move += busiest->group_capacity *
			    min(busiest->load_per_task,
				busiest->avg_load - scaled_busy_load_per_task);
	}

	/* Amount of load we'd add */
	if (busiest->avg_load * busiest->group_capacity <
	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
		tmp = (busiest->avg_load * busiest->group_capacity) /
		      local->group_capacity;
	} else {
		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
		      local->group_capacity;
	}
	capa_move += local->group_capacity *
		    min(local->load_per_task, local->avg_load + tmp);
	capa_move /= SCHED_CAPACITY_SCALE;

	/* Move if we gain throughput */
	if (capa_move > capa_now)
		env->imbalance = busiest->load_per_task;
}

/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
 * @env: load balance environment
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 */
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
	unsigned long max_pull, load_above_capacity = ~0UL;
	struct sg_lb_stats *local, *busiest;

	local = &sds->local_stat;
	busiest = &sds->busiest_stat;

	if (busiest->group_type == group_imbalanced) {
		/*
		 * In the group_imb case we cannot rely on group-wide averages
		 * to ensure cpu-load equilibrium, look at wider averages. XXX
		 */
		busiest->load_per_task =
			min(busiest->load_per_task, sds->avg_load);
	}

	/*
	 * In the presence of smp nice balancing, certain scenarios can have
	 * max load less than avg load (as we skip the groups at or below
	 * its cpu_capacity, while calculating max_load..)
	 */
	if (busiest->avg_load <= sds->avg_load ||
	    local->avg_load >= sds->avg_load) {
		env->imbalance = 0;
		return fix_small_imbalance(env, sds);
	}

	/*
	 * If there aren't any idle cpus, avoid creating some.
	 */
	if (busiest->group_type == group_overloaded &&
	    local->group_type == group_overloaded) {
		load_above_capacity =
			(busiest->sum_nr_running - busiest->group_capacity_factor);

		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
		load_above_capacity /= busiest->group_capacity;
	}

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load. At the same time,
	 * we also don't want to reduce the group load below the group capacity
	 * (so that we can implement power-savings policies etc). Thus we look
	 * for the minimum possible imbalance.
	 */
	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);

	/* How much load to actually move to equalise the imbalance */
	env->imbalance = min(
		max_pull * busiest->group_capacity,
		(sds->avg_load - local->avg_load) * local->group_capacity
	) / SCHED_CAPACITY_SCALE;

	/*
	 * if *imbalance is less than the average load per runnable task
	 * there is no guarantee that any tasks will be moved so we'll have
	 * a think about bumping its value to force at least one task to be
	 * moved
	 */
	if (env->imbalance < busiest->load_per_task)
		return fix_small_imbalance(env, sds);
}
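
/*
 * Example: busiest->avg_load = 1536, local->avg_load = 512 and
 * sds->avg_load = 1024, with all group capacities 1024 and
 * load_above_capacity left unlimited: max_pull = 1536 - 1024 = 512 and
 * the local group sits 512 below average, so env->imbalance =
 * 512 * 1024 / 1024 = 512 -- just enough to bring both groups to the
 * domain average.
 */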

/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance. If there isn't an imbalance, and
 * the user has opted for power-savings, it returns a group whose
 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
 * such a group exists.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
 * @env: The load balancing environment.
 *
 * Return:	- The busiest group if imbalance exists.
 *		- If no imbalance and user has opted for power-savings balance,
 *		   return the least loaded group whose CPUs can be
 *		   put to idle by rebalancing its tasks onto our group.
 */
static struct sched_group *find_busiest_group(struct lb_env *env)
{
	struct sg_lb_stats *local, *busiest;
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(env, &sds);
	local = &sds.local_stat;
	busiest = &sds.busiest_stat;

	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
	    check_asym_packing(env, &sds))
		return sds.busiest;

	/* There is no busy sibling group to pull tasks from */
	if (!sds.busiest || busiest->sum_nr_running == 0)
		goto out_balanced;

	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
						/ sds.total_capacity;

	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
	 * isn't true due to cpus_allowed constraints and the like.
	 */
	if (busiest->group_type == group_imbalanced)
		goto force_balance;

	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
	if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
	    !busiest->group_has_free_capacity)
		goto force_balance;

	/*
	 * If the local group is more busy than the selected busiest group
	 * don't try and pull any tasks.
	 */
	if (local->avg_load >= busiest->avg_load)
		goto out_balanced;

	/*
	 * Don't pull any tasks if this group is already above the domain
	 * average load.
	 */
	if (local->avg_load >= sds.avg_load)
		goto out_balanced;

	if (env->idle == CPU_IDLE) {
		/*
		 * This cpu is idle. If the busiest group doesn't have
		 * more tasks than the number of available cpus and
		 * there is no imbalance between this and the busiest
		 * group with respect to idle cpus, it is balanced.
		 */
		if ((local->idle_cpus < busiest->idle_cpus) &&
		    busiest->sum_nr_running <= busiest->group_weight)
			goto out_balanced;
	} else {
		/*
		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
		 * imbalance_pct to be conservative.
		 */
		if (100 * busiest->avg_load <=
				env->sd->imbalance_pct * local->avg_load)
			goto out_balanced;
	}

force_balance:
	/* Looks like there is an imbalance. Compute it */
	calculate_imbalance(env, &sds);
	return sds.busiest;

out_balanced:
	env->imbalance = 0;
	return NULL;
}

/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *find_busiest_queue(struct lb_env *env,
				     struct sched_group *group)
{
	struct rq *busiest = NULL, *rq;
	unsigned long busiest_load = 0, busiest_capacity = 1;
	int i;

	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
		unsigned long capacity, capacity_factor, wl;
		enum fbq_type rt;

		rq = cpu_rq(i);
		rt = fbq_classify_rq(rq);

		/*
		 * We classify groups/runqueues into three groups:
		 *  - regular: there are !numa tasks
		 *  - remote:  there are numa tasks that run on the 'wrong' node
		 *  - all:     there is no distinction
		 *
		 * In order to avoid migrating ideally placed numa tasks,
		 * ignore those when there are better options.
		 *
		 * If we ignore the actual busiest queue to migrate another
		 * task, the next balance pass can still reduce the busiest
		 * queue by moving tasks around inside the node.
		 *
		 * If we cannot move enough load due to this classification
		 * the next pass will adjust the group classification and
		 * allow migration of more tasks.
		 *
		 * Both cases only affect the total convergence complexity.
		 */
		if (rt > env->fbq_type)
			continue;

		capacity = capacity_of(i);
		capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
		if (!capacity_factor)
			capacity_factor = fix_small_capacity(env->sd, group);

		wl = weighted_cpuload(i);

		/*
		 * When comparing with imbalance, use weighted_cpuload()
		 * which is not scaled with the cpu capacity.
		 */
		if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
			continue;

		/*
		 * For the load comparisons with the other cpus, consider
		 * the weighted_cpuload() scaled with the cpu capacity, so
		 * that the load can be moved away from the cpu that is
		 * potentially running at a lower capacity.
		 *
		 * Thus we're looking for max(wl_i / capacity_i), crosswise
		 * multiplication to rid ourselves of the division works out
		 * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
		 * our previous maximum.
		 */
		if (wl * busiest_capacity > busiest_load * capacity) {
			busiest_load = wl;
			busiest_capacity = capacity;
			busiest = rq;
		}
	}

	return busiest;
}
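
/*
 * Example of the crosswise comparison: wl = 600 on a cpu with capacity 512
 * beats wl = 900 on a full-capacity (1024) cpu, since 600 * 1024 >
 * 900 * 512 -- relative to its capacity, the slower cpu is the more
 * overloaded one.
 */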

/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

/* Working cpumask for load_balance and load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);

static int need_active_balance(struct lb_env *env)
{
	struct sched_domain *sd = env->sd;

	if (env->idle == CPU_NEWLY_IDLE) {

		/*
		 * ASYM_PACKING needs to force migrate tasks from busy but
		 * higher numbered CPUs in order to pack all tasks in the
		 * lowest numbered CPUs.
		 */
		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
			return 1;
	}

	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}

static int active_load_balance_cpu_stop(void *data);

static int should_we_balance(struct lb_env *env)
{
	struct sched_group *sg = env->sd->groups;
	struct cpumask *sg_cpus, *sg_mask;
	int cpu, balance_cpu = -1;

	/*
	 * In the newly idle case, we will allow all the cpus
	 * to do the newly idle load balance.
	 */
	if (env->idle == CPU_NEWLY_IDLE)
		return 1;

	sg_cpus = sched_group_cpus(sg);
	sg_mask = sched_group_mask(sg);
	/* Try to find first idle cpu */
	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
			continue;

		balance_cpu = cpu;
		break;
	}

	if (balance_cpu == -1)
		balance_cpu = group_balance_cpu(sg);

	/*
	 * First idle cpu or the first cpu(busiest) in this sched group
	 * is eligible for doing load balancing at this and above domains.
	 */
	return balance_cpu == env->dst_cpu;
}
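
/*
 * E.g. in a group { 4 5 6 7 } where cpus 5 and 6 are idle, only cpu 5 --
 * the first idle cpu in the group -- proceeds with balancing at this
 * domain; the other cpus return 0 here and give up this round.
 */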

/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *continue_balancing)
{
	int ld_moved, cur_ld_moved, active_balance = 0;
	struct sched_domain *sd_parent = sd->parent;
	struct sched_group *group;
	struct rq *busiest;
	unsigned long flags;
	struct cpumask *cpus = __get_cpu_var(load_balance_mask);

	struct lb_env env = {
		.sd		= sd,
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
		.dst_grpmask	= sched_group_cpus(sd->groups),
		.idle		= idle,
		.loop_break	= sched_nr_migrate_break,
		.cpus		= cpus,
		.fbq_type	= all,
		.tasks		= LIST_HEAD_INIT(env.tasks),
	};

	/*
	 * For NEWLY_IDLE load_balancing, we don't need to consider
	 * other cpus in our group
	 */
	if (idle == CPU_NEWLY_IDLE)
		env.dst_grpmask = NULL;

	cpumask_copy(cpus, cpu_active_mask);

	schedstat_inc(sd, lb_count[idle]);

redo:
	if (!should_we_balance(&env)) {
		*continue_balancing = 0;
		goto out_balanced;
	}

	group = find_busiest_group(&env);
	if (!group) {
		schedstat_inc(sd, lb_nobusyg[idle]);
		goto out_balanced;
	}

	busiest = find_busiest_queue(&env, group);
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
	}

	BUG_ON(busiest == env.dst_rq);

	schedstat_add(sd, lb_imbalance[idle], env.imbalance);

	ld_moved = 0;
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
		env.flags |= LBF_ALL_PINNED;
		env.src_cpu   = busiest->cpu;
		env.src_rq    = busiest;
		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);

more_balance:
		raw_spin_lock_irqsave(&busiest->lock, flags);

		/*
		 * cur_ld_moved - load moved in current iteration
		 * ld_moved    - cumulative load moved across iterations
		 */
		cur_ld_moved = detach_tasks(&env);

		/*
		 * We've detached some tasks from busiest_rq. Every
		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
		 * unlock busiest->lock, and we are able to be sure
		 * that nobody can manipulate the tasks in parallel.
		 * See task_rq_lock() family for the details.
		 */

		raw_spin_unlock(&busiest->lock);

		if (cur_ld_moved) {
			attach_tasks(&env);
			ld_moved += cur_ld_moved;
		}

		local_irq_restore(flags);

		/*
		 * some other cpu did the load balance for us.
		 */
		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
			resched_cpu(env.dst_cpu);

		if (env.flags & LBF_NEED_BREAK) {
			env.flags &= ~LBF_NEED_BREAK;
			goto more_balance;
		}

		/*
		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
		 * us and move them to an alternate dst_cpu in our sched_group
		 * where they can run. The upper limit on how many times we
		 * iterate on the same src_cpu depends on the number of cpus in
		 * our sched_group.
		 *
		 * This changes load balance semantics a bit on who can move
		 * load to a given_cpu. In addition to the given_cpu itself
		 * (or an ilb_cpu acting on its behalf where given_cpu is
		 * nohz-idle), we now have balance_cpu in a position to move
		 * load to given_cpu. In rare situations, this may cause
		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
		 * _independently_ and at the _same_ time to move some load to
		 * given_cpu) causing excess load to be moved to given_cpu.
		 * This however should not happen so much in practice and
		 * moreover subsequent load balance cycles should correct the
		 * excess load moved.
		 */
		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {

			/* Prevent re-selecting dst_cpu via env's cpus */
			cpumask_clear_cpu(env.dst_cpu, env.cpus);

			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
			env.dst_cpu	 = env.new_dst_cpu;
			env.flags	&= ~LBF_DST_PINNED;
			env.loop	 = 0;
			env.loop_break	 = sched_nr_migrate_break;

			/*
			 * Go back to "more_balance" rather than "redo" since we
			 * need to continue with same src_cpu.
			 */
			goto more_balance;
		}

		/*
		 * We failed to reach balance because of affinity.
		 */
		if (sd_parent) {
			int *group_imbalance = &sd_parent->groups->sgc->imbalance;

			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
				*group_imbalance = 1;
			} else if (*group_imbalance)
				*group_imbalance = 0;
		}

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(env.flags & LBF_ALL_PINNED)) {
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus)) {
				env.loop = 0;
				env.loop_break = sched_nr_migrate_break;
				goto redo;
			}
			goto out_balanced;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd, lb_failed[idle]);
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, to pollute the failure counter and cause
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;

		if (need_active_balance(&env)) {
			raw_spin_lock_irqsave(&busiest->lock, flags);

			/*
			 * Don't kick the active_load_balance_cpu_stop
			 * if the curr task on the busiest cpu can't be
			 * moved to this_cpu:
			 */
			if (!cpumask_test_cpu(this_cpu,
					tsk_cpus_allowed(busiest->curr))) {
				raw_spin_unlock_irqrestore(&busiest->lock,
							    flags);
				env.flags |= LBF_ALL_PINNED;
				goto out_one_pinned;
			}

			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work. Once set, it's cleared
			 * only after active load balance is finished.
			 */
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_unlock_irqrestore(&busiest->lock, flags);

			if (active_balance) {
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);
			}

			/*
			 * We've kicked active balancing, reset the failure
			 * counter.
			 */
			sd->nr_balance_failed = sd->cache_nice_tries+1;
		}
	} else
		sd->nr_balance_failed = 0;

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * If we've begun active balancing, start to back off. This
		 * case may not be covered by the all_pinned logic if there
		 * is only 1 task on the busy runqueue (because we don't call
163122b7 6847 * detach_tasks).
1e3c88bd
PZ
6848 */
6849 if (sd->balance_interval < sd->max_interval)
6850 sd->balance_interval *= 2;
6851 }
6852
1e3c88bd
PZ
6853 goto out;
6854
6855out_balanced:
6856 schedstat_inc(sd, lb_balanced[idle]);
6857
6858 sd->nr_balance_failed = 0;
6859
6860out_one_pinned:
6861 /* tune up the balancing interval */
8e45cb54 6862 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 6863 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
6864 (sd->balance_interval < sd->max_interval))
6865 sd->balance_interval *= 2;
6866
46e49b38 6867 ld_moved = 0;
1e3c88bd 6868out:
1e3c88bd
PZ
6869 return ld_moved;
6870}
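
/*
 * Summary of the LBF_* protocol used above (the flags are set/cleared
 * partly in detach_tasks()/can_migrate_task() earlier in this file):
 * LBF_ALL_PINNED is assumed pessimistically and cleared once any task
 * turns out to be movable; LBF_DST_PINNED triggers a retry with an
 * alternate dst_cpu in our group; LBF_SOME_PINNED marks the parent
 * group imbalanced; LBF_NEED_BREAK merely splits a long detach scan
 * to bound lock hold times.
 */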

static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
	unsigned long interval = sd->balance_interval;

	if (cpu_busy)
		interval *= sd->busy_factor;

	/* scale ms to jiffies */
	interval = msecs_to_jiffies(interval);
	interval = clamp(interval, 1UL, max_load_balance_interval);

	return interval;
}
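
/*
 * Illustrative numbers (assumed values, not defined here): with a
 * domain balance_interval of 64ms and a busy_factor of 32, a busy CPU
 * rebalances only every 64 * 32 = 2048ms (scaled to jiffies and
 * clamped to max_load_balance_interval), while an idle CPU keeps the
 * raw 64ms interval.
 */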

static inline void
update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
{
	unsigned long interval, next;

	interval = get_sd_balance_interval(sd, cpu_busy);
	next = sd->last_balance + interval;

	if (time_after(*next_balance, next))
		*next_balance = next;
}

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
static int idle_balance(struct rq *this_rq)
{
	unsigned long next_balance = jiffies + HZ;
	int this_cpu = this_rq->cpu;
	struct sched_domain *sd;
	int pulled_task = 0;
	u64 curr_cost = 0;

	idle_enter_fair(this_rq);

	/*
	 * We must set idle_stamp _before_ calling idle_balance(), such that we
	 * measure the duration of idle_balance() as idle time.
	 */
	this_rq->idle_stamp = rq_clock(this_rq);

	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
	    !this_rq->rd->overload) {
		rcu_read_lock();
		sd = rcu_dereference_check_sched_domain(this_rq->sd);
		if (sd)
			update_next_balance(sd, 0, &next_balance);
		rcu_read_unlock();

		goto out;
	}

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	update_blocked_averages(this_cpu);
	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		int continue_balancing = 1;
		u64 t0, domain_cost;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
			update_next_balance(sd, 0, &next_balance);
			break;
		}

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			t0 = sched_clock_cpu(this_cpu);

			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE,
						   &continue_balancing);

			domain_cost = sched_clock_cpu(this_cpu) - t0;
			if (domain_cost > sd->max_newidle_lb_cost)
				sd->max_newidle_lb_cost = domain_cost;

			curr_cost += domain_cost;
		}

		update_next_balance(sd, 0, &next_balance);

		/*
		 * Stop searching for tasks to pull if there are
		 * now runnable tasks on this rq.
		 */
		if (pulled_task || this_rq->nr_running > 0)
			break;
	}
	rcu_read_unlock();

	raw_spin_lock(&this_rq->lock);

	if (curr_cost > this_rq->max_idle_balance_cost)
		this_rq->max_idle_balance_cost = curr_cost;

	/*
	 * While browsing the domains we released the rq lock; a task could
	 * have been enqueued in the meantime. Since we're not going idle,
	 * pretend we pulled a task.
	 */
	if (this_rq->cfs.h_nr_running && !pulled_task)
		pulled_task = 1;

out:
	/* Move the next balance forward */
	if (time_after(this_rq->next_balance, next_balance))
		this_rq->next_balance = next_balance;

	/* Is there a task of a higher priority class? */
	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
		pulled_task = -1;

	if (pulled_task) {
		idle_exit_fair(this_rq);
		this_rq->idle_stamp = 0;
	}

	return pulled_task;
}
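
/*
 * Return value, as its caller in pick_next_task_fair() interprets it:
 * negative means a task of a higher priority class appeared while the
 * rq lock was dropped (retry the class walk), zero means we stay idle,
 * positive means CFS work was pulled or arrived on its own.
 */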

/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;
	struct task_struct *p = NULL;

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible"; if it occurs, we need to fix it.
	 * Originally reported by Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		struct lb_env env = {
			.sd		= sd,
			.dst_cpu	= target_cpu,
			.dst_rq		= target_rq,
			.src_cpu	= busiest_rq->cpu,
			.src_rq		= busiest_rq,
			.idle		= CPU_IDLE,
		};

		schedstat_inc(sd, alb_count);

		p = detach_one_task(&env);
		if (p)
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
	rcu_read_unlock();
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock(&busiest_rq->lock);

	if (p)
		attach_one_task(target_rq, p);

	local_irq_enable();

	return 0;
}
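
/*
 * Sketch of the active-balance hand-off implemented by the two
 * functions above: load_balance() on an idle cpu repeatedly fails to
 * detach anything, need_active_balance() fires, the idle cpu records
 * itself in busiest->push_cpu and queues this stopper callback on the
 * busy cpu; the stopper then preempts whatever is running there and
 * pushes the current task over to push_cpu.
 */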

static inline int on_null_domain(struct rq *rq)
{
	return unlikely(!rcu_dereference_sched(rq->sd));
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * idle load balancing details
 * - When one of the busy CPUs notices that idle rebalancing may be
 *   needed, it kicks the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
static struct {
	cpumask_var_t idle_cpus_mask;
	atomic_t nr_cpus;
	unsigned long next_balance;	/* in jiffy units */
} nohz ____cacheline_aligned;

static inline int find_new_ilb(void)
{
	int ilb = cpumask_first(nohz.idle_cpus_mask);

	if (ilb < nr_cpu_ids && idle_cpu(ilb))
		return ilb;

	return nr_cpu_ids;
}
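
/*
 * Example: with nohz.idle_cpus_mask = {2,5,7}, cpu 2 is chosen as the
 * idle load balancer; if cpu 2 has meanwhile become busy again, we
 * return nr_cpu_ids and the kick is skipped for this tick rather than
 * scanning for the next idle cpu.
 */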

/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(void)
{
	int ilb_cpu;

	nohz.next_balance++;

	ilb_cpu = find_new_ilb();

	if (ilb_cpu >= nr_cpu_ids)
		return;

	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
		return;
	/*
	 * Use smp_send_reschedule() instead of resched_cpu().
	 * This way we generate a sched IPI on the target cpu which
	 * is idle. And the softirq performing nohz idle load balance
	 * will be run before returning from the IPI.
	 */
	smp_send_reschedule(ilb_cpu);
	return;
}

static inline void nohz_balance_exit_idle(int cpu)
{
	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
		/*
		 * Completely isolated CPUs never set themselves in
		 * nohz.idle_cpus_mask, so we must test.
		 */
		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
			atomic_dec(&nohz.nr_cpus);
		}
		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
}

static inline void set_cpu_sd_state_busy(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_busy, cpu));

	if (!sd || !sd->nohz_idle)
		goto unlock;
	sd->nohz_idle = 0;

	atomic_inc(&sd->groups->sgc->nr_busy_cpus);
unlock:
	rcu_read_unlock();
}

void set_cpu_sd_state_idle(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_busy, cpu));

	if (!sd || sd->nohz_idle)
		goto unlock;
	sd->nohz_idle = 1;

	atomic_dec(&sd->groups->sgc->nr_busy_cpus);
unlock:
	rcu_read_unlock();
}

/*
 * This routine will record that the cpu is going idle with tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void nohz_balance_enter_idle(int cpu)
{
	/*
	 * If this cpu is going down, then nothing needs to be done.
	 */
	if (!cpu_active(cpu))
		return;

	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
		return;

	/*
	 * If we're a completely isolated CPU, we don't play.
	 */
	if (on_null_domain(cpu_rq(cpu)))
		return;

	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
	atomic_inc(&nohz.nr_cpus);
	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
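
/*
 * nohz_balance_enter_idle() runs on the tick-stop path when a CPU goes
 * tickless-idle; the mask and counter it maintains are exactly what
 * nohz_kick_needed() and find_new_ilb() consult from busy CPUs later.
 */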

static int sched_ilb_notifier(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		nohz_balance_exit_idle(smp_processor_id());
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif

static DEFINE_SPINLOCK(balancing);

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
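
/*
 * Example values (assuming HZ=250): a 4-CPU machine gets
 * max_load_balance_interval = 250*4/10 = 100 jiffies (400ms), a 64-CPU
 * machine 1600 jiffies (6.4s), as the ceiling applied by
 * get_sd_balance_interval() above.
 */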

/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in init_sched_domains.
 */
static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
	int continue_balancing = 1;
	int cpu = rq->cpu;
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize, need_decay = 0;
	u64 max_cost = 0;

	update_blocked_averages(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		/*
		 * Decay the newidle max times here because this is a regular
		 * visit to all the domains. Decay ~1% per second.
		 */
		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
			sd->max_newidle_lb_cost =
				(sd->max_newidle_lb_cost * 253) / 256;
			sd->next_decay_max_lb_cost = jiffies + HZ;
			need_decay = 1;
		}
		max_cost += sd->max_newidle_lb_cost;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!continue_balancing) {
			if (need_decay)
				continue;
			break;
		}

		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);

		need_serialize = sd->flags & SD_SERIALIZE;
		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
				/*
				 * The LBF_DST_PINNED logic could have changed
				 * env->dst_cpu, so we can't know our idle
				 * state even if we migrated tasks. Update it.
				 */
				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
			interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}
	}
	if (need_decay) {
		/*
		 * Ensure the rq-wide value also decays but keep it at a
		 * reasonable floor to avoid funnies with rq->avg_idle.
		 */
		rq->max_idle_balance_cost =
			max((u64)sysctl_sched_migration_cost, max_cost);
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}
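
/*
 * Decay arithmetic: one 253/256 step is a ~1.2% reduction applied at
 * most once per second, so max_newidle_lb_cost halves in roughly a
 * minute; one pathologically slow newidle balance therefore stops
 * gating idle_balance() within a few minutes.
 */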

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
	int this_cpu = this_rq->cpu;
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. The next load
		 * balancing owner will pick it up.
		 */
		if (need_resched())
			break;

		rq = cpu_rq(balance_cpu);

		/*
		 * If time for next balance is due,
		 * do the balance.
		 */
		if (time_after_eq(jiffies, rq->next_balance)) {
			raw_spin_lock_irq(&rq->lock);
			update_rq_clock(rq);
			update_idle_cpu_load(rq);
			raw_spin_unlock_irq(&rq->lock);
			rebalance_domains(rq, CPU_IDLE);
		}

		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
end:
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}

/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system.
 *   - This rq has more than one task.
 *   - At any scheduler domain level, this cpu's scheduler group has multiple
 *     busy cpus exceeding the group's capacity.
 *   - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
 *     domain span are idle.
 */
static inline int nohz_kick_needed(struct rq *rq)
{
	unsigned long now = jiffies;
	struct sched_domain *sd;
	struct sched_group_capacity *sgc;
	int nr_busy, cpu = rq->cpu;

	if (unlikely(rq->idle_balance))
		return 0;

	/*
	 * We may have recently been in ticked or tickless idle mode. At the
	 * first busy tick after returning from idle, we update the busy stats.
	 */
	set_cpu_sd_state_busy();
	nohz_balance_exit_idle(cpu);

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return 0;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (rq->nr_running >= 2)
		goto need_kick;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_busy, cpu));

	if (sd) {
		sgc = sd->groups->sgc;
		nr_busy = atomic_read(&sgc->nr_busy_cpus);

		if (nr_busy > 1)
			goto need_kick_unlock;
	}

	sd = rcu_dereference(per_cpu(sd_asym, cpu));

	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
				     sched_domain_span(sd)) < cpu))
		goto need_kick_unlock;

	rcu_read_unlock();
	return 0;

need_kick_unlock:
	rcu_read_unlock();
need_kick:
	return 1;
}
#else
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	struct rq *this_rq = this_rq();
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_rq, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_rq, idle);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (unlikely(on_null_domain(rq)))
		return;

	if (time_after_eq(jiffies, rq->next_balance))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
	if (nohz_kick_needed(rq))
		nohz_balancer_kick();
#endif
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();

	update_runtime_enabled(rq);
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}

	if (numabalancing_enabled)
		task_tick_numa(rq, curr);

	update_rq_runnable_avg(rq, 1);
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	/*
	 * Not only the cpu but also the task_group of the parent might have
	 * been changed after parent->se.parent,cfs_rq were copied to
	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
	 * of child point to valid ones.
	 */
	rcu_read_lock();
	__set_task_cpu(p, this_cpu);
	rcu_read_unlock();

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_curr(rq);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
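
/*
 * The child leaves task_fork_fair() with a relative vruntime (see the
 * min_vruntime subtraction above); the enqueue path on wakeup adds the
 * destination cfs_rq's min_vruntime back, so a child placed on another
 * CPU by wake_up_new_task() doesn't inherit this runqueue's absolute
 * clock.
 */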

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_curr(rq);
	} else
		check_preempt_curr(rq, p, 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it's
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it's queued, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it's !queued, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}

#ifdef CONFIG_SMP
	/*
	 * Remove our load from contribution when we leave sched_fair
	 * and ensure we don't carry in an old decay_count if we
	 * switch back.
	 */
	if (se->avg.decay_count) {
		__synchronize_entity_decay(se);
		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
	}
#endif
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity *se = &p->se;
	/*
	 * Since the real depth could have been changed (only the FAIR
	 * class maintains a depth value), reset depth properly.
	 */
	se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif
	if (!task_on_rq_queued(p))
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_curr(rq);
	else
		check_preempt_curr(rq, p, 0);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}

void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
	atomic64_set(&cfs_rq->decay_counter, 1);
	atomic_long_set(&cfs_rq->removed_load, 0);
#endif
}
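
/*
 * The initial min_vruntime of -(1 << 20) sits about 1ms (2^20 ns)
 * short of u64 wrap-around, so the overflow case in the signed
 * vruntime comparisons is exercised right after boot rather than
 * after centuries of uptime.
 */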

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int queued)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq;

	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !queued, the vruntime of the task has usually NOT been
	 * normalized. But there are some cases where it has already been
	 * normalized:
	 *
	 * - Moving a forked child which is waiting to be woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and is
	 *   waiting to actually be woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
		queued = 1;

	if (!queued)
		se->vruntime -= cfs_rq_of(se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	se->depth = se->parent ? se->parent->depth + 1 : 0;
	if (!queued) {
		cfs_rq = cfs_rq_of(se);
		se->vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
		/*
		 * migrate_task_rq_fair() will have removed our previous
		 * contribution, but we must synchronize for ongoing future
		 * decay.
		 */
		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
#endif
	}
}

void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent) {
		se->cfs_rq = &rq->cfs;
		se->depth = 0;
	} else {
		se->cfs_rq = parent->my_q;
		se->depth = parent->depth + 1;
	}

	se->my_q = cfs_rq;
	/* guarantee group entities always have weight */
	update_load_set(&se->load, NICE_0_LOAD);
	se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);

		/* Possible calls to update_curr() need rq clock */
		update_rq_clock(rq);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
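
/*
 * sched_group_set_shares() is the backend of the cgroup cpu
 * controller's "cpu.shares" file: writing 2048, for example, gives a
 * group twice the weight of the default 1024 (NICE_0_LOAD).
 */
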
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

	return rr_interval;
}
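
/*
 * This backs sched_rr_get_interval(2) for SCHED_OTHER tasks: with the
 * default 6ms latency target, a nice-0 task sharing the CPU with one
 * other nice-0 task would report roughly a 3ms slice, rounded to
 * jiffies.
 */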

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
	nohz.next_balance = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */

}