git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - kernel/sched/fair.c
sched/numa: Set preferred_node based on best_cpu
b2441318 1// SPDX-License-Identifier: GPL-2.0
bf0f6f24
IM
2/*
3 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
4 *
5 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 *
7 * Interactivity improvements by Mike Galbraith
8 * (C) 2007 Mike Galbraith <efault@gmx.de>
9 *
10 * Various enhancements by Dmitry Adamushko.
11 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12 *
13 * Group scheduling enhancements by Srivatsa Vaddagiri
14 * Copyright IBM Corporation, 2007
15 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16 *
17 * Scaled math optimizations by Thomas Gleixner
18 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
21805085
PZ
19 *
20 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
90eec103 21 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
bf0f6f24 22 */
325ea10c 23#include "sched.h"
029632fb
PZ
24
25#include <trace/events/sched.h>
26
bf0f6f24 27/*
21805085 28 * Targeted preemption latency for CPU-bound tasks:
bf0f6f24 29 *
21805085 30 * NOTE: this latency value is not the same as the concept of
d274a4ce
IM
31 * 'timeslice length' - timeslices in CFS are of variable length
32 * and have no persistent notion like in traditional, time-slice
33 * based scheduling concepts.
bf0f6f24 34 *
d274a4ce
IM
35 * (to see the precise effective timeslice length of your workload,
36 * run vmstat and monitor the context-switches (cs) field)
2b4d5b25
IM
37 *
38 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 39 */
2b4d5b25
IM
40unsigned int sysctl_sched_latency = 6000000ULL;
41unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 42
1983a922
CE
43/*
44 * The initial- and re-scaling of tunables is configurable
1983a922
CE
45 *
46 * Options are:
2b4d5b25
IM
47 *
48 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
49 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
50 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
51 *
52 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
1983a922 53 */
2b4d5b25 54enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
1983a922 55
2bd8e6d4 56/*
b2be5e96 57 * Minimal preemption granularity for CPU-bound tasks:
2b4d5b25 58 *
864616ee 59 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 60 */
2b4d5b25
IM
61unsigned int sysctl_sched_min_granularity = 750000ULL;
62unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
21805085
PZ
63
64/*
2b4d5b25 65 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
b2be5e96 66 */
0bf377bb 67static unsigned int sched_nr_latency = 8;
b2be5e96
PZ
68
69/*
2bba22c5 70 * After fork, child runs first. If set to 0 (default) then
b2be5e96 71 * parent will (try to) run first.
21805085 72 */
2bba22c5 73unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 74
bf0f6f24
IM
75/*
76 * SCHED_OTHER wake-up granularity.
bf0f6f24
IM
77 *
78 * This option delays the preemption effects of decoupled workloads
79 * and reduces their over-scheduling. Synchronous workloads will still
80 * have immediate wakeup/sleep latencies.
2b4d5b25
IM
81 *
82 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 83 */
2b4d5b25
IM
84unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
85unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 86
2b4d5b25 87const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
da84d961 88
afe06efd
TC
89#ifdef CONFIG_SMP
90/*
97fb7a0a 91 * For asym packing, by default the lower numbered CPU has higher priority.
afe06efd
TC
92 */
93int __weak arch_asym_cpu_priority(int cpu)
94{
95 return -cpu;
96}
97#endif
98
ec12cb7f
PT
99#ifdef CONFIG_CFS_BANDWIDTH
100/*
101 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
102 * each time a cfs_rq requests quota.
103 *
104 * Note: in the case that the slice exceeds the runtime remaining (either due
105 * to consumption or the quota being specified to be smaller than the slice)
106 * we will always only issue the remaining available time.
107 *
2b4d5b25
IM
108 * (default: 5 msec, units: microseconds)
109 */
110unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
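/*
 * Example: with a 20 msec quota per period and the default 5 msec slice,
 * a cfs_rq can pull runtime from the global pool at most four times per
 * period before the quota is exhausted.
 */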
ec12cb7f
PT
111#endif
112
3273163c
MR
113/*
114 * The margin used when comparing utilization with CPU capacity:
893c5d22 115 * util * margin < capacity * 1024
2b4d5b25
IM
116 *
117 * (default: ~20%)
3273163c 118 */
2b4d5b25 119unsigned int capacity_margin = 1280;
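/*
 * capacity_margin = 1280 means util * 1280 must stay below capacity * 1024,
 * i.e. utilization may use at most ~80% of the CPU capacity before the CPU
 * is considered to lack spare capacity (the ~20% margin above).
 */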
3273163c 120
8527632d
PG
121static inline void update_load_add(struct load_weight *lw, unsigned long inc)
122{
123 lw->weight += inc;
124 lw->inv_weight = 0;
125}
126
127static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
128{
129 lw->weight -= dec;
130 lw->inv_weight = 0;
131}
132
133static inline void update_load_set(struct load_weight *lw, unsigned long w)
134{
135 lw->weight = w;
136 lw->inv_weight = 0;
137}
138
029632fb
PZ
139/*
140 * Increase the granularity value when there are more CPUs,
141 * because with more CPUs the 'effective latency' as visible
142 * to users decreases. But the relationship is not linear,
143 * so pick a second-best guess by going with the log2 of the
144 * number of CPUs.
145 *
146 * This idea comes from the SD scheduler of Con Kolivas:
147 */
58ac93e4 148static unsigned int get_update_sysctl_factor(void)
029632fb 149{
58ac93e4 150 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
029632fb
PZ
151 unsigned int factor;
152
153 switch (sysctl_sched_tunable_scaling) {
154 case SCHED_TUNABLESCALING_NONE:
155 factor = 1;
156 break;
157 case SCHED_TUNABLESCALING_LINEAR:
158 factor = cpus;
159 break;
160 case SCHED_TUNABLESCALING_LOG:
161 default:
162 factor = 1 + ilog2(cpus);
163 break;
164 }
165
166 return factor;
167}
168
169static void update_sysctl(void)
170{
171 unsigned int factor = get_update_sysctl_factor();
172
173#define SET_SYSCTL(name) \
174 (sysctl_##name = (factor) * normalized_sysctl_##name)
175 SET_SYSCTL(sched_min_granularity);
176 SET_SYSCTL(sched_latency);
177 SET_SYSCTL(sched_wakeup_granularity);
178#undef SET_SYSCTL
179}
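/*
 * Example with the defaults above on an 8-CPU system using
 * SCHED_TUNABLESCALING_LOG: factor = 1 + ilog2(8) = 4, so
 * sched_min_granularity becomes 3ms, sched_latency 24ms and
 * sched_wakeup_granularity 4ms. With SCHED_TUNABLESCALING_NONE the
 * normalized (single-CPU) values are used unchanged.
 */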
180
181void sched_init_granularity(void)
182{
183 update_sysctl();
184}
185
9dbdb155 186#define WMULT_CONST (~0U)
029632fb
PZ
187#define WMULT_SHIFT 32
188
9dbdb155
PZ
189static void __update_inv_weight(struct load_weight *lw)
190{
191 unsigned long w;
192
193 if (likely(lw->inv_weight))
194 return;
195
196 w = scale_load_down(lw->weight);
197
198 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
199 lw->inv_weight = 1;
200 else if (unlikely(!w))
201 lw->inv_weight = WMULT_CONST;
202 else
203 lw->inv_weight = WMULT_CONST / w;
204}
029632fb
PZ
205
206/*
9dbdb155
PZ
207 * delta_exec * weight / lw.weight
208 * OR
209 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
210 *
1c3de5e1 211 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
9dbdb155
PZ
212 * we're guaranteed shift stays positive because inv_weight is guaranteed to
213 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
214 *
215 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
216 * weight/lw.weight <= 1, and therefore our shift will also be positive.
029632fb 217 */
9dbdb155 218static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
029632fb 219{
9dbdb155
PZ
220 u64 fact = scale_load_down(weight);
221 int shift = WMULT_SHIFT;
029632fb 222
9dbdb155 223 __update_inv_weight(lw);
029632fb 224
9dbdb155
PZ
225 if (unlikely(fact >> 32)) {
226 while (fact >> 32) {
227 fact >>= 1;
228 shift--;
229 }
029632fb
PZ
230 }
231
9dbdb155
PZ
232 /* hint to use a 32x32->64 mul */
233 fact = (u64)(u32)fact * lw->inv_weight;
029632fb 234
9dbdb155
PZ
235 while (fact >> 32) {
236 fact >>= 1;
237 shift--;
238 }
029632fb 239
9dbdb155 240 return mul_u64_u32_shr(delta_exec, fact, shift);
029632fb
PZ
241}
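/*
 * Example: for delta_exec charged to a nice-0 entity (weight 1024 after
 * scale_load_down) against a runqueue load of 2048, inv_weight is
 * ~0U / 2048 and the result is roughly delta_exec * 1024 / 2048, i.e.
 * half the elapsed time, computed with one 32x32->64 multiply and a
 * shift instead of a 64-bit division.
 */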
242
243
244const struct sched_class fair_sched_class;
a4c2f00f 245
bf0f6f24
IM
246/**************************************************************
247 * CFS operations on generic schedulable entities:
248 */
249
62160e3f 250#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 251
62160e3f 252/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
253static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
254{
62160e3f 255 return cfs_rq->rq;
bf0f6f24
IM
256}
257
8f48894f
PZ
258static inline struct task_struct *task_of(struct sched_entity *se)
259{
9148a3a1 260 SCHED_WARN_ON(!entity_is_task(se));
8f48894f
PZ
261 return container_of(se, struct task_struct, se);
262}
263
b758149c
PZ
264/* Walk up scheduling entities hierarchy */
265#define for_each_sched_entity(se) \
266 for (; se; se = se->parent)
267
268static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
269{
270 return p->se.cfs_rq;
271}
272
273/* runqueue on which this entity is (to be) queued */
274static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
275{
276 return se->cfs_rq;
277}
278
279/* runqueue "owned" by this group */
280static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
281{
282 return grp->my_q;
283}
284
3d4b47b4
PZ
285static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
286{
287 if (!cfs_rq->on_list) {
9c2791f9
VG
288 struct rq *rq = rq_of(cfs_rq);
289 int cpu = cpu_of(rq);
67e86250
PT
290 /*
291 * Ensure we either appear before our parent (if already
292 * enqueued) or force our parent to appear after us when it is
9c2791f9
VG
293 * enqueued. The fact that we always enqueue bottom-up
294 * reduces this to two cases and a special case for the root
295 * cfs_rq. Furthermore, it also means that we will always reset
296 * tmp_alone_branch either when the branch is connected
297 * to a tree or when we reach the beginning of the tree.
67e86250
PT
298 */
299 if (cfs_rq->tg->parent &&
9c2791f9
VG
300 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
301 /*
302 * If parent is already on the list, we add the child
303 * just before. Thanks to circular linked property of
304 * the list, this means to put the child at the tail
305 * of the list that starts by parent.
306 */
307 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
308 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
309 /*
310 * The branch is now connected to its tree so we can
311 * reset tmp_alone_branch to the beginning of the
312 * list.
313 */
314 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
315 } else if (!cfs_rq->tg->parent) {
316 /*
317 * A cfs_rq without a parent should be put
318 * at the tail of the list.
319 */
67e86250 320 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
9c2791f9
VG
321 &rq->leaf_cfs_rq_list);
322 /*
323 * We have reached the beginning of a tree so we can reset
324 * tmp_alone_branch to the beginning of the list.
325 */
326 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
327 } else {
328 /*
329 * The parent has not already been added so we want to
330 * make sure that it will be put after us.
331 * tmp_alone_branch points to the beginning of the branch
332 * where we will add parent.
333 */
334 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
335 rq->tmp_alone_branch);
336 /*
337 * update tmp_alone_branch to point to the new beginning
338 * of the branch.
339 */
340 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
67e86250 341 }
3d4b47b4
PZ
342
343 cfs_rq->on_list = 1;
344 }
345}
346
347static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
348{
349 if (cfs_rq->on_list) {
350 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
351 cfs_rq->on_list = 0;
352 }
353}
354
b758149c 355/* Iterate through all leaf cfs_rq's on a runqueue */
a9e7f654
TH
356#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
357 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
358 leaf_cfs_rq_list)
b758149c
PZ
359
360/* Do the two (enqueued) entities belong to the same group ? */
fed14d45 361static inline struct cfs_rq *
b758149c
PZ
362is_same_group(struct sched_entity *se, struct sched_entity *pse)
363{
364 if (se->cfs_rq == pse->cfs_rq)
fed14d45 365 return se->cfs_rq;
b758149c 366
fed14d45 367 return NULL;
b758149c
PZ
368}
369
370static inline struct sched_entity *parent_entity(struct sched_entity *se)
371{
372 return se->parent;
373}
374
464b7527
PZ
375static void
376find_matching_se(struct sched_entity **se, struct sched_entity **pse)
377{
378 int se_depth, pse_depth;
379
380 /*
381 * A preemption test can be made between sibling entities that are in
382 * the same cfs_rq, i.e. that have a common parent. Walk up the
383 * hierarchy of both tasks until we find their ancestors that are
384 * siblings of a common parent.
385 */
386
387 /* First walk up until both entities are at same depth */
fed14d45
PZ
388 se_depth = (*se)->depth;
389 pse_depth = (*pse)->depth;
464b7527
PZ
390
391 while (se_depth > pse_depth) {
392 se_depth--;
393 *se = parent_entity(*se);
394 }
395
396 while (pse_depth > se_depth) {
397 pse_depth--;
398 *pse = parent_entity(*pse);
399 }
400
401 while (!is_same_group(*se, *pse)) {
402 *se = parent_entity(*se);
403 *pse = parent_entity(*pse);
404 }
405}
406
8f48894f
PZ
407#else /* !CONFIG_FAIR_GROUP_SCHED */
408
409static inline struct task_struct *task_of(struct sched_entity *se)
410{
411 return container_of(se, struct task_struct, se);
412}
bf0f6f24 413
62160e3f
IM
414static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
415{
416 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
417}
418
bf0f6f24 419
b758149c
PZ
420#define for_each_sched_entity(se) \
421 for (; se; se = NULL)
bf0f6f24 422
b758149c 423static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 424{
b758149c 425 return &task_rq(p)->cfs;
bf0f6f24
IM
426}
427
b758149c
PZ
428static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
429{
430 struct task_struct *p = task_of(se);
431 struct rq *rq = task_rq(p);
432
433 return &rq->cfs;
434}
435
436/* runqueue "owned" by this group */
437static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
438{
439 return NULL;
440}
441
3d4b47b4
PZ
442static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
443{
444}
445
446static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
447{
448}
449
a9e7f654
TH
450#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
451 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
b758149c 452
b758149c
PZ
453static inline struct sched_entity *parent_entity(struct sched_entity *se)
454{
455 return NULL;
456}
457
464b7527
PZ
458static inline void
459find_matching_se(struct sched_entity **se, struct sched_entity **pse)
460{
461}
462
b758149c
PZ
463#endif /* CONFIG_FAIR_GROUP_SCHED */
464
6c16a6dc 465static __always_inline
9dbdb155 466void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
bf0f6f24
IM
467
468/**************************************************************
469 * Scheduling class tree data structure manipulation methods:
470 */
471
1bf08230 472static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
02e0431a 473{
1bf08230 474 s64 delta = (s64)(vruntime - max_vruntime);
368059a9 475 if (delta > 0)
1bf08230 476 max_vruntime = vruntime;
02e0431a 477
1bf08230 478 return max_vruntime;
02e0431a
PZ
479}
480
0702e3eb 481static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
482{
483 s64 delta = (s64)(vruntime - min_vruntime);
484 if (delta < 0)
485 min_vruntime = vruntime;
486
487 return min_vruntime;
488}
489
54fdc581
FC
490static inline int entity_before(struct sched_entity *a,
491 struct sched_entity *b)
492{
493 return (s64)(a->vruntime - b->vruntime) < 0;
494}
495
1af5f730
PZ
496static void update_min_vruntime(struct cfs_rq *cfs_rq)
497{
b60205c7 498 struct sched_entity *curr = cfs_rq->curr;
bfb06889 499 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
b60205c7 500
1af5f730
PZ
501 u64 vruntime = cfs_rq->min_vruntime;
502
b60205c7
PZ
503 if (curr) {
504 if (curr->on_rq)
505 vruntime = curr->vruntime;
506 else
507 curr = NULL;
508 }
1af5f730 509
bfb06889
DB
510 if (leftmost) { /* non-empty tree */
511 struct sched_entity *se;
512 se = rb_entry(leftmost, struct sched_entity, run_node);
1af5f730 513
b60205c7 514 if (!curr)
1af5f730
PZ
515 vruntime = se->vruntime;
516 else
517 vruntime = min_vruntime(vruntime, se->vruntime);
518 }
519
1bf08230 520 /* ensure we never gain time by being placed backwards. */
1af5f730 521 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
522#ifndef CONFIG_64BIT
523 smp_wmb();
524 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
525#endif
1af5f730
PZ
526}
527
bf0f6f24
IM
528/*
529 * Enqueue an entity into the rb-tree:
530 */
0702e3eb 531static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 532{
bfb06889 533 struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
bf0f6f24
IM
534 struct rb_node *parent = NULL;
535 struct sched_entity *entry;
bfb06889 536 bool leftmost = true;
bf0f6f24
IM
537
538 /*
539 * Find the right place in the rbtree:
540 */
541 while (*link) {
542 parent = *link;
543 entry = rb_entry(parent, struct sched_entity, run_node);
544 /*
545 * We dont care about collisions. Nodes with
546 * the same key stay together.
547 */
2bd2d6f2 548 if (entity_before(se, entry)) {
bf0f6f24
IM
549 link = &parent->rb_left;
550 } else {
551 link = &parent->rb_right;
bfb06889 552 leftmost = false;
bf0f6f24
IM
553 }
554 }
555
bf0f6f24 556 rb_link_node(&se->run_node, parent, link);
bfb06889
DB
557 rb_insert_color_cached(&se->run_node,
558 &cfs_rq->tasks_timeline, leftmost);
bf0f6f24
IM
559}
560
0702e3eb 561static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 562{
bfb06889 563 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
564}
565
029632fb 566struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 567{
bfb06889 568 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
f4b6755f
PZ
569
570 if (!left)
571 return NULL;
572
573 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
574}
575
ac53db59
RR
576static struct sched_entity *__pick_next_entity(struct sched_entity *se)
577{
578 struct rb_node *next = rb_next(&se->run_node);
579
580 if (!next)
581 return NULL;
582
583 return rb_entry(next, struct sched_entity, run_node);
584}
585
586#ifdef CONFIG_SCHED_DEBUG
029632fb 587struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 588{
bfb06889 589 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
aeb73b04 590
70eee74b
BS
591 if (!last)
592 return NULL;
7eee3e67
IM
593
594 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
595}
596
bf0f6f24
IM
597/**************************************************************
598 * Scheduling class statistics methods:
599 */
600
acb4a848 601int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 602 void __user *buffer, size_t *lenp,
b2be5e96
PZ
603 loff_t *ppos)
604{
8d65af78 605 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
58ac93e4 606 unsigned int factor = get_update_sysctl_factor();
b2be5e96
PZ
607
608 if (ret || !write)
609 return ret;
610
611 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
612 sysctl_sched_min_granularity);
613
acb4a848
CE
614#define WRT_SYSCTL(name) \
615 (normalized_sysctl_##name = sysctl_##name / (factor))
616 WRT_SYSCTL(sched_min_granularity);
617 WRT_SYSCTL(sched_latency);
618 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
619#undef WRT_SYSCTL
620
b2be5e96
PZ
621 return 0;
622}
623#endif
647e7cac 624
a7be37ac 625/*
f9c0b095 626 * delta /= w
a7be37ac 627 */
9dbdb155 628static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
a7be37ac 629{
f9c0b095 630 if (unlikely(se->load.weight != NICE_0_LOAD))
9dbdb155 631 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
632
633 return delta;
634}
635
647e7cac
IM
636/*
637 * The idea is to set a period in which each task runs once.
638 *
532b1858 639 * When there are too many tasks (sched_nr_latency) we have to stretch
647e7cac
IM
640 * this period because otherwise the slices get too small.
641 *
642 * p = (nr <= nl) ? l : l*nr/nl
643 */
4d78e7b6
PZ
644static u64 __sched_period(unsigned long nr_running)
645{
8e2b0bf3
BF
646 if (unlikely(nr_running > sched_nr_latency))
647 return nr_running * sysctl_sched_min_granularity;
648 else
649 return sysctl_sched_latency;
4d78e7b6
PZ
650}
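/*
 * Example with the unscaled defaults (6ms latency, 0.75ms minimum
 * granularity, sched_nr_latency = 8): 4 runnable tasks share the 6ms
 * period, while 16 runnable tasks stretch the period to
 * 16 * 0.75ms = 12ms so that each slice stays at least one granularity.
 */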
651
647e7cac
IM
652/*
653 * We calculate the wall-time slice from the period by taking a part
654 * proportional to the weight.
655 *
f9c0b095 656 * s = p*P[w/rw]
647e7cac 657 */
6d0f0ebd 658static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 659{
0a582440 660 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 661
0a582440 662 for_each_sched_entity(se) {
6272d68c 663 struct load_weight *load;
3104bf03 664 struct load_weight lw;
6272d68c
LM
665
666 cfs_rq = cfs_rq_of(se);
667 load = &cfs_rq->load;
f9c0b095 668
0a582440 669 if (unlikely(!se->on_rq)) {
3104bf03 670 lw = cfs_rq->load;
0a582440
MG
671
672 update_load_add(&lw, se->load.weight);
673 load = &lw;
674 }
9dbdb155 675 slice = __calc_delta(slice, se->load.weight, load);
0a582440
MG
676 }
677 return slice;
bf0f6f24
IM
678}
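/*
 * Example: with two runnable nice-0 tasks the period above is 6ms and
 * each entity's wall-time slice is 6ms * 1024 / 2048 = 3ms.
 */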
679
647e7cac 680/*
660cc00f 681 * We calculate the vruntime slice of a to-be-inserted task.
647e7cac 682 *
f9c0b095 683 * vs = s/w
647e7cac 684 */
f9c0b095 685static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 686{
f9c0b095 687 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
688}
689
a75cdaa9 690#ifdef CONFIG_SMP
c0796298 691#include "pelt.h"
283e2ed3
PZ
692#include "sched-pelt.h"
693
772bd008 694static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
fb13c7ee
MG
695static unsigned long task_h_load(struct task_struct *p);
696
540247fb
YD
697/* Give a new sched_entity start runnable values so its load appears heavy in its infancy */
698void init_entity_runnable_average(struct sched_entity *se)
a75cdaa9 699{
540247fb 700 struct sched_avg *sa = &se->avg;
a75cdaa9 701
f207934f
PZ
702 memset(sa, 0, sizeof(*sa));
703
b5a9b340
VG
704 /*
705 * Tasks are initialized with full load to be seen as heavy tasks until
706 * they get a chance to stabilize to their real load level.
707 * Group entities are initialized with zero load to reflect the fact that
708 * nothing has been attached to the task group yet.
709 */
710 if (entity_is_task(se))
1ea6c46a 711 sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);
1ea6c46a 712
f207934f
PZ
713 se->runnable_weight = se->load.weight;
714
9d89c257 715 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
a75cdaa9 716}
7ea241af 717
7dc603c9 718static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
df217913 719static void attach_entity_cfs_rq(struct sched_entity *se);
7dc603c9 720
2b8c41da
YD
721/*
722 * With new tasks being created, their initial util_avgs are extrapolated
723 * based on the cfs_rq's current util_avg:
724 *
725 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
726 *
727 * However, in many cases, the above util_avg does not give a desired
728 * value. Moreover, the sum of the util_avgs may be divergent, such
729 * as when the series is a harmonic series.
730 *
731 * To solve this problem, we also cap the util_avg of successive tasks to
732 * only 1/2 of the left utilization budget:
733 *
8fe5c5a9 734 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
2b8c41da 735 *
8fe5c5a9 736 * where n denotes the nth task and cpu_scale the CPU capacity.
2b8c41da 737 *
8fe5c5a9
QP
738 * For example, for a CPU with 1024 of capacity, a simplest series from
739 * the beginning would be like:
2b8c41da
YD
740 *
741 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
742 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
743 *
744 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
745 * if util_avg > util_avg_cap.
746 */
747void post_init_entity_util_avg(struct sched_entity *se)
748{
749 struct cfs_rq *cfs_rq = cfs_rq_of(se);
750 struct sched_avg *sa = &se->avg;
8fe5c5a9
QP
751 long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
752 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
2b8c41da
YD
753
754 if (cap > 0) {
755 if (cfs_rq->avg.util_avg != 0) {
756 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
757 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
758
759 if (sa->util_avg > cap)
760 sa->util_avg = cap;
761 } else {
762 sa->util_avg = cap;
763 }
2b8c41da 764 }
7dc603c9
PZ
765
766 if (entity_is_task(se)) {
767 struct task_struct *p = task_of(se);
768 if (p->sched_class != &fair_sched_class) {
769 /*
770 * For !fair tasks do:
771 *
3a123bbb 772 update_cfs_rq_load_avg(now, cfs_rq);
ea14b57e 773 attach_entity_load_avg(cfs_rq, se, 0);
7dc603c9
PZ
774 switched_from_fair(rq, p);
775 *
776 * such that the next switched_to_fair() has the
777 * expected state.
778 */
df217913 779 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
7dc603c9
PZ
780 return;
781 }
782 }
783
df217913 784 attach_entity_cfs_rq(se);
2b8c41da
YD
785}
786
7dc603c9 787#else /* !CONFIG_SMP */
540247fb 788void init_entity_runnable_average(struct sched_entity *se)
a75cdaa9
AS
789{
790}
2b8c41da
YD
791void post_init_entity_util_avg(struct sched_entity *se)
792{
793}
3d30544f
PZ
794static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
795{
796}
7dc603c9 797#endif /* CONFIG_SMP */
a75cdaa9 798
bf0f6f24 799/*
9dbdb155 800 * Update the current task's runtime statistics.
bf0f6f24 801 */
b7cc0896 802static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 803{
429d43bc 804 struct sched_entity *curr = cfs_rq->curr;
78becc27 805 u64 now = rq_clock_task(rq_of(cfs_rq));
9dbdb155 806 u64 delta_exec;
bf0f6f24
IM
807
808 if (unlikely(!curr))
809 return;
810
9dbdb155
PZ
811 delta_exec = now - curr->exec_start;
812 if (unlikely((s64)delta_exec <= 0))
34f28ecd 813 return;
bf0f6f24 814
8ebc91d9 815 curr->exec_start = now;
d842de87 816
9dbdb155
PZ
817 schedstat_set(curr->statistics.exec_max,
818 max(delta_exec, curr->statistics.exec_max));
819
820 curr->sum_exec_runtime += delta_exec;
ae92882e 821 schedstat_add(cfs_rq->exec_clock, delta_exec);
9dbdb155
PZ
822
823 curr->vruntime += calc_delta_fair(delta_exec, curr);
824 update_min_vruntime(cfs_rq);
825
d842de87
SV
826 if (entity_is_task(curr)) {
827 struct task_struct *curtask = task_of(curr);
828
f977bb49 829 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d2cc5ed6 830 cgroup_account_cputime(curtask, delta_exec);
f06febc9 831 account_group_exec_runtime(curtask, delta_exec);
d842de87 832 }
ec12cb7f
PT
833
834 account_cfs_rq_runtime(cfs_rq, delta_exec);
bf0f6f24
IM
835}
836
6e998916
SG
837static void update_curr_fair(struct rq *rq)
838{
839 update_curr(cfs_rq_of(&rq->curr->se));
840}
841
bf0f6f24 842static inline void
5870db5b 843update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 844{
4fa8d299
JP
845 u64 wait_start, prev_wait_start;
846
847 if (!schedstat_enabled())
848 return;
849
850 wait_start = rq_clock(rq_of(cfs_rq));
851 prev_wait_start = schedstat_val(se->statistics.wait_start);
3ea94de1
JP
852
853 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
4fa8d299
JP
854 likely(wait_start > prev_wait_start))
855 wait_start -= prev_wait_start;
3ea94de1 856
2ed41a55 857 __schedstat_set(se->statistics.wait_start, wait_start);
bf0f6f24
IM
858}
859
4fa8d299 860static inline void
3ea94de1
JP
861update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
862{
863 struct task_struct *p;
cb251765
MG
864 u64 delta;
865
4fa8d299
JP
866 if (!schedstat_enabled())
867 return;
868
869 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
3ea94de1
JP
870
871 if (entity_is_task(se)) {
872 p = task_of(se);
873 if (task_on_rq_migrating(p)) {
874 /*
875 * Preserve migrating task's wait time so wait_start
876 * time stamp can be adjusted to accumulate wait time
877 * prior to migration.
878 */
2ed41a55 879 __schedstat_set(se->statistics.wait_start, delta);
3ea94de1
JP
880 return;
881 }
882 trace_sched_stat_wait(p, delta);
883 }
884
2ed41a55 885 __schedstat_set(se->statistics.wait_max,
4fa8d299 886 max(schedstat_val(se->statistics.wait_max), delta));
2ed41a55
PZ
887 __schedstat_inc(se->statistics.wait_count);
888 __schedstat_add(se->statistics.wait_sum, delta);
889 __schedstat_set(se->statistics.wait_start, 0);
3ea94de1 890}
3ea94de1 891
4fa8d299 892static inline void
1a3d027c
JP
893update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
894{
895 struct task_struct *tsk = NULL;
4fa8d299
JP
896 u64 sleep_start, block_start;
897
898 if (!schedstat_enabled())
899 return;
900
901 sleep_start = schedstat_val(se->statistics.sleep_start);
902 block_start = schedstat_val(se->statistics.block_start);
1a3d027c
JP
903
904 if (entity_is_task(se))
905 tsk = task_of(se);
906
4fa8d299
JP
907 if (sleep_start) {
908 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
1a3d027c
JP
909
910 if ((s64)delta < 0)
911 delta = 0;
912
4fa8d299 913 if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
2ed41a55 914 __schedstat_set(se->statistics.sleep_max, delta);
1a3d027c 915
2ed41a55
PZ
916 __schedstat_set(se->statistics.sleep_start, 0);
917 __schedstat_add(se->statistics.sum_sleep_runtime, delta);
1a3d027c
JP
918
919 if (tsk) {
920 account_scheduler_latency(tsk, delta >> 10, 1);
921 trace_sched_stat_sleep(tsk, delta);
922 }
923 }
4fa8d299
JP
924 if (block_start) {
925 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
1a3d027c
JP
926
927 if ((s64)delta < 0)
928 delta = 0;
929
4fa8d299 930 if (unlikely(delta > schedstat_val(se->statistics.block_max)))
2ed41a55 931 __schedstat_set(se->statistics.block_max, delta);
1a3d027c 932
2ed41a55
PZ
933 __schedstat_set(se->statistics.block_start, 0);
934 __schedstat_add(se->statistics.sum_sleep_runtime, delta);
1a3d027c
JP
935
936 if (tsk) {
937 if (tsk->in_iowait) {
2ed41a55
PZ
938 __schedstat_add(se->statistics.iowait_sum, delta);
939 __schedstat_inc(se->statistics.iowait_count);
1a3d027c
JP
940 trace_sched_stat_iowait(tsk, delta);
941 }
942
943 trace_sched_stat_blocked(tsk, delta);
944
945 /*
946 * Blocking time is in units of nanosecs, so shift by
947 * 20 to get a milliseconds-range estimation of the
948 * amount of time that the task spent sleeping:
949 */
950 if (unlikely(prof_on == SLEEP_PROFILING)) {
951 profile_hits(SLEEP_PROFILING,
952 (void *)get_wchan(tsk),
953 delta >> 20);
954 }
955 account_scheduler_latency(tsk, delta >> 10, 0);
956 }
957 }
3ea94de1 958}
3ea94de1 959
bf0f6f24
IM
960/*
961 * Task is being enqueued - update stats:
962 */
cb251765 963static inline void
1a3d027c 964update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 965{
4fa8d299
JP
966 if (!schedstat_enabled())
967 return;
968
bf0f6f24
IM
969 /*
970 * Are we enqueueing a waiting task? (for current tasks
971 * a dequeue/enqueue event is a NOP)
972 */
429d43bc 973 if (se != cfs_rq->curr)
5870db5b 974 update_stats_wait_start(cfs_rq, se);
1a3d027c
JP
975
976 if (flags & ENQUEUE_WAKEUP)
977 update_stats_enqueue_sleeper(cfs_rq, se);
bf0f6f24
IM
978}
979
bf0f6f24 980static inline void
cb251765 981update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 982{
4fa8d299
JP
983
984 if (!schedstat_enabled())
985 return;
986
bf0f6f24
IM
987 /*
988 * Mark the end of the wait period if dequeueing a
989 * waiting task:
990 */
429d43bc 991 if (se != cfs_rq->curr)
9ef0a961 992 update_stats_wait_end(cfs_rq, se);
cb251765 993
4fa8d299
JP
994 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
995 struct task_struct *tsk = task_of(se);
cb251765 996
4fa8d299 997 if (tsk->state & TASK_INTERRUPTIBLE)
2ed41a55 998 __schedstat_set(se->statistics.sleep_start,
4fa8d299
JP
999 rq_clock(rq_of(cfs_rq)));
1000 if (tsk->state & TASK_UNINTERRUPTIBLE)
2ed41a55 1001 __schedstat_set(se->statistics.block_start,
4fa8d299 1002 rq_clock(rq_of(cfs_rq)));
cb251765 1003 }
cb251765
MG
1004}
1005
bf0f6f24
IM
1006/*
1007 * We are picking a new current task - update its stats:
1008 */
1009static inline void
79303e9e 1010update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
1011{
1012 /*
1013 * We are starting a new run period:
1014 */
78becc27 1015 se->exec_start = rq_clock_task(rq_of(cfs_rq));
bf0f6f24
IM
1016}
1017
bf0f6f24
IM
1018/**************************************************
1019 * Scheduling class queueing methods:
1020 */
1021
cbee9f88
PZ
1022#ifdef CONFIG_NUMA_BALANCING
1023/*
598f0ec0
MG
1024 * Approximate time to scan a full NUMA task in ms. The task scan period is
1025 * calculated based on the task's virtual memory size and
1026 * numa_balancing_scan_size.
cbee9f88 1027 */
598f0ec0
MG
1028unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1029unsigned int sysctl_numa_balancing_scan_period_max = 60000;
6e5fb223
PZ
1030
1031/* Portion of address space to scan in MB */
1032unsigned int sysctl_numa_balancing_scan_size = 256;
cbee9f88 1033
4b96a29b
PZ
1034/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1035unsigned int sysctl_numa_balancing_scan_delay = 1000;
1036
b5dd77c8
RR
1037struct numa_group {
1038 atomic_t refcount;
1039
1040 spinlock_t lock; /* nr_tasks, tasks */
1041 int nr_tasks;
1042 pid_t gid;
1043 int active_nodes;
1044
1045 struct rcu_head rcu;
1046 unsigned long total_faults;
1047 unsigned long max_faults_cpu;
1048 /*
1049 * Faults_cpu is used to decide whether memory should move
1050 * towards the CPU. As a consequence, these stats are weighted
1051 * more by CPU use than by memory faults.
1052 */
1053 unsigned long *faults_cpu;
1054 unsigned long faults[0];
1055};
1056
1057static inline unsigned long group_faults_priv(struct numa_group *ng);
1058static inline unsigned long group_faults_shared(struct numa_group *ng);
1059
598f0ec0
MG
1060static unsigned int task_nr_scan_windows(struct task_struct *p)
1061{
1062 unsigned long rss = 0;
1063 unsigned long nr_scan_pages;
1064
1065 /*
1066 * Calculations are based on RSS, as non-present and empty pages are
1067 * skipped by the PTE scanner and NUMA hinting faults should be trapped
1068 * based on resident pages.
1069 */
1070 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1071 rss = get_mm_rss(p->mm);
1072 if (!rss)
1073 rss = nr_scan_pages;
1074
1075 rss = round_up(rss, nr_scan_pages);
1076 return rss / nr_scan_pages;
1077}
1078
1079/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1080#define MAX_SCAN_WINDOW 2560
1081
1082static unsigned int task_scan_min(struct task_struct *p)
1083{
316c1608 1084 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
598f0ec0
MG
1085 unsigned int scan, floor;
1086 unsigned int windows = 1;
1087
64192658
KT
1088 if (scan_size < MAX_SCAN_WINDOW)
1089 windows = MAX_SCAN_WINDOW / scan_size;
598f0ec0
MG
1090 floor = 1000 / windows;
1091
1092 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1093 return max_t(unsigned int, floor, scan);
1094}
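/*
 * Example with the defaults: scan_size = 256MB gives windows = 2560/256
 * = 10 and floor = 100ms. A task with 1GB of RSS spans
 * task_nr_scan_windows() = 4 windows, so its minimum scan period is
 * max(100ms, 1000ms / 4) = 250ms.
 */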
1095
b5dd77c8
RR
1096static unsigned int task_scan_start(struct task_struct *p)
1097{
1098 unsigned long smin = task_scan_min(p);
1099 unsigned long period = smin;
1100
1101 /* Scale the maximum scan period with the amount of shared memory. */
1102 if (p->numa_group) {
1103 struct numa_group *ng = p->numa_group;
1104 unsigned long shared = group_faults_shared(ng);
1105 unsigned long private = group_faults_priv(ng);
1106
1107 period *= atomic_read(&ng->refcount);
1108 period *= shared + 1;
1109 period /= private + shared + 1;
1110 }
1111
1112 return max(smin, period);
1113}
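/*
 * Example: a numa_group of two tasks (refcount == 2) with 300 shared and
 * 100 private faults scales the period by 2 * 301 / 401, so scanning
 * starts roughly 1.5x slower than task_scan_min(); a group with only
 * private faults keeps the minimum period.
 */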
1114
598f0ec0
MG
1115static unsigned int task_scan_max(struct task_struct *p)
1116{
b5dd77c8
RR
1117 unsigned long smin = task_scan_min(p);
1118 unsigned long smax;
598f0ec0
MG
1119
1120 /* Watch for min being lower than max due to floor calculations */
1121 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
b5dd77c8
RR
1122
1123 /* Scale the maximum scan period with the amount of shared memory. */
1124 if (p->numa_group) {
1125 struct numa_group *ng = p->numa_group;
1126 unsigned long shared = group_faults_shared(ng);
1127 unsigned long private = group_faults_priv(ng);
1128 unsigned long period = smax;
1129
1130 period *= atomic_read(&ng->refcount);
1131 period *= shared + 1;
1132 period /= private + shared + 1;
1133
1134 smax = max(smax, period);
1135 }
1136
598f0ec0
MG
1137 return max(smin, smax);
1138}
1139
13784475
MG
1140void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1141{
1142 int mm_users = 0;
1143 struct mm_struct *mm = p->mm;
1144
1145 if (mm) {
1146 mm_users = atomic_read(&mm->mm_users);
1147 if (mm_users == 1) {
1148 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1149 mm->numa_scan_seq = 0;
1150 }
1151 }
1152 p->node_stamp = 0;
1153 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
1154 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1155 p->numa_work.next = &p->numa_work;
1156 p->numa_faults = NULL;
1157 p->numa_group = NULL;
1158 p->last_task_numa_placement = 0;
1159 p->last_sum_exec_runtime = 0;
1160
1161 /* New address space, reset the preferred nid */
1162 if (!(clone_flags & CLONE_VM)) {
1163 p->numa_preferred_nid = -1;
1164 return;
1165 }
1166
1167 /*
1168 * New thread: keep the existing numa_preferred_nid, which should already
1169 * have been copied by arch_dup_task_struct, but stagger when scans start.
1170 */
1171 if (mm) {
1172 unsigned int delay;
1173
1174 delay = min_t(unsigned int, task_scan_max(current),
1175 current->numa_scan_period * mm_users * NSEC_PER_MSEC);
1176 delay += 2 * TICK_NSEC;
1177 p->node_stamp = delay;
1178 }
1179}
1180
0ec8aa00
PZ
1181static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1182{
1183 rq->nr_numa_running += (p->numa_preferred_nid != -1);
1184 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1185}
1186
1187static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1188{
1189 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1190 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1191}
1192
be1e4e76
RR
1193/* Shared or private faults. */
1194#define NR_NUMA_HINT_FAULT_TYPES 2
1195
1196/* Memory and CPU locality */
1197#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1198
1199/* Averaged statistics, and temporary buffers. */
1200#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1201
e29cf08b
MG
1202pid_t task_numa_group_id(struct task_struct *p)
1203{
1204 return p->numa_group ? p->numa_group->gid : 0;
1205}
1206
44dba3d5 1207/*
97fb7a0a 1208 * The averaged statistics, shared & private, memory & CPU,
44dba3d5
IM
1209 * occupy the first half of the array. The second half of the
1210 * array is for current counters, which are averaged into the
1211 * first set by task_numa_placement.
1212 */
1213static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
ac8e895b 1214{
44dba3d5 1215 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
ac8e895b
MG
1216}
1217
1218static inline unsigned long task_faults(struct task_struct *p, int nid)
1219{
44dba3d5 1220 if (!p->numa_faults)
ac8e895b
MG
1221 return 0;
1222
44dba3d5
IM
1223 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1224 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
ac8e895b
MG
1225}
1226
83e1d2cd
MG
1227static inline unsigned long group_faults(struct task_struct *p, int nid)
1228{
1229 if (!p->numa_group)
1230 return 0;
1231
44dba3d5
IM
1232 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1233 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
83e1d2cd
MG
1234}
1235
20e07dea
RR
1236static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1237{
44dba3d5
IM
1238 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1239 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
20e07dea
RR
1240}
1241
b5dd77c8
RR
1242static inline unsigned long group_faults_priv(struct numa_group *ng)
1243{
1244 unsigned long faults = 0;
1245 int node;
1246
1247 for_each_online_node(node) {
1248 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1249 }
1250
1251 return faults;
1252}
1253
1254static inline unsigned long group_faults_shared(struct numa_group *ng)
1255{
1256 unsigned long faults = 0;
1257 int node;
1258
1259 for_each_online_node(node) {
1260 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
1261 }
1262
1263 return faults;
1264}
1265
4142c3eb
RR
1266/*
1267 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1268 * considered part of a numa group's pseudo-interleaving set. Migrations
1269 * between these nodes are slowed down, to allow things to settle down.
1270 */
1271#define ACTIVE_NODE_FRACTION 3
1272
1273static bool numa_is_active_node(int nid, struct numa_group *ng)
1274{
1275 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1276}
1277
6c6b1193
RR
1278/* Handle placement on systems where not all nodes are directly connected. */
1279static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1280 int maxdist, bool task)
1281{
1282 unsigned long score = 0;
1283 int node;
1284
1285 /*
1286 * All nodes are directly connected, and the same distance
1287 * from each other. No need for fancy placement algorithms.
1288 */
1289 if (sched_numa_topology_type == NUMA_DIRECT)
1290 return 0;
1291
1292 /*
1293 * This code is called for each node, introducing N^2 complexity,
1294 * which should be ok given the number of nodes rarely exceeds 8.
1295 */
1296 for_each_online_node(node) {
1297 unsigned long faults;
1298 int dist = node_distance(nid, node);
1299
1300 /*
1301 * The furthest away nodes in the system are not interesting
1302 * for placement; nid was already counted.
1303 */
1304 if (dist == sched_max_numa_distance || node == nid)
1305 continue;
1306
1307 /*
1308 * On systems with a backplane NUMA topology, compare groups
1309 * of nodes, and move tasks towards the group with the most
1310 * memory accesses. When comparing two nodes at distance
1311 * "hoplimit", only nodes closer by than "hoplimit" are part
1312 * of each group. Skip other nodes.
1313 */
1314 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1315 dist > maxdist)
1316 continue;
1317
1318 /* Add up the faults from nearby nodes. */
1319 if (task)
1320 faults = task_faults(p, node);
1321 else
1322 faults = group_faults(p, node);
1323
1324 /*
1325 * On systems with a glueless mesh NUMA topology, there are
1326 * no fixed "groups of nodes". Instead, nodes that are not
1327 * directly connected bounce traffic through intermediate
1328 * nodes; a numa_group can occupy any set of nodes.
1329 * The further away a node is, the less the faults count.
1330 * This seems to result in good task placement.
1331 */
1332 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1333 faults *= (sched_max_numa_distance - dist);
1334 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1335 }
1336
1337 score += faults;
1338 }
1339
1340 return score;
1341}
1342
83e1d2cd
MG
1343/*
1344 * These return the fraction of accesses done by a particular task, or
1345 * task group, on a particular numa node. The group weight is given a
1346 * larger multiplier, in order to group tasks together that are almost
1347 * evenly spread out between numa nodes.
1348 */
7bd95320
RR
1349static inline unsigned long task_weight(struct task_struct *p, int nid,
1350 int dist)
83e1d2cd 1351{
7bd95320 1352 unsigned long faults, total_faults;
83e1d2cd 1353
44dba3d5 1354 if (!p->numa_faults)
83e1d2cd
MG
1355 return 0;
1356
1357 total_faults = p->total_numa_faults;
1358
1359 if (!total_faults)
1360 return 0;
1361
7bd95320 1362 faults = task_faults(p, nid);
6c6b1193
RR
1363 faults += score_nearby_nodes(p, nid, dist, true);
1364
7bd95320 1365 return 1000 * faults / total_faults;
83e1d2cd
MG
1366}
1367
7bd95320
RR
1368static inline unsigned long group_weight(struct task_struct *p, int nid,
1369 int dist)
83e1d2cd 1370{
7bd95320
RR
1371 unsigned long faults, total_faults;
1372
1373 if (!p->numa_group)
1374 return 0;
1375
1376 total_faults = p->numa_group->total_faults;
1377
1378 if (!total_faults)
83e1d2cd
MG
1379 return 0;
1380
7bd95320 1381 faults = group_faults(p, nid);
6c6b1193
RR
1382 faults += score_nearby_nodes(p, nid, dist, false);
1383
7bd95320 1384 return 1000 * faults / total_faults;
83e1d2cd
MG
1385}
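/*
 * Example: a task with 600 of its 1000 recorded faults on node 1 gets
 * task_weight() == 600 (out of 1000) for that node on a NUMA_DIRECT
 * system, where score_nearby_nodes() adds nothing.
 */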
1386
10f39042
RR
1387bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1388 int src_nid, int dst_cpu)
1389{
1390 struct numa_group *ng = p->numa_group;
1391 int dst_nid = cpu_to_node(dst_cpu);
1392 int last_cpupid, this_cpupid;
1393
1394 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1395
1396 /*
1397 * Multi-stage node selection is used in conjunction with a periodic
1398 * migration fault to build a temporal task<->page relation. By using
1399 * a two-stage filter we remove short/unlikely relations.
1400 *
1401 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1402 * a task's usage of a particular page (n_p) per total usage of this
1403 * page (n_t) (in a given time-span) to a probability.
1404 *
1405 * Our periodic faults will sample this probability and getting the
1406 * same result twice in a row, given these samples are fully
1407 * independent, is then given by P(n)^2, provided our sample period
1408 * is sufficiently short compared to the usage pattern.
1409 *
1410 * This quadric squishes small probabilities, making it less likely we
1411 * act on an unlikely task<->page relation.
1412 */
1413 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1414 if (!cpupid_pid_unset(last_cpupid) &&
1415 cpupid_to_nid(last_cpupid) != dst_nid)
1416 return false;
1417
1418 /* Always allow migrate on private faults */
1419 if (cpupid_match_pid(p, last_cpupid))
1420 return true;
1421
1422 /* A shared fault, but p->numa_group has not been set up yet. */
1423 if (!ng)
1424 return true;
1425
1426 /*
4142c3eb
RR
1427 * Destination node is much more heavily used than the source
1428 * node? Allow migration.
10f39042 1429 */
4142c3eb
RR
1430 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1431 ACTIVE_NODE_FRACTION)
10f39042
RR
1432 return true;
1433
1434 /*
4142c3eb
RR
1435 * Distribute memory according to CPU & memory use on each node,
1436 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1437 *
1438 * faults_cpu(dst) 3 faults_cpu(src)
1439 * --------------- * - > ---------------
1440 * faults_mem(dst) 4 faults_mem(src)
10f39042 1441 */
4142c3eb
RR
1442 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1443 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
10f39042
RR
1444}
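/*
 * Example of the 3/4 hysteresis above: if the destination node accounts
 * for 40% of the group's CPU faults but only 30% of its memory faults,
 * while the source accounts for 30% and 40% respectively, then
 * 0.40 * 0.40 * 3 > 0.30 * 0.30 * 4 and the page is migrated; with the
 * numbers swapped it stays where it is.
 */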
1445
c7132dd6 1446static unsigned long weighted_cpuload(struct rq *rq);
58d081b5
MG
1447static unsigned long source_load(int cpu, int type);
1448static unsigned long target_load(int cpu, int type);
ced549fa 1449static unsigned long capacity_of(int cpu);
58d081b5 1450
fb13c7ee 1451/* Cached statistics for all CPUs within a node */
58d081b5 1452struct numa_stats {
fb13c7ee 1453 unsigned long nr_running;
58d081b5 1454 unsigned long load;
fb13c7ee
MG
1455
1456 /* Total compute capacity of CPUs on a node */
5ef20ca1 1457 unsigned long compute_capacity;
fb13c7ee
MG
1458
1459 /* Approximate capacity in terms of runnable tasks on a node */
5ef20ca1 1460 unsigned long task_capacity;
1b6a7495 1461 int has_free_capacity;
58d081b5 1462};
e6628d5b 1463
fb13c7ee
MG
1464/*
1465 * XXX borrowed from update_sg_lb_stats
1466 */
1467static void update_numa_stats(struct numa_stats *ns, int nid)
1468{
83d7f242
RR
1469 int smt, cpu, cpus = 0;
1470 unsigned long capacity;
fb13c7ee
MG
1471
1472 memset(ns, 0, sizeof(*ns));
1473 for_each_cpu(cpu, cpumask_of_node(nid)) {
1474 struct rq *rq = cpu_rq(cpu);
1475
1476 ns->nr_running += rq->nr_running;
c7132dd6 1477 ns->load += weighted_cpuload(rq);
ced549fa 1478 ns->compute_capacity += capacity_of(cpu);
5eca82a9
PZ
1479
1480 cpus++;
fb13c7ee
MG
1481 }
1482
5eca82a9
PZ
1483 /*
1484 * If we raced with hotplug and there are no CPUs left in our mask
1485 * the @ns structure is NULL'ed and task_numa_compare() will
1486 * not find this node attractive.
1487 *
1b6a7495
NP
1488 * We'll either bail at !has_free_capacity, or we'll detect a huge
1489 * imbalance and bail there.
5eca82a9
PZ
1490 */
1491 if (!cpus)
1492 return;
1493
83d7f242
RR
1494 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1495 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1496 capacity = cpus / smt; /* cores */
1497
1498 ns->task_capacity = min_t(unsigned, capacity,
1499 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1b6a7495 1500 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
fb13c7ee
MG
1501}
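/*
 * Example: a node with 8 logical CPUs of, say, capacity 589 each (SMT2)
 * has compute_capacity = 4712, smt = DIV_ROUND_UP(1024 * 8, 4712) = 2,
 * so capacity = 8 / 2 = 4 cores and task_capacity = min(4, 5) = 4;
 * the node reports free capacity while fewer than 4 tasks are running.
 */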
1502
58d081b5
MG
1503struct task_numa_env {
1504 struct task_struct *p;
e6628d5b 1505
58d081b5
MG
1506 int src_cpu, src_nid;
1507 int dst_cpu, dst_nid;
e6628d5b 1508
58d081b5 1509 struct numa_stats src_stats, dst_stats;
e6628d5b 1510
40ea2b42 1511 int imbalance_pct;
7bd95320 1512 int dist;
fb13c7ee
MG
1513
1514 struct task_struct *best_task;
1515 long best_imp;
58d081b5
MG
1516 int best_cpu;
1517};
1518
fb13c7ee
MG
1519static void task_numa_assign(struct task_numa_env *env,
1520 struct task_struct *p, long imp)
1521{
1522 if (env->best_task)
1523 put_task_struct(env->best_task);
bac78573
ON
1524 if (p)
1525 get_task_struct(p);
fb13c7ee
MG
1526
1527 env->best_task = p;
1528 env->best_imp = imp;
1529 env->best_cpu = env->dst_cpu;
1530}
1531
28a21745 1532static bool load_too_imbalanced(long src_load, long dst_load,
e63da036
RR
1533 struct task_numa_env *env)
1534{
e4991b24
RR
1535 long imb, old_imb;
1536 long orig_src_load, orig_dst_load;
28a21745
RR
1537 long src_capacity, dst_capacity;
1538
1539 /*
1540 * The load is corrected for the CPU capacity available on each node.
1541 *
1542 * src_load dst_load
1543 * ------------ vs ---------
1544 * src_capacity dst_capacity
1545 */
1546 src_capacity = env->src_stats.compute_capacity;
1547 dst_capacity = env->dst_stats.compute_capacity;
e63da036 1548
5f95ba7a 1549 imb = abs(dst_load * src_capacity - src_load * dst_capacity);
e63da036 1550
28a21745 1551 orig_src_load = env->src_stats.load;
e4991b24 1552 orig_dst_load = env->dst_stats.load;
28a21745 1553
5f95ba7a 1554 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
e4991b24
RR
1555
1556 /* Would this change make things worse? */
1557 return (imb > old_imb);
e63da036
RR
1558}
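/*
 * Example: with equal capacities on both nodes this reduces to comparing
 * |dst_load - src_load| before and after the move. Moving a task of
 * load 200 from a node loaded at 1000 to one loaded at 600 shrinks the
 * gap from 400 to 0, so the move is not considered too imbalanced.
 */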
1559
fb13c7ee
MG
1560/*
1561 * This checks if the overall compute and NUMA accesses of the system would
1562 * be improved if the source task was migrated to the target dst_cpu, taking
1563 * into account that it might be best if the task running on the dst_cpu
1564 * were exchanged with the source task.
1565 */
887c290e 1566static void task_numa_compare(struct task_numa_env *env,
305c1fac 1567 long taskimp, long groupimp, bool maymove)
fb13c7ee 1568{
fb13c7ee
MG
1569 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1570 struct task_struct *cur;
28a21745 1571 long src_load, dst_load;
fb13c7ee 1572 long load;
1c5d3eb3 1573 long imp = env->p->numa_group ? groupimp : taskimp;
0132c3e1 1574 long moveimp = imp;
7bd95320 1575 int dist = env->dist;
fb13c7ee
MG
1576
1577 rcu_read_lock();
bac78573
ON
1578 cur = task_rcu_dereference(&dst_rq->curr);
1579 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
fb13c7ee
MG
1580 cur = NULL;
1581
7af68335
PZ
1582 /*
1583 * Because we have preemption enabled we can get migrated around and
1584 * end up trying to select ourselves (current == env->p) as a swap candidate.
1585 */
1586 if (cur == env->p)
1587 goto unlock;
1588
305c1fac
SD
1589 if (!cur) {
1590 if (maymove || imp > env->best_imp)
1591 goto assign;
1592 else
1593 goto unlock;
1594 }
1595
fb13c7ee
MG
1596 /*
1597 * "imp" is the fault differential for the source task between the
1598 * source and destination node. Calculate the total differential for
1599 * the source task and potential destination task. The more negative
305c1fac 1600 * the value is, the more remote accesses would be expected to
fb13c7ee
MG
1601 * be incurred if the tasks were swapped.
1602 */
305c1fac
SD
1603 /* Skip this swap candidate if cannot move to the source cpu */
1604 if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
1605 goto unlock;
fb13c7ee 1606
305c1fac
SD
1607 /*
1608 * If dst and source tasks are in the same NUMA group, or not
1609 * in any group then look only at task weights.
1610 */
1611 if (cur->numa_group == env->p->numa_group) {
1612 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1613 task_weight(cur, env->dst_nid, dist);
887c290e 1614 /*
305c1fac
SD
1615 * Add some hysteresis to prevent swapping the
1616 * tasks within a group over tiny differences.
887c290e 1617 */
305c1fac
SD
1618 if (cur->numa_group)
1619 imp -= imp / 16;
1620 } else {
1621 /*
1622 * Compare the group weights. If a task is all by itself
1623 * (not part of a group), use the task weight instead.
1624 */
1625 if (cur->numa_group && env->p->numa_group)
1626 imp += group_weight(cur, env->src_nid, dist) -
1627 group_weight(cur, env->dst_nid, dist);
1628 else
1629 imp += task_weight(cur, env->src_nid, dist) -
1630 task_weight(cur, env->dst_nid, dist);
fb13c7ee
MG
1631 }
1632
305c1fac 1633 if (imp <= env->best_imp)
fb13c7ee
MG
1634 goto unlock;
1635
305c1fac
SD
1636 if (maymove && moveimp > imp && moveimp > env->best_imp) {
1637 imp = moveimp - 1;
1638 cur = NULL;
fb13c7ee 1639 goto assign;
305c1fac 1640 }
fb13c7ee
MG
1641
1642 /*
1643 * In the overloaded case, try and keep the load balanced.
1644 */
305c1fac
SD
1645 load = task_h_load(env->p) - task_h_load(cur);
1646 if (!load)
1647 goto assign;
1648
e720fff6
PZ
1649 dst_load = env->dst_stats.load + load;
1650 src_load = env->src_stats.load - load;
fb13c7ee 1651
28a21745 1652 if (load_too_imbalanced(src_load, dst_load, env))
fb13c7ee
MG
1653 goto unlock;
1654
305c1fac 1655assign:
ba7e5a27
RR
1656 /*
1657 * One idle CPU per node is evaluated for a task numa move.
1658 * Call select_idle_sibling to maybe find a better one.
1659 */
10e2f1ac
PZ
1660 if (!cur) {
1661 /*
97fb7a0a 1662 * select_idle_sibling() uses a per-CPU cpumask that
10e2f1ac
PZ
1663 * can be used from IRQ context.
1664 */
1665 local_irq_disable();
772bd008
MR
1666 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1667 env->dst_cpu);
10e2f1ac
PZ
1668 local_irq_enable();
1669 }
ba7e5a27 1670
fb13c7ee
MG
1671 task_numa_assign(env, cur, imp);
1672unlock:
1673 rcu_read_unlock();
1674}
1675
887c290e
RR
1676static void task_numa_find_cpu(struct task_numa_env *env,
1677 long taskimp, long groupimp)
2c8a50aa 1678{
305c1fac
SD
1679 long src_load, dst_load, load;
1680 bool maymove = false;
2c8a50aa
MG
1681 int cpu;
1682
305c1fac
SD
1683 load = task_h_load(env->p);
1684 dst_load = env->dst_stats.load + load;
1685 src_load = env->src_stats.load - load;
1686
1687 /*
1688 * If the improvement from just moving env->p direction is better
1689 * than swapping tasks around, check if a move is possible.
1690 */
1691 maymove = !load_too_imbalanced(src_load, dst_load, env);
1692
2c8a50aa
MG
1693 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1694 /* Skip this CPU if the source task cannot migrate */
0c98d344 1695 if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
2c8a50aa
MG
1696 continue;
1697
1698 env->dst_cpu = cpu;
305c1fac 1699 task_numa_compare(env, taskimp, groupimp, maymove);
2c8a50aa
MG
1700 }
1701}
1702
6f9aad0b
RR
1703/* Only move tasks to a NUMA node less busy than the current node. */
1704static bool numa_has_capacity(struct task_numa_env *env)
1705{
1706 struct numa_stats *src = &env->src_stats;
1707 struct numa_stats *dst = &env->dst_stats;
1708
1709 if (src->has_free_capacity && !dst->has_free_capacity)
1710 return false;
1711
1712 /*
1713 * Only consider a task move if the source has a higher load
1714 * than the destination, corrected for CPU capacity on each node.
1715 *
1716 * src->load dst->load
1717 * --------------------- vs ---------------------
1718 * src->compute_capacity dst->compute_capacity
1719 */
44dcb04f
SD
1720 if (src->load * dst->compute_capacity * env->imbalance_pct >
1721
1722 dst->load * src->compute_capacity * 100)
6f9aad0b
RR
1723 return true;
1724
1725 return false;
1726}
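/*
 * Example: with equal node capacities and the imbalance_pct of 112 set
 * up by task_numa_migrate(), the load check passes whenever
 * src->load * 112 > dst->load * 100, i.e. unless the source node is
 * already clearly less loaded than the destination.
 */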
1727
58d081b5
MG
1728static int task_numa_migrate(struct task_struct *p)
1729{
58d081b5
MG
1730 struct task_numa_env env = {
1731 .p = p,
fb13c7ee 1732
58d081b5 1733 .src_cpu = task_cpu(p),
b32e86b4 1734 .src_nid = task_node(p),
fb13c7ee
MG
1735
1736 .imbalance_pct = 112,
1737
1738 .best_task = NULL,
1739 .best_imp = 0,
4142c3eb 1740 .best_cpu = -1,
58d081b5
MG
1741 };
1742 struct sched_domain *sd;
887c290e 1743 unsigned long taskweight, groupweight;
7bd95320 1744 int nid, ret, dist;
887c290e 1745 long taskimp, groupimp;
e6628d5b 1746
58d081b5 1747 /*
fb13c7ee
MG
1748 * Pick the lowest SD_NUMA domain, as that would have the smallest
1749 * imbalance and would be the first to start moving tasks about.
1750 *
1751 * And we want to avoid any moving of tasks about, as that would create
 1752 * random movement of tasks -- countering the numa conditions we're trying
1753 * to satisfy here.
58d081b5
MG
1754 */
1755 rcu_read_lock();
fb13c7ee 1756 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
46a73e8a
RR
1757 if (sd)
1758 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
e6628d5b
MG
1759 rcu_read_unlock();
1760
46a73e8a
RR
1761 /*
1762 * Cpusets can break the scheduler domain tree into smaller
1763 * balance domains, some of which do not cross NUMA boundaries.
1764 * Tasks that are "trapped" in such domains cannot be migrated
1765 * elsewhere, so there is no point in (re)trying.
1766 */
1767 if (unlikely(!sd)) {
8cd45eee 1768 sched_setnuma(p, task_node(p));
46a73e8a
RR
1769 return -EINVAL;
1770 }
1771
2c8a50aa 1772 env.dst_nid = p->numa_preferred_nid;
7bd95320
RR
1773 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1774 taskweight = task_weight(p, env.src_nid, dist);
1775 groupweight = group_weight(p, env.src_nid, dist);
1776 update_numa_stats(&env.src_stats, env.src_nid);
1777 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1778 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2c8a50aa 1779 update_numa_stats(&env.dst_stats, env.dst_nid);
58d081b5 1780
a43455a1 1781 /* Try to find a spot on the preferred nid. */
6f9aad0b
RR
1782 if (numa_has_capacity(&env))
1783 task_numa_find_cpu(&env, taskimp, groupimp);
e1dda8a7 1784
9de05d48
RR
1785 /*
1786 * Look at other nodes in these cases:
1787 * - there is no space available on the preferred_nid
1788 * - the task is part of a numa_group that is interleaved across
1789 * multiple NUMA nodes; in order to better consolidate the group,
1790 * we need to check other locations.
1791 */
4142c3eb 1792 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
2c8a50aa
MG
1793 for_each_online_node(nid) {
1794 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1795 continue;
58d081b5 1796
7bd95320 1797 dist = node_distance(env.src_nid, env.dst_nid);
6c6b1193
RR
1798 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1799 dist != env.dist) {
1800 taskweight = task_weight(p, env.src_nid, dist);
1801 groupweight = group_weight(p, env.src_nid, dist);
1802 }
7bd95320 1803
83e1d2cd 1804 /* Only consider nodes where both task and groups benefit */
7bd95320
RR
1805 taskimp = task_weight(p, nid, dist) - taskweight;
1806 groupimp = group_weight(p, nid, dist) - groupweight;
887c290e 1807 if (taskimp < 0 && groupimp < 0)
fb13c7ee
MG
1808 continue;
1809
7bd95320 1810 env.dist = dist;
2c8a50aa
MG
1811 env.dst_nid = nid;
1812 update_numa_stats(&env.dst_stats, env.dst_nid);
6f9aad0b
RR
1813 if (numa_has_capacity(&env))
1814 task_numa_find_cpu(&env, taskimp, groupimp);
58d081b5
MG
1815 }
1816 }
1817
68d1b02a
RR
1818 /*
1819 * If the task is part of a workload that spans multiple NUMA nodes,
1820 * and is migrating into one of the workload's active nodes, remember
1821 * this node as the task's preferred numa node, so the workload can
1822 * settle down.
1823 * A task that migrated to a second choice node will be better off
1824 * trying for a better one later. Do not set the preferred node here.
1825 */
db015dae
RR
1826 if (p->numa_group) {
1827 if (env.best_cpu == -1)
1828 nid = env.src_nid;
1829 else
8cd45eee 1830 nid = cpu_to_node(env.best_cpu);
db015dae 1831
8cd45eee
SD
1832 if (nid != p->numa_preferred_nid)
1833 sched_setnuma(p, nid);
db015dae
RR
1834 }
1835
1836 /* No better CPU than the current one was found. */
1837 if (env.best_cpu == -1)
1838 return -EAGAIN;
0ec8aa00 1839
04bb2f94
RR
1840 /*
1841 * Reset the scan period if the task is being rescheduled on an
1842 * alternative node to recheck if the tasks is now properly placed.
1843 */
b5dd77c8 1844 p->numa_scan_period = task_scan_start(p);
04bb2f94 1845
fb13c7ee 1846 if (env.best_task == NULL) {
286549dc
MG
1847 ret = migrate_task_to(p, env.best_cpu);
1848 if (ret != 0)
1849 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
fb13c7ee
MG
1850 return ret;
1851 }
1852
1853 ret = migrate_swap(p, env.best_task);
286549dc
MG
1854 if (ret != 0)
1855 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
fb13c7ee
MG
1856 put_task_struct(env.best_task);
1857 return ret;
e6628d5b
MG
1858}
1859
6b9a7460
MG
1860/* Attempt to migrate a task to a CPU on the preferred node. */
1861static void numa_migrate_preferred(struct task_struct *p)
1862{
5085e2a3
RR
1863 unsigned long interval = HZ;
1864
2739d3ee 1865 /* This task has no NUMA fault statistics yet */
44dba3d5 1866 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
6b9a7460
MG
1867 return;
1868
2739d3ee 1869 /* Periodically retry migrating the task to the preferred node */
5085e2a3 1870 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
789ba280 1871 p->numa_migrate_retry = jiffies + interval;
2739d3ee
RR
1872
1873 /* Success if task is already running on preferred CPU */
de1b301a 1874 if (task_node(p) == p->numa_preferred_nid)
6b9a7460
MG
1875 return;
1876
1877 /* Otherwise, try migrate to a CPU on the preferred node */
2739d3ee 1878 task_numa_migrate(p);
6b9a7460
MG
1879}
1880
20e07dea 1881/*
4142c3eb 1882 * Find out how many nodes the workload is actively running on. Do this by
20e07dea
RR
1883 * tracking the nodes from which NUMA hinting faults are triggered. This can
1884 * be different from the set of nodes where the workload's memory is currently
1885 * located.
20e07dea 1886 */
4142c3eb 1887static void numa_group_count_active_nodes(struct numa_group *numa_group)
20e07dea
RR
1888{
1889 unsigned long faults, max_faults = 0;
4142c3eb 1890 int nid, active_nodes = 0;
20e07dea
RR
1891
1892 for_each_online_node(nid) {
1893 faults = group_faults_cpu(numa_group, nid);
1894 if (faults > max_faults)
1895 max_faults = faults;
1896 }
1897
1898 for_each_online_node(nid) {
1899 faults = group_faults_cpu(numa_group, nid);
4142c3eb
RR
1900 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1901 active_nodes++;
20e07dea 1902 }
4142c3eb
RR
1903
1904 numa_group->max_faults_cpu = max_faults;
1905 numa_group->active_nodes = active_nodes;
20e07dea
RR
1906}
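To see what the active-node count does, a hedged stand-alone sketch follows; the per-node fault counts are invented and the fraction is passed in as a parameter (the kernel uses the ACTIVE_NODE_FRACTION constant defined elsewhere in this file):

/* Illustrative only: a node counts as active when its per-node faults,
 * multiplied by the fraction, exceed the busiest node's fault count. */
#include <stdio.h>

static int count_active_nodes(const unsigned long *faults, int nr_nodes,
			      unsigned long fraction)
{
	unsigned long max_faults = 0;
	int nid, active = 0;

	for (nid = 0; nid < nr_nodes; nid++)
		if (faults[nid] > max_faults)
			max_faults = faults[nid];

	for (nid = 0; nid < nr_nodes; nid++)
		if (faults[nid] * fraction > max_faults)
			active++;

	return active;
}

int main(void)
{
	/* Assumed per-node fault counts for a 4-node machine. */
	unsigned long faults[4] = { 900, 400, 250, 10 };

	/* With fraction = 3, a node needs more than 300 faults: nodes 0 and 1 qualify. */
	printf("active nodes: %d\n", count_active_nodes(faults, 4, 3));
	return 0;
}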
1907
04bb2f94
RR
1908/*
1909 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1910 * increments. The more local the fault statistics are, the higher the scan
a22b4b01
RR
 1911 * period will be for the next scan window. If the local/(local+remote) ratio
 1912 * is below NUMA_PERIOD_THRESHOLD (where the ratio ranges over 1..NUMA_PERIOD_SLOTS)
1913 * the scan period will decrease. Aim for 70% local accesses.
04bb2f94
RR
1914 */
1915#define NUMA_PERIOD_SLOTS 10
a22b4b01 1916#define NUMA_PERIOD_THRESHOLD 7
04bb2f94
RR
1917
1918/*
1919 * Increase the scan period (slow down scanning) if the majority of
1920 * our memory is already on our local node, or if the majority of
1921 * the page accesses are shared with other processes.
1922 * Otherwise, decrease the scan period.
1923 */
1924static void update_task_scan_period(struct task_struct *p,
1925 unsigned long shared, unsigned long private)
1926{
1927 unsigned int period_slot;
37ec97de 1928 int lr_ratio, ps_ratio;
04bb2f94
RR
1929 int diff;
1930
1931 unsigned long remote = p->numa_faults_locality[0];
1932 unsigned long local = p->numa_faults_locality[1];
1933
1934 /*
 1935 * If there were no recorded hinting faults then either the task is
 1936 * completely idle or all activity is in areas that are not of interest
074c2381
MG
 1937 * to automatic numa balancing. Related to that, if there were failed
 1938 * migrations then it implies we are migrating too quickly or the local
 1939 * node is overloaded. In either case, scan slower.
04bb2f94 1940 */
074c2381 1941 if (local + shared == 0 || p->numa_faults_locality[2]) {
04bb2f94
RR
1942 p->numa_scan_period = min(p->numa_scan_period_max,
1943 p->numa_scan_period << 1);
1944
1945 p->mm->numa_next_scan = jiffies +
1946 msecs_to_jiffies(p->numa_scan_period);
1947
1948 return;
1949 }
1950
1951 /*
1952 * Prepare to scale scan period relative to the current period.
1953 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1954 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1955 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1956 */
1957 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
37ec97de
RR
1958 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1959 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
1960
1961 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
1962 /*
1963 * Most memory accesses are local. There is no need to
1964 * do fast NUMA scanning, since memory is already local.
1965 */
1966 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
1967 if (!slot)
1968 slot = 1;
1969 diff = slot * period_slot;
1970 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
1971 /*
1972 * Most memory accesses are shared with other tasks.
1973 * There is no point in continuing fast NUMA scanning,
1974 * since other tasks may just move the memory elsewhere.
1975 */
1976 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
04bb2f94
RR
1977 if (!slot)
1978 slot = 1;
1979 diff = slot * period_slot;
1980 } else {
04bb2f94 1981 /*
37ec97de
RR
1982 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
1983 * yet they are not on the local NUMA node. Speed up
1984 * NUMA scanning to get the memory moved over.
04bb2f94 1985 */
37ec97de
RR
1986 int ratio = max(lr_ratio, ps_ratio);
1987 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
04bb2f94
RR
1988 }
1989
1990 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1991 task_scan_min(p), task_scan_max(p));
1992 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1993}
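A worked example of the slot arithmetic above may help. The sketch below is stand-alone and uses assumed numbers (a 1000 msec scan period, 90% local and 80% private faults); it is not kernel code, it merely reproduces the same computation:

/* Illustrative only: with the assumed inputs, period_slot = 100 msec,
 * ps_ratio = 8 >= NUMA_PERIOD_THRESHOLD, so the period grows by one slot. */
#include <stdio.h>

#define NUMA_PERIOD_SLOTS	10
#define NUMA_PERIOD_THRESHOLD	7

int main(void)
{
	unsigned long local = 90, remote = 10;		/* 90% local accesses */
	unsigned long private = 80, shared = 20;	/* 80% private accesses */
	unsigned int scan_period = 1000;		/* msec, assumed */

	unsigned int period_slot = (scan_period + NUMA_PERIOD_SLOTS - 1) /
				   NUMA_PERIOD_SLOTS;
	int lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
	int ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
	int slot, diff;

	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
		/* Mostly private and local: slow scanning down. */
		slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
		/* Mostly shared: also slow scanning down. */
		slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else {
		/* Private but remote: speed scanning up. */
		int ratio = lr_ratio > ps_ratio ? lr_ratio : ps_ratio;
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
	}

	printf("scan period change: %+d msec\n", diff);	/* +100 msec here */
	return 0;
}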
1994
7e2703e6
RR
1995/*
1996 * Get the fraction of time the task has been running since the last
1997 * NUMA placement cycle. The scheduler keeps similar statistics, but
1998 * decays those on a 32ms period, which is orders of magnitude off
1999 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2000 * stats only if the task is so new there are no NUMA statistics yet.
2001 */
2002static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2003{
2004 u64 runtime, delta, now;
2005 /* Use the start of this time slice to avoid calculations. */
2006 now = p->se.exec_start;
2007 runtime = p->se.sum_exec_runtime;
2008
2009 if (p->last_task_numa_placement) {
2010 delta = runtime - p->last_sum_exec_runtime;
2011 *period = now - p->last_task_numa_placement;
2012 } else {
c7b50216 2013 delta = p->se.avg.load_sum;
9d89c257 2014 *period = LOAD_AVG_MAX;
7e2703e6
RR
2015 }
2016
2017 p->last_sum_exec_runtime = runtime;
2018 p->last_task_numa_placement = now;
2019
2020 return delta;
2021}
2022
54009416
RR
2023/*
2024 * Determine the preferred nid for a task in a numa_group. This needs to
2025 * be done in a way that produces consistent results with group_weight,
2026 * otherwise workloads might not converge.
2027 */
2028static int preferred_group_nid(struct task_struct *p, int nid)
2029{
2030 nodemask_t nodes;
2031 int dist;
2032
2033 /* Direct connections between all NUMA nodes. */
2034 if (sched_numa_topology_type == NUMA_DIRECT)
2035 return nid;
2036
2037 /*
2038 * On a system with glueless mesh NUMA topology, group_weight
2039 * scores nodes according to the number of NUMA hinting faults on
2040 * both the node itself, and on nearby nodes.
2041 */
2042 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2043 unsigned long score, max_score = 0;
2044 int node, max_node = nid;
2045
2046 dist = sched_max_numa_distance;
2047
2048 for_each_online_node(node) {
2049 score = group_weight(p, node, dist);
2050 if (score > max_score) {
2051 max_score = score;
2052 max_node = node;
2053 }
2054 }
2055 return max_node;
2056 }
2057
2058 /*
2059 * Finding the preferred nid in a system with NUMA backplane
2060 * interconnect topology is more involved. The goal is to locate
2061 * tasks from numa_groups near each other in the system, and
2062 * untangle workloads from different sides of the system. This requires
2063 * searching down the hierarchy of node groups, recursively searching
2064 * inside the highest scoring group of nodes. The nodemask tricks
2065 * keep the complexity of the search down.
2066 */
2067 nodes = node_online_map;
2068 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2069 unsigned long max_faults = 0;
81907478 2070 nodemask_t max_group = NODE_MASK_NONE;
54009416
RR
2071 int a, b;
2072
2073 /* Are there nodes at this distance from each other? */
2074 if (!find_numa_distance(dist))
2075 continue;
2076
2077 for_each_node_mask(a, nodes) {
2078 unsigned long faults = 0;
2079 nodemask_t this_group;
2080 nodes_clear(this_group);
2081
2082 /* Sum group's NUMA faults; includes a==b case. */
2083 for_each_node_mask(b, nodes) {
2084 if (node_distance(a, b) < dist) {
2085 faults += group_faults(p, b);
2086 node_set(b, this_group);
2087 node_clear(b, nodes);
2088 }
2089 }
2090
2091 /* Remember the top group. */
2092 if (faults > max_faults) {
2093 max_faults = faults;
2094 max_group = this_group;
2095 /*
2096 * subtle: at the smallest distance there is
2097 * just one node left in each "group", the
2098 * winner is the preferred nid.
2099 */
2100 nid = a;
2101 }
2102 }
2103 /* Next round, evaluate the nodes within max_group. */
890a5409
JB
2104 if (!max_faults)
2105 break;
54009416
RR
2106 nodes = max_group;
2107 }
2108 return nid;
2109}
2110
cbee9f88
PZ
2111static void task_numa_placement(struct task_struct *p)
2112{
83e1d2cd
MG
2113 int seq, nid, max_nid = -1, max_group_nid = -1;
2114 unsigned long max_faults = 0, max_group_faults = 0;
04bb2f94 2115 unsigned long fault_types[2] = { 0, 0 };
7e2703e6
RR
2116 unsigned long total_faults;
2117 u64 runtime, period;
7dbd13ed 2118 spinlock_t *group_lock = NULL;
cbee9f88 2119
7e5a2c17
JL
2120 /*
2121 * The p->mm->numa_scan_seq field gets updated without
2122 * exclusive access. Use READ_ONCE() here to ensure
2123 * that the field is read in a single access:
2124 */
316c1608 2125 seq = READ_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
2126 if (p->numa_scan_seq == seq)
2127 return;
2128 p->numa_scan_seq = seq;
598f0ec0 2129 p->numa_scan_period_max = task_scan_max(p);
cbee9f88 2130
7e2703e6
RR
2131 total_faults = p->numa_faults_locality[0] +
2132 p->numa_faults_locality[1];
2133 runtime = numa_get_avg_runtime(p, &period);
2134
7dbd13ed
MG
2135 /* If the task is part of a group prevent parallel updates to group stats */
2136 if (p->numa_group) {
2137 group_lock = &p->numa_group->lock;
60e69eed 2138 spin_lock_irq(group_lock);
7dbd13ed
MG
2139 }
2140
688b7585
MG
2141 /* Find the node with the highest number of faults */
2142 for_each_online_node(nid) {
44dba3d5
IM
2143 /* Keep track of the offsets in numa_faults array */
2144 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
83e1d2cd 2145 unsigned long faults = 0, group_faults = 0;
44dba3d5 2146 int priv;
745d6147 2147
be1e4e76 2148 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
7e2703e6 2149 long diff, f_diff, f_weight;
8c8a743c 2150
44dba3d5
IM
2151 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2152 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2153 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2154 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
745d6147 2155
ac8e895b 2156 /* Decay existing window, copy faults since last scan */
44dba3d5
IM
2157 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2158 fault_types[priv] += p->numa_faults[membuf_idx];
2159 p->numa_faults[membuf_idx] = 0;
fb13c7ee 2160
7e2703e6
RR
2161 /*
2162 * Normalize the faults_from, so all tasks in a group
2163 * count according to CPU use, instead of by the raw
2164 * number of faults. Tasks with little runtime have
2165 * little over-all impact on throughput, and thus their
2166 * faults are less important.
2167 */
2168 f_weight = div64_u64(runtime << 16, period + 1);
44dba3d5 2169 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
7e2703e6 2170 (total_faults + 1);
44dba3d5
IM
2171 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2172 p->numa_faults[cpubuf_idx] = 0;
50ec8a40 2173
44dba3d5
IM
2174 p->numa_faults[mem_idx] += diff;
2175 p->numa_faults[cpu_idx] += f_diff;
2176 faults += p->numa_faults[mem_idx];
83e1d2cd 2177 p->total_numa_faults += diff;
8c8a743c 2178 if (p->numa_group) {
44dba3d5
IM
2179 /*
2180 * safe because we can only change our own group
2181 *
2182 * mem_idx represents the offset for a given
2183 * nid and priv in a specific region because it
2184 * is at the beginning of the numa_faults array.
2185 */
2186 p->numa_group->faults[mem_idx] += diff;
2187 p->numa_group->faults_cpu[mem_idx] += f_diff;
989348b5 2188 p->numa_group->total_faults += diff;
44dba3d5 2189 group_faults += p->numa_group->faults[mem_idx];
8c8a743c 2190 }
ac8e895b
MG
2191 }
2192
688b7585
MG
2193 if (faults > max_faults) {
2194 max_faults = faults;
2195 max_nid = nid;
2196 }
83e1d2cd
MG
2197
2198 if (group_faults > max_group_faults) {
2199 max_group_faults = group_faults;
2200 max_group_nid = nid;
2201 }
2202 }
2203
04bb2f94
RR
2204 update_task_scan_period(p, fault_types[0], fault_types[1]);
2205
7dbd13ed 2206 if (p->numa_group) {
4142c3eb 2207 numa_group_count_active_nodes(p->numa_group);
60e69eed 2208 spin_unlock_irq(group_lock);
54009416 2209 max_nid = preferred_group_nid(p, max_group_nid);
688b7585
MG
2210 }
2211
bb97fc31
RR
2212 if (max_faults) {
2213 /* Set the new preferred node */
2214 if (max_nid != p->numa_preferred_nid)
2215 sched_setnuma(p, max_nid);
2216
2217 if (task_node(p) != p->numa_preferred_nid)
2218 numa_migrate_preferred(p);
3a7053b3 2219 }
cbee9f88
PZ
2220}
2221
8c8a743c
PZ
2222static inline int get_numa_group(struct numa_group *grp)
2223{
2224 return atomic_inc_not_zero(&grp->refcount);
2225}
2226
2227static inline void put_numa_group(struct numa_group *grp)
2228{
2229 if (atomic_dec_and_test(&grp->refcount))
2230 kfree_rcu(grp, rcu);
2231}
2232
3e6a9418
MG
2233static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2234 int *priv)
8c8a743c
PZ
2235{
2236 struct numa_group *grp, *my_grp;
2237 struct task_struct *tsk;
2238 bool join = false;
2239 int cpu = cpupid_to_cpu(cpupid);
2240 int i;
2241
2242 if (unlikely(!p->numa_group)) {
2243 unsigned int size = sizeof(struct numa_group) +
50ec8a40 2244 4*nr_node_ids*sizeof(unsigned long);
8c8a743c
PZ
2245
2246 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2247 if (!grp)
2248 return;
2249
2250 atomic_set(&grp->refcount, 1);
4142c3eb
RR
2251 grp->active_nodes = 1;
2252 grp->max_faults_cpu = 0;
8c8a743c 2253 spin_lock_init(&grp->lock);
e29cf08b 2254 grp->gid = p->pid;
50ec8a40 2255 /* Second half of the array tracks nids where faults happen */
be1e4e76
RR
2256 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2257 nr_node_ids;
8c8a743c 2258
be1e4e76 2259 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2260 grp->faults[i] = p->numa_faults[i];
8c8a743c 2261
989348b5 2262 grp->total_faults = p->total_numa_faults;
83e1d2cd 2263
8c8a743c
PZ
2264 grp->nr_tasks++;
2265 rcu_assign_pointer(p->numa_group, grp);
2266 }
2267
2268 rcu_read_lock();
316c1608 2269 tsk = READ_ONCE(cpu_rq(cpu)->curr);
8c8a743c
PZ
2270
2271 if (!cpupid_match_pid(tsk, cpupid))
3354781a 2272 goto no_join;
8c8a743c
PZ
2273
2274 grp = rcu_dereference(tsk->numa_group);
2275 if (!grp)
3354781a 2276 goto no_join;
8c8a743c
PZ
2277
2278 my_grp = p->numa_group;
2279 if (grp == my_grp)
3354781a 2280 goto no_join;
8c8a743c
PZ
2281
2282 /*
 2283 * Only join the other group if it's bigger; if we're the bigger group,
2284 * the other task will join us.
2285 */
2286 if (my_grp->nr_tasks > grp->nr_tasks)
3354781a 2287 goto no_join;
8c8a743c
PZ
2288
2289 /*
2290 * Tie-break on the grp address.
2291 */
2292 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3354781a 2293 goto no_join;
8c8a743c 2294
dabe1d99
RR
2295 /* Always join threads in the same process. */
2296 if (tsk->mm == current->mm)
2297 join = true;
2298
2299 /* Simple filter to avoid false positives due to PID collisions */
2300 if (flags & TNF_SHARED)
2301 join = true;
8c8a743c 2302
3e6a9418
MG
2303 /* Update priv based on whether false sharing was detected */
2304 *priv = !join;
2305
dabe1d99 2306 if (join && !get_numa_group(grp))
3354781a 2307 goto no_join;
8c8a743c 2308
8c8a743c
PZ
2309 rcu_read_unlock();
2310
2311 if (!join)
2312 return;
2313
60e69eed
MG
2314 BUG_ON(irqs_disabled());
2315 double_lock_irq(&my_grp->lock, &grp->lock);
989348b5 2316
be1e4e76 2317 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
44dba3d5
IM
2318 my_grp->faults[i] -= p->numa_faults[i];
2319 grp->faults[i] += p->numa_faults[i];
8c8a743c 2320 }
989348b5
MG
2321 my_grp->total_faults -= p->total_numa_faults;
2322 grp->total_faults += p->total_numa_faults;
8c8a743c 2323
8c8a743c
PZ
2324 my_grp->nr_tasks--;
2325 grp->nr_tasks++;
2326
2327 spin_unlock(&my_grp->lock);
60e69eed 2328 spin_unlock_irq(&grp->lock);
8c8a743c
PZ
2329
2330 rcu_assign_pointer(p->numa_group, grp);
2331
2332 put_numa_group(my_grp);
3354781a
PZ
2333 return;
2334
2335no_join:
2336 rcu_read_unlock();
2337 return;
8c8a743c
PZ
2338}
2339
2340void task_numa_free(struct task_struct *p)
2341{
2342 struct numa_group *grp = p->numa_group;
44dba3d5 2343 void *numa_faults = p->numa_faults;
e9dd685c
SR
2344 unsigned long flags;
2345 int i;
8c8a743c
PZ
2346
2347 if (grp) {
e9dd685c 2348 spin_lock_irqsave(&grp->lock, flags);
be1e4e76 2349 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2350 grp->faults[i] -= p->numa_faults[i];
989348b5 2351 grp->total_faults -= p->total_numa_faults;
83e1d2cd 2352
8c8a743c 2353 grp->nr_tasks--;
e9dd685c 2354 spin_unlock_irqrestore(&grp->lock, flags);
35b123e2 2355 RCU_INIT_POINTER(p->numa_group, NULL);
8c8a743c
PZ
2356 put_numa_group(grp);
2357 }
2358
44dba3d5 2359 p->numa_faults = NULL;
82727018 2360 kfree(numa_faults);
8c8a743c
PZ
2361}
2362
cbee9f88
PZ
2363/*
2364 * Got a PROT_NONE fault for a page on @node.
2365 */
58b46da3 2366void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
cbee9f88
PZ
2367{
2368 struct task_struct *p = current;
6688cc05 2369 bool migrated = flags & TNF_MIGRATED;
58b46da3 2370 int cpu_node = task_node(current);
792568ec 2371 int local = !!(flags & TNF_FAULT_LOCAL);
4142c3eb 2372 struct numa_group *ng;
ac8e895b 2373 int priv;
cbee9f88 2374
2a595721 2375 if (!static_branch_likely(&sched_numa_balancing))
1a687c2e
MG
2376 return;
2377
9ff1d9ff
MG
2378 /* for example, ksmd faulting in a user's mm */
2379 if (!p->mm)
2380 return;
2381
f809ca9a 2382 /* Allocate buffer to track faults on a per-node basis */
44dba3d5
IM
2383 if (unlikely(!p->numa_faults)) {
2384 int size = sizeof(*p->numa_faults) *
be1e4e76 2385 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
f809ca9a 2386
44dba3d5
IM
2387 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2388 if (!p->numa_faults)
f809ca9a 2389 return;
745d6147 2390
83e1d2cd 2391 p->total_numa_faults = 0;
04bb2f94 2392 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
f809ca9a 2393 }
cbee9f88 2394
8c8a743c
PZ
2395 /*
2396 * First accesses are treated as private, otherwise consider accesses
2397 * to be private if the accessing pid has not changed
2398 */
2399 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2400 priv = 1;
2401 } else {
2402 priv = cpupid_match_pid(p, last_cpupid);
6688cc05 2403 if (!priv && !(flags & TNF_NO_GROUP))
3e6a9418 2404 task_numa_group(p, last_cpupid, flags, &priv);
8c8a743c
PZ
2405 }
2406
792568ec
RR
2407 /*
2408 * If a workload spans multiple NUMA nodes, a shared fault that
2409 * occurs wholly within the set of nodes that the workload is
2410 * actively using should be counted as local. This allows the
2411 * scan rate to slow down when a workload has settled down.
2412 */
4142c3eb
RR
2413 ng = p->numa_group;
2414 if (!priv && !local && ng && ng->active_nodes > 1 &&
2415 numa_is_active_node(cpu_node, ng) &&
2416 numa_is_active_node(mem_node, ng))
792568ec
RR
2417 local = 1;
2418
cbee9f88 2419 task_numa_placement(p);
f809ca9a 2420
2739d3ee
RR
2421 /*
 2422 * Retry task-to-preferred-node migration periodically, in case it
 2423 * previously failed, or the scheduler moved us.
2424 */
2425 if (time_after(jiffies, p->numa_migrate_retry))
6b9a7460
MG
2426 numa_migrate_preferred(p);
2427
b32e86b4
IM
2428 if (migrated)
2429 p->numa_pages_migrated += pages;
074c2381
MG
2430 if (flags & TNF_MIGRATE_FAIL)
2431 p->numa_faults_locality[2] += pages;
b32e86b4 2432
44dba3d5
IM
2433 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2434 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
792568ec 2435 p->numa_faults_locality[local] += pages;
cbee9f88
PZ
2436}
2437
6e5fb223
PZ
2438static void reset_ptenuma_scan(struct task_struct *p)
2439{
7e5a2c17
JL
2440 /*
2441 * We only did a read acquisition of the mmap sem, so
2442 * p->mm->numa_scan_seq is written to without exclusive access
2443 * and the update is not guaranteed to be atomic. That's not
2444 * much of an issue though, since this is just used for
2445 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2446 * expensive, to avoid any form of compiler optimizations:
2447 */
316c1608 2448 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
6e5fb223
PZ
2449 p->mm->numa_scan_offset = 0;
2450}
2451
cbee9f88
PZ
2452/*
2453 * The expensive part of numa migration is done from task_work context.
2454 * Triggered from task_tick_numa().
2455 */
2456void task_numa_work(struct callback_head *work)
2457{
2458 unsigned long migrate, next_scan, now = jiffies;
2459 struct task_struct *p = current;
2460 struct mm_struct *mm = p->mm;
51170840 2461 u64 runtime = p->se.sum_exec_runtime;
6e5fb223 2462 struct vm_area_struct *vma;
9f40604c 2463 unsigned long start, end;
598f0ec0 2464 unsigned long nr_pte_updates = 0;
4620f8c1 2465 long pages, virtpages;
cbee9f88 2466
9148a3a1 2467 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
cbee9f88
PZ
2468
2469 work->next = work; /* protect against double add */
2470 /*
2471 * Who cares about NUMA placement when they're dying.
2472 *
2473 * NOTE: make sure not to dereference p->mm before this check,
2474 * exit_task_work() happens _after_ exit_mm() so we could be called
2475 * without p->mm even though we still had it when we enqueued this
2476 * work.
2477 */
2478 if (p->flags & PF_EXITING)
2479 return;
2480
930aa174 2481 if (!mm->numa_next_scan) {
7e8d16b6
MG
2482 mm->numa_next_scan = now +
2483 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
b8593bfd
MG
2484 }
2485
cbee9f88
PZ
2486 /*
2487 * Enforce maximal scan/migration frequency..
2488 */
2489 migrate = mm->numa_next_scan;
2490 if (time_before(now, migrate))
2491 return;
2492
598f0ec0
MG
2493 if (p->numa_scan_period == 0) {
2494 p->numa_scan_period_max = task_scan_max(p);
b5dd77c8 2495 p->numa_scan_period = task_scan_start(p);
598f0ec0 2496 }
cbee9f88 2497
fb003b80 2498 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
2499 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2500 return;
2501
19a78d11
PZ
2502 /*
2503 * Delay this task enough that another task of this mm will likely win
2504 * the next time around.
2505 */
2506 p->node_stamp += 2 * TICK_NSEC;
2507
9f40604c
MG
2508 start = mm->numa_scan_offset;
2509 pages = sysctl_numa_balancing_scan_size;
2510 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
4620f8c1 2511 virtpages = pages * 8; /* Scan up to this much virtual space */
9f40604c
MG
2512 if (!pages)
2513 return;
cbee9f88 2514
4620f8c1 2515
8655d549
VB
2516 if (!down_read_trylock(&mm->mmap_sem))
2517 return;
9f40604c 2518 vma = find_vma(mm, start);
6e5fb223
PZ
2519 if (!vma) {
2520 reset_ptenuma_scan(p);
9f40604c 2521 start = 0;
6e5fb223
PZ
2522 vma = mm->mmap;
2523 }
9f40604c 2524 for (; vma; vma = vma->vm_next) {
6b79c57b 2525 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
8e76d4ee 2526 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
6e5fb223 2527 continue;
6b79c57b 2528 }
6e5fb223 2529
4591ce4f
MG
2530 /*
2531 * Shared library pages mapped by multiple processes are not
2532 * migrated as it is expected they are cache replicated. Avoid
2533 * hinting faults in read-only file-backed mappings or the vdso
2534 * as migrating the pages will be of marginal benefit.
2535 */
2536 if (!vma->vm_mm ||
2537 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2538 continue;
2539
3c67f474
MG
2540 /*
2541 * Skip inaccessible VMAs to avoid any confusion between
2542 * PROT_NONE and NUMA hinting ptes
2543 */
2544 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2545 continue;
4591ce4f 2546
9f40604c
MG
2547 do {
2548 start = max(start, vma->vm_start);
2549 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2550 end = min(end, vma->vm_end);
4620f8c1 2551 nr_pte_updates = change_prot_numa(vma, start, end);
598f0ec0
MG
2552
2553 /*
4620f8c1
RR
 2554 * Try to scan sysctl_numa_balancing_scan_size worth of
2555 * hpages that have at least one present PTE that
2556 * is not already pte-numa. If the VMA contains
2557 * areas that are unused or already full of prot_numa
2558 * PTEs, scan up to virtpages, to skip through those
2559 * areas faster.
598f0ec0
MG
2560 */
2561 if (nr_pte_updates)
2562 pages -= (end - start) >> PAGE_SHIFT;
4620f8c1 2563 virtpages -= (end - start) >> PAGE_SHIFT;
6e5fb223 2564
9f40604c 2565 start = end;
4620f8c1 2566 if (pages <= 0 || virtpages <= 0)
9f40604c 2567 goto out;
3cf1962c
RR
2568
2569 cond_resched();
9f40604c 2570 } while (end != vma->vm_end);
cbee9f88 2571 }
6e5fb223 2572
9f40604c 2573out:
6e5fb223 2574 /*
c69307d5
PZ
2575 * It is possible to reach the end of the VMA list but the last few
 2576 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2577 * would find the !migratable VMA on the next scan but not reset the
2578 * scanner to the start so check it now.
6e5fb223
PZ
2579 */
2580 if (vma)
9f40604c 2581 mm->numa_scan_offset = start;
6e5fb223
PZ
2582 else
2583 reset_ptenuma_scan(p);
2584 up_read(&mm->mmap_sem);
51170840
RR
2585
2586 /*
2587 * Make sure tasks use at least 32x as much time to run other code
2588 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2589 * Usually update_task_scan_period slows down scanning enough; on an
2590 * overloaded system we need to limit overhead on a per task basis.
2591 */
2592 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2593 u64 diff = p->se.sum_exec_runtime - runtime;
2594 p->node_stamp += 32 * diff;
2595 }
cbee9f88
PZ
2596}
2597
2598/*
2599 * Drive the periodic memory faults..
2600 */
2601void task_tick_numa(struct rq *rq, struct task_struct *curr)
2602{
2603 struct callback_head *work = &curr->numa_work;
2604 u64 period, now;
2605
2606 /*
2607 * We don't care about NUMA placement if we don't have memory.
2608 */
2609 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2610 return;
2611
2612 /*
2613 * Using runtime rather than walltime has the dual advantage that
2614 * we (mostly) drive the selection from busy threads and that the
2615 * task needs to have done some actual work before we bother with
2616 * NUMA placement.
2617 */
2618 now = curr->se.sum_exec_runtime;
2619 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2620
25b3e5a3 2621 if (now > curr->node_stamp + period) {
4b96a29b 2622 if (!curr->node_stamp)
b5dd77c8 2623 curr->numa_scan_period = task_scan_start(curr);
19a78d11 2624 curr->node_stamp += period;
cbee9f88
PZ
2625
2626 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2627 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2628 task_work_add(curr, work, true);
2629 }
2630 }
2631}
3fed382b 2632
cbee9f88
PZ
2633#else
2634static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2635{
2636}
0ec8aa00
PZ
2637
2638static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2639{
2640}
2641
2642static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2643{
2644}
3fed382b 2645
cbee9f88
PZ
2646#endif /* CONFIG_NUMA_BALANCING */
2647
30cfdcfc
DA
2648static void
2649account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2650{
2651 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 2652 if (!parent_entity(se))
029632fb 2653 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 2654#ifdef CONFIG_SMP
0ec8aa00
PZ
2655 if (entity_is_task(se)) {
2656 struct rq *rq = rq_of(cfs_rq);
2657
2658 account_numa_enqueue(rq, task_of(se));
2659 list_add(&se->group_node, &rq->cfs_tasks);
2660 }
367456c7 2661#endif
30cfdcfc 2662 cfs_rq->nr_running++;
30cfdcfc
DA
2663}
2664
2665static void
2666account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2667{
2668 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 2669 if (!parent_entity(se))
029632fb 2670 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
bfdb198c 2671#ifdef CONFIG_SMP
0ec8aa00
PZ
2672 if (entity_is_task(se)) {
2673 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
b87f1724 2674 list_del_init(&se->group_node);
0ec8aa00 2675 }
bfdb198c 2676#endif
30cfdcfc 2677 cfs_rq->nr_running--;
30cfdcfc
DA
2678}
2679
8d5b9025
PZ
2680/*
2681 * Signed add and clamp on underflow.
2682 *
2683 * Explicitly do a load-store to ensure the intermediate value never hits
2684 * memory. This allows lockless observations without ever seeing the negative
2685 * values.
2686 */
2687#define add_positive(_ptr, _val) do { \
2688 typeof(_ptr) ptr = (_ptr); \
2689 typeof(_val) val = (_val); \
2690 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2691 \
2692 res = var + val; \
2693 \
2694 if (val < 0 && res > var) \
2695 res = 0; \
2696 \
2697 WRITE_ONCE(*ptr, res); \
2698} while (0)
2699
2700/*
2701 * Unsigned subtract and clamp on underflow.
2702 *
2703 * Explicitly do a load-store to ensure the intermediate value never hits
2704 * memory. This allows lockless observations without ever seeing the negative
2705 * values.
2706 */
2707#define sub_positive(_ptr, _val) do { \
2708 typeof(_ptr) ptr = (_ptr); \
2709 typeof(*ptr) val = (_val); \
2710 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2711 res = var - val; \
2712 if (res > var) \
2713 res = 0; \
2714 WRITE_ONCE(*ptr, res); \
2715} while (0)
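A quick user-space demo of the clamp-on-underflow behaviour, with assumed values and plain loads/stores in place of READ_ONCE()/WRITE_ONCE() (gcc's typeof extension is kept, matching the macros above):

/* Illustrative only: unsigned subtraction wraps around, so "res > var"
 * detects the underflow and the value is clamped to zero instead. */
#include <stdio.h>

#define demo_sub_positive(_ptr, _val) do {		\
	typeof(_ptr) ptr = (_ptr);			\
	typeof(*ptr) val = (_val);			\
	typeof(*ptr) res, var = *ptr;			\
	res = var - val;				\
	if (res > var)					\
		res = 0;				\
	*ptr = res;					\
} while (0)

int main(void)
{
	unsigned long load_avg = 100;

	demo_sub_positive(&load_avg, 30);	/* 100 - 30 = 70 */
	printf("%lu\n", load_avg);

	demo_sub_positive(&load_avg, 200);	/* would underflow: clamped to 0 */
	printf("%lu\n", load_avg);
	return 0;
}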
2716
2717#ifdef CONFIG_SMP
8d5b9025
PZ
2718static inline void
2719enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2720{
1ea6c46a
PZ
2721 cfs_rq->runnable_weight += se->runnable_weight;
2722
2723 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
2724 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
8d5b9025
PZ
2725}
2726
2727static inline void
2728dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2729{
1ea6c46a
PZ
2730 cfs_rq->runnable_weight -= se->runnable_weight;
2731
2732 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
2733 sub_positive(&cfs_rq->avg.runnable_load_sum,
2734 se_runnable(se) * se->avg.runnable_load_sum);
8d5b9025
PZ
2735}
2736
2737static inline void
2738enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2739{
2740 cfs_rq->avg.load_avg += se->avg.load_avg;
2741 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
2742}
2743
2744static inline void
2745dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2746{
2747 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
2748 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
2749}
2750#else
2751static inline void
2752enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2753static inline void
2754dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2755static inline void
2756enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2757static inline void
2758dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2759#endif
2760
9059393e 2761static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1ea6c46a 2762 unsigned long weight, unsigned long runnable)
9059393e
VG
2763{
2764 if (se->on_rq) {
2765 /* commit outstanding execution time */
2766 if (cfs_rq->curr == se)
2767 update_curr(cfs_rq);
2768 account_entity_dequeue(cfs_rq, se);
2769 dequeue_runnable_load_avg(cfs_rq, se);
2770 }
2771 dequeue_load_avg(cfs_rq, se);
2772
1ea6c46a 2773 se->runnable_weight = runnable;
9059393e
VG
2774 update_load_set(&se->load, weight);
2775
2776#ifdef CONFIG_SMP
1ea6c46a
PZ
2777 do {
2778 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
2779
2780 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
2781 se->avg.runnable_load_avg =
2782 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
2783 } while (0);
9059393e
VG
2784#endif
2785
2786 enqueue_load_avg(cfs_rq, se);
2787 if (se->on_rq) {
2788 account_entity_enqueue(cfs_rq, se);
2789 enqueue_runnable_load_avg(cfs_rq, se);
2790 }
2791}
2792
2793void reweight_task(struct task_struct *p, int prio)
2794{
2795 struct sched_entity *se = &p->se;
2796 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2797 struct load_weight *load = &se->load;
2798 unsigned long weight = scale_load(sched_prio_to_weight[prio]);
2799
1ea6c46a 2800 reweight_entity(cfs_rq, se, weight, weight);
9059393e
VG
2801 load->inv_weight = sched_prio_to_wmult[prio];
2802}
2803
3ff6dcac 2804#ifdef CONFIG_FAIR_GROUP_SCHED
387f77cc 2805#ifdef CONFIG_SMP
cef27403
PZ
2806/*
2807 * All this does is approximate the hierarchical proportion which includes that
2808 * global sum we all love to hate.
2809 *
2810 * That is, the weight of a group entity, is the proportional share of the
2811 * group weight based on the group runqueue weights. That is:
2812 *
2813 * tg->weight * grq->load.weight
2814 * ge->load.weight = ----------------------------- (1)
2815 * \Sum grq->load.weight
2816 *
2817 * Now, because computing that sum is prohibitively expensive to compute (been
2818 * there, done that) we approximate it with this average stuff. The average
2819 * moves slower and therefore the approximation is cheaper and more stable.
2820 *
2821 * So instead of the above, we substitute:
2822 *
2823 * grq->load.weight -> grq->avg.load_avg (2)
2824 *
2825 * which yields the following:
2826 *
2827 * tg->weight * grq->avg.load_avg
2828 * ge->load.weight = ------------------------------ (3)
2829 * tg->load_avg
2830 *
2831 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
2832 *
2833 * That is shares_avg, and it is right (given the approximation (2)).
2834 *
2835 * The problem with it is that because the average is slow -- it was designed
2836 * to be exactly that of course -- this leads to transients in boundary
2837 * conditions. In specific, the case where the group was idle and we start the
2838 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
2839 * yielding bad latency etc..
2840 *
2841 * Now, in that special case (1) reduces to:
2842 *
2843 * tg->weight * grq->load.weight
17de4ee0 2844 * ge->load.weight = ----------------------------- = tg->weight (4)
cef27403
PZ
 2845 * grq->load.weight
2846 *
2847 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
2848 *
2849 * So what we do is modify our approximation (3) to approach (4) in the (near)
2850 * UP case, like:
2851 *
2852 * ge->load.weight =
2853 *
2854 * tg->weight * grq->load.weight
2855 * --------------------------------------------------- (5)
2856 * tg->load_avg - grq->avg.load_avg + grq->load.weight
2857 *
17de4ee0
PZ
2858 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
2859 * we need to use grq->avg.load_avg as its lower bound, which then gives:
2860 *
2861 *
2862 * tg->weight * grq->load.weight
2863 * ge->load.weight = ----------------------------- (6)
2864 * tg_load_avg'
2865 *
2866 * Where:
2867 *
2868 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
2869 * max(grq->load.weight, grq->avg.load_avg)
cef27403
PZ
2870 *
2871 * And that is shares_weight and is icky. In the (near) UP case it approaches
2872 * (4) while in the normal case it approaches (3). It consistently
2873 * overestimates the ge->load.weight and therefore:
2874 *
2875 * \Sum ge->load.weight >= tg->weight
2876 *
2877 * hence icky!
2878 */
2c8e4dce 2879static long calc_group_shares(struct cfs_rq *cfs_rq)
cf5f0acf 2880{
7c80cfc9
PZ
2881 long tg_weight, tg_shares, load, shares;
2882 struct task_group *tg = cfs_rq->tg;
2883
2884 tg_shares = READ_ONCE(tg->shares);
cf5f0acf 2885
3d4b60d3 2886 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
cf5f0acf 2887
ea1dc6fc 2888 tg_weight = atomic_long_read(&tg->load_avg);
3ff6dcac 2889
ea1dc6fc
PZ
2890 /* Ensure tg_weight >= load */
2891 tg_weight -= cfs_rq->tg_load_avg_contrib;
2892 tg_weight += load;
3ff6dcac 2893
7c80cfc9 2894 shares = (tg_shares * load);
cf5f0acf
PZ
2895 if (tg_weight)
2896 shares /= tg_weight;
3ff6dcac 2897
b8fd8423
DE
2898 /*
2899 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
2900 * of a group with small tg->shares value. It is a floor value which is
2901 * assigned as a minimum load.weight to the sched_entity representing
2902 * the group on a CPU.
2903 *
2904 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
2905 * on an 8-core system with 8 tasks each runnable on one CPU shares has
2906 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
2907 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
2908 * instead of 0.
2909 */
7c80cfc9 2910 return clamp_t(long, shares, MIN_SHARES, tg_shares);
3ff6dcac 2911}
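As a sanity check on formula (6), a stand-alone sketch with invented numbers follows; load scaling and the MIN_SHARES clamp are left out for brevity:

/* Illustrative only: one CPU's group runqueue carries most of the group's
 * load, so the group entity on that CPU gets most of tg->shares. */
#include <stdio.h>

int main(void)
{
	long tg_shares = 1024;		/* tg->shares */
	long grq_load_weight = 2048;	/* this CPU's grq->load.weight */
	long grq_load_avg = 512;	/* this CPU's grq->avg.load_avg */
	long tg_load_avg = 3072;	/* \Sum grq->avg.load_avg over all CPUs */

	/* load = max(grq->load.weight, grq->avg.load_avg) */
	long load = grq_load_weight > grq_load_avg ? grq_load_weight : grq_load_avg;

	/* tg_load_avg' = tg->load_avg - grq->avg.load_avg + load, per (6) */
	long tg_weight = tg_load_avg - grq_load_avg + load;

	/* 1024 * 2048 / 4608 ~= 455 */
	printf("ge->load.weight ~= %ld\n", tg_shares * load / tg_weight);
	return 0;
}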
2c8e4dce
JB
2912
2913/*
17de4ee0
PZ
2914 * This calculates the effective runnable weight for a group entity based on
2915 * the group entity weight calculated above.
2916 *
2917 * Because of the above approximation (2), our group entity weight is
 2918 * a load_avg based ratio (3). This means that it includes blocked load and
2919 * does not represent the runnable weight.
2920 *
2921 * Approximate the group entity's runnable weight per ratio from the group
2922 * runqueue:
2923 *
2924 * grq->avg.runnable_load_avg
2925 * ge->runnable_weight = ge->load.weight * -------------------------- (7)
2926 * grq->avg.load_avg
2927 *
2928 * However, analogous to above, since the avg numbers are slow, this leads to
2929 * transients in the from-idle case. Instead we use:
2930 *
2931 * ge->runnable_weight = ge->load.weight *
2932 *
2933 * max(grq->avg.runnable_load_avg, grq->runnable_weight)
2934 * ----------------------------------------------------- (8)
2935 * max(grq->avg.load_avg, grq->load.weight)
2936 *
2937 * Where these max() serve both to use the 'instant' values to fix the slow
2938 * from-idle and avoid the /0 on to-idle, similar to (6).
2c8e4dce
JB
2939 */
2940static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
2941{
17de4ee0
PZ
2942 long runnable, load_avg;
2943
2944 load_avg = max(cfs_rq->avg.load_avg,
2945 scale_load_down(cfs_rq->load.weight));
2946
2947 runnable = max(cfs_rq->avg.runnable_load_avg,
2948 scale_load_down(cfs_rq->runnable_weight));
2c8e4dce
JB
2949
2950 runnable *= shares;
2951 if (load_avg)
2952 runnable /= load_avg;
17de4ee0 2953
2c8e4dce
JB
2954 return clamp_t(long, runnable, MIN_SHARES, shares);
2955}
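And the runnable counterpart, formula (8), under the same invented numbers (again unscaled and without the final clamp):

/* Illustrative only: the runnable weight is the group entity weight scaled
 * by the runnable fraction of the group runqueue. */
#include <stdio.h>

static long lmax(long a, long b) { return a > b ? a : b; }

int main(void)
{
	long shares = 455;			/* ge->load.weight, e.g. from the sketch above */
	long runnable_load_avg = 300, runnable_weight = 1024;
	long load_avg = 512, load_weight = 2048;

	long runnable = lmax(runnable_load_avg, runnable_weight);	/* 1024 */
	long load = lmax(load_avg, load_weight);			/* 2048 */

	/* ge->runnable_weight = ge->load.weight * runnable / load ~= 227 */
	printf("ge->runnable_weight ~= %ld\n", shares * runnable / load);
	return 0;
}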
387f77cc 2956#endif /* CONFIG_SMP */
ea1dc6fc 2957
82958366
PT
2958static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2959
1ea6c46a
PZ
2960/*
2961 * Recomputes the group entity based on the current state of its group
2962 * runqueue.
2963 */
2964static void update_cfs_group(struct sched_entity *se)
2069dd75 2965{
1ea6c46a
PZ
2966 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
2967 long shares, runnable;
2069dd75 2968
1ea6c46a 2969 if (!gcfs_rq)
89ee048f
VG
2970 return;
2971
1ea6c46a 2972 if (throttled_hierarchy(gcfs_rq))
2069dd75 2973 return;
89ee048f 2974
3ff6dcac 2975#ifndef CONFIG_SMP
1ea6c46a 2976 runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
7c80cfc9
PZ
2977
2978 if (likely(se->load.weight == shares))
3ff6dcac 2979 return;
7c80cfc9 2980#else
2c8e4dce
JB
2981 shares = calc_group_shares(gcfs_rq);
2982 runnable = calc_group_runnable(gcfs_rq, shares);
3ff6dcac 2983#endif
2069dd75 2984
1ea6c46a 2985 reweight_entity(cfs_rq_of(se), se, shares, runnable);
2069dd75 2986}
89ee048f 2987
2069dd75 2988#else /* CONFIG_FAIR_GROUP_SCHED */
1ea6c46a 2989static inline void update_cfs_group(struct sched_entity *se)
2069dd75
PZ
2990{
2991}
2992#endif /* CONFIG_FAIR_GROUP_SCHED */
2993
ea14b57e 2994static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
a030d738 2995{
43964409
LT
2996 struct rq *rq = rq_of(cfs_rq);
2997
ea14b57e 2998 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
a030d738
VK
2999 /*
3000 * There are a few boundary cases this might miss but it should
3001 * get called often enough that that should (hopefully) not be
9783be2c 3002 * a real problem.
a030d738
VK
3003 *
3004 * It will not get called when we go idle, because the idle
3005 * thread is a different class (!fair), nor will the utilization
3006 * number include things like RT tasks.
3007 *
3008 * As is, the util number is not freq-invariant (we'd have to
3009 * implement arch_scale_freq_capacity() for that).
3010 *
3011 * See cpu_util().
3012 */
ea14b57e 3013 cpufreq_update_util(rq, flags);
a030d738
VK
3014 }
3015}
3016
141965c7 3017#ifdef CONFIG_SMP
c566e8e9 3018#ifdef CONFIG_FAIR_GROUP_SCHED
7c3edd2c
PZ
3019/**
3020 * update_tg_load_avg - update the tg's load avg
3021 * @cfs_rq: the cfs_rq whose avg changed
3022 * @force: update regardless of how small the difference
3023 *
3024 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3025 * However, because tg->load_avg is a global value there are performance
3026 * considerations.
3027 *
3028 * In order to avoid having to look at the other cfs_rq's, we use a
3029 * differential update where we store the last value we propagated. This in
3030 * turn allows skipping updates if the differential is 'small'.
3031 *
815abf5a 3032 * Updating tg's load_avg is necessary before update_cfs_share().
bb17f655 3033 */
9d89c257 3034static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
bb17f655 3035{
9d89c257 3036 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
bb17f655 3037
aa0b7ae0
WL
3038 /*
3039 * No need to update load_avg for root_task_group as it is not used.
3040 */
3041 if (cfs_rq->tg == &root_task_group)
3042 return;
3043
9d89c257
YD
3044 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3045 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3046 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
bb17f655 3047 }
8165e145 3048}
f5f9739d 3049
ad936d86 3050/*
97fb7a0a 3051 * Called within set_task_rq() right before setting a task's CPU. The
ad936d86
BP
3052 * caller only guarantees p->pi_lock is held; no other assumptions,
3053 * including the state of rq->lock, should be made.
3054 */
3055void set_task_rq_fair(struct sched_entity *se,
3056 struct cfs_rq *prev, struct cfs_rq *next)
3057{
0ccb977f
PZ
3058 u64 p_last_update_time;
3059 u64 n_last_update_time;
3060
ad936d86
BP
3061 if (!sched_feat(ATTACH_AGE_LOAD))
3062 return;
3063
3064 /*
 3065 * We are supposed to update the task to "current" time, then it's up to
 3066 * date and ready to go to a new CPU/cfs_rq. But we have difficulty in
 3067 * getting what the current time is, so simply throw away the out-of-date
 3068 * time. This will result in the wakee task being less decayed, but giving
 3069 * the wakee more load does not sound too bad.
3070 */
0ccb977f
PZ
3071 if (!(se->avg.last_update_time && prev))
3072 return;
ad936d86
BP
3073
3074#ifndef CONFIG_64BIT
0ccb977f 3075 {
ad936d86
BP
3076 u64 p_last_update_time_copy;
3077 u64 n_last_update_time_copy;
3078
3079 do {
3080 p_last_update_time_copy = prev->load_last_update_time_copy;
3081 n_last_update_time_copy = next->load_last_update_time_copy;
3082
3083 smp_rmb();
3084
3085 p_last_update_time = prev->avg.last_update_time;
3086 n_last_update_time = next->avg.last_update_time;
3087
3088 } while (p_last_update_time != p_last_update_time_copy ||
3089 n_last_update_time != n_last_update_time_copy);
0ccb977f 3090 }
ad936d86 3091#else
0ccb977f
PZ
3092 p_last_update_time = prev->avg.last_update_time;
3093 n_last_update_time = next->avg.last_update_time;
ad936d86 3094#endif
0ccb977f
PZ
3095 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
3096 se->avg.last_update_time = n_last_update_time;
ad936d86 3097}
09a43ace 3098
0e2d2aaa
PZ
3099
3100/*
3101 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3102 * propagate its contribution. The key to this propagation is the invariant
3103 * that for each group:
3104 *
3105 * ge->avg == grq->avg (1)
3106 *
3107 * _IFF_ we look at the pure running and runnable sums. Because they
3108 * represent the very same entity, just at different points in the hierarchy.
3109 *
a4c3c049
VG
3110 * Per the above update_tg_cfs_util() is trivial and simply copies the running
3111 * sum over (but still wrong, because the group entity and group rq do not have
3112 * their PELT windows aligned).
0e2d2aaa
PZ
3113 *
3114 * However, update_tg_cfs_runnable() is more complex. So we have:
3115 *
3116 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3117 *
3118 * And since, like util, the runnable part should be directly transferable,
 3119 * the following would _appear_ to be the straightforward approach:
3120 *
a4c3c049 3121 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
0e2d2aaa
PZ
3122 *
3123 * And per (1) we have:
3124 *
a4c3c049 3125 * ge->avg.runnable_avg == grq->avg.runnable_avg
0e2d2aaa
PZ
3126 *
3127 * Which gives:
3128 *
3129 * ge->load.weight * grq->avg.load_avg
3130 * ge->avg.load_avg = ----------------------------------- (4)
3131 * grq->load.weight
3132 *
3133 * Except that is wrong!
3134 *
3135 * Because while for entities historical weight is not important and we
3136 * really only care about our future and therefore can consider a pure
3137 * runnable sum, runqueues can NOT do this.
3138 *
3139 * We specifically want runqueues to have a load_avg that includes
3140 * historical weights. Those represent the blocked load, the load we expect
3141 * to (shortly) return to us. This only works by keeping the weights as
3142 * integral part of the sum. We therefore cannot decompose as per (3).
3143 *
a4c3c049
VG
3144 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3145 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3146 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3147 * runnable section of these tasks overlap (or not). If they were to perfectly
3148 * align the rq as a whole would be runnable 2/3 of the time. If however we
3149 * always have at least 1 runnable task, the rq as a whole is always runnable.
0e2d2aaa 3150 *
a4c3c049 3151 * So we'll have to approximate.. :/
0e2d2aaa 3152 *
a4c3c049 3153 * Given the constraint:
0e2d2aaa 3154 *
a4c3c049 3155 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
0e2d2aaa 3156 *
a4c3c049
VG
3157 * We can construct a rule that adds runnable to a rq by assuming minimal
3158 * overlap.
0e2d2aaa 3159 *
a4c3c049 3160 * On removal, we'll assume each task is equally runnable; which yields:
0e2d2aaa 3161 *
a4c3c049 3162 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
0e2d2aaa 3163 *
a4c3c049 3164 * XXX: only do this for the part of runnable > running ?
0e2d2aaa 3165 *
0e2d2aaa
PZ
3166 */
3167
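Before the helpers below, a stand-alone sketch of the removal-side estimate described above: every task on the group runqueue is assumed equally runnable, and the result is clipped so the group entity's own runnable sum is never inflated. All numbers are invented:

/* Illustrative only: grq->avg.runnable_sum ~= grq->avg.load_sum / grq->load.weight,
 * then clipped against the group entity's current load_sum. */
#include <stdio.h>

int main(void)
{
	long grq_load_sum = 90000;	/* weighted runnable sum of the group rq */
	long grq_load_weight = 3;	/* e.g. three tasks of weight 1 */
	long se_load_sum = 25000;	/* group entity's current load_sum */

	long est_runnable_sum = grq_load_sum / grq_load_weight;	/* 30000 */

	/* But make sure not to inflate the group entity's runnable sum. */
	long runnable_sum = est_runnable_sum < se_load_sum ?
			    est_runnable_sum : se_load_sum;	/* 25000 */

	printf("propagated runnable_sum = %ld\n", runnable_sum);
	return 0;
}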
09a43ace 3168static inline void
0e2d2aaa 3169update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
09a43ace 3170{
09a43ace
VG
3171 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3172
3173 /* Nothing to update */
3174 if (!delta)
3175 return;
3176
a4c3c049
VG
3177 /*
3178 * The relation between sum and avg is:
3179 *
3180 * LOAD_AVG_MAX - 1024 + sa->period_contrib
3181 *
3182 * however, the PELT windows are not aligned between grq and gse.
3183 */
3184
09a43ace
VG
3185 /* Set new sched_entity's utilization */
3186 se->avg.util_avg = gcfs_rq->avg.util_avg;
3187 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
3188
3189 /* Update parent cfs_rq utilization */
3190 add_positive(&cfs_rq->avg.util_avg, delta);
3191 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
3192}
3193
09a43ace 3194static inline void
0e2d2aaa 3195update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
09a43ace 3196{
a4c3c049
VG
3197 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3198 unsigned long runnable_load_avg, load_avg;
3199 u64 runnable_load_sum, load_sum = 0;
3200 s64 delta_sum;
09a43ace 3201
0e2d2aaa
PZ
3202 if (!runnable_sum)
3203 return;
09a43ace 3204
0e2d2aaa 3205 gcfs_rq->prop_runnable_sum = 0;
09a43ace 3206
a4c3c049
VG
3207 if (runnable_sum >= 0) {
3208 /*
3209 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3210 * the CPU is saturated running == runnable.
3211 */
3212 runnable_sum += se->avg.load_sum;
3213 runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
3214 } else {
3215 /*
3216 * Estimate the new unweighted runnable_sum of the gcfs_rq by
3217 * assuming all tasks are equally runnable.
3218 */
3219 if (scale_load_down(gcfs_rq->load.weight)) {
3220 load_sum = div_s64(gcfs_rq->avg.load_sum,
3221 scale_load_down(gcfs_rq->load.weight));
3222 }
3223
3224 /* But make sure to not inflate se's runnable */
3225 runnable_sum = min(se->avg.load_sum, load_sum);
3226 }
3227
3228 /*
3229 * runnable_sum can't be lower than running_sum
97fb7a0a 3230 * As the running sum is scaled with CPU capacity whereas the runnable sum
a4c3c049
VG
 3231 * is not, we rescale running_sum first.
3232 */
3233 running_sum = se->avg.util_sum /
3234 arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
3235 runnable_sum = max(runnable_sum, running_sum);
3236
0e2d2aaa
PZ
3237 load_sum = (s64)se_weight(se) * runnable_sum;
3238 load_avg = div_s64(load_sum, LOAD_AVG_MAX);
09a43ace 3239
a4c3c049
VG
3240 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3241 delta_avg = load_avg - se->avg.load_avg;
09a43ace 3242
a4c3c049
VG
3243 se->avg.load_sum = runnable_sum;
3244 se->avg.load_avg = load_avg;
3245 add_positive(&cfs_rq->avg.load_avg, delta_avg);
3246 add_positive(&cfs_rq->avg.load_sum, delta_sum);
09a43ace 3247
1ea6c46a
PZ
3248 runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
3249 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
a4c3c049
VG
3250 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
3251 delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
1ea6c46a 3252
a4c3c049
VG
3253 se->avg.runnable_load_sum = runnable_sum;
3254 se->avg.runnable_load_avg = runnable_load_avg;
1ea6c46a 3255
09a43ace 3256 if (se->on_rq) {
a4c3c049
VG
3257 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
3258 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
09a43ace
VG
3259 }
3260}
3261
0e2d2aaa 3262static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
09a43ace 3263{
0e2d2aaa
PZ
3264 cfs_rq->propagate = 1;
3265 cfs_rq->prop_runnable_sum += runnable_sum;
09a43ace
VG
3266}
3267
3268/* Update task and its cfs_rq load average */
3269static inline int propagate_entity_load_avg(struct sched_entity *se)
3270{
0e2d2aaa 3271 struct cfs_rq *cfs_rq, *gcfs_rq;
09a43ace
VG
3272
3273 if (entity_is_task(se))
3274 return 0;
3275
0e2d2aaa
PZ
3276 gcfs_rq = group_cfs_rq(se);
3277 if (!gcfs_rq->propagate)
09a43ace
VG
3278 return 0;
3279
0e2d2aaa
PZ
3280 gcfs_rq->propagate = 0;
3281
09a43ace
VG
3282 cfs_rq = cfs_rq_of(se);
3283
0e2d2aaa 3284 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
09a43ace 3285
0e2d2aaa
PZ
3286 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3287 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
09a43ace
VG
3288
3289 return 1;
3290}
3291
bc427898
VG
3292/*
3293 * Check if we need to update the load and the utilization of a blocked
3294 * group_entity:
3295 */
3296static inline bool skip_blocked_update(struct sched_entity *se)
3297{
3298 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3299
3300 /*
3301 * If sched_entity still have not zero load or utilization, we have to
3302 * decay it:
3303 */
3304 if (se->avg.load_avg || se->avg.util_avg)
3305 return false;
3306
3307 /*
3308 * If there is a pending propagation, we have to update the load and
3309 * the utilization of the sched_entity:
3310 */
0e2d2aaa 3311 if (gcfs_rq->propagate)
bc427898
VG
3312 return false;
3313
3314 /*
3315	 * Otherwise, the load and the utilization of the sched_entity are
3316 * already zero and there is no pending propagation, so it will be a
3317 * waste of time to try to decay it:
3318 */
3319 return true;
3320}
3321
6e83125c 3322#else /* CONFIG_FAIR_GROUP_SCHED */
09a43ace 3323
9d89c257 3324static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
09a43ace
VG
3325
3326static inline int propagate_entity_load_avg(struct sched_entity *se)
3327{
3328 return 0;
3329}
3330
0e2d2aaa 3331static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
09a43ace 3332
6e83125c 3333#endif /* CONFIG_FAIR_GROUP_SCHED */
c566e8e9 3334
3d30544f
PZ
3335/**
3336 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3337 * @now: current time, as per cfs_rq_clock_task()
3338 * @cfs_rq: cfs_rq to update
3d30544f
PZ
3339 *
3340 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3341 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3342 * post_init_entity_util_avg().
3343 *
3344	 * cfs_rq->avg is used by task_h_load() and update_cfs_group(), for example.
3345 *
7c3edd2c
PZ
3346 * Returns true if the load decayed or we removed load.
3347 *
3348 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3349 * call update_tg_load_avg() when this function returns true.
3d30544f 3350 */
a2c6c91f 3351static inline int
3a123bbb 3352update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2dac754e 3353{
0e2d2aaa 3354 unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
9d89c257 3355 struct sched_avg *sa = &cfs_rq->avg;
2a2f5d4e 3356 int decayed = 0;
2dac754e 3357
2a2f5d4e
PZ
3358 if (cfs_rq->removed.nr) {
3359 unsigned long r;
9a2dd585 3360 u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
2a2f5d4e
PZ
3361
3362 raw_spin_lock(&cfs_rq->removed.lock);
3363 swap(cfs_rq->removed.util_avg, removed_util);
3364 swap(cfs_rq->removed.load_avg, removed_load);
0e2d2aaa 3365 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
2a2f5d4e
PZ
3366 cfs_rq->removed.nr = 0;
3367 raw_spin_unlock(&cfs_rq->removed.lock);
3368
2a2f5d4e 3369 r = removed_load;
89741892 3370 sub_positive(&sa->load_avg, r);
9a2dd585 3371 sub_positive(&sa->load_sum, r * divider);
2dac754e 3372
2a2f5d4e 3373 r = removed_util;
89741892 3374 sub_positive(&sa->util_avg, r);
9a2dd585 3375 sub_positive(&sa->util_sum, r * divider);
2a2f5d4e 3376
0e2d2aaa 3377 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
2a2f5d4e
PZ
3378
3379 decayed = 1;
9d89c257 3380 }
36ee28e4 3381
2a2f5d4e 3382 decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
36ee28e4 3383
9d89c257
YD
3384#ifndef CONFIG_64BIT
3385 smp_wmb();
3386 cfs_rq->load_last_update_time_copy = sa->last_update_time;
3387#endif
36ee28e4 3388
2a2f5d4e 3389 if (decayed)
ea14b57e 3390 cfs_rq_util_change(cfs_rq, 0);
21e96f88 3391
2a2f5d4e 3392 return decayed;
21e96f88
SM
3393}
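/*
 * Worked example for the removed-load handling above (illustrative; assumes
 * LOAD_AVG_MAX == 47742 and a period_contrib of 512): the PELT invariant is
 * roughly sum == avg * divider, with
 * divider = LOAD_AVG_MAX - 1024 + period_contrib = 47230, so removing a
 * util_avg of 100 must also strip 100 * 47230 == 4723000 from util_sum to
 * keep avg and sum consistent with each other.
 */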
3394
3d30544f
PZ
3395/**
3396 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3397 * @cfs_rq: cfs_rq to attach to
3398 * @se: sched_entity to attach
3399 *
3400 * Must call update_cfs_rq_load_avg() before this, since we rely on
3401 * cfs_rq->avg.last_update_time being current.
3402 */
ea14b57e 3403static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
a05e8c51 3404{
f207934f
PZ
3405 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
3406
3407 /*
3408 * When we attach the @se to the @cfs_rq, we must align the decay
3409 * window because without that, really weird and wonderful things can
3410 * happen.
3411 *
3412 * XXX illustrate
3413 */
a05e8c51 3414 se->avg.last_update_time = cfs_rq->avg.last_update_time;
f207934f
PZ
3415 se->avg.period_contrib = cfs_rq->avg.period_contrib;
3416
3417 /*
3418 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
3419 * period_contrib. This isn't strictly correct, but since we're
3420 * entirely outside of the PELT hierarchy, nobody cares if we truncate
3421 * _sum a little.
3422 */
3423 se->avg.util_sum = se->avg.util_avg * divider;
3424
3425 se->avg.load_sum = divider;
3426 if (se_weight(se)) {
3427 se->avg.load_sum =
3428 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
3429 }
3430
3431 se->avg.runnable_load_sum = se->avg.load_sum;
3432
8d5b9025 3433 enqueue_load_avg(cfs_rq, se);
a05e8c51
BP
3434 cfs_rq->avg.util_avg += se->avg.util_avg;
3435 cfs_rq->avg.util_sum += se->avg.util_sum;
0e2d2aaa
PZ
3436
3437 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
a2c6c91f 3438
ea14b57e 3439 cfs_rq_util_change(cfs_rq, flags);
a05e8c51
BP
3440}
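/*
 * Worked example for the _sum recomputation above (illustrative; assumes
 * divider == 47230, i.e. LOAD_AVG_MAX - 1024 + a period_contrib of 512): an
 * entity attaching with util_avg == 200 gets util_sum = 200 * 47230, and
 * with load_avg == 300 and se_weight(se) == 1024 it gets
 * load_sum = 300 * 47230 / 1024 (~13836). Both sums are rebuilt from the
 * averages against the cfs_rq's decay window rather than the entity's stale
 * one.
 */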
3441
3d30544f
PZ
3442/**
3443 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3444 * @cfs_rq: cfs_rq to detach from
3445 * @se: sched_entity to detach
3446 *
3447 * Must call update_cfs_rq_load_avg() before this, since we rely on
3448 * cfs_rq->avg.last_update_time being current.
3449 */
a05e8c51
BP
3450static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3451{
8d5b9025 3452 dequeue_load_avg(cfs_rq, se);
89741892
PZ
3453 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3454 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
0e2d2aaa
PZ
3455
3456 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
a2c6c91f 3457
ea14b57e 3458 cfs_rq_util_change(cfs_rq, 0);
a05e8c51
BP
3459}
3460
b382a531
PZ
3461/*
3462 * Optional action to be done while updating the load average
3463 */
3464#define UPDATE_TG 0x1
3465#define SKIP_AGE_LOAD 0x2
3466#define DO_ATTACH 0x4
3467
3468/* Update task and its cfs_rq load average */
3469static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3470{
3471 u64 now = cfs_rq_clock_task(cfs_rq);
3472 struct rq *rq = rq_of(cfs_rq);
3473 int cpu = cpu_of(rq);
3474 int decayed;
3475
3476 /*
3477	 * Track the task's load average so it can be carried to its new CPU after
3478	 * migration, and the group sched_entity's load average for task_h_load().
3479 */
3480 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
3481 __update_load_avg_se(now, cpu, cfs_rq, se);
3482
3483 decayed = update_cfs_rq_load_avg(now, cfs_rq);
3484 decayed |= propagate_entity_load_avg(se);
3485
3486 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3487
ea14b57e
PZ
3488 /*
3489 * DO_ATTACH means we're here from enqueue_entity().
3490 * !last_update_time means we've passed through
3491 * migrate_task_rq_fair() indicating we migrated.
3492 *
3493 * IOW we're enqueueing a task on a new CPU.
3494 */
3495 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
b382a531
PZ
3496 update_tg_load_avg(cfs_rq, 0);
3497
3498 } else if (decayed && (flags & UPDATE_TG))
3499 update_tg_load_avg(cfs_rq, 0);
3500}
3501
9d89c257 3502#ifndef CONFIG_64BIT
0905f04e
YD
3503static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3504{
9d89c257 3505 u64 last_update_time_copy;
0905f04e 3506 u64 last_update_time;
9ee474f5 3507
9d89c257
YD
3508 do {
3509 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3510 smp_rmb();
3511 last_update_time = cfs_rq->avg.last_update_time;
3512 } while (last_update_time != last_update_time_copy);
0905f04e
YD
3513
3514 return last_update_time;
3515}
9d89c257 3516#else
0905f04e
YD
3517static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3518{
3519 return cfs_rq->avg.last_update_time;
3520}
9d89c257
YD
3521#endif
3522
104cb16d
MR
3523/*
3524 * Synchronize entity load avg of dequeued entity without locking
3525 * the previous rq.
3526 */
3527void sync_entity_load_avg(struct sched_entity *se)
3528{
3529 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3530 u64 last_update_time;
3531
3532 last_update_time = cfs_rq_last_update_time(cfs_rq);
0ccb977f 3533 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
104cb16d
MR
3534}
3535
0905f04e
YD
3536/*
3537	 * Task first catches up with cfs_rq, and then subtracts
3538 * itself from the cfs_rq (task must be off the queue now).
3539 */
3540void remove_entity_load_avg(struct sched_entity *se)
3541{
3542 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2a2f5d4e 3543 unsigned long flags;
0905f04e
YD
3544
3545 /*
7dc603c9
PZ
3546 * tasks cannot exit without having gone through wake_up_new_task() ->
3547 * post_init_entity_util_avg() which will have added things to the
3548 * cfs_rq, so we can remove unconditionally.
3549 *
3550 * Similarly for groups, they will have passed through
3551 * post_init_entity_util_avg() before unregister_sched_fair_group()
3552 * calls this.
0905f04e 3553 */
0905f04e 3554
104cb16d 3555 sync_entity_load_avg(se);
2a2f5d4e
PZ
3556
3557 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
3558 ++cfs_rq->removed.nr;
3559 cfs_rq->removed.util_avg += se->avg.util_avg;
3560 cfs_rq->removed.load_avg += se->avg.load_avg;
0e2d2aaa 3561 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */
2a2f5d4e 3562 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
2dac754e 3563}
642dbc39 3564
7ea241af
YD
3565static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3566{
1ea6c46a 3567 return cfs_rq->avg.runnable_load_avg;
7ea241af
YD
3568}
3569
3570static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3571{
3572 return cfs_rq->avg.load_avg;
3573}
3574
46f69fa3 3575static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
6e83125c 3576
7f65ea42
PB
3577static inline unsigned long task_util(struct task_struct *p)
3578{
3579 return READ_ONCE(p->se.avg.util_avg);
3580}
3581
3582static inline unsigned long _task_util_est(struct task_struct *p)
3583{
3584 struct util_est ue = READ_ONCE(p->se.avg.util_est);
3585
3586 return max(ue.ewma, ue.enqueued);
3587}
3588
3589static inline unsigned long task_util_est(struct task_struct *p)
3590{
3591 return max(task_util(p), _task_util_est(p));
3592}
3593
3594static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
3595 struct task_struct *p)
3596{
3597 unsigned int enqueued;
3598
3599 if (!sched_feat(UTIL_EST))
3600 return;
3601
3602 /* Update root cfs_rq's estimated utilization */
3603 enqueued = cfs_rq->avg.util_est.enqueued;
d519329f 3604 enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
7f65ea42
PB
3605 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
3606}
3607
3608/*
3609 * Check if a (signed) value is within a specified (unsigned) margin,
3610 * based on the observation that:
3611 *
3612 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
3613 *
3614	 * NOTE: this only works when value + margin < INT_MAX.
3615 */
3616static inline bool within_margin(int value, int margin)
3617{
3618 return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
3619}
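/*
 * Worked example (illustrative): with margin == 10 the accepted range is
 * -9..9, because 2 * margin - 1 == 19:
 *
 *   value ==   9: (unsigned)(9 + 9)   == 18       -> 18 < 19, true
 *   value ==  10: (unsigned)(10 + 9)  == 19       -> 19 < 19, false
 *   value == -10: (unsigned)(-10 + 9) == UINT_MAX -> false
 */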
3620
3621static void
3622util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
3623{
3624 long last_ewma_diff;
3625 struct util_est ue;
3626
3627 if (!sched_feat(UTIL_EST))
3628 return;
3629
3482d98b
VG
3630 /* Update root cfs_rq's estimated utilization */
3631 ue.enqueued = cfs_rq->avg.util_est.enqueued;
3632 ue.enqueued -= min_t(unsigned int, ue.enqueued,
3633 (_task_util_est(p) | UTIL_AVG_UNCHANGED));
7f65ea42
PB
3634 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
3635
3636 /*
3637 * Skip update of task's estimated utilization when the task has not
3638 * yet completed an activation, e.g. being migrated.
3639 */
3640 if (!task_sleep)
3641 return;
3642
d519329f
PB
3643 /*
3644 * If the PELT values haven't changed since enqueue time,
3645 * skip the util_est update.
3646 */
3647 ue = p->se.avg.util_est;
3648 if (ue.enqueued & UTIL_AVG_UNCHANGED)
3649 return;
3650
7f65ea42
PB
3651 /*
3652 * Skip update of task's estimated utilization when its EWMA is
3653 * already ~1% close to its last activation value.
3654 */
d519329f 3655 ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
7f65ea42
PB
3656 last_ewma_diff = ue.enqueued - ue.ewma;
3657 if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
3658 return;
3659
3660 /*
3661 * Update Task's estimated utilization
3662 *
3663 * When *p completes an activation we can consolidate another sample
3664 * of the task size. This is done by storing the current PELT value
3665 * as ue.enqueued and by using this value to update the Exponential
3666 * Weighted Moving Average (EWMA):
3667 *
3668 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
3669 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
3670 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
3671 * = w * ( last_ewma_diff ) + ewma(t-1)
3672 * = w * (last_ewma_diff + ewma(t-1) / w)
3673 *
3674 * Where 'w' is the weight of new samples, which is configured to be
3675 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
3676 */
3677 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
3678 ue.ewma += last_ewma_diff;
3679 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
3680 WRITE_ONCE(p->se.avg.util_est, ue);
3681}
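/*
 * Worked numeric example of the shift sequence above (illustrative; ignores
 * the UTIL_AVG_UNCHANGED flag bit and assumes UTIL_EST_WEIGHT_SHIFT == 2,
 * i.e. w == 1/4): with ewma(t-1) == 200 and task_util(p) == 280,
 * last_ewma_diff == 80, so
 *
 *   ewma = ((200 << 2) + 80) >> 2 = (800 + 80) >> 2 = 220
 *
 * which matches ewma(t-1) + w * last_ewma_diff = 200 + 80/4.
 */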
3682
38033c37
PZ
3683#else /* CONFIG_SMP */
3684
d31b1a66
VG
3685#define UPDATE_TG 0x0
3686#define SKIP_AGE_LOAD 0x0
b382a531 3687#define DO_ATTACH 0x0
d31b1a66 3688
88c0616e 3689static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
536bd00c 3690{
ea14b57e 3691 cfs_rq_util_change(cfs_rq, 0);
536bd00c
RW
3692}
3693
9d89c257 3694static inline void remove_entity_load_avg(struct sched_entity *se) {}
6e83125c 3695
a05e8c51 3696static inline void
ea14b57e 3697attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
a05e8c51
BP
3698static inline void
3699detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3700
46f69fa3 3701static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
6e83125c
PZ
3702{
3703 return 0;
3704}
3705
7f65ea42
PB
3706static inline void
3707util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
3708
3709static inline void
3710util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
3711 bool task_sleep) {}
3712
38033c37 3713#endif /* CONFIG_SMP */
9d85f21c 3714
ddc97297
PZ
3715static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3716{
3717#ifdef CONFIG_SCHED_DEBUG
3718 s64 d = se->vruntime - cfs_rq->min_vruntime;
3719
3720 if (d < 0)
3721 d = -d;
3722
3723 if (d > 3*sysctl_sched_latency)
ae92882e 3724 schedstat_inc(cfs_rq->nr_spread_over);
ddc97297
PZ
3725#endif
3726}
3727
aeb73b04
PZ
3728static void
3729place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3730{
1af5f730 3731 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 3732
2cb8600e
PZ
3733 /*
3734 * The 'current' period is already promised to the current tasks,
3735 * however the extra weight of the new task will slow them down a
3736 * little, place the new task so that it fits in the slot that
3737 * stays open at the end.
3738 */
94dfb5e7 3739 if (initial && sched_feat(START_DEBIT))
f9c0b095 3740 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 3741
a2e7a7eb 3742 /* sleeps up to a single latency don't count. */
5ca9880c 3743 if (!initial) {
a2e7a7eb 3744 unsigned long thresh = sysctl_sched_latency;
a7be37ac 3745
a2e7a7eb
MG
3746 /*
3747 * Halve their sleep time's effect, to allow
3748 * for a gentler effect of sleepers:
3749 */
3750 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3751 thresh >>= 1;
51e0304c 3752
a2e7a7eb 3753 vruntime -= thresh;
aeb73b04
PZ
3754 }
3755
b5d9d734 3756 /* ensure we never gain time by being placed backwards. */
16c8f1c7 3757 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
3758}
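/*
 * Illustrative example, assuming the default 6ms sched_latency with
 * START_DEBIT and GENTLE_FAIR_SLEEPERS enabled: a newly forked task is
 * placed one vslice *after* min_vruntime (it starts with a small debit),
 * while a nice-0 task waking from sleep is placed at most
 * sysctl_sched_latency/2 == 3ms worth of vruntime *before* min_vruntime,
 * giving sleepers a bounded wakeup credit; the max_vruntime() clamp above
 * ensures an entity never gains time relative to its own current vruntime.
 */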
3759
d3d9dc33
PT
3760static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3761
cb251765
MG
3762static inline void check_schedstat_required(void)
3763{
3764#ifdef CONFIG_SCHEDSTATS
3765 if (schedstat_enabled())
3766 return;
3767
3768 /* Force schedstat enabled if a dependent tracepoint is active */
3769 if (trace_sched_stat_wait_enabled() ||
3770 trace_sched_stat_sleep_enabled() ||
3771 trace_sched_stat_iowait_enabled() ||
3772 trace_sched_stat_blocked_enabled() ||
3773 trace_sched_stat_runtime_enabled()) {
eda8dca5 3774 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
cb251765 3775 "stat_blocked and stat_runtime require the "
f67abed5 3776 "kernel parameter schedstats=enable or "
cb251765
MG
3777 "kernel.sched_schedstats=1\n");
3778 }
3779#endif
3780}
3781
b5179ac7
PZ
3782
3783/*
3784 * MIGRATION
3785 *
3786 * dequeue
3787 * update_curr()
3788 * update_min_vruntime()
3789 * vruntime -= min_vruntime
3790 *
3791 * enqueue
3792 * update_curr()
3793 * update_min_vruntime()
3794 * vruntime += min_vruntime
3795 *
3796 * this way the vruntime transition between RQs is done when both
3797 * min_vruntime are up-to-date.
3798 *
3799 * WAKEUP (remote)
3800 *
59efa0ba 3801 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
b5179ac7
PZ
3802 * vruntime -= min_vruntime
3803 *
3804 * enqueue
3805 * update_curr()
3806 * update_min_vruntime()
3807 * vruntime += min_vruntime
3808 *
3809	 * this way we may use a slightly stale min_vruntime on the originating
3810	 * CPU, but always an up-to-date min_vruntime on the destination CPU.
3811 */
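/*
 * Illustrative example of the relative-vruntime trick described above: a
 * task with vruntime 1005ms dequeues from a CPU whose min_vruntime is
 * 1000ms, so it carries only the 5ms delta; when it is enqueued on a CPU
 * whose min_vruntime is 2000ms it resumes at 2005ms, keeping its relative
 * position instead of either starving or being unfairly boosted.
 */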
3812
bf0f6f24 3813static void
88ec22d3 3814enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3815{
2f950354
PZ
3816 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3817 bool curr = cfs_rq->curr == se;
3818
88ec22d3 3819 /*
2f950354
PZ
3820 * If we're the current task, we must renormalise before calling
3821 * update_curr().
88ec22d3 3822 */
2f950354 3823 if (renorm && curr)
88ec22d3
PZ
3824 se->vruntime += cfs_rq->min_vruntime;
3825
2f950354
PZ
3826 update_curr(cfs_rq);
3827
bf0f6f24 3828 /*
2f950354
PZ
3829 * Otherwise, renormalise after, such that we're placed at the current
3830 * moment in time, instead of some random moment in the past. Being
3831 * placed in the past could significantly boost this task to the
3832 * fairness detriment of existing tasks.
bf0f6f24 3833 */
2f950354
PZ
3834 if (renorm && !curr)
3835 se->vruntime += cfs_rq->min_vruntime;
3836
89ee048f
VG
3837 /*
3838 * When enqueuing a sched_entity, we must:
3839 * - Update loads to have both entity and cfs_rq synced with now.
3840 * - Add its load to cfs_rq->runnable_avg
3841 * - For group_entity, update its weight to reflect the new share of
3842 * its group cfs_rq
3843 * - Add its new weight to cfs_rq->load.weight
3844 */
b382a531 3845 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
1ea6c46a 3846 update_cfs_group(se);
b5b3e35f 3847 enqueue_runnable_load_avg(cfs_rq, se);
17bc14b7 3848 account_entity_enqueue(cfs_rq, se);
bf0f6f24 3849
1a3d027c 3850 if (flags & ENQUEUE_WAKEUP)
aeb73b04 3851 place_entity(cfs_rq, se, 0);
bf0f6f24 3852
cb251765 3853 check_schedstat_required();
4fa8d299
JP
3854 update_stats_enqueue(cfs_rq, se, flags);
3855 check_spread(cfs_rq, se);
2f950354 3856 if (!curr)
83b699ed 3857 __enqueue_entity(cfs_rq, se);
2069dd75 3858 se->on_rq = 1;
3d4b47b4 3859
d3d9dc33 3860 if (cfs_rq->nr_running == 1) {
3d4b47b4 3861 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
3862 check_enqueue_throttle(cfs_rq);
3863 }
bf0f6f24
IM
3864}
3865
2c13c919 3866static void __clear_buddies_last(struct sched_entity *se)
2002c695 3867{
2c13c919
RR
3868 for_each_sched_entity(se) {
3869 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3870 if (cfs_rq->last != se)
2c13c919 3871 break;
f1044799
PZ
3872
3873 cfs_rq->last = NULL;
2c13c919
RR
3874 }
3875}
2002c695 3876
2c13c919
RR
3877static void __clear_buddies_next(struct sched_entity *se)
3878{
3879 for_each_sched_entity(se) {
3880 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3881 if (cfs_rq->next != se)
2c13c919 3882 break;
f1044799
PZ
3883
3884 cfs_rq->next = NULL;
2c13c919 3885 }
2002c695
PZ
3886}
3887
ac53db59
RR
3888static void __clear_buddies_skip(struct sched_entity *se)
3889{
3890 for_each_sched_entity(se) {
3891 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3892 if (cfs_rq->skip != se)
ac53db59 3893 break;
f1044799
PZ
3894
3895 cfs_rq->skip = NULL;
ac53db59
RR
3896 }
3897}
3898
a571bbea
PZ
3899static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3900{
2c13c919
RR
3901 if (cfs_rq->last == se)
3902 __clear_buddies_last(se);
3903
3904 if (cfs_rq->next == se)
3905 __clear_buddies_next(se);
ac53db59
RR
3906
3907 if (cfs_rq->skip == se)
3908 __clear_buddies_skip(se);
a571bbea
PZ
3909}
3910
6c16a6dc 3911static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 3912
bf0f6f24 3913static void
371fd7e7 3914dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3915{
a2a2d680
DA
3916 /*
3917 * Update run-time statistics of the 'current'.
3918 */
3919 update_curr(cfs_rq);
89ee048f
VG
3920
3921 /*
3922 * When dequeuing a sched_entity, we must:
3923 * - Update loads to have both entity and cfs_rq synced with now.
3924	 * - Subtract its load from the cfs_rq->runnable_avg.
3925	 * - Subtract its previous weight from cfs_rq->load.weight.
3926 * - For group entity, update its weight to reflect the new share
3927 * of its group cfs_rq.
3928 */
88c0616e 3929 update_load_avg(cfs_rq, se, UPDATE_TG);
b5b3e35f 3930 dequeue_runnable_load_avg(cfs_rq, se);
a2a2d680 3931
4fa8d299 3932 update_stats_dequeue(cfs_rq, se, flags);
67e9fb2a 3933
2002c695 3934 clear_buddies(cfs_rq, se);
4793241b 3935
83b699ed 3936 if (se != cfs_rq->curr)
30cfdcfc 3937 __dequeue_entity(cfs_rq, se);
17bc14b7 3938 se->on_rq = 0;
30cfdcfc 3939 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
3940
3941 /*
b60205c7
PZ
3942 * Normalize after update_curr(); which will also have moved
3943 * min_vruntime if @se is the one holding it back. But before doing
3944 * update_min_vruntime() again, which will discount @se's position and
3945 * can move min_vruntime forward still more.
88ec22d3 3946 */
371fd7e7 3947 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 3948 se->vruntime -= cfs_rq->min_vruntime;
1e876231 3949
d8b4986d
PT
3950 /* return excess runtime on last dequeue */
3951 return_cfs_rq_runtime(cfs_rq);
3952
1ea6c46a 3953 update_cfs_group(se);
b60205c7
PZ
3954
3955 /*
3956 * Now advance min_vruntime if @se was the entity holding it back,
3957 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3958 * put back on, and if we advance min_vruntime, we'll be placed back
3959	 * further than we started -- i.e. we'll be penalized.
3960 */
3961 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3962 update_min_vruntime(cfs_rq);
bf0f6f24
IM
3963}
3964
3965/*
3966 * Preempt the current task with a newly woken task if needed:
3967 */
7c92e54f 3968static void
2e09bf55 3969check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 3970{
11697830 3971 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
3972 struct sched_entity *se;
3973 s64 delta;
11697830 3974
6d0f0ebd 3975 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 3976 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 3977 if (delta_exec > ideal_runtime) {
8875125e 3978 resched_curr(rq_of(cfs_rq));
a9f3e2b5
MG
3979 /*
3980 * The current task ran long enough, ensure it doesn't get
3981 * re-elected due to buddy favours.
3982 */
3983 clear_buddies(cfs_rq, curr);
f685ceac
MG
3984 return;
3985 }
3986
3987 /*
3988 * Ensure that a task that missed wakeup preemption by a
3989 * narrow margin doesn't have to wait for a full slice.
3990 * This also mitigates buddy induced latencies under load.
3991 */
f685ceac
MG
3992 if (delta_exec < sysctl_sched_min_granularity)
3993 return;
3994
f4cfb33e
WX
3995 se = __pick_first_entity(cfs_rq);
3996 delta = curr->vruntime - se->vruntime;
f685ceac 3997
f4cfb33e
WX
3998 if (delta < 0)
3999 return;
d7d82944 4000
f4cfb33e 4001 if (delta > ideal_runtime)
8875125e 4002 resched_curr(rq_of(cfs_rq));
bf0f6f24
IM
4003}
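/*
 * Illustrative example, assuming the default 6ms sched_latency: with two
 * runnable nice-0 tasks each gets a sched_slice() of ~3ms, so curr is
 * rescheduled once delta_exec exceeds 3ms; independently, if curr's
 * vruntime has drifted more than one ideal_runtime ahead of the leftmost
 * entity, it is preempted early even though its slice is not yet used up.
 */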
4004
83b699ed 4005static void
8494f412 4006set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 4007{
83b699ed
SV
4008 /* 'current' is not kept within the tree. */
4009 if (se->on_rq) {
4010 /*
4011 * Any task has to be enqueued before it get to execute on
4012 * a CPU. So account for the time it spent waiting on the
4013 * runqueue.
4014 */
4fa8d299 4015 update_stats_wait_end(cfs_rq, se);
83b699ed 4016 __dequeue_entity(cfs_rq, se);
88c0616e 4017 update_load_avg(cfs_rq, se, UPDATE_TG);
83b699ed
SV
4018 }
4019
79303e9e 4020 update_stats_curr_start(cfs_rq, se);
429d43bc 4021 cfs_rq->curr = se;
4fa8d299 4022
eba1ed4b
IM
4023 /*
4024 * Track our maximum slice length, if the CPU's load is at
4025	 * least twice that of our own weight (i.e. don't track it
4026 * when there are only lesser-weight tasks around):
4027 */
cb251765 4028 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
4fa8d299
JP
4029 schedstat_set(se->statistics.slice_max,
4030 max((u64)schedstat_val(se->statistics.slice_max),
4031 se->sum_exec_runtime - se->prev_sum_exec_runtime));
eba1ed4b 4032 }
4fa8d299 4033
4a55b450 4034 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
4035}
4036
3f3a4904
PZ
4037static int
4038wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4039
ac53db59
RR
4040/*
4041 * Pick the next process, keeping these things in mind, in this order:
4042 * 1) keep things fair between processes/task groups
4043 * 2) pick the "next" process, since someone really wants that to run
4044 * 3) pick the "last" process, for cache locality
4045 * 4) do not run the "skip" process, if something else is available
4046 */
678d5718
PZ
4047static struct sched_entity *
4048pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
aa2ac252 4049{
678d5718
PZ
4050 struct sched_entity *left = __pick_first_entity(cfs_rq);
4051 struct sched_entity *se;
4052
4053 /*
4054	 * If curr is set we have to see if it's to the left of the leftmost entity
4055 * still in the tree, provided there was anything in the tree at all.
4056 */
4057 if (!left || (curr && entity_before(curr, left)))
4058 left = curr;
4059
4060 se = left; /* ideally we run the leftmost entity */
f4b6755f 4061
ac53db59
RR
4062 /*
4063 * Avoid running the skip buddy, if running something else can
4064 * be done without getting too unfair.
4065 */
4066 if (cfs_rq->skip == se) {
678d5718
PZ
4067 struct sched_entity *second;
4068
4069 if (se == curr) {
4070 second = __pick_first_entity(cfs_rq);
4071 } else {
4072 second = __pick_next_entity(se);
4073 if (!second || (curr && entity_before(curr, second)))
4074 second = curr;
4075 }
4076
ac53db59
RR
4077 if (second && wakeup_preempt_entity(second, left) < 1)
4078 se = second;
4079 }
aa2ac252 4080
f685ceac
MG
4081 /*
4082 * Prefer last buddy, try to return the CPU to a preempted task.
4083 */
4084 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
4085 se = cfs_rq->last;
4086
ac53db59
RR
4087 /*
4088 * Someone really wants this to run. If it's not unfair, run it.
4089 */
4090 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
4091 se = cfs_rq->next;
4092
f685ceac 4093 clear_buddies(cfs_rq, se);
4793241b
PZ
4094
4095 return se;
aa2ac252
PZ
4096}
4097
678d5718 4098static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d3d9dc33 4099
ab6cde26 4100static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
4101{
4102 /*
4103 * If still on the runqueue then deactivate_task()
4104 * was not called and update_curr() has to be done:
4105 */
4106 if (prev->on_rq)
b7cc0896 4107 update_curr(cfs_rq);
bf0f6f24 4108
d3d9dc33
PT
4109 /* throttle cfs_rqs exceeding runtime */
4110 check_cfs_rq_runtime(cfs_rq);
4111
4fa8d299 4112 check_spread(cfs_rq, prev);
cb251765 4113
30cfdcfc 4114 if (prev->on_rq) {
4fa8d299 4115 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
4116 /* Put 'current' back into the tree. */
4117 __enqueue_entity(cfs_rq, prev);
9d85f21c 4118 /* in !on_rq case, update occurred at dequeue */
88c0616e 4119 update_load_avg(cfs_rq, prev, 0);
30cfdcfc 4120 }
429d43bc 4121 cfs_rq->curr = NULL;
bf0f6f24
IM
4122}
4123
8f4d37ec
PZ
4124static void
4125entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 4126{
bf0f6f24 4127 /*
30cfdcfc 4128 * Update run-time statistics of the 'current'.
bf0f6f24 4129 */
30cfdcfc 4130 update_curr(cfs_rq);
bf0f6f24 4131
9d85f21c
PT
4132 /*
4133 * Ensure that runnable average is periodically updated.
4134 */
88c0616e 4135 update_load_avg(cfs_rq, curr, UPDATE_TG);
1ea6c46a 4136 update_cfs_group(curr);
9d85f21c 4137
8f4d37ec
PZ
4138#ifdef CONFIG_SCHED_HRTICK
4139 /*
4140 * queued ticks are scheduled to match the slice, so don't bother
4141 * validating it and just reschedule.
4142 */
983ed7a6 4143 if (queued) {
8875125e 4144 resched_curr(rq_of(cfs_rq));
983ed7a6
HH
4145 return;
4146 }
8f4d37ec
PZ
4147 /*
4148 * don't let the period tick interfere with the hrtick preemption
4149 */
4150 if (!sched_feat(DOUBLE_TICK) &&
4151 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
4152 return;
4153#endif
4154
2c2efaed 4155 if (cfs_rq->nr_running > 1)
2e09bf55 4156 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
4157}
4158
ab84d31e
PT
4159
4160/**************************************************
4161 * CFS bandwidth control machinery
4162 */
4163
4164#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
4165
4166#ifdef HAVE_JUMP_LABEL
c5905afb 4167static struct static_key __cfs_bandwidth_used;
029632fb
PZ
4168
4169static inline bool cfs_bandwidth_used(void)
4170{
c5905afb 4171 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
4172}
4173
1ee14e6c 4174void cfs_bandwidth_usage_inc(void)
029632fb 4175{
ce48c146 4176 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
1ee14e6c
BS
4177}
4178
4179void cfs_bandwidth_usage_dec(void)
4180{
ce48c146 4181 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
029632fb
PZ
4182}
4183#else /* HAVE_JUMP_LABEL */
4184static bool cfs_bandwidth_used(void)
4185{
4186 return true;
4187}
4188
1ee14e6c
BS
4189void cfs_bandwidth_usage_inc(void) {}
4190void cfs_bandwidth_usage_dec(void) {}
029632fb
PZ
4191#endif /* HAVE_JUMP_LABEL */
4192
ab84d31e
PT
4193/*
4194 * default period for cfs group bandwidth.
4195 * default: 0.1s, units: nanoseconds
4196 */
4197static inline u64 default_cfs_period(void)
4198{
4199 return 100000000ULL;
4200}
ec12cb7f
PT
4201
4202static inline u64 sched_cfs_bandwidth_slice(void)
4203{
4204 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4205}
4206
a9cf55b2
PT
4207/*
4208 * Replenish runtime according to assigned quota and update expiration time.
4209 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
4210 * additional synchronization around rq->lock.
4211 *
4212 * requires cfs_b->lock
4213 */
029632fb 4214void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
4215{
4216 u64 now;
4217
4218 if (cfs_b->quota == RUNTIME_INF)
4219 return;
4220
4221 now = sched_clock_cpu(smp_processor_id());
4222 cfs_b->runtime = cfs_b->quota;
4223 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
512ac999 4224 cfs_b->expires_seq++;
a9cf55b2
PT
4225}
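/*
 * Illustrative example, assuming the default 100ms period and a quota of
 * 50ms: every refresh resets cfs_b->runtime to 50ms, pushes runtime_expires
 * one period into the future and bumps expires_seq so that per-cfs_rq local
 * deadlines can detect that a new global period has started.
 */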
4226
029632fb
PZ
4227static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4228{
4229 return &tg->cfs_bandwidth;
4230}
4231
f1b17280
PT
4232/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
4233static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4234{
4235 if (unlikely(cfs_rq->throttle_count))
1a99ae3f 4236 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
f1b17280 4237
78becc27 4238 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
4239}
4240
85dac906
PT
4241/* returns 0 on failure to allocate runtime */
4242static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
4243{
4244 struct task_group *tg = cfs_rq->tg;
4245 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 4246 u64 amount = 0, min_amount, expires;
512ac999 4247 int expires_seq;
ec12cb7f
PT
4248
4249 /* note: this is a positive sum as runtime_remaining <= 0 */
4250 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
4251
4252 raw_spin_lock(&cfs_b->lock);
4253 if (cfs_b->quota == RUNTIME_INF)
4254 amount = min_amount;
58088ad0 4255 else {
77a4d1a1 4256 start_cfs_bandwidth(cfs_b);
58088ad0
PT
4257
4258 if (cfs_b->runtime > 0) {
4259 amount = min(cfs_b->runtime, min_amount);
4260 cfs_b->runtime -= amount;
4261 cfs_b->idle = 0;
4262 }
ec12cb7f 4263 }
512ac999 4264 expires_seq = cfs_b->expires_seq;
a9cf55b2 4265 expires = cfs_b->runtime_expires;
ec12cb7f
PT
4266 raw_spin_unlock(&cfs_b->lock);
4267
4268 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
4269 /*
4270 * we may have advanced our local expiration to account for allowed
4271 * spread between our sched_clock and the one on which runtime was
4272 * issued.
4273 */
512ac999
XP
4274 if (cfs_rq->expires_seq != expires_seq) {
4275 cfs_rq->expires_seq = expires_seq;
a9cf55b2 4276 cfs_rq->runtime_expires = expires;
512ac999 4277 }
85dac906
PT
4278
4279 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
4280}
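/*
 * Illustrative example, assuming the default 5ms bandwidth slice: a cfs_rq
 * that has overrun to runtime_remaining == -2ms asks the global pool for
 * min_amount == 5ms - (-2ms) == 7ms; if cfs_b->runtime only has 4ms left it
 * receives those 4ms, ends up at runtime_remaining == 2ms and the function
 * returns 1 (runtime was successfully assigned).
 */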
4281
a9cf55b2
PT
4282/*
4283 * Note: This depends on the synchronization provided by sched_clock and the
4284 * fact that rq->clock snapshots this value.
4285 */
4286static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 4287{
a9cf55b2 4288 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
4289
4290 /* if the deadline is ahead of our clock, nothing to do */
78becc27 4291 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
4292 return;
4293
a9cf55b2
PT
4294 if (cfs_rq->runtime_remaining < 0)
4295 return;
4296
4297 /*
4298 * If the local deadline has passed we have to consider the
4299 * possibility that our sched_clock is 'fast' and the global deadline
4300 * has not truly expired.
4301 *
4302	 * Fortunately we can determine whether this is the case by checking
512ac999 4303	 * whether the global deadline (cfs_b->expires_seq) has advanced.
a9cf55b2 4304 */
512ac999 4305 if (cfs_rq->expires_seq == cfs_b->expires_seq) {
a9cf55b2
PT
4306 /* extend local deadline, drift is bounded above by 2 ticks */
4307 cfs_rq->runtime_expires += TICK_NSEC;
4308 } else {
4309 /* global deadline is ahead, expiration has passed */
4310 cfs_rq->runtime_remaining = 0;
4311 }
4312}
4313
9dbdb155 4314static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
a9cf55b2
PT
4315{
4316 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 4317 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
4318 expire_cfs_rq_runtime(cfs_rq);
4319
4320 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
4321 return;
4322
85dac906
PT
4323 /*
4324 * if we're unable to extend our runtime we resched so that the active
4325 * hierarchy can be throttled
4326 */
4327 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
8875125e 4328 resched_curr(rq_of(cfs_rq));
ec12cb7f
PT
4329}
4330
6c16a6dc 4331static __always_inline
9dbdb155 4332void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
ec12cb7f 4333{
56f570e5 4334 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
4335 return;
4336
4337 __account_cfs_rq_runtime(cfs_rq, delta_exec);
4338}
4339
85dac906
PT
4340static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4341{
56f570e5 4342 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
4343}
4344
64660c86
PT
4345/* check whether cfs_rq, or any parent, is throttled */
4346static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4347{
56f570e5 4348 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
4349}
4350
4351/*
4352 * Ensure that neither of the group entities corresponding to src_cpu or
4353 * dest_cpu are members of a throttled hierarchy when performing group
4354 * load-balance operations.
4355 */
4356static inline int throttled_lb_pair(struct task_group *tg,
4357 int src_cpu, int dest_cpu)
4358{
4359 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4360
4361 src_cfs_rq = tg->cfs_rq[src_cpu];
4362 dest_cfs_rq = tg->cfs_rq[dest_cpu];
4363
4364 return throttled_hierarchy(src_cfs_rq) ||
4365 throttled_hierarchy(dest_cfs_rq);
4366}
4367
64660c86
PT
4368static int tg_unthrottle_up(struct task_group *tg, void *data)
4369{
4370 struct rq *rq = data;
4371 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4372
4373 cfs_rq->throttle_count--;
64660c86 4374 if (!cfs_rq->throttle_count) {
f1b17280 4375 /* adjust cfs_rq_clock_task() */
78becc27 4376 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 4377 cfs_rq->throttled_clock_task;
64660c86 4378 }
64660c86
PT
4379
4380 return 0;
4381}
4382
4383static int tg_throttle_down(struct task_group *tg, void *data)
4384{
4385 struct rq *rq = data;
4386 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4387
82958366
PT
4388 /* group is entering throttled state, stop time */
4389 if (!cfs_rq->throttle_count)
78becc27 4390 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
4391 cfs_rq->throttle_count++;
4392
4393 return 0;
4394}
4395
d3d9dc33 4396static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
4397{
4398 struct rq *rq = rq_of(cfs_rq);
4399 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4400 struct sched_entity *se;
4401 long task_delta, dequeue = 1;
77a4d1a1 4402 bool empty;
85dac906
PT
4403
4404 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4405
f1b17280 4406 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
4407 rcu_read_lock();
4408 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4409 rcu_read_unlock();
85dac906
PT
4410
4411 task_delta = cfs_rq->h_nr_running;
4412 for_each_sched_entity(se) {
4413 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4414 /* throttled entity or throttle-on-deactivate */
4415 if (!se->on_rq)
4416 break;
4417
4418 if (dequeue)
4419 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4420 qcfs_rq->h_nr_running -= task_delta;
4421
4422 if (qcfs_rq->load.weight)
4423 dequeue = 0;
4424 }
4425
4426 if (!se)
72465447 4427 sub_nr_running(rq, task_delta);
85dac906
PT
4428
4429 cfs_rq->throttled = 1;
78becc27 4430 cfs_rq->throttled_clock = rq_clock(rq);
85dac906 4431 raw_spin_lock(&cfs_b->lock);
d49db342 4432 empty = list_empty(&cfs_b->throttled_cfs_rq);
77a4d1a1 4433
c06f04c7
BS
4434 /*
4435 * Add to the _head_ of the list, so that an already-started
4436 * distribute_cfs_runtime will not see us
4437 */
4438 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
77a4d1a1
PZ
4439
4440 /*
4441 * If we're the first throttled task, make sure the bandwidth
4442 * timer is running.
4443 */
4444 if (empty)
4445 start_cfs_bandwidth(cfs_b);
4446
85dac906
PT
4447 raw_spin_unlock(&cfs_b->lock);
4448}
4449
029632fb 4450void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
4451{
4452 struct rq *rq = rq_of(cfs_rq);
4453 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4454 struct sched_entity *se;
4455 int enqueue = 1;
4456 long task_delta;
4457
22b958d8 4458 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
4459
4460 cfs_rq->throttled = 0;
1a55af2e
FW
4461
4462 update_rq_clock(rq);
4463
671fd9da 4464 raw_spin_lock(&cfs_b->lock);
78becc27 4465 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
4466 list_del_rcu(&cfs_rq->throttled_list);
4467 raw_spin_unlock(&cfs_b->lock);
4468
64660c86
PT
4469 /* update hierarchical throttle state */
4470 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4471
671fd9da
PT
4472 if (!cfs_rq->load.weight)
4473 return;
4474
4475 task_delta = cfs_rq->h_nr_running;
4476 for_each_sched_entity(se) {
4477 if (se->on_rq)
4478 enqueue = 0;
4479
4480 cfs_rq = cfs_rq_of(se);
4481 if (enqueue)
4482 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4483 cfs_rq->h_nr_running += task_delta;
4484
4485 if (cfs_rq_throttled(cfs_rq))
4486 break;
4487 }
4488
4489 if (!se)
72465447 4490 add_nr_running(rq, task_delta);
671fd9da 4491
97fb7a0a 4492 /* Determine whether we need to wake up potentially idle CPU: */
671fd9da 4493 if (rq->curr == rq->idle && rq->cfs.nr_running)
8875125e 4494 resched_curr(rq);
671fd9da
PT
4495}
4496
4497static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4498 u64 remaining, u64 expires)
4499{
4500 struct cfs_rq *cfs_rq;
c06f04c7
BS
4501 u64 runtime;
4502 u64 starting_runtime = remaining;
671fd9da
PT
4503
4504 rcu_read_lock();
4505 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4506 throttled_list) {
4507 struct rq *rq = rq_of(cfs_rq);
8a8c69c3 4508 struct rq_flags rf;
671fd9da 4509
8a8c69c3 4510 rq_lock(rq, &rf);
671fd9da
PT
4511 if (!cfs_rq_throttled(cfs_rq))
4512 goto next;
4513
4514 runtime = -cfs_rq->runtime_remaining + 1;
4515 if (runtime > remaining)
4516 runtime = remaining;
4517 remaining -= runtime;
4518
4519 cfs_rq->runtime_remaining += runtime;
4520 cfs_rq->runtime_expires = expires;
4521
4522 /* we check whether we're throttled above */
4523 if (cfs_rq->runtime_remaining > 0)
4524 unthrottle_cfs_rq(cfs_rq);
4525
4526next:
8a8c69c3 4527 rq_unlock(rq, &rf);
671fd9da
PT
4528
4529 if (!remaining)
4530 break;
4531 }
4532 rcu_read_unlock();
4533
c06f04c7 4534 return starting_runtime - remaining;
671fd9da
PT
4535}
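/*
 * Illustrative example: with 10ms to distribute and three throttled cfs_rqs
 * sitting at runtime_remaining of -3ms, -4ms and -6ms, the first two receive
 * 3ms+1ns and 4ms+1ns respectively (just enough to go positive) and are
 * unthrottled; the third gets the ~3ms that is left, stays at a negative
 * balance and remains throttled until the next refill, and the function
 * reports how much runtime it actually handed out.
 */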
4536
58088ad0
PT
4537/*
4538 * Responsible for refilling a task_group's bandwidth and unthrottling its
4539 * cfs_rqs as appropriate. If there has been no activity within the last
4540 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4541 * used to track this state.
4542 */
4543static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4544{
671fd9da 4545 u64 runtime, runtime_expires;
51f2176d 4546 int throttled;
58088ad0 4547
58088ad0
PT
4548 /* no need to continue the timer with no bandwidth constraint */
4549 if (cfs_b->quota == RUNTIME_INF)
51f2176d 4550 goto out_deactivate;
58088ad0 4551
671fd9da 4552 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
e8da1b18 4553 cfs_b->nr_periods += overrun;
671fd9da 4554
51f2176d
BS
4555 /*
4556 * idle depends on !throttled (for the case of a large deficit), and if
4557 * we're going inactive then everything else can be deferred
4558 */
4559 if (cfs_b->idle && !throttled)
4560 goto out_deactivate;
a9cf55b2
PT
4561
4562 __refill_cfs_bandwidth_runtime(cfs_b);
4563
671fd9da
PT
4564 if (!throttled) {
4565 /* mark as potentially idle for the upcoming period */
4566 cfs_b->idle = 1;
51f2176d 4567 return 0;
671fd9da
PT
4568 }
4569
e8da1b18
NR
4570 /* account preceding periods in which throttling occurred */
4571 cfs_b->nr_throttled += overrun;
4572
671fd9da 4573 runtime_expires = cfs_b->runtime_expires;
671fd9da
PT
4574
4575 /*
c06f04c7
BS
4576 * This check is repeated as we are holding onto the new bandwidth while
4577 * we unthrottle. This can potentially race with an unthrottled group
4578 * trying to acquire new bandwidth from the global pool. This can result
4579 * in us over-using our runtime if it is all used during this loop, but
4580 * only by limited amounts in that extreme case.
671fd9da 4581 */
c06f04c7
BS
4582 while (throttled && cfs_b->runtime > 0) {
4583 runtime = cfs_b->runtime;
671fd9da
PT
4584 raw_spin_unlock(&cfs_b->lock);
4585 /* we can't nest cfs_b->lock while distributing bandwidth */
4586 runtime = distribute_cfs_runtime(cfs_b, runtime,
4587 runtime_expires);
4588 raw_spin_lock(&cfs_b->lock);
4589
4590 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
c06f04c7
BS
4591
4592 cfs_b->runtime -= min(runtime, cfs_b->runtime);
671fd9da 4593 }
58088ad0 4594
671fd9da
PT
4595 /*
4596 * While we are ensured activity in the period following an
4597 * unthrottle, this also covers the case in which the new bandwidth is
4598 * insufficient to cover the existing bandwidth deficit. (Forcing the
4599 * timer to remain active while there are any throttled entities.)
4600 */
4601 cfs_b->idle = 0;
58088ad0 4602
51f2176d
BS
4603 return 0;
4604
4605out_deactivate:
51f2176d 4606 return 1;
58088ad0 4607}
d3d9dc33 4608
d8b4986d
PT
4609/* a cfs_rq won't donate quota below this amount */
4610static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4611/* minimum remaining period time to redistribute slack quota */
4612static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4613/* how long we wait to gather additional slack before distributing */
4614static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4615
db06e78c
BS
4616/*
4617 * Are we near the end of the current quota period?
4618 *
4619 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4961b6e1 4620 * hrtimer base being cleared by hrtimer_start. In the case of
db06e78c
BS
4621 * migrate_hrtimers, base is never cleared, so we are fine.
4622 */
d8b4986d
PT
4623static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4624{
4625 struct hrtimer *refresh_timer = &cfs_b->period_timer;
4626 u64 remaining;
4627
4628 /* if the call-back is running a quota refresh is already occurring */
4629 if (hrtimer_callback_running(refresh_timer))
4630 return 1;
4631
4632 /* is a quota refresh about to occur? */
4633 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4634 if (remaining < min_expire)
4635 return 1;
4636
4637 return 0;
4638}
4639
4640static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4641{
4642 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4643
4644 /* if there's a quota refresh soon don't bother with slack */
4645 if (runtime_refresh_within(cfs_b, min_left))
4646 return;
4647
4cfafd30
PZ
4648 hrtimer_start(&cfs_b->slack_timer,
4649 ns_to_ktime(cfs_bandwidth_slack_period),
4650 HRTIMER_MODE_REL);
d8b4986d
PT
4651}
4652
4653/* we know any runtime found here is valid as update_curr() precedes return */
4654static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4655{
4656 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4657 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4658
4659 if (slack_runtime <= 0)
4660 return;
4661
4662 raw_spin_lock(&cfs_b->lock);
4663 if (cfs_b->quota != RUNTIME_INF &&
4664 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4665 cfs_b->runtime += slack_runtime;
4666
4667 /* we are under rq->lock, defer unthrottling using a timer */
4668 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4669 !list_empty(&cfs_b->throttled_cfs_rq))
4670 start_cfs_slack_bandwidth(cfs_b);
4671 }
4672 raw_spin_unlock(&cfs_b->lock);
4673
4674 /* even if it's not valid for return we don't want to try again */
4675 cfs_rq->runtime_remaining -= slack_runtime;
4676}
4677
4678static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4679{
56f570e5
PT
4680 if (!cfs_bandwidth_used())
4681 return;
4682
fccfdc6f 4683 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
4684 return;
4685
4686 __return_cfs_rq_runtime(cfs_rq);
4687}
4688
4689/*
4690 * This is done with a timer (instead of inline with bandwidth return) since
4691 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4692 */
4693static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4694{
4695 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4696 u64 expires;
4697
4698 /* confirm we're still not at a refresh boundary */
db06e78c
BS
4699 raw_spin_lock(&cfs_b->lock);
4700 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4701 raw_spin_unlock(&cfs_b->lock);
d8b4986d 4702 return;
db06e78c 4703 }
d8b4986d 4704
c06f04c7 4705 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
d8b4986d 4706 runtime = cfs_b->runtime;
c06f04c7 4707
d8b4986d
PT
4708 expires = cfs_b->runtime_expires;
4709 raw_spin_unlock(&cfs_b->lock);
4710
4711 if (!runtime)
4712 return;
4713
4714 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4715
4716 raw_spin_lock(&cfs_b->lock);
4717 if (expires == cfs_b->runtime_expires)
c06f04c7 4718 cfs_b->runtime -= min(runtime, cfs_b->runtime);
d8b4986d
PT
4719 raw_spin_unlock(&cfs_b->lock);
4720}
4721
d3d9dc33
PT
4722/*
4723 * When a group wakes up we want to make sure that its quota is not already
4724 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4725	 * runtime as update_curr() throttling cannot trigger until it's on-rq.
4726 */
4727static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4728{
56f570e5
PT
4729 if (!cfs_bandwidth_used())
4730 return;
4731
d3d9dc33
PT
4732 /* an active group must be handled by the update_curr()->put() path */
4733 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4734 return;
4735
4736 /* ensure the group is not already throttled */
4737 if (cfs_rq_throttled(cfs_rq))
4738 return;
4739
4740 /* update runtime allocation */
4741 account_cfs_rq_runtime(cfs_rq, 0);
4742 if (cfs_rq->runtime_remaining <= 0)
4743 throttle_cfs_rq(cfs_rq);
4744}
4745
55e16d30
PZ
4746static void sync_throttle(struct task_group *tg, int cpu)
4747{
4748 struct cfs_rq *pcfs_rq, *cfs_rq;
4749
4750 if (!cfs_bandwidth_used())
4751 return;
4752
4753 if (!tg->parent)
4754 return;
4755
4756 cfs_rq = tg->cfs_rq[cpu];
4757 pcfs_rq = tg->parent->cfs_rq[cpu];
4758
4759 cfs_rq->throttle_count = pcfs_rq->throttle_count;
b8922125 4760 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
55e16d30
PZ
4761}
4762
d3d9dc33 4763/* conditionally throttle active cfs_rq's from put_prev_entity() */
678d5718 4764static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
d3d9dc33 4765{
56f570e5 4766 if (!cfs_bandwidth_used())
678d5718 4767 return false;
56f570e5 4768
d3d9dc33 4769 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
678d5718 4770 return false;
d3d9dc33
PT
4771
4772 /*
4773 * it's possible for a throttled entity to be forced into a running
4774 * state (e.g. set_curr_task), in this case we're finished.
4775 */
4776 if (cfs_rq_throttled(cfs_rq))
678d5718 4777 return true;
d3d9dc33
PT
4778
4779 throttle_cfs_rq(cfs_rq);
678d5718 4780 return true;
d3d9dc33 4781}
029632fb 4782
029632fb
PZ
4783static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4784{
4785 struct cfs_bandwidth *cfs_b =
4786 container_of(timer, struct cfs_bandwidth, slack_timer);
77a4d1a1 4787
029632fb
PZ
4788 do_sched_cfs_slack_timer(cfs_b);
4789
4790 return HRTIMER_NORESTART;
4791}
4792
4793static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4794{
4795 struct cfs_bandwidth *cfs_b =
4796 container_of(timer, struct cfs_bandwidth, period_timer);
029632fb
PZ
4797 int overrun;
4798 int idle = 0;
4799
51f2176d 4800 raw_spin_lock(&cfs_b->lock);
029632fb 4801 for (;;) {
77a4d1a1 4802 overrun = hrtimer_forward_now(timer, cfs_b->period);
029632fb
PZ
4803 if (!overrun)
4804 break;
4805
4806 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4807 }
4cfafd30
PZ
4808 if (idle)
4809 cfs_b->period_active = 0;
51f2176d 4810 raw_spin_unlock(&cfs_b->lock);
029632fb
PZ
4811
4812 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4813}
4814
4815void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4816{
4817 raw_spin_lock_init(&cfs_b->lock);
4818 cfs_b->runtime = 0;
4819 cfs_b->quota = RUNTIME_INF;
4820 cfs_b->period = ns_to_ktime(default_cfs_period());
4821
4822 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4cfafd30 4823 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
029632fb
PZ
4824 cfs_b->period_timer.function = sched_cfs_period_timer;
4825 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4826 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4827}
4828
4829static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4830{
4831 cfs_rq->runtime_enabled = 0;
4832 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4833}
4834
77a4d1a1 4835void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
029632fb 4836{
f1d1be8a
XP
4837 u64 overrun;
4838
4cfafd30 4839 lockdep_assert_held(&cfs_b->lock);
029632fb 4840
f1d1be8a
XP
4841 if (cfs_b->period_active)
4842 return;
4843
4844 cfs_b->period_active = 1;
4845 overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4846 cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
4847 cfs_b->expires_seq++;
4848 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
029632fb
PZ
4849}
4850
4851static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4852{
7f1a169b
TH
4853 /* init_cfs_bandwidth() was not called */
4854 if (!cfs_b->throttled_cfs_rq.next)
4855 return;
4856
029632fb
PZ
4857 hrtimer_cancel(&cfs_b->period_timer);
4858 hrtimer_cancel(&cfs_b->slack_timer);
4859}
4860
502ce005 4861/*
97fb7a0a 4862 * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
502ce005
PZ
4863 *
4864 * The race is harmless, since modifying bandwidth settings of unhooked group
4865 * bits doesn't do much.
4866 */
4867
4868/* cpu online callback */
0e59bdae
KT
4869static void __maybe_unused update_runtime_enabled(struct rq *rq)
4870{
502ce005 4871 struct task_group *tg;
0e59bdae 4872
502ce005
PZ
4873 lockdep_assert_held(&rq->lock);
4874
4875 rcu_read_lock();
4876 list_for_each_entry_rcu(tg, &task_groups, list) {
4877 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
4878 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
0e59bdae
KT
4879
4880 raw_spin_lock(&cfs_b->lock);
4881 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4882 raw_spin_unlock(&cfs_b->lock);
4883 }
502ce005 4884 rcu_read_unlock();
0e59bdae
KT
4885}
4886
502ce005 4887/* cpu offline callback */
38dc3348 4888static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb 4889{
502ce005
PZ
4890 struct task_group *tg;
4891
4892 lockdep_assert_held(&rq->lock);
4893
4894 rcu_read_lock();
4895 list_for_each_entry_rcu(tg, &task_groups, list) {
4896 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
029632fb 4897
029632fb
PZ
4898 if (!cfs_rq->runtime_enabled)
4899 continue;
4900
4901 /*
4902 * clock_task is not advancing so we just need to make sure
4903 * there's some valid quota amount
4904 */
51f2176d 4905 cfs_rq->runtime_remaining = 1;
0e59bdae 4906 /*
97fb7a0a 4907 * Offline rq is schedulable till CPU is completely disabled
0e59bdae
KT
4908 * in take_cpu_down(), so we prevent new cfs throttling here.
4909 */
4910 cfs_rq->runtime_enabled = 0;
4911
029632fb
PZ
4912 if (cfs_rq_throttled(cfs_rq))
4913 unthrottle_cfs_rq(cfs_rq);
4914 }
502ce005 4915 rcu_read_unlock();
029632fb
PZ
4916}
4917
4918#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
4919static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4920{
78becc27 4921 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
4922}
4923
9dbdb155 4924static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
678d5718 4925static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
d3d9dc33 4926static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
55e16d30 4927static inline void sync_throttle(struct task_group *tg, int cpu) {}
6c16a6dc 4928static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
4929
4930static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4931{
4932 return 0;
4933}
64660c86
PT
4934
4935static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4936{
4937 return 0;
4938}
4939
4940static inline int throttled_lb_pair(struct task_group *tg,
4941 int src_cpu, int dest_cpu)
4942{
4943 return 0;
4944}
029632fb
PZ
4945
4946void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4947
4948#ifdef CONFIG_FAIR_GROUP_SCHED
4949static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
4950#endif
4951
029632fb
PZ
4952static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4953{
4954 return NULL;
4955}
4956static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
0e59bdae 4957static inline void update_runtime_enabled(struct rq *rq) {}
a4c96ae3 4958static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
4959
4960#endif /* CONFIG_CFS_BANDWIDTH */
4961
bf0f6f24
IM
4962/**************************************************
4963 * CFS operations on tasks:
4964 */
4965
8f4d37ec
PZ
4966#ifdef CONFIG_SCHED_HRTICK
4967static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4968{
8f4d37ec
PZ
4969 struct sched_entity *se = &p->se;
4970 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4971
9148a3a1 4972 SCHED_WARN_ON(task_rq(p) != rq);
8f4d37ec 4973
8bf46a39 4974 if (rq->cfs.h_nr_running > 1) {
8f4d37ec
PZ
4975 u64 slice = sched_slice(cfs_rq, se);
4976 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4977 s64 delta = slice - ran;
4978
4979 if (delta < 0) {
4980 if (rq->curr == p)
8875125e 4981 resched_curr(rq);
8f4d37ec
PZ
4982 return;
4983 }
31656519 4984 hrtick_start(rq, delta);
8f4d37ec
PZ
4985 }
4986}
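/*
 * Illustrative example: with more than one runnable fair task, if the
 * current task's slice is 3ms and it has already run 1ms since it was last
 * picked, the hrtick is programmed to fire in 2ms so the slice boundary is
 * enforced precisely instead of waiting for the next regular scheduler
 * tick; if the slice is already exhausted (delta < 0) it is rescheduled
 * immediately.
 */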
a4c2f00f
PZ
4987
4988/*
4989 * called from enqueue/dequeue and updates the hrtick when the
4990 * current task is from our class and nr_running is low enough
4991 * to matter.
4992 */
4993static void hrtick_update(struct rq *rq)
4994{
4995 struct task_struct *curr = rq->curr;
4996
b39e66ea 4997 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
4998 return;
4999
5000 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
5001 hrtick_start_fair(rq, curr);
5002}
55e12e5e 5003#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
5004static inline void
5005hrtick_start_fair(struct rq *rq, struct task_struct *p)
5006{
5007}
a4c2f00f
PZ
5008
5009static inline void hrtick_update(struct rq *rq)
5010{
5011}
8f4d37ec
PZ
5012#endif
5013
bf0f6f24
IM
5014/*
5015 * The enqueue_task method is called before nr_running is
5016 * increased. Here we update the fair scheduling stats and
5017 * then put the task into the rbtree:
5018 */
ea87bb78 5019static void
371fd7e7 5020enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
5021{
5022 struct cfs_rq *cfs_rq;
62fb1851 5023 struct sched_entity *se = &p->se;
bf0f6f24 5024
2539fc82
PB
5025 /*
5026 * The code below (indirectly) updates schedutil which looks at
5027 * the cfs_rq utilization to select a frequency.
5028 * Let's add the task's estimated utilization to the cfs_rq's
5029 * estimated utilization, before we update schedutil.
5030 */
5031 util_est_enqueue(&rq->cfs, p);
5032
8c34ab19
RW
5033 /*
5034 * If in_iowait is set, the code below may not trigger any cpufreq
5035 * utilization updates, so do it here explicitly with the IOWAIT flag
5036 * passed.
5037 */
5038 if (p->in_iowait)
674e7541 5039 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
8c34ab19 5040
bf0f6f24 5041 for_each_sched_entity(se) {
62fb1851 5042 if (se->on_rq)
bf0f6f24
IM
5043 break;
5044 cfs_rq = cfs_rq_of(se);
88ec22d3 5045 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
5046
5047 /*
5048 * end evaluation on encountering a throttled cfs_rq
5049 *
5050 * note: in the case of encountering a throttled cfs_rq we will
5051 * post the final h_nr_running increment below.
e210bffd 5052 */
85dac906
PT
5053 if (cfs_rq_throttled(cfs_rq))
5054 break;
953bfcd1 5055 cfs_rq->h_nr_running++;
85dac906 5056
88ec22d3 5057 flags = ENQUEUE_WAKEUP;
bf0f6f24 5058 }
8f4d37ec 5059
2069dd75 5060 for_each_sched_entity(se) {
0f317143 5061 cfs_rq = cfs_rq_of(se);
953bfcd1 5062 cfs_rq->h_nr_running++;
2069dd75 5063
85dac906
PT
5064 if (cfs_rq_throttled(cfs_rq))
5065 break;
5066
88c0616e 5067 update_load_avg(cfs_rq, se, UPDATE_TG);
1ea6c46a 5068 update_cfs_group(se);
2069dd75
PZ
5069 }
5070
cd126afe 5071 if (!se)
72465447 5072 add_nr_running(rq, 1);
cd126afe 5073
a4c2f00f 5074 hrtick_update(rq);
bf0f6f24
IM
5075}
5076
2f36825b
VP
5077static void set_next_buddy(struct sched_entity *se);
5078
bf0f6f24
IM
5079/*
5080 * The dequeue_task method is called before nr_running is
5081 * decreased. We remove the task from the rbtree and
5082 * update the fair scheduling stats:
5083 */
371fd7e7 5084static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
5085{
5086 struct cfs_rq *cfs_rq;
62fb1851 5087 struct sched_entity *se = &p->se;
2f36825b 5088 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
5089
5090 for_each_sched_entity(se) {
5091 cfs_rq = cfs_rq_of(se);
371fd7e7 5092 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
5093
5094 /*
5095 * end evaluation on encountering a throttled cfs_rq
5096 *
5097 * note: in the case of encountering a throttled cfs_rq we will
5098 * post the final h_nr_running decrement below.
5099 */
5100 if (cfs_rq_throttled(cfs_rq))
5101 break;
953bfcd1 5102 cfs_rq->h_nr_running--;
2069dd75 5103
bf0f6f24 5104 /* Don't dequeue parent if it has other entities besides us */
2f36825b 5105 if (cfs_rq->load.weight) {
754bd598
KK
5106 /* Avoid re-evaluating load for this entity: */
5107 se = parent_entity(se);
2f36825b
VP
5108 /*
5109 * Bias pick_next to pick a task from this cfs_rq, as
5110 * p is sleeping when it is within its sched_slice.
5111 */
754bd598
KK
5112 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
5113 set_next_buddy(se);
bf0f6f24 5114 break;
2f36825b 5115 }
371fd7e7 5116 flags |= DEQUEUE_SLEEP;
bf0f6f24 5117 }
8f4d37ec 5118
2069dd75 5119 for_each_sched_entity(se) {
0f317143 5120 cfs_rq = cfs_rq_of(se);
953bfcd1 5121 cfs_rq->h_nr_running--;
2069dd75 5122
85dac906
PT
5123 if (cfs_rq_throttled(cfs_rq))
5124 break;
5125
88c0616e 5126 update_load_avg(cfs_rq, se, UPDATE_TG);
1ea6c46a 5127 update_cfs_group(se);
2069dd75
PZ
5128 }
5129
cd126afe 5130 if (!se)
72465447 5131 sub_nr_running(rq, 1);
cd126afe 5132
7f65ea42 5133 util_est_dequeue(&rq->cfs, p, task_sleep);
a4c2f00f 5134 hrtick_update(rq);
bf0f6f24
IM
5135}
5136
e7693a36 5137#ifdef CONFIG_SMP
10e2f1ac
PZ
5138
5139/* Working cpumask for: load_balance, load_balance_newidle. */
5140DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5141DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
5142
9fd81dd5 5143#ifdef CONFIG_NO_HZ_COMMON
3289bdb4
PZ
5144/*
5145 * per rq 'load' array crap; XXX kill this.
5146 */
5147
5148/*
d937cdc5 5149 * The exact cpuload calculated at every tick would be:
3289bdb4 5150 *
d937cdc5
PZ
5151 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
5152 *
97fb7a0a
IM
5153 * If a CPU misses updates for n ticks (as it was idle) and update gets
5154 * called on the n+1-th tick when the CPU may be busy, then we have:
d937cdc5
PZ
5155 *
5156 * load_n = (1 - 1/2^i)^n * load_0
5157 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
3289bdb4
PZ
5158 *
5159 * decay_load_missed() below does efficient calculation of
3289bdb4 5160 *
d937cdc5
PZ
5161 * load' = (1 - 1/2^i)^n * load
5162 *
5163 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
5164 * This allows us to precompute the above in said factors, thereby allowing the
5165 * reduction of an arbitrary n in O(log_2 n) steps. (See also
5166 * fixed_power_int())
3289bdb4 5167 *
d937cdc5 5168 * The calculation is approximated on a 128 point scale.
3289bdb4
PZ
5169 */
5170#define DEGRADE_SHIFT 7
d937cdc5
PZ
5171
5172static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
5173static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
5174 { 0, 0, 0, 0, 0, 0, 0, 0 },
5175 { 64, 32, 8, 0, 0, 0, 0, 0 },
5176 { 96, 72, 40, 12, 1, 0, 0, 0 },
5177 { 112, 98, 75, 43, 15, 1, 0, 0 },
5178 { 120, 112, 98, 76, 45, 16, 2, 0 }
5179};
3289bdb4
PZ
5180
5181/*
5182 * Update cpu_load for any missed ticks due to tickless idle. The backlog
5183 * covers ticks missed while the CPU was idle, so we just decay the old
5184 * load without adding any new load.
5185 */
5186static unsigned long
5187decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
5188{
5189 int j = 0;
5190
5191 if (!missed_updates)
5192 return load;
5193
5194 if (missed_updates >= degrade_zero_ticks[idx])
5195 return 0;
5196
5197 if (idx == 1)
5198 return load >> missed_updates;
5199
5200 while (missed_updates) {
5201 if (missed_updates % 2)
5202 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
5203
5204 missed_updates >>= 1;
5205 j++;
5206 }
5207 return load;
5208}
e022e0d3
PZ
5209
5210static struct {
5211 cpumask_var_t idle_cpus_mask;
5212 atomic_t nr_cpus;
f643ea22 5213 int has_blocked; /* Idle CPUs have blocked load */
e022e0d3 5214 unsigned long next_balance; /* in jiffy units */
f643ea22 5215 unsigned long next_blocked; /* Next update of blocked load in jiffies */
e022e0d3
PZ
5216} nohz ____cacheline_aligned;
5217
9fd81dd5 5218#endif /* CONFIG_NO_HZ_COMMON */
3289bdb4 5219
59543275 5220/**
cee1afce 5221 * cpu_load_update - update the rq->cpu_load[] statistics
59543275
BP
5222 * @this_rq: The rq to update statistics for
5223 * @this_load: The current load
5224 * @pending_updates: The number of missed updates
59543275 5225 *
3289bdb4 5226 * Update rq->cpu_load[] statistics. This function is usually called every
59543275
BP
5227 * scheduler tick (TICK_NSEC).
5228 *
5229 * This function computes a decaying average:
5230 *
5231 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
5232 *
5233 * Because of NOHZ it might not get called on every tick which gives need for
5234 * the @pending_updates argument.
5235 *
5236 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
5237 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
5238 * = A * (A * load[i]_n-2 + B) + B
5239 * = A * (A * (A * load[i]_n-3 + B) + B) + B
5240 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
5241 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
5242 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
5243 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
5244 *
5245 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
5246 * any change in load would have resulted in the tick being turned back on.
5247 *
5248 * For regular NOHZ, this reduces to:
5249 *
5250 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
5251 *
5252 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
1f41906a 5253 * term.
3289bdb4 5254 */
1f41906a
FW
5255static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
5256 unsigned long pending_updates)
3289bdb4 5257{
9fd81dd5 5258 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
3289bdb4
PZ
5259 int i, scale;
5260
5261 this_rq->nr_load_updates++;
5262
5263 /* Update our load: */
5264 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
5265 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
5266 unsigned long old_load, new_load;
5267
5268 /* scale is effectively 1 << i now, and >> i divides by scale */
5269
7400d3bb 5270 old_load = this_rq->cpu_load[i];
9fd81dd5 5271#ifdef CONFIG_NO_HZ_COMMON
3289bdb4 5272 old_load = decay_load_missed(old_load, pending_updates - 1, i);
7400d3bb
BP
5273 if (tickless_load) {
5274 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
5275 /*
5276 * old_load can never be a negative value because a
5277 * decayed tickless_load cannot be greater than the
5278 * original tickless_load.
5279 */
5280 old_load += tickless_load;
5281 }
9fd81dd5 5282#endif
3289bdb4
PZ
5283 new_load = this_load;
5284 /*
5285 * Round up the averaging division if load is increasing. This
5286 * prevents us from getting stuck on 9 if the load is 10, for
5287 * example.
5288 */
5289 if (new_load > old_load)
5290 new_load += scale - 1;
5291
5292 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
5293 }
3289bdb4
PZ
5294}
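For readers who want to see the round-up behaviour described in the comment above, here is a hedged stand-alone sketch (plain userspace C with invented inputs, not kernel code) of the per-index decaying average; with the rounding, a constant load of 10 converges to 10 on every index rather than stalling at 9.

/* Illustrative userspace sketch of the per-index decaying average. */
#include <stdio.h>

#define IDX_MAX 5

static void load_update(unsigned long cpu_load[IDX_MAX], unsigned long this_load)
{
	unsigned long scale;
	int i;

	cpu_load[0] = this_load;	/* idx 0 tracks the instantaneous load */
	for (i = 1, scale = 2; i < IDX_MAX; i++, scale += scale) {
		unsigned long old_load = cpu_load[i];
		unsigned long new_load = this_load;

		/* round up when increasing so we don't stick just below target */
		if (new_load > old_load)
			new_load += scale - 1;

		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}
}

int main(void)
{
	unsigned long cpu_load[IDX_MAX] = { 0, 0, 0, 0, 0 };
	int tick;

	/* feed a constant load of 10; every index converges to 10 */
	for (tick = 0; tick < 20; tick++)
		load_update(cpu_load, 10);

	printf("%lu %lu %lu %lu %lu\n", cpu_load[0], cpu_load[1],
	       cpu_load[2], cpu_load[3], cpu_load[4]);
	return 0;
}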
5295
7ea241af 5296/* Used instead of source_load when we know the type == 0 */
c7132dd6 5297static unsigned long weighted_cpuload(struct rq *rq)
7ea241af 5298{
c7132dd6 5299 return cfs_rq_runnable_load_avg(&rq->cfs);
7ea241af
YD
5300}
5301
3289bdb4 5302#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
5303/*
5304 * There is no sane way to deal with nohz on smp when using jiffies because the
97fb7a0a 5305 * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading
1f41906a
FW
5306 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
5307 *
5308 * Therefore we need to avoid the delta approach from the regular tick when
5309 * possible since that would seriously skew the load calculation. This is why we
5310 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
5311 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
5312 * loop exit, nohz_idle_balance, nohz full exit...)
5313 *
5314 * This means we might still be one tick off for nohz periods.
5315 */
5316
5317static void cpu_load_update_nohz(struct rq *this_rq,
5318 unsigned long curr_jiffies,
5319 unsigned long load)
be68a682
FW
5320{
5321 unsigned long pending_updates;
5322
5323 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
5324 if (pending_updates) {
5325 this_rq->last_load_update_tick = curr_jiffies;
5326 /*
5327 * In the regular NOHZ case, we were idle, which means a load of 0.
5328 * In the NOHZ_FULL case, we were non-idle, so we should consider
5329 * the rq's weighted load.
5330 */
1f41906a 5331 cpu_load_update(this_rq, load, pending_updates);
be68a682
FW
5332 }
5333}
5334
3289bdb4
PZ
5335/*
5336 * Called from nohz_idle_balance() to update the load ratings before doing the
5337 * idle balance.
5338 */
cee1afce 5339static void cpu_load_update_idle(struct rq *this_rq)
3289bdb4 5340{
3289bdb4
PZ
5341 /*
5342 * bail if there's load or we're actually up-to-date.
5343 */
c7132dd6 5344 if (weighted_cpuload(this_rq))
3289bdb4
PZ
5345 return;
5346
1f41906a 5347 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
3289bdb4
PZ
5348}
5349
5350/*
1f41906a
FW
5351 * Record CPU load on nohz entry so we know the tickless load to account
5352 * on nohz exit. cpu_load[0] then happens to be updated more frequently
5353 * than the other cpu_load[idx] entries, but that should be fine because
5354 * cpu_load readers shouldn't rely on synchronized cpu_load[*] updates.
3289bdb4 5355 */
1f41906a 5356void cpu_load_update_nohz_start(void)
3289bdb4
PZ
5357{
5358 struct rq *this_rq = this_rq();
1f41906a
FW
5359
5360 /*
5361 * This is all lockless but should be fine. If weighted_cpuload changes
5362 * concurrently we'll exit nohz. And the cpu_load write can race with
5363 * cpu_load_update_idle(), but both updaters would be writing the same value.
5364 */
c7132dd6 5365 this_rq->cpu_load[0] = weighted_cpuload(this_rq);
1f41906a
FW
5366}
5367
5368/*
5369 * Account the tickless load in the end of a nohz frame.
5370 */
5371void cpu_load_update_nohz_stop(void)
5372{
316c1608 5373 unsigned long curr_jiffies = READ_ONCE(jiffies);
1f41906a
FW
5374 struct rq *this_rq = this_rq();
5375 unsigned long load;
8a8c69c3 5376 struct rq_flags rf;
3289bdb4
PZ
5377
5378 if (curr_jiffies == this_rq->last_load_update_tick)
5379 return;
5380
c7132dd6 5381 load = weighted_cpuload(this_rq);
8a8c69c3 5382 rq_lock(this_rq, &rf);
b52fad2d 5383 update_rq_clock(this_rq);
1f41906a 5384 cpu_load_update_nohz(this_rq, curr_jiffies, load);
8a8c69c3 5385 rq_unlock(this_rq, &rf);
3289bdb4 5386}
1f41906a
FW
5387#else /* !CONFIG_NO_HZ_COMMON */
5388static inline void cpu_load_update_nohz(struct rq *this_rq,
5389 unsigned long curr_jiffies,
5390 unsigned long load) { }
5391#endif /* CONFIG_NO_HZ_COMMON */
5392
5393static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
5394{
9fd81dd5 5395#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
5396 /* See the mess around cpu_load_update_nohz(). */
5397 this_rq->last_load_update_tick = READ_ONCE(jiffies);
9fd81dd5 5398#endif
1f41906a
FW
5399 cpu_load_update(this_rq, load, 1);
5400}
3289bdb4
PZ
5401
5402/*
5403 * Called from scheduler_tick()
5404 */
cee1afce 5405void cpu_load_update_active(struct rq *this_rq)
3289bdb4 5406{
c7132dd6 5407 unsigned long load = weighted_cpuload(this_rq);
1f41906a
FW
5408
5409 if (tick_nohz_tick_stopped())
5410 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
5411 else
5412 cpu_load_update_periodic(this_rq, load);
3289bdb4
PZ
5413}
5414
029632fb 5415/*
97fb7a0a 5416 * Return a low guess at the load of a migration-source CPU weighted
029632fb
PZ
5417 * according to the scheduling class and "nice" value.
5418 *
5419 * We want to under-estimate the load of migration sources, to
5420 * balance conservatively.
5421 */
5422static unsigned long source_load(int cpu, int type)
5423{
5424 struct rq *rq = cpu_rq(cpu);
c7132dd6 5425 unsigned long total = weighted_cpuload(rq);
029632fb
PZ
5426
5427 if (type == 0 || !sched_feat(LB_BIAS))
5428 return total;
5429
5430 return min(rq->cpu_load[type-1], total);
5431}
5432
5433/*
97fb7a0a 5434 * Return a high guess at the load of a migration-target CPU weighted
029632fb
PZ
5435 * according to the scheduling class and "nice" value.
5436 */
5437static unsigned long target_load(int cpu, int type)
5438{
5439 struct rq *rq = cpu_rq(cpu);
c7132dd6 5440 unsigned long total = weighted_cpuload(rq);
029632fb
PZ
5441
5442 if (type == 0 || !sched_feat(LB_BIAS))
5443 return total;
5444
5445 return max(rq->cpu_load[type-1], total);
5446}
5447
ced549fa 5448static unsigned long capacity_of(int cpu)
029632fb 5449{
ced549fa 5450 return cpu_rq(cpu)->cpu_capacity;
029632fb
PZ
5451}
5452
ca6d75e6
VG
5453static unsigned long capacity_orig_of(int cpu)
5454{
5455 return cpu_rq(cpu)->cpu_capacity_orig;
5456}
5457
029632fb
PZ
5458static unsigned long cpu_avg_load_per_task(int cpu)
5459{
5460 struct rq *rq = cpu_rq(cpu);
316c1608 5461 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
c7132dd6 5462 unsigned long load_avg = weighted_cpuload(rq);
029632fb
PZ
5463
5464 if (nr_running)
b92486cb 5465 return load_avg / nr_running;
029632fb
PZ
5466
5467 return 0;
5468}
5469
c58d25f3
PZ
5470static void record_wakee(struct task_struct *p)
5471{
5472 /*
5473 * Only decay a single time; tasks that have less than 1 wakeup per
5474 * jiffy will not have built up many flips.
5475 */
5476 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5477 current->wakee_flips >>= 1;
5478 current->wakee_flip_decay_ts = jiffies;
5479 }
5480
5481 if (current->last_wakee != p) {
5482 current->last_wakee = p;
5483 current->wakee_flips++;
5484 }
5485}
5486
63b0e9ed
MG
5487/*
5488 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
c58d25f3 5489 *
63b0e9ed 5490 * A waker of many should wake a different task than the one last awakened
c58d25f3
PZ
5491 * at a frequency roughly N times higher than one of its wakees.
5492 *
5493 * In order to determine whether we should let the load spread or consolidate
5494 * onto the shared cache, we look for a minimum 'flip' frequency of llc_size
5495 * in one partner, and a factor of llc_size higher frequency in the other.
5496 *
5497 * With both conditions met, we can be relatively sure that the relationship is
5498 * non-monogamous, with partner count exceeding socket size.
5499 *
5500 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5501 * whatever is irrelevant, spread criteria is apparent partner count exceeds
5502 * socket size.
63b0e9ed 5503 */
62470419
MW
5504static int wake_wide(struct task_struct *p)
5505{
63b0e9ed
MG
5506 unsigned int master = current->wakee_flips;
5507 unsigned int slave = p->wakee_flips;
7d9ffa89 5508 int factor = this_cpu_read(sd_llc_size);
62470419 5509
63b0e9ed
MG
5510 if (master < slave)
5511 swap(master, slave);
5512 if (slave < factor || master < slave * factor)
5513 return 0;
5514 return 1;
62470419
MW
5515}
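A hedged stand-alone rendering of the flip heuristic can make the threshold concrete; llc_size is simply assumed to be 8 here, and the function takes plain counters instead of reading current and p.

/* Illustrative sketch of the M:N waker/wakee spread heuristic. */
#include <stdio.h>

#define LLC_SIZE 8	/* assumption: 8 logical CPUs share the LLC */

static void swap_uint(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

/* returns 1 when the flip counts suggest spreading the load */
static int wake_wide_sketch(unsigned int waker_flips, unsigned int wakee_flips)
{
	unsigned int master = waker_flips;
	unsigned int slave = wakee_flips;

	if (master < slave)
		swap_uint(&master, &slave);
	if (slave < LLC_SIZE || master < slave * LLC_SIZE)
		return 0;	/* stay affine: partner count looks small */
	return 1;		/* spread: partner count exceeds the LLC size */
}

int main(void)
{
	printf("%d\n", wake_wide_sketch(100, 10));	/* 1: 10 >= 8 and 100 >= 80 */
	printf("%d\n", wake_wide_sketch(100, 20));	/* 0: 100 < 20 * 8 */
	return 0;
}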
5516
90001d67 5517/*
d153b153
PZ
5518 * The purpose of wake_affine() is to quickly determine on which CPU we can run
5519 * soonest. For the purpose of speed we only consider the waking and previous
5520 * CPU.
90001d67 5521 *
7332dec0
MG
5522 * wake_affine_idle() - only considers 'now'; it checks whether the waking CPU is
5523 * cache-affine and is (or will be) idle.
f2cdd9cc
PZ
5524 *
5525 * wake_affine_weight() - considers the weight to reflect the average
5526 * scheduling latency of the CPUs. This seems to work
5527 * for the overloaded case.
90001d67 5528 */
3b76c4a3 5529static int
89a55f56 5530wake_affine_idle(int this_cpu, int prev_cpu, int sync)
90001d67 5531{
7332dec0
MG
5532 /*
5533 * If this_cpu is idle, it implies the wakeup is from interrupt
5534 * context. Only allow the move if cache is shared. Otherwise an
5535 * interrupt intensive workload could force all tasks onto one
5536 * node depending on the IO topology or IRQ affinity settings.
806486c3
MG
5537 *
5538 * If the prev_cpu is idle and cache affine then avoid a migration.
5539 * There is no guarantee that the cache hot data from an interrupt
5540 * is more important than cache hot data on the prev_cpu and from
5541 * a cpufreq perspective, it's better to have higher utilisation
5542 * on one CPU.
7332dec0 5543 */
943d355d
RJ
5544 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
5545 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
90001d67 5546
d153b153 5547 if (sync && cpu_rq(this_cpu)->nr_running == 1)
3b76c4a3 5548 return this_cpu;
90001d67 5549
3b76c4a3 5550 return nr_cpumask_bits;
90001d67
PZ
5551}
5552
3b76c4a3 5553static int
f2cdd9cc
PZ
5554wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
5555 int this_cpu, int prev_cpu, int sync)
90001d67 5556{
90001d67
PZ
5557 s64 this_eff_load, prev_eff_load;
5558 unsigned long task_load;
5559
f2cdd9cc 5560 this_eff_load = target_load(this_cpu, sd->wake_idx);
90001d67 5561
90001d67
PZ
5562 if (sync) {
5563 unsigned long current_load = task_h_load(current);
5564
f2cdd9cc 5565 if (current_load > this_eff_load)
3b76c4a3 5566 return this_cpu;
90001d67 5567
f2cdd9cc 5568 this_eff_load -= current_load;
90001d67
PZ
5569 }
5570
90001d67
PZ
5571 task_load = task_h_load(p);
5572
f2cdd9cc
PZ
5573 this_eff_load += task_load;
5574 if (sched_feat(WA_BIAS))
5575 this_eff_load *= 100;
5576 this_eff_load *= capacity_of(prev_cpu);
90001d67 5577
eeb60398 5578 prev_eff_load = source_load(prev_cpu, sd->wake_idx);
f2cdd9cc
PZ
5579 prev_eff_load -= task_load;
5580 if (sched_feat(WA_BIAS))
5581 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
5582 prev_eff_load *= capacity_of(this_cpu);
90001d67 5583
082f764a
MG
5584 /*
5585 * If sync, adjust the weight of prev_eff_load such that if
5586 * prev_eff == this_eff that select_idle_sibling() will consider
5587 * stacking the wakee on top of the waker if no other CPU is
5588 * idle.
5589 */
5590 if (sync)
5591 prev_eff_load += 1;
5592
5593 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
90001d67
PZ
5594}
5595
772bd008 5596static int wake_affine(struct sched_domain *sd, struct task_struct *p,
7ebb66a1 5597 int this_cpu, int prev_cpu, int sync)
098fb9db 5598{
3b76c4a3 5599 int target = nr_cpumask_bits;
098fb9db 5600
89a55f56 5601 if (sched_feat(WA_IDLE))
3b76c4a3 5602 target = wake_affine_idle(this_cpu, prev_cpu, sync);
90001d67 5603
3b76c4a3
MG
5604 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
5605 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
098fb9db 5606
ae92882e 5607 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
3b76c4a3
MG
5608 if (target == nr_cpumask_bits)
5609 return prev_cpu;
098fb9db 5610
3b76c4a3
MG
5611 schedstat_inc(sd->ttwu_move_affine);
5612 schedstat_inc(p->se.statistics.nr_wakeups_affine);
5613 return target;
098fb9db
IM
5614}
5615
f01415fd 5616static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
6a0b19c0
MR
5617
5618static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
5619{
f453ae22 5620 return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
6a0b19c0
MR
5621}
5622
aaee1203
PZ
5623/*
5624 * find_idlest_group finds and returns the least busy CPU group within the
5625 * domain.
6fee85cc
BJ
5626 *
5627 * Assumes p is allowed on at least one CPU in sd.
aaee1203
PZ
5628 */
5629static struct sched_group *
78e7ed53 5630find_idlest_group(struct sched_domain *sd, struct task_struct *p,
c44f2a02 5631 int this_cpu, int sd_flag)
e7693a36 5632{
b3bd3de6 5633 struct sched_group *idlest = NULL, *group = sd->groups;
6a0b19c0 5634 struct sched_group *most_spare_sg = NULL;
0d10ab95
BJ
5635 unsigned long min_runnable_load = ULONG_MAX;
5636 unsigned long this_runnable_load = ULONG_MAX;
5637 unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
6a0b19c0 5638 unsigned long most_spare = 0, this_spare = 0;
c44f2a02 5639 int load_idx = sd->forkexec_idx;
6b94780e
VG
5640 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
5641 unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
5642 (sd->imbalance_pct-100) / 100;
e7693a36 5643
c44f2a02
VG
5644 if (sd_flag & SD_BALANCE_WAKE)
5645 load_idx = sd->wake_idx;
5646
aaee1203 5647 do {
6b94780e
VG
5648 unsigned long load, avg_load, runnable_load;
5649 unsigned long spare_cap, max_spare_cap;
aaee1203
PZ
5650 int local_group;
5651 int i;
e7693a36 5652
aaee1203 5653 /* Skip over this group if it has no CPUs allowed */
ae4df9d6 5654 if (!cpumask_intersects(sched_group_span(group),
0c98d344 5655 &p->cpus_allowed))
aaee1203
PZ
5656 continue;
5657
5658 local_group = cpumask_test_cpu(this_cpu,
ae4df9d6 5659 sched_group_span(group));
aaee1203 5660
6a0b19c0
MR
5661 /*
5662 * Tally up the load of all CPUs in the group and find
5663 * the group containing the CPU with most spare capacity.
5664 */
aaee1203 5665 avg_load = 0;
6b94780e 5666 runnable_load = 0;
6a0b19c0 5667 max_spare_cap = 0;
aaee1203 5668
ae4df9d6 5669 for_each_cpu(i, sched_group_span(group)) {
97fb7a0a 5670 /* Bias balancing toward CPUs of our domain */
aaee1203
PZ
5671 if (local_group)
5672 load = source_load(i, load_idx);
5673 else
5674 load = target_load(i, load_idx);
5675
6b94780e
VG
5676 runnable_load += load;
5677
5678 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
6a0b19c0
MR
5679
5680 spare_cap = capacity_spare_wake(i, p);
5681
5682 if (spare_cap > max_spare_cap)
5683 max_spare_cap = spare_cap;
aaee1203
PZ
5684 }
5685
63b2ca30 5686 /* Adjust by relative CPU capacity of the group */
6b94780e
VG
5687 avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
5688 group->sgc->capacity;
5689 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
5690 group->sgc->capacity;
aaee1203
PZ
5691
5692 if (local_group) {
6b94780e
VG
5693 this_runnable_load = runnable_load;
5694 this_avg_load = avg_load;
6a0b19c0
MR
5695 this_spare = max_spare_cap;
5696 } else {
6b94780e
VG
5697 if (min_runnable_load > (runnable_load + imbalance)) {
5698 /*
5699 * The runnable load is significantly smaller
97fb7a0a 5700 * so we can pick this new CPU:
6b94780e
VG
5701 */
5702 min_runnable_load = runnable_load;
5703 min_avg_load = avg_load;
5704 idlest = group;
5705 } else if ((runnable_load < (min_runnable_load + imbalance)) &&
5706 (100*min_avg_load > imbalance_scale*avg_load)) {
5707 /*
5708 * The runnable loads are close so take the
97fb7a0a 5709 * blocked load into account through avg_load:
6b94780e
VG
5710 */
5711 min_avg_load = avg_load;
6a0b19c0
MR
5712 idlest = group;
5713 }
5714
5715 if (most_spare < max_spare_cap) {
5716 most_spare = max_spare_cap;
5717 most_spare_sg = group;
5718 }
aaee1203
PZ
5719 }
5720 } while (group = group->next, group != sd->groups);
5721
6a0b19c0
MR
5722 /*
5723 * The cross-over point between using spare capacity or least load
5724 * is too conservative for high utilization tasks on partially
5725 * utilized systems if we require spare_capacity > task_util(p),
5726 * so we allow for some task stuffing by using
5727 * spare_capacity > task_util(p)/2.
f519a3f1
VG
5728 *
5729 * Spare capacity can't be used for fork because the utilization has
5730 * not been set yet; we must first select a rq to compute the initial
5731 * utilization.
6a0b19c0 5732 */
f519a3f1
VG
5733 if (sd_flag & SD_BALANCE_FORK)
5734 goto skip_spare;
5735
6a0b19c0 5736 if (this_spare > task_util(p) / 2 &&
6b94780e 5737 imbalance_scale*this_spare > 100*most_spare)
6a0b19c0 5738 return NULL;
6b94780e
VG
5739
5740 if (most_spare > task_util(p) / 2)
6a0b19c0
MR
5741 return most_spare_sg;
5742
f519a3f1 5743skip_spare:
6b94780e
VG
5744 if (!idlest)
5745 return NULL;
5746
2c833627
MG
5747 /*
5748 * When comparing groups across NUMA domains, it's possible for the
5749 * local domain to be very lightly loaded relative to the remote
5750 * domains but "imbalance" skews the comparison making remote CPUs
5751 * look much more favourable. When considering cross-domain, add
5752 * imbalance to the runnable load on the remote node and consider
5753 * staying local.
5754 */
5755 if ((sd->flags & SD_NUMA) &&
5756 min_runnable_load + imbalance >= this_runnable_load)
5757 return NULL;
5758
6b94780e 5759 if (min_runnable_load > (this_runnable_load + imbalance))
aaee1203 5760 return NULL;
6b94780e
VG
5761
5762 if ((this_runnable_load < (min_runnable_load + imbalance)) &&
5763 (100*this_avg_load < imbalance_scale*min_avg_load))
5764 return NULL;
5765
aaee1203
PZ
5766 return idlest;
5767}
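To make the spare-capacity cross-over above concrete, here is a toy stand-alone check (not kernel code; imbalance_scale is an assumed value): with a task utilization of 400, a group only needs more than 200 spare capacity to be chosen on spare capacity rather than on least load.

/* Toy illustration of the spare-capacity decision in find_idlest_group(). */
#include <stdio.h>

/* 0: stay local, 1: use the group with most spare capacity, -1: least load */
static int use_spare_capacity(unsigned long task_util,
			      unsigned long this_spare,
			      unsigned long most_spare,
			      unsigned long imbalance_scale)
{
	/* the local group already has plenty of room: stay local */
	if (this_spare > task_util / 2 &&
	    imbalance_scale * this_spare > 100 * most_spare)
		return 0;

	/* some remote group can take the task with room to spare */
	if (most_spare > task_util / 2)
		return 1;

	return -1;	/* fall back to the least-loaded group */
}

int main(void)
{
	printf("%d\n", use_spare_capacity(400, 50, 250, 108));		/* 1 */
	printf("%d\n", use_spare_capacity(400, 300, 250, 108));	/* 0 */
	printf("%d\n", use_spare_capacity(400, 50, 100, 108));		/* -1 */
	return 0;
}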
5768
5769/*
97fb7a0a 5770 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
aaee1203
PZ
5771 */
5772static int
18bd1b4b 5773find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
aaee1203
PZ
5774{
5775 unsigned long load, min_load = ULONG_MAX;
83a0a96a
NP
5776 unsigned int min_exit_latency = UINT_MAX;
5777 u64 latest_idle_timestamp = 0;
5778 int least_loaded_cpu = this_cpu;
5779 int shallowest_idle_cpu = -1;
aaee1203
PZ
5780 int i;
5781
eaecf41f
MR
5782 /* Check if we have any choice: */
5783 if (group->group_weight == 1)
ae4df9d6 5784 return cpumask_first(sched_group_span(group));
eaecf41f 5785
aaee1203 5786 /* Traverse only the allowed CPUs */
ae4df9d6 5787 for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
943d355d 5788 if (available_idle_cpu(i)) {
83a0a96a
NP
5789 struct rq *rq = cpu_rq(i);
5790 struct cpuidle_state *idle = idle_get_state(rq);
5791 if (idle && idle->exit_latency < min_exit_latency) {
5792 /*
5793 * We give priority to a CPU whose idle state
5794 * has the smallest exit latency irrespective
5795 * of any idle timestamp.
5796 */
5797 min_exit_latency = idle->exit_latency;
5798 latest_idle_timestamp = rq->idle_stamp;
5799 shallowest_idle_cpu = i;
5800 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5801 rq->idle_stamp > latest_idle_timestamp) {
5802 /*
5803 * If equal or no active idle state, then
5804 * the most recently idled CPU might have
5805 * a warmer cache.
5806 */
5807 latest_idle_timestamp = rq->idle_stamp;
5808 shallowest_idle_cpu = i;
5809 }
9f96742a 5810 } else if (shallowest_idle_cpu == -1) {
c7132dd6 5811 load = weighted_cpuload(cpu_rq(i));
18cec7e0 5812 if (load < min_load) {
83a0a96a
NP
5813 min_load = load;
5814 least_loaded_cpu = i;
5815 }
e7693a36
GH
5816 }
5817 }
5818
83a0a96a 5819 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
aaee1203 5820}
e7693a36 5821
18bd1b4b
BJ
5822static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
5823 int cpu, int prev_cpu, int sd_flag)
5824{
93f50f90 5825 int new_cpu = cpu;
18bd1b4b 5826
6fee85cc
BJ
5827 if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
5828 return prev_cpu;
5829
c976a862
VK
5830 /*
5831 * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
5832 * last_update_time.
5833 */
5834 if (!(sd_flag & SD_BALANCE_FORK))
5835 sync_entity_load_avg(&p->se);
5836
18bd1b4b
BJ
5837 while (sd) {
5838 struct sched_group *group;
5839 struct sched_domain *tmp;
5840 int weight;
5841
5842 if (!(sd->flags & sd_flag)) {
5843 sd = sd->child;
5844 continue;
5845 }
5846
5847 group = find_idlest_group(sd, p, cpu, sd_flag);
5848 if (!group) {
5849 sd = sd->child;
5850 continue;
5851 }
5852
5853 new_cpu = find_idlest_group_cpu(group, p, cpu);
e90381ea 5854 if (new_cpu == cpu) {
97fb7a0a 5855 /* Now try balancing at a lower domain level of 'cpu': */
18bd1b4b
BJ
5856 sd = sd->child;
5857 continue;
5858 }
5859
97fb7a0a 5860 /* Now try balancing at a lower domain level of 'new_cpu': */
18bd1b4b
BJ
5861 cpu = new_cpu;
5862 weight = sd->span_weight;
5863 sd = NULL;
5864 for_each_domain(cpu, tmp) {
5865 if (weight <= tmp->span_weight)
5866 break;
5867 if (tmp->flags & sd_flag)
5868 sd = tmp;
5869 }
18bd1b4b
BJ
5870 }
5871
5872 return new_cpu;
5873}
5874
10e2f1ac
PZ
5875#ifdef CONFIG_SCHED_SMT
5876
5877static inline void set_idle_cores(int cpu, int val)
5878{
5879 struct sched_domain_shared *sds;
5880
5881 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5882 if (sds)
5883 WRITE_ONCE(sds->has_idle_cores, val);
5884}
5885
5886static inline bool test_idle_cores(int cpu, bool def)
5887{
5888 struct sched_domain_shared *sds;
5889
5890 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5891 if (sds)
5892 return READ_ONCE(sds->has_idle_cores);
5893
5894 return def;
5895}
5896
5897/*
5898 * Scans the local SMT mask to see if the entire core is idle, and records this
5899 * information in sd_llc_shared->has_idle_cores.
5900 *
5901 * Since SMT siblings share all cache levels, inspecting this limited remote
5902 * state should be fairly cheap.
5903 */
1b568f0a 5904void __update_idle_core(struct rq *rq)
10e2f1ac
PZ
5905{
5906 int core = cpu_of(rq);
5907 int cpu;
5908
5909 rcu_read_lock();
5910 if (test_idle_cores(core, true))
5911 goto unlock;
5912
5913 for_each_cpu(cpu, cpu_smt_mask(core)) {
5914 if (cpu == core)
5915 continue;
5916
943d355d 5917 if (!available_idle_cpu(cpu))
10e2f1ac
PZ
5918 goto unlock;
5919 }
5920
5921 set_idle_cores(core, 1);
5922unlock:
5923 rcu_read_unlock();
5924}
5925
5926/*
5927 * Scan the entire LLC domain for idle cores; this dynamically switches off if
5928 * there are no idle cores left in the system; tracked through
5929 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5930 */
5931static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5932{
5933 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
c743f0a5 5934 int core, cpu;
10e2f1ac 5935
1b568f0a
PZ
5936 if (!static_branch_likely(&sched_smt_present))
5937 return -1;
5938
10e2f1ac
PZ
5939 if (!test_idle_cores(target, false))
5940 return -1;
5941
0c98d344 5942 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
10e2f1ac 5943
c743f0a5 5944 for_each_cpu_wrap(core, cpus, target) {
10e2f1ac
PZ
5945 bool idle = true;
5946
5947 for_each_cpu(cpu, cpu_smt_mask(core)) {
5948 cpumask_clear_cpu(cpu, cpus);
943d355d 5949 if (!available_idle_cpu(cpu))
10e2f1ac
PZ
5950 idle = false;
5951 }
5952
5953 if (idle)
5954 return core;
5955 }
5956
5957 /*
5958 * Failed to find an idle core; stop looking for one.
5959 */
5960 set_idle_cores(target, 0);
5961
5962 return -1;
5963}
5964
5965/*
5966 * Scan the local SMT mask for idle CPUs.
5967 */
5968static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5969{
5970 int cpu;
5971
1b568f0a
PZ
5972 if (!static_branch_likely(&sched_smt_present))
5973 return -1;
5974
10e2f1ac 5975 for_each_cpu(cpu, cpu_smt_mask(target)) {
0c98d344 5976 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
10e2f1ac 5977 continue;
943d355d 5978 if (available_idle_cpu(cpu))
10e2f1ac
PZ
5979 return cpu;
5980 }
5981
5982 return -1;
5983}
5984
5985#else /* CONFIG_SCHED_SMT */
5986
5987static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5988{
5989 return -1;
5990}
5991
5992static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5993{
5994 return -1;
5995}
5996
5997#endif /* CONFIG_SCHED_SMT */
5998
5999/*
6000 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6001 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6002 * average idle time for this rq (as found in rq->avg_idle).
a50bde51 6003 */
10e2f1ac
PZ
6004static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
6005{
9cfb38a7 6006 struct sched_domain *this_sd;
1ad3aaf3 6007 u64 avg_cost, avg_idle;
10e2f1ac
PZ
6008 u64 time, cost;
6009 s64 delta;
1ad3aaf3 6010 int cpu, nr = INT_MAX;
10e2f1ac 6011
9cfb38a7
WL
6012 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6013 if (!this_sd)
6014 return -1;
6015
10e2f1ac
PZ
6016 /*
6017 * Due to large variance we need a large fuzz factor; hackbench in
6018 * particular is sensitive here.
6019 */
1ad3aaf3
PZ
6020 avg_idle = this_rq()->avg_idle / 512;
6021 avg_cost = this_sd->avg_scan_cost + 1;
6022
6023 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
10e2f1ac
PZ
6024 return -1;
6025
1ad3aaf3
PZ
6026 if (sched_feat(SIS_PROP)) {
6027 u64 span_avg = sd->span_weight * avg_idle;
6028 if (span_avg > 4*avg_cost)
6029 nr = div_u64(span_avg, avg_cost);
6030 else
6031 nr = 4;
6032 }
6033
10e2f1ac
PZ
6034 time = local_clock();
6035
c743f0a5 6036 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
1ad3aaf3
PZ
6037 if (!--nr)
6038 return -1;
0c98d344 6039 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
10e2f1ac 6040 continue;
943d355d 6041 if (available_idle_cpu(cpu))
10e2f1ac
PZ
6042 break;
6043 }
6044
6045 time = local_clock() - time;
6046 cost = this_sd->avg_scan_cost;
6047 delta = (s64)(time - cost) / 8;
6048 this_sd->avg_scan_cost += delta;
6049
6050 return cpu;
6051}
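A rough stand-alone illustration of the SIS_PROP budget above, with invented numbers: the count of CPUs scanned is proportional to how idle this CPU has been relative to the average cost of a previous scan, with a floor of four.

/* Illustrative sketch of the proportional scan budget. Not kernel code. */
#include <stdio.h>

static int scan_budget(unsigned long long avg_idle_ns,
		       unsigned long long avg_scan_cost_ns,
		       unsigned int span_weight)
{
	/* large fuzz factor, as in the comment above */
	unsigned long long avg_idle = avg_idle_ns / 512;
	unsigned long long avg_cost = avg_scan_cost_ns + 1;
	unsigned long long span_avg = (unsigned long long)span_weight * avg_idle;

	if (span_avg > 4 * avg_cost)
		return (int)(span_avg / avg_cost);
	return 4;	/* always scan at least a handful of CPUs */
}

int main(void)
{
	/* 1ms average idle, 1us scan cost, 16-CPU LLC: budget of 31 exceeds
	 * the LLC, effectively "scan everything" */
	printf("%d\n", scan_budget(1000000ULL, 1000ULL, 16));
	/* only 10us of average idle: the budget clamps to the floor of 4 */
	printf("%d\n", scan_budget(10000ULL, 1000ULL, 16));
	return 0;
}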
6052
6053/*
6054 * Try and locate an idle core/thread in the LLC cache domain.
a50bde51 6055 */
772bd008 6056static int select_idle_sibling(struct task_struct *p, int prev, int target)
a50bde51 6057{
99bd5e2f 6058 struct sched_domain *sd;
32e839dd 6059 int i, recent_used_cpu;
a50bde51 6060
943d355d 6061 if (available_idle_cpu(target))
e0a79f52 6062 return target;
99bd5e2f
SS
6063
6064 /*
97fb7a0a 6065 * If the previous CPU is cache affine and idle, don't be stupid:
99bd5e2f 6066 */
943d355d 6067 if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev))
772bd008 6068 return prev;
a50bde51 6069
97fb7a0a 6070 /* Check a recently used CPU as a potential idle candidate: */
32e839dd
MG
6071 recent_used_cpu = p->recent_used_cpu;
6072 if (recent_used_cpu != prev &&
6073 recent_used_cpu != target &&
6074 cpus_share_cache(recent_used_cpu, target) &&
943d355d 6075 available_idle_cpu(recent_used_cpu) &&
32e839dd
MG
6076 cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
6077 /*
6078 * Replace recent_used_cpu with prev as it is a potential
97fb7a0a 6079 * candidate for the next wake:
32e839dd
MG
6080 */
6081 p->recent_used_cpu = prev;
6082 return recent_used_cpu;
6083 }
6084
518cd623 6085 sd = rcu_dereference(per_cpu(sd_llc, target));
10e2f1ac
PZ
6086 if (!sd)
6087 return target;
772bd008 6088
10e2f1ac
PZ
6089 i = select_idle_core(p, sd, target);
6090 if ((unsigned)i < nr_cpumask_bits)
6091 return i;
37407ea7 6092
10e2f1ac
PZ
6093 i = select_idle_cpu(p, sd, target);
6094 if ((unsigned)i < nr_cpumask_bits)
6095 return i;
6096
6097 i = select_idle_smt(p, sd, target);
6098 if ((unsigned)i < nr_cpumask_bits)
6099 return i;
970e1789 6100
a50bde51
PZ
6101 return target;
6102}
231678b7 6103
f9be3e59
PB
6104/**
6105 * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
6106 * @cpu: the CPU to get the utilization of
6107 *
6108 * The unit of the return value must be the one of capacity so we can compare
6109 * the utilization with the capacity of the CPU that is available for CFS task
6110 * (ie cpu_capacity).
231678b7
DE
6111 *
6112 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
6113 * recent utilization of currently non-runnable tasks on a CPU. It represents
6114 * the amount of utilization of a CPU in the range [0..capacity_orig] where
6115 * capacity_orig is the cpu_capacity available at the highest frequency
6116 * (arch_scale_freq_capacity()).
6117 * The utilization of a CPU converges towards a sum equal to or less than the
6118 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
6119 * the running time on this CPU scaled by capacity_curr.
6120 *
f9be3e59
PB
6121 * The estimated utilization of a CPU is defined to be the maximum between its
6122 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
6123 * currently RUNNABLE on that CPU.
6124 * This allows us to properly represent the expected utilization of a CPU which
6125 * has just had a big task start running after a long sleep period. At the same time
6126 * however it preserves the benefits of the "blocked utilization" in
6127 * describing the potential for other tasks waking up on the same CPU.
6128 *
231678b7
DE
6129 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
6130 * higher than capacity_orig because of unfortunate rounding in
6131 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
6132 * the average stabilizes with the new running time. We need to check that the
6133 * utilization stays within the range of [0..capacity_orig] and cap it if
6134 * necessary. Without utilization capping, a group could be seen as overloaded
6135 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
6136 * available capacity. We allow utilization to overshoot capacity_curr (but not
6137 * capacity_orig) as it is useful for predicting the capacity required after task
6138 * migrations (scheduler-driven DVFS).
f9be3e59
PB
6139 *
6140 * Return: the (estimated) utilization for the specified CPU
8bb5b00c 6141 */
f9be3e59 6142static inline unsigned long cpu_util(int cpu)
8bb5b00c 6143{
f9be3e59
PB
6144 struct cfs_rq *cfs_rq;
6145 unsigned int util;
6146
6147 cfs_rq = &cpu_rq(cpu)->cfs;
6148 util = READ_ONCE(cfs_rq->avg.util_avg);
6149
6150 if (sched_feat(UTIL_EST))
6151 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
8bb5b00c 6152
f9be3e59 6153 return min_t(unsigned long, util, capacity_orig_of(cpu));
8bb5b00c 6154}
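As a hedged illustration of the return value above, the sketch below takes plain numbers in place of the cfs_rq fields: the maximum of the PELT average and the enqueued estimate, clamped to the original capacity.

/* Stand-alone sketch of the (estimated) CPU utilization. Not kernel code. */
#include <stdio.h>

static unsigned long cpu_util_sketch(unsigned long util_avg,
				     unsigned long util_est_enqueued,
				     unsigned long capacity_orig)
{
	unsigned long util = util_avg;

	/* UTIL_EST: a freshly woken big task counts immediately */
	if (util_est_enqueued > util)
		util = util_est_enqueued;

	/* rounding/migration can overshoot; cap at the CPU's capacity */
	if (util > capacity_orig)
		util = capacity_orig;
	return util;
}

int main(void)
{
	printf("%lu\n", cpu_util_sketch(120, 600, 1024));	/* 600 */
	printf("%lu\n", cpu_util_sketch(1100, 0, 1024));	/* 1024 */
	return 0;
}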
a50bde51 6155
104cb16d 6156/*
97fb7a0a 6157 * cpu_util_wake: Compute CPU utilization with any contributions from
104cb16d
MR
6158 * the waking task p removed.
6159 */
f01415fd 6160static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
104cb16d 6161{
f9be3e59
PB
6162 struct cfs_rq *cfs_rq;
6163 unsigned int util;
104cb16d
MR
6164
6165 /* Task has no contribution or is new */
f9be3e59 6166 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
104cb16d
MR
6167 return cpu_util(cpu);
6168
f9be3e59
PB
6169 cfs_rq = &cpu_rq(cpu)->cfs;
6170 util = READ_ONCE(cfs_rq->avg.util_avg);
6171
6172 /* Discount task's blocked util from CPU's util */
6173 util -= min_t(unsigned int, util, task_util(p));
104cb16d 6174
f9be3e59
PB
6175 /*
6176 * Covered cases:
6177 *
6178 * a) if *p is the only task sleeping on this CPU, then:
6179 * cpu_util (== task_util) > util_est (== 0)
6180 * and thus we return:
6181 * cpu_util_wake = (cpu_util - task_util) = 0
6182 *
6183 * b) if other tasks are SLEEPING on this CPU, which is now exiting
6184 * IDLE, then:
6185 * cpu_util >= task_util
6186 * cpu_util > util_est (== 0)
6187 * and thus we discount *p's blocked utilization to return:
6188 * cpu_util_wake = (cpu_util - task_util) >= 0
6189 *
6190 * c) if other tasks are RUNNABLE on that CPU and
6191 * util_est > cpu_util
6192 * then we use util_est since it returns a more restrictive
6193 * estimation of the spare capacity on that CPU, by just
6194 * considering the expected utilization of tasks already
6195 * runnable on that CPU.
6196 *
6197 * Cases a) and b) are covered by the above code, while case c) is
6198 * covered by the following code when estimated utilization is
6199 * enabled.
6200 */
6201 if (sched_feat(UTIL_EST))
6202 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
6203
6204 /*
6205 * Utilization (estimated) can exceed the CPU capacity, thus let's
6206 * clamp to the maximum CPU capacity to ensure consistency with
6207 * the cpu_util call.
6208 */
6209 return min_t(unsigned long, util, capacity_orig_of(cpu));
104cb16d
MR
6210}
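The three cases in the comment above can be traced with a toy stand-alone version that takes invented numbers in place of the real cfs_rq fields.

/* Stand-alone sketch of discounting the waking task's utilization. */
#include <stdio.h>

static unsigned long cpu_util_wake_sketch(unsigned long util_avg,
					  unsigned long util_est_enqueued,
					  unsigned long task_util,
					  unsigned long capacity_orig)
{
	unsigned long util = util_avg;

	/* discount the blocked contribution of the task that is waking up */
	util -= (task_util < util) ? task_util : util;

	/* case c): the runnable tasks' estimates can still dominate */
	if (util_est_enqueued > util)
		util = util_est_enqueued;

	if (util > capacity_orig)
		util = capacity_orig;
	return util;
}

int main(void)
{
	/* a) p was the only sleeper:      300 - 300 -> 0   */
	printf("%lu\n", cpu_util_wake_sketch(300, 0, 300, 1024));
	/* b) other sleepers contribute:   500 - 300 -> 200 */
	printf("%lu\n", cpu_util_wake_sketch(500, 0, 300, 1024));
	/* c) runnable estimate dominates: max(200, 450) -> 450 */
	printf("%lu\n", cpu_util_wake_sketch(500, 450, 300, 1024));
	return 0;
}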
6211
3273163c
MR
6212/*
6213 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
6214 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
6215 *
6216 * In that case WAKE_AFFINE doesn't make sense and we'll let
6217 * BALANCE_WAKE sort things out.
6218 */
6219static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
6220{
6221 long min_cap, max_cap;
6222
6223 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
6224 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
6225
6226 /* Minimum capacity is close to max, no need to abort wake_affine */
6227 if (max_cap - min_cap < max_cap >> 3)
6228 return 0;
6229
104cb16d
MR
6230 /* Bring task utilization in sync with prev_cpu */
6231 sync_entity_load_avg(&p->se);
6232
3273163c
MR
6233 return min_cap * 1024 < task_util(p) * capacity_margin;
6234}
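A toy version of the fitness check above; capacity_margin is a tunable defined elsewhere in fair.c, and the value 1280 (roughly a 20% headroom requirement) is only assumed here for illustration.

/* Toy illustration of the wake_cap() capacity check. Not kernel code. */
#include <stdio.h>

#define CAPACITY_MARGIN 1280	/* assumption for this example only */

static int wake_cap_sketch(long task_util, long prev_cap, long this_cap,
			   long max_cap)
{
	long min_cap = prev_cap < this_cap ? prev_cap : this_cap;

	/* capacities are roughly symmetric: keep wake_affine enabled */
	if (max_cap - min_cap < max_cap >> 3)
		return 0;

	/* asymmetric system: disable wake_affine if the task won't fit */
	return min_cap * 1024 < task_util * CAPACITY_MARGIN;
}

int main(void)
{
	/* big.LITTLE-like 1024/512 capacities: util 460 doesn't fit, 200 does */
	printf("%d\n", wake_cap_sketch(460, 512, 1024, 1024));	/* 1 */
	printf("%d\n", wake_cap_sketch(200, 512, 1024, 1024));	/* 0 */
	return 0;
}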
6235
aaee1203 6236/*
de91b9cb
MR
6237 * select_task_rq_fair: Select target runqueue for the waking task in domains
6238 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
6239 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
aaee1203 6240 *
97fb7a0a
IM
6241 * Balances load by selecting the idlest CPU in the idlest group, or under
6242 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
aaee1203 6243 *
97fb7a0a 6244 * Returns the target CPU number.
aaee1203
PZ
6245 *
6246 * preempt must be disabled.
6247 */
0017d735 6248static int
ac66f547 6249select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
aaee1203 6250{
f1d88b44 6251 struct sched_domain *tmp, *sd = NULL;
c88d5910 6252 int cpu = smp_processor_id();
63b0e9ed 6253 int new_cpu = prev_cpu;
99bd5e2f 6254 int want_affine = 0;
24d0c1d6 6255 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
c88d5910 6256
c58d25f3
PZ
6257 if (sd_flag & SD_BALANCE_WAKE) {
6258 record_wakee(p);
3273163c 6259 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
0c98d344 6260 && cpumask_test_cpu(cpu, &p->cpus_allowed);
c58d25f3 6261 }
aaee1203 6262
dce840a0 6263 rcu_read_lock();
aaee1203 6264 for_each_domain(cpu, tmp) {
e4f42888 6265 if (!(tmp->flags & SD_LOAD_BALANCE))
63b0e9ed 6266 break;
e4f42888 6267
fe3bcfe1 6268 /*
97fb7a0a 6269 * If both 'cpu' and 'prev_cpu' are part of this domain,
99bd5e2f 6270 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 6271 */
99bd5e2f
SS
6272 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
6273 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
f1d88b44
VK
6274 if (cpu != prev_cpu)
6275 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
6276
6277 sd = NULL; /* Prefer wake_affine over balance flags */
29cd8bae 6278 break;
f03542a7 6279 }
29cd8bae 6280
f03542a7 6281 if (tmp->flags & sd_flag)
29cd8bae 6282 sd = tmp;
63b0e9ed
MG
6283 else if (!want_affine)
6284 break;
29cd8bae
PZ
6285 }
6286
f1d88b44
VK
6287 if (unlikely(sd)) {
6288 /* Slow path */
18bd1b4b 6289 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
f1d88b44
VK
6290 } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
6291 /* Fast path */
6292
6293 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
6294
6295 if (want_affine)
6296 current->recent_used_cpu = cpu;
e7693a36 6297 }
dce840a0 6298 rcu_read_unlock();
e7693a36 6299
c88d5910 6300 return new_cpu;
e7693a36 6301}
0a74bef8 6302
144d8487
PZ
6303static void detach_entity_cfs_rq(struct sched_entity *se);
6304
0a74bef8 6305/*
97fb7a0a 6306 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
0a74bef8 6307 * cfs_rq_of(p) references at time of call are still valid and identify the
97fb7a0a 6308 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
0a74bef8 6309 */
5a4fd036 6310static void migrate_task_rq_fair(struct task_struct *p)
0a74bef8 6311{
59efa0ba
PZ
6312 /*
6313 * As blocked tasks retain absolute vruntime the migration needs to
6314 * deal with this by subtracting the old and adding the new
6315 * min_vruntime -- the latter is done by enqueue_entity() when placing
6316 * the task on the new runqueue.
6317 */
6318 if (p->state == TASK_WAKING) {
6319 struct sched_entity *se = &p->se;
6320 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6321 u64 min_vruntime;
6322
6323#ifndef CONFIG_64BIT
6324 u64 min_vruntime_copy;
6325
6326 do {
6327 min_vruntime_copy = cfs_rq->min_vruntime_copy;
6328 smp_rmb();
6329 min_vruntime = cfs_rq->min_vruntime;
6330 } while (min_vruntime != min_vruntime_copy);
6331#else
6332 min_vruntime = cfs_rq->min_vruntime;
6333#endif
6334
6335 se->vruntime -= min_vruntime;
6336 }
6337
144d8487
PZ
6338 if (p->on_rq == TASK_ON_RQ_MIGRATING) {
6339 /*
6340 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
6341 * rq->lock and can modify state directly.
6342 */
6343 lockdep_assert_held(&task_rq(p)->lock);
6344 detach_entity_cfs_rq(&p->se);
6345
6346 } else {
6347 /*
6348 * We are supposed to update the task to "current" time, so that
6349 * it is up to date and ready to go to the new CPU/cfs_rq. But we
6350 * have difficulty getting at what the current time is, so simply
6351 * throw away the out-of-date time. This results in the wakee
6352 * task being less decayed, but giving the wakee more load does
6353 * not sound too bad.
6354 */
6355 remove_entity_load_avg(&p->se);
6356 }
9d89c257
YD
6357
6358 /* Tell new CPU we are migrated */
6359 p->se.avg.last_update_time = 0;
3944a927
BS
6360
6361 /* We have migrated, no longer consider this task hot */
9d89c257 6362 p->se.exec_start = 0;
0a74bef8 6363}
12695578
YD
6364
6365static void task_dead_fair(struct task_struct *p)
6366{
6367 remove_entity_load_avg(&p->se);
6368}
e7693a36
GH
6369#endif /* CONFIG_SMP */
6370
a555e9d8 6371static unsigned long wakeup_gran(struct sched_entity *se)
0bbd3336
PZ
6372{
6373 unsigned long gran = sysctl_sched_wakeup_granularity;
6374
6375 /*
e52fb7c0
PZ
6376 * Since it is curr that is running now, convert the gran from real-time
6377 * to virtual-time in its units.
13814d42
MG
6378 *
6379 * By using 'se' instead of 'curr' we penalize light tasks, so
6380 * they get preempted easier. That is, if 'se' < 'curr' then
6381 * the resulting gran will be larger, therefore penalizing the
6382 * lighter, if otoh 'se' > 'curr' then the resulting gran will
6383 * be smaller, again penalizing the lighter task.
6384 *
6385 * This is especially important for buddies when the leftmost
6386 * task is higher priority than the buddy.
0bbd3336 6387 */
f4ad9bd2 6388 return calc_delta_fair(gran, se);
0bbd3336
PZ
6389}
6390
464b7527
PZ
6391/*
6392 * Should 'se' preempt 'curr'.
6393 *
6394 * |s1
6395 * |s2
6396 * |s3
6397 * g
6398 * |<--->|c
6399 *
6400 * w(c, s1) = -1
6401 * w(c, s2) = 0
6402 * w(c, s3) = 1
6403 *
6404 */
6405static int
6406wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
6407{
6408 s64 gran, vdiff = curr->vruntime - se->vruntime;
6409
6410 if (vdiff <= 0)
6411 return -1;
6412
a555e9d8 6413 gran = wakeup_gran(se);
464b7527
PZ
6414 if (vdiff > gran)
6415 return 1;
6416
6417 return 0;
6418}
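A worked, stand-alone illustration of the decision above (not kernel code): the wakee only preempts when its vruntime lead over current exceeds the wakeup granularity, matching the s1/s2/s3 picture.

/* Toy version of the preemption decision, with invented vruntimes. */
#include <stdio.h>

/* -1: no preemption, 0: within the granularity, 1: preempt */
static int wakeup_preempt_sketch(long long curr_vruntime,
				 long long se_vruntime,
				 long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;	/* s1: the wakee is not even ahead */
	if (vdiff > gran)
		return 1;	/* s3: clearly ahead, preempt */
	return 0;		/* s2: ahead, but inside the granularity */
}

int main(void)
{
	long long gran = 1000000;	/* 1ms, already in virtual time here */

	printf("%d\n", wakeup_preempt_sketch(5000000, 6000000, gran));	/* -1 */
	printf("%d\n", wakeup_preempt_sketch(5000000, 4500000, gran));	/*  0 */
	printf("%d\n", wakeup_preempt_sketch(5000000, 2000000, gran));	/*  1 */
	return 0;
}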
6419
02479099
PZ
6420static void set_last_buddy(struct sched_entity *se)
6421{
69c80f3e
VP
6422 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6423 return;
6424
c5ae366e
DA
6425 for_each_sched_entity(se) {
6426 if (SCHED_WARN_ON(!se->on_rq))
6427 return;
69c80f3e 6428 cfs_rq_of(se)->last = se;
c5ae366e 6429 }
02479099
PZ
6430}
6431
6432static void set_next_buddy(struct sched_entity *se)
6433{
69c80f3e
VP
6434 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6435 return;
6436
c5ae366e
DA
6437 for_each_sched_entity(se) {
6438 if (SCHED_WARN_ON(!se->on_rq))
6439 return;
69c80f3e 6440 cfs_rq_of(se)->next = se;
c5ae366e 6441 }
02479099
PZ
6442}
6443
ac53db59
RR
6444static void set_skip_buddy(struct sched_entity *se)
6445{
69c80f3e
VP
6446 for_each_sched_entity(se)
6447 cfs_rq_of(se)->skip = se;
ac53db59
RR
6448}
6449
bf0f6f24
IM
6450/*
6451 * Preempt the current task with a newly woken task if needed:
6452 */
5a9b86f6 6453static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
6454{
6455 struct task_struct *curr = rq->curr;
8651a86c 6456 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 6457 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 6458 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 6459 int next_buddy_marked = 0;
bf0f6f24 6460
4ae7d5ce
IM
6461 if (unlikely(se == pse))
6462 return;
6463
5238cdd3 6464 /*
163122b7 6465 * This is possible from callers such as attach_tasks(), in which we
5238cdd3
PT
6466 * unconditionally check_preempt_curr() after an enqueue (which may have
6467 * led to a throttle). This both saves work and prevents false
6468 * next-buddy nomination below.
6469 */
6470 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
6471 return;
6472
2f36825b 6473 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 6474 set_next_buddy(pse);
2f36825b
VP
6475 next_buddy_marked = 1;
6476 }
57fdc26d 6477
aec0a514
BR
6478 /*
6479 * We can come here with TIF_NEED_RESCHED already set from new task
6480 * wake up path.
5238cdd3
PT
6481 *
6482 * Note: this also catches the edge-case of curr being in a throttled
6483 * group (e.g. via set_curr_task), since update_curr() (in the
6484 * enqueue of curr) will have resulted in resched being set. This
6485 * prevents us from potentially nominating it as a false LAST_BUDDY
6486 * below.
aec0a514
BR
6487 */
6488 if (test_tsk_need_resched(curr))
6489 return;
6490
a2f5c9ab
DH
6491 /* Idle tasks are by definition preempted by non-idle tasks. */
6492 if (unlikely(curr->policy == SCHED_IDLE) &&
6493 likely(p->policy != SCHED_IDLE))
6494 goto preempt;
6495
91c234b4 6496 /*
a2f5c9ab
DH
6497 * Batch and idle tasks do not preempt non-idle tasks (their preemption
6498 * is driven by the tick):
91c234b4 6499 */
8ed92e51 6500 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 6501 return;
bf0f6f24 6502
464b7527 6503 find_matching_se(&se, &pse);
9bbd7374 6504 update_curr(cfs_rq_of(se));
002f128b 6505 BUG_ON(!pse);
2f36825b
VP
6506 if (wakeup_preempt_entity(se, pse) == 1) {
6507 /*
6508 * Bias pick_next to pick the sched entity that is
6509 * triggering this preemption.
6510 */
6511 if (!next_buddy_marked)
6512 set_next_buddy(pse);
3a7e73a2 6513 goto preempt;
2f36825b 6514 }
464b7527 6515
3a7e73a2 6516 return;
a65ac745 6517
3a7e73a2 6518preempt:
8875125e 6519 resched_curr(rq);
3a7e73a2
PZ
6520 /*
6521 * Only set the backward buddy when the current task is still
6522 * on the rq. This can happen when a wakeup gets interleaved
6523 * with schedule on the ->pre_schedule() or idle_balance()
6524 * point, either of which can drop the rq lock.
6525 *
6526 * Also, during early boot the idle thread is in the fair class,
6527 * for obvious reasons it's a bad idea to schedule back to it.
6528 */
6529 if (unlikely(!se->on_rq || curr == rq->idle))
6530 return;
6531
6532 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
6533 set_last_buddy(se);
bf0f6f24
IM
6534}
6535
606dba2e 6536static struct task_struct *
d8ac8971 6537pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
bf0f6f24
IM
6538{
6539 struct cfs_rq *cfs_rq = &rq->cfs;
6540 struct sched_entity *se;
678d5718 6541 struct task_struct *p;
37e117c0 6542 int new_tasks;
678d5718 6543
6e83125c 6544again:
678d5718 6545 if (!cfs_rq->nr_running)
38033c37 6546 goto idle;
678d5718 6547
9674f5ca 6548#ifdef CONFIG_FAIR_GROUP_SCHED
3f1d2a31 6549 if (prev->sched_class != &fair_sched_class)
678d5718
PZ
6550 goto simple;
6551
6552 /*
6553 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
6554 * likely that a next task is from the same cgroup as the current.
6555 *
6556 * Therefore attempt to avoid putting and setting the entire cgroup
6557 * hierarchy, only change the part that actually changes.
6558 */
6559
6560 do {
6561 struct sched_entity *curr = cfs_rq->curr;
6562
6563 /*
6564 * Since we got here without doing put_prev_entity() we also
6565 * have to consider cfs_rq->curr. If it is still a runnable
6566 * entity, update_curr() will update its vruntime, otherwise
6567 * forget we've ever seen it.
6568 */
54d27365
BS
6569 if (curr) {
6570 if (curr->on_rq)
6571 update_curr(cfs_rq);
6572 else
6573 curr = NULL;
678d5718 6574
54d27365
BS
6575 /*
6576 * This call to check_cfs_rq_runtime() will do the
6577 * throttle and dequeue its entity in the parent(s).
9674f5ca 6578 * Therefore the nr_running test will indeed
54d27365
BS
6579 * be correct.
6580 */
9674f5ca
VK
6581 if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
6582 cfs_rq = &rq->cfs;
6583
6584 if (!cfs_rq->nr_running)
6585 goto idle;
6586
54d27365 6587 goto simple;
9674f5ca 6588 }
54d27365 6589 }
678d5718
PZ
6590
6591 se = pick_next_entity(cfs_rq, curr);
6592 cfs_rq = group_cfs_rq(se);
6593 } while (cfs_rq);
6594
6595 p = task_of(se);
6596
6597 /*
6598 * Since we haven't yet done put_prev_entity(), if the selected task
6599 * is a different task than the one we started out with, try to touch
6600 * the least number of cfs_rqs.
6601 */
6602 if (prev != p) {
6603 struct sched_entity *pse = &prev->se;
6604
6605 while (!(cfs_rq = is_same_group(se, pse))) {
6606 int se_depth = se->depth;
6607 int pse_depth = pse->depth;
6608
6609 if (se_depth <= pse_depth) {
6610 put_prev_entity(cfs_rq_of(pse), pse);
6611 pse = parent_entity(pse);
6612 }
6613 if (se_depth >= pse_depth) {
6614 set_next_entity(cfs_rq_of(se), se);
6615 se = parent_entity(se);
6616 }
6617 }
6618
6619 put_prev_entity(cfs_rq, pse);
6620 set_next_entity(cfs_rq, se);
6621 }
6622
93824900 6623 goto done;
678d5718 6624simple:
678d5718 6625#endif
bf0f6f24 6626
3f1d2a31 6627 put_prev_task(rq, prev);
606dba2e 6628
bf0f6f24 6629 do {
678d5718 6630 se = pick_next_entity(cfs_rq, NULL);
f4b6755f 6631 set_next_entity(cfs_rq, se);
bf0f6f24
IM
6632 cfs_rq = group_cfs_rq(se);
6633 } while (cfs_rq);
6634
8f4d37ec 6635 p = task_of(se);
678d5718 6636
13a453c2 6637done: __maybe_unused;
93824900
UR
6638#ifdef CONFIG_SMP
6639 /*
6640 * Move the next running task to the front of
6641 * the list, so our cfs_tasks list becomes an MRU
6642 * one.
6643 */
6644 list_move(&p->se.group_node, &rq->cfs_tasks);
6645#endif
6646
b39e66ea
MG
6647 if (hrtick_enabled(rq))
6648 hrtick_start_fair(rq, p);
8f4d37ec
PZ
6649
6650 return p;
38033c37
PZ
6651
6652idle:
46f69fa3
MF
6653 new_tasks = idle_balance(rq, rf);
6654
37e117c0
PZ
6655 /*
6656 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6657 * possible for any higher priority task to appear. In that case we
6658 * must re-start the pick_next_entity() loop.
6659 */
e4aa358b 6660 if (new_tasks < 0)
37e117c0
PZ
6661 return RETRY_TASK;
6662
e4aa358b 6663 if (new_tasks > 0)
38033c37 6664 goto again;
38033c37
PZ
6665
6666 return NULL;
bf0f6f24
IM
6667}
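/*
 * Illustrative walk-through (hypothetical cgroups, not from the original
 * source) of the depth-matching loop above: if prev ran in /A/B (its entity
 * at depth 2) and the newly picked task runs in /C (depth 1), the loop first
 * put_prev_entity()s prev's depth-2 entity, then handles both depth-1
 * entities in lockstep, and exits once is_same_group() finds the shared root
 * cfs_rq, where the final put_prev_entity()/set_next_entity() pair is done.
 * Entities above the first common cfs_rq are never touched, so only the part
 * of the hierarchy that actually changes is put and set.
 */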
6668
6669/*
6670 * Account for a descheduled task:
6671 */
31ee529c 6672static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
6673{
6674 struct sched_entity *se = &prev->se;
6675 struct cfs_rq *cfs_rq;
6676
6677 for_each_sched_entity(se) {
6678 cfs_rq = cfs_rq_of(se);
ab6cde26 6679 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
6680 }
6681}
6682
ac53db59
RR
6683/*
6684 * sched_yield() is very simple
6685 *
6686 * The magic of dealing with the ->skip buddy is in pick_next_entity.
6687 */
6688static void yield_task_fair(struct rq *rq)
6689{
6690 struct task_struct *curr = rq->curr;
6691 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6692 struct sched_entity *se = &curr->se;
6693
6694 /*
6695 * Are we the only task in the tree?
6696 */
6697 if (unlikely(rq->nr_running == 1))
6698 return;
6699
6700 clear_buddies(cfs_rq, se);
6701
6702 if (curr->policy != SCHED_BATCH) {
6703 update_rq_clock(rq);
6704 /*
6705 * Update run-time statistics of the 'current'.
6706 */
6707 update_curr(cfs_rq);
916671c0
MG
6708 /*
6709 * Tell update_rq_clock() that we've just updated,
6710 * so we don't do microscopic update in schedule()
6711 * and double the fastpath cost.
6712 */
adcc8da8 6713 rq_clock_skip_update(rq);
ac53db59
RR
6714 }
6715
6716 set_skip_buddy(se);
6717}
6718
d95f4122
MG
6719static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6720{
6721 struct sched_entity *se = &p->se;
6722
5238cdd3
PT
6723 /* throttled hierarchies are not runnable */
6724 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
6725 return false;
6726
6727 /* Tell the scheduler that we'd really like pse to run next. */
6728 set_next_buddy(se);
6729
d95f4122
MG
6730 yield_task_fair(rq);
6731
6732 return true;
6733}
6734
681f3e68 6735#ifdef CONFIG_SMP
bf0f6f24 6736/**************************************************
e9c84cb8
PZ
6737 * Fair scheduling class load-balancing methods.
6738 *
6739 * BASICS
6740 *
6741 * The purpose of load-balancing is to achieve the same basic fairness the
97fb7a0a 6742 * per-CPU scheduler provides, namely provide a proportional amount of compute
e9c84cb8
PZ
6743 * time to each task. This is expressed in the following equation:
6744 *
6745 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
6746 *
97fb7a0a 6747 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
e9c84cb8
PZ
6748 * W_i,0 is defined as:
6749 *
6750 * W_i,0 = \Sum_j w_i,j (2)
6751 *
97fb7a0a 6752 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
1c3de5e1 6753 * is derived from the nice value as per sched_prio_to_weight[].
e9c84cb8
PZ
6754 *
6755 * The weight average is an exponential decay average of the instantaneous
6756 * weight:
6757 *
6758 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
6759 *
97fb7a0a 6760 * C_i is the compute capacity of CPU i, typically it is the
e9c84cb8
PZ
6761 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6762 * can also include other factors [XXX].
6763 *
6764 * To achieve this balance we define a measure of imbalance which follows
6765 * directly from (1):
6766 *
ced549fa 6767 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
e9c84cb8
PZ
6768 *
 6769 * We then move tasks around to minimize the imbalance. In the continuous
6770 * function space it is obvious this converges, in the discrete case we get
6771 * a few fun cases generally called infeasible weight scenarios.
6772 *
6773 * [XXX expand on:
6774 * - infeasible weights;
6775 * - local vs global optima in the discrete case. ]
6776 *
6777 *
6778 * SCHED DOMAINS
6779 *
6780 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
97fb7a0a 6781 * for all i,j solution, we create a tree of CPUs that follows the hardware
e9c84cb8 6782 * topology where each level pairs two lower groups (or better). This results
97fb7a0a 6783 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
e9c84cb8 6784 * tree to only the first of the previous level and we decrease the frequency
97fb7a0a 6785 * of load-balance at each level inv. proportional to the number of CPUs in
e9c84cb8
PZ
6786 * the groups.
6787 *
6788 * This yields:
6789 *
 6790 *   \Sum_{i = 0}^{log_2 n} { 1/2^i * n/2^i * 2^i } = O(n)              (5)
 6791 *
 6792 * where the sum runs over all levels, 1/2^i is the load-balance frequency
 6793 * at level i, n/2^i the number of CPUs doing load-balance at that level,
 6794 * and 2^i the size of each group.
6797 *
6798 * Coupled with a limit on how many tasks we can migrate every balance pass,
6799 * this makes (5) the runtime complexity of the balancer.
6800 *
6801 * An important property here is that each CPU is still (indirectly) connected
97fb7a0a 6802 * to every other CPU in at most O(log n) steps:
e9c84cb8
PZ
6803 *
6804 * The adjacency matrix of the resulting graph is given by:
6805 *
 6806 * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)   (6)
6809 *
6810 * And you'll find that:
6811 *
6812 * A^(log_2 n)_i,j != 0 for all i,j (7)
6813 *
97fb7a0a 6814 * Showing there's indeed a path between every CPU in at most O(log n) steps.
e9c84cb8
PZ
6815 * The task movement gives a factor of O(m), giving a convergence complexity
6816 * of:
6817 *
6818 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
6819 *
6820 *
6821 * WORK CONSERVING
6822 *
6823 * In order to avoid CPUs going idle while there's still work to do, new idle
97fb7a0a 6824 * balancing is more aggressive and has the newly idle CPU iterate up the domain
e9c84cb8
PZ
6825 * tree itself instead of relying on other CPUs to bring it work.
6826 *
6827 * This adds some complexity to both (5) and (8) but it reduces the total idle
6828 * time.
6829 *
6830 * [XXX more?]
6831 *
6832 *
6833 * CGROUPS
6834 *
6835 * Cgroups make a horror show out of (2), instead of a simple sum we get:
6836 *
 6837 * W_i,0 = \Sum_j \Prod_k w_k * s_k,i / S_k                              (9)
6840 *
6841 * Where
6842 *
6843 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
6844 *
97fb7a0a 6845 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
e9c84cb8
PZ
6846 *
6847 * The big problem is S_k, its a global sum needed to compute a local (W_i)
6848 * property.
6849 *
6850 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6851 * rewrite all of this once again.]
97a7142f 6852 */
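/*
 * Standalone userspace sketch (not part of this file; all numbers are made
 * up): a toy evaluation of the imbalance measure from equation (4) above
 * for two CPUs, showing how the W_i/C_i ratios compare against their average.
 */
#include <stdio.h>

int main(void)
{
	double W[2] = { 3072.0, 1024.0 };	/* W_i,0: e.g. three vs. one nice-0 task */
	double C[2] = { 1024.0, 1024.0 };	/* C_i: equal compute capacity */

	double r0 = W[0] / C[0], r1 = W[1] / C[1];
	double avg = (r0 + r1) / 2.0;

	/* imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }   (4) */
	double imb = (r0 > avg ? r0 : avg) - (r1 < avg ? r1 : avg);

	printf("imb = %.2f (0 means balanced)\n", imb);	/* prints "imb = 2.00" */
	return 0;
}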
bf0f6f24 6853
ed387b78
HS
6854static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6855
0ec8aa00
PZ
6856enum fbq_type { regular, remote, all };
6857
ddcdf6e7 6858#define LBF_ALL_PINNED 0x01
367456c7 6859#define LBF_NEED_BREAK 0x02
6263322c
PZ
6860#define LBF_DST_PINNED 0x04
6861#define LBF_SOME_PINNED 0x08
e022e0d3 6862#define LBF_NOHZ_STATS 0x10
f643ea22 6863#define LBF_NOHZ_AGAIN 0x20
ddcdf6e7
PZ
6864
6865struct lb_env {
6866 struct sched_domain *sd;
6867
ddcdf6e7 6868 struct rq *src_rq;
85c1e7da 6869 int src_cpu;
ddcdf6e7
PZ
6870
6871 int dst_cpu;
6872 struct rq *dst_rq;
6873
88b8dac0
SV
6874 struct cpumask *dst_grpmask;
6875 int new_dst_cpu;
ddcdf6e7 6876 enum cpu_idle_type idle;
bd939f45 6877 long imbalance;
b9403130
MW
6878 /* The set of CPUs under consideration for load-balancing */
6879 struct cpumask *cpus;
6880
ddcdf6e7 6881 unsigned int flags;
367456c7
PZ
6882
6883 unsigned int loop;
6884 unsigned int loop_break;
6885 unsigned int loop_max;
0ec8aa00
PZ
6886
6887 enum fbq_type fbq_type;
163122b7 6888 struct list_head tasks;
ddcdf6e7
PZ
6889};
6890
029632fb
PZ
6891/*
6892 * Is this task likely cache-hot:
6893 */
5d5e2b1b 6894static int task_hot(struct task_struct *p, struct lb_env *env)
029632fb
PZ
6895{
6896 s64 delta;
6897
e5673f28
KT
6898 lockdep_assert_held(&env->src_rq->lock);
6899
029632fb
PZ
6900 if (p->sched_class != &fair_sched_class)
6901 return 0;
6902
6903 if (unlikely(p->policy == SCHED_IDLE))
6904 return 0;
6905
6906 /*
6907 * Buddy candidates are cache hot:
6908 */
5d5e2b1b 6909 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
029632fb
PZ
6910 (&p->se == cfs_rq_of(&p->se)->next ||
6911 &p->se == cfs_rq_of(&p->se)->last))
6912 return 1;
6913
6914 if (sysctl_sched_migration_cost == -1)
6915 return 1;
6916 if (sysctl_sched_migration_cost == 0)
6917 return 0;
6918
5d5e2b1b 6919 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
029632fb
PZ
6920
6921 return delta < (s64)sysctl_sched_migration_cost;
6922}
6923
3a7053b3 6924#ifdef CONFIG_NUMA_BALANCING
c1ceac62 6925/*
2a1ed24c
SD
6926 * Returns 1, if task migration degrades locality
 6927 * Returns 0, if task migration improves locality, i.e. migration is preferred.
6928 * Returns -1, if task migration is not affected by locality.
c1ceac62 6929 */
2a1ed24c 6930static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
3a7053b3 6931{
b1ad065e 6932 struct numa_group *numa_group = rcu_dereference(p->numa_group);
c1ceac62 6933 unsigned long src_faults, dst_faults;
3a7053b3
MG
6934 int src_nid, dst_nid;
6935
2a595721 6936 if (!static_branch_likely(&sched_numa_balancing))
2a1ed24c
SD
6937 return -1;
6938
c3b9bc5b 6939 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
2a1ed24c 6940 return -1;
7a0f3083
MG
6941
6942 src_nid = cpu_to_node(env->src_cpu);
6943 dst_nid = cpu_to_node(env->dst_cpu);
6944
83e1d2cd 6945 if (src_nid == dst_nid)
2a1ed24c 6946 return -1;
7a0f3083 6947
2a1ed24c
SD
6948 /* Migrating away from the preferred node is always bad. */
6949 if (src_nid == p->numa_preferred_nid) {
6950 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6951 return 1;
6952 else
6953 return -1;
6954 }
b1ad065e 6955
c1ceac62
RR
6956 /* Encourage migration to the preferred node. */
6957 if (dst_nid == p->numa_preferred_nid)
2a1ed24c 6958 return 0;
b1ad065e 6959
739294fb
RR
6960 /* Leaving a core idle is often worse than degrading locality. */
6961 if (env->idle != CPU_NOT_IDLE)
6962 return -1;
6963
c1ceac62
RR
6964 if (numa_group) {
6965 src_faults = group_faults(p, src_nid);
6966 dst_faults = group_faults(p, dst_nid);
6967 } else {
6968 src_faults = task_faults(p, src_nid);
6969 dst_faults = task_faults(p, dst_nid);
b1ad065e
RR
6970 }
6971
c1ceac62 6972 return dst_faults < src_faults;
7a0f3083
MG
6973}
6974
3a7053b3 6975#else
2a1ed24c 6976static inline int migrate_degrades_locality(struct task_struct *p,
3a7053b3
MG
6977 struct lb_env *env)
6978{
2a1ed24c 6979 return -1;
7a0f3083 6980}
3a7053b3
MG
6981#endif
6982
1e3c88bd
PZ
6983/*
6984 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6985 */
6986static
8e45cb54 6987int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 6988{
2a1ed24c 6989 int tsk_cache_hot;
e5673f28
KT
6990
6991 lockdep_assert_held(&env->src_rq->lock);
6992
1e3c88bd
PZ
6993 /*
6994 * We do not migrate tasks that are:
d3198084 6995 * 1) throttled_lb_pair, or
1e3c88bd 6996 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
6997 * 3) running (obviously), or
6998 * 4) are cache-hot on their current CPU.
1e3c88bd 6999 */
d3198084
JK
7000 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
7001 return 0;
7002
0c98d344 7003 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
e02e60c1 7004 int cpu;
88b8dac0 7005
ae92882e 7006 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
88b8dac0 7007
6263322c
PZ
7008 env->flags |= LBF_SOME_PINNED;
7009
88b8dac0 7010 /*
97fb7a0a 7011 * Remember if this task can be migrated to any other CPU in
88b8dac0
SV
7012 * our sched_group. We may want to revisit it if we couldn't
7013 * meet load balance goals by pulling other tasks on src_cpu.
7014 *
65a4433a
JH
7015 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
7016 * already computed one in current iteration.
88b8dac0 7017 */
65a4433a 7018 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
7019 return 0;
7020
97fb7a0a 7021 /* Prevent to re-select dst_cpu via env's CPUs: */
e02e60c1 7022 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
0c98d344 7023 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
6263322c 7024 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
7025 env->new_dst_cpu = cpu;
7026 break;
7027 }
88b8dac0 7028 }
e02e60c1 7029
1e3c88bd
PZ
7030 return 0;
7031 }
88b8dac0
SV
7032
 7033 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 7034 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 7035
ddcdf6e7 7036 if (task_running(env->src_rq, p)) {
ae92882e 7037 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
7038 return 0;
7039 }
7040
7041 /*
7042 * Aggressive migration if:
3a7053b3
MG
7043 * 1) destination numa is preferred
7044 * 2) task is cache cold, or
7045 * 3) too many balance attempts have failed.
1e3c88bd 7046 */
2a1ed24c
SD
7047 tsk_cache_hot = migrate_degrades_locality(p, env);
7048 if (tsk_cache_hot == -1)
7049 tsk_cache_hot = task_hot(p, env);
3a7053b3 7050
2a1ed24c 7051 if (tsk_cache_hot <= 0 ||
7a96c231 7052 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
2a1ed24c 7053 if (tsk_cache_hot == 1) {
ae92882e
JP
7054 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
7055 schedstat_inc(p->se.statistics.nr_forced_migrations);
3a7053b3 7056 }
1e3c88bd
PZ
7057 return 1;
7058 }
7059
ae92882e 7060 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
4e2dcb73 7061 return 0;
1e3c88bd
PZ
7062}
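/*
 * Illustrative summary (not from the original source) of how the checks above
 * combine while sd->nr_balance_failed is still within cache_nice_tries:
 *  - migrate_degrades_locality() == 0: migrate, regardless of cache hotness.
 *  - migrate_degrades_locality() == 1: keep the task (counted in
 *    nr_failed_migrations_hot).
 *  - migrate_degrades_locality() == -1: fall back to task_hot(); cold tasks
 *    are migrated, hot ones are kept.
 * Once nr_balance_failed exceeds cache_nice_tries, even hot tasks are moved
 * and lb_hot_gained/nr_forced_migrations are bumped for them.
 */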
7063
897c395f 7064/*
163122b7
KT
7065 * detach_task() -- detach the task for the migration specified in env
7066 */
7067static void detach_task(struct task_struct *p, struct lb_env *env)
7068{
7069 lockdep_assert_held(&env->src_rq->lock);
7070
163122b7 7071 p->on_rq = TASK_ON_RQ_MIGRATING;
5704ac0a 7072 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
163122b7
KT
7073 set_task_cpu(p, env->dst_cpu);
7074}
7075
897c395f 7076/*
e5673f28 7077 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
897c395f 7078 * part of active balancing operations within "domain".
897c395f 7079 *
e5673f28 7080 * Returns a task if successful and NULL otherwise.
897c395f 7081 */
e5673f28 7082static struct task_struct *detach_one_task(struct lb_env *env)
897c395f 7083{
93824900 7084 struct task_struct *p;
897c395f 7085
e5673f28
KT
7086 lockdep_assert_held(&env->src_rq->lock);
7087
93824900
UR
7088 list_for_each_entry_reverse(p,
7089 &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
7090 if (!can_migrate_task(p, env))
7091 continue;
897c395f 7092
163122b7 7093 detach_task(p, env);
e5673f28 7094
367456c7 7095 /*
e5673f28 7096 * Right now, this is only the second place where
163122b7 7097 * lb_gained[env->idle] is updated (other is detach_tasks)
e5673f28 7098 * so we can safely collect stats here rather than
163122b7 7099 * inside detach_tasks().
367456c7 7100 */
ae92882e 7101 schedstat_inc(env->sd->lb_gained[env->idle]);
e5673f28 7102 return p;
897c395f 7103 }
e5673f28 7104 return NULL;
897c395f
PZ
7105}
7106
eb95308e
PZ
7107static const unsigned int sched_nr_migrate_break = 32;
7108
5d6523eb 7109/*
163122b7
KT
7110 * detach_tasks() -- tries to detach up to imbalance weighted load from
7111 * busiest_rq, as part of a balancing operation within domain "sd".
5d6523eb 7112 *
163122b7 7113 * Returns number of detached tasks if successful and 0 otherwise.
5d6523eb 7114 */
163122b7 7115static int detach_tasks(struct lb_env *env)
1e3c88bd 7116{
5d6523eb
PZ
7117 struct list_head *tasks = &env->src_rq->cfs_tasks;
7118 struct task_struct *p;
367456c7 7119 unsigned long load;
163122b7
KT
7120 int detached = 0;
7121
7122 lockdep_assert_held(&env->src_rq->lock);
1e3c88bd 7123
bd939f45 7124 if (env->imbalance <= 0)
5d6523eb 7125 return 0;
1e3c88bd 7126
5d6523eb 7127 while (!list_empty(tasks)) {
985d3a4c
YD
7128 /*
7129 * We don't want to steal all, otherwise we may be treated likewise,
7130 * which could at worst lead to a livelock crash.
7131 */
7132 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
7133 break;
7134
93824900 7135 p = list_last_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 7136
367456c7
PZ
7137 env->loop++;
7138 /* We've more or less seen every task there is, call it quits */
5d6523eb 7139 if (env->loop > env->loop_max)
367456c7 7140 break;
5d6523eb
PZ
7141
7142 /* take a breather every nr_migrate tasks */
367456c7 7143 if (env->loop > env->loop_break) {
eb95308e 7144 env->loop_break += sched_nr_migrate_break;
8e45cb54 7145 env->flags |= LBF_NEED_BREAK;
ee00e66f 7146 break;
a195f004 7147 }
1e3c88bd 7148
d3198084 7149 if (!can_migrate_task(p, env))
367456c7
PZ
7150 goto next;
7151
7152 load = task_h_load(p);
5d6523eb 7153
eb95308e 7154 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
7155 goto next;
7156
bd939f45 7157 if ((load / 2) > env->imbalance)
367456c7 7158 goto next;
1e3c88bd 7159
163122b7
KT
7160 detach_task(p, env);
7161 list_add(&p->se.group_node, &env->tasks);
7162
7163 detached++;
bd939f45 7164 env->imbalance -= load;
1e3c88bd
PZ
7165
7166#ifdef CONFIG_PREEMPT
ee00e66f
PZ
7167 /*
7168 * NEWIDLE balancing is a source of latency, so preemptible
163122b7 7169 * kernels will stop after the first task is detached to minimize
ee00e66f
PZ
7170 * the critical section.
7171 */
5d6523eb 7172 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 7173 break;
1e3c88bd
PZ
7174#endif
7175
ee00e66f
PZ
7176 /*
7177 * We only want to steal up to the prescribed amount of
7178 * weighted load.
7179 */
bd939f45 7180 if (env->imbalance <= 0)
ee00e66f 7181 break;
367456c7
PZ
7182
7183 continue;
7184next:
93824900 7185 list_move(&p->se.group_node, tasks);
1e3c88bd 7186 }
5d6523eb 7187
1e3c88bd 7188 /*
163122b7
KT
7189 * Right now, this is one of only two places we collect this stat
7190 * so we can safely collect detach_one_task() stats here rather
7191 * than inside detach_one_task().
1e3c88bd 7192 */
ae92882e 7193 schedstat_add(env->sd->lb_gained[env->idle], detached);
1e3c88bd 7194
163122b7
KT
7195 return detached;
7196}
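/*
 * Illustrative note (made-up numbers): with env->imbalance = 400, a candidate
 * task with h_load 1024 is skipped because 1024 / 2 > 400 (moving it would
 * overshoot the imbalance), while a task with h_load 512 is detached; the
 * imbalance then drops below zero and the loop stops, so a single migration
 * brings the groups roughly into balance.
 */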
7197
7198/*
7199 * attach_task() -- attach the task detached by detach_task() to its new rq.
7200 */
7201static void attach_task(struct rq *rq, struct task_struct *p)
7202{
7203 lockdep_assert_held(&rq->lock);
7204
7205 BUG_ON(task_rq(p) != rq);
5704ac0a 7206 activate_task(rq, p, ENQUEUE_NOCLOCK);
3ea94de1 7207 p->on_rq = TASK_ON_RQ_QUEUED;
163122b7
KT
7208 check_preempt_curr(rq, p, 0);
7209}
7210
7211/*
7212 * attach_one_task() -- attaches the task returned from detach_one_task() to
7213 * its new rq.
7214 */
7215static void attach_one_task(struct rq *rq, struct task_struct *p)
7216{
8a8c69c3
PZ
7217 struct rq_flags rf;
7218
7219 rq_lock(rq, &rf);
5704ac0a 7220 update_rq_clock(rq);
163122b7 7221 attach_task(rq, p);
8a8c69c3 7222 rq_unlock(rq, &rf);
163122b7
KT
7223}
7224
7225/*
7226 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
7227 * new rq.
7228 */
7229static void attach_tasks(struct lb_env *env)
7230{
7231 struct list_head *tasks = &env->tasks;
7232 struct task_struct *p;
8a8c69c3 7233 struct rq_flags rf;
163122b7 7234
8a8c69c3 7235 rq_lock(env->dst_rq, &rf);
5704ac0a 7236 update_rq_clock(env->dst_rq);
163122b7
KT
7237
7238 while (!list_empty(tasks)) {
7239 p = list_first_entry(tasks, struct task_struct, se.group_node);
7240 list_del_init(&p->se.group_node);
1e3c88bd 7241
163122b7
KT
7242 attach_task(env->dst_rq, p);
7243 }
7244
8a8c69c3 7245 rq_unlock(env->dst_rq, &rf);
1e3c88bd
PZ
7246}
7247
1936c53c
VG
7248static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
7249{
7250 if (cfs_rq->avg.load_avg)
7251 return true;
7252
7253 if (cfs_rq->avg.util_avg)
7254 return true;
7255
7256 return false;
7257}
7258
91c27493 7259static inline bool others_have_blocked(struct rq *rq)
371bf427
VG
7260{
7261 if (READ_ONCE(rq->avg_rt.util_avg))
7262 return true;
7263
3727e0e1
VG
7264 if (READ_ONCE(rq->avg_dl.util_avg))
7265 return true;
7266
91c27493
VG
7267#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
7268 if (READ_ONCE(rq->avg_irq.util_avg))
7269 return true;
7270#endif
7271
371bf427
VG
7272 return false;
7273}
7274
1936c53c
VG
7275#ifdef CONFIG_FAIR_GROUP_SCHED
7276
a9e7f654
TH
7277static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
7278{
7279 if (cfs_rq->load.weight)
7280 return false;
7281
7282 if (cfs_rq->avg.load_sum)
7283 return false;
7284
7285 if (cfs_rq->avg.util_sum)
7286 return false;
7287
1ea6c46a 7288 if (cfs_rq->avg.runnable_load_sum)
a9e7f654
TH
7289 return false;
7290
7291 return true;
7292}
7293
48a16753 7294static void update_blocked_averages(int cpu)
9e3081ca 7295{
9e3081ca 7296 struct rq *rq = cpu_rq(cpu);
a9e7f654 7297 struct cfs_rq *cfs_rq, *pos;
8a8c69c3 7298 struct rq_flags rf;
f643ea22 7299 bool done = true;
9e3081ca 7300
8a8c69c3 7301 rq_lock_irqsave(rq, &rf);
48a16753 7302 update_rq_clock(rq);
9d89c257 7303
9763b67f
PZ
7304 /*
7305 * Iterates the task_group tree in a bottom up fashion, see
7306 * list_add_leaf_cfs_rq() for details.
7307 */
a9e7f654 7308 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
bc427898
VG
7309 struct sched_entity *se;
7310
9d89c257
YD
7311 /* throttled entities do not contribute to load */
7312 if (throttled_hierarchy(cfs_rq))
7313 continue;
48a16753 7314
3a123bbb 7315 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
9d89c257 7316 update_tg_load_avg(cfs_rq, 0);
4e516076 7317
bc427898
VG
7318 /* Propagate pending load changes to the parent, if any: */
7319 se = cfs_rq->tg->se[cpu];
7320 if (se && !skip_blocked_update(se))
88c0616e 7321 update_load_avg(cfs_rq_of(se), se, 0);
a9e7f654
TH
7322
7323 /*
7324 * There can be a lot of idle CPU cgroups. Don't let fully
7325 * decayed cfs_rqs linger on the list.
7326 */
7327 if (cfs_rq_is_decayed(cfs_rq))
7328 list_del_leaf_cfs_rq(cfs_rq);
1936c53c
VG
7329
7330 /* Don't need periodic decay once load/util_avg are null */
7331 if (cfs_rq_has_blocked(cfs_rq))
f643ea22 7332 done = false;
9d89c257 7333 }
371bf427 7334 update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
3727e0e1 7335 update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
91c27493 7336 update_irq_load_avg(rq, 0);
371bf427 7337 /* Don't need periodic decay once load/util_avg are null */
91c27493 7338 if (others_have_blocked(rq))
371bf427 7339 done = false;
e022e0d3
PZ
7340
7341#ifdef CONFIG_NO_HZ_COMMON
7342 rq->last_blocked_load_update_tick = jiffies;
f643ea22
VG
7343 if (done)
7344 rq->has_blocked_load = 0;
e022e0d3 7345#endif
8a8c69c3 7346 rq_unlock_irqrestore(rq, &rf);
9e3081ca
PZ
7347}
7348
9763b67f 7349/*
68520796 7350 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
9763b67f
PZ
7351 * This needs to be done in a top-down fashion because the load of a child
7352 * group is a fraction of its parents load.
7353 */
68520796 7354static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 7355{
68520796
VD
7356 struct rq *rq = rq_of(cfs_rq);
7357 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 7358 unsigned long now = jiffies;
68520796 7359 unsigned long load;
a35b6466 7360
68520796 7361 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
7362 return;
7363
68520796
VD
7364 cfs_rq->h_load_next = NULL;
7365 for_each_sched_entity(se) {
7366 cfs_rq = cfs_rq_of(se);
7367 cfs_rq->h_load_next = se;
7368 if (cfs_rq->last_h_load_update == now)
7369 break;
7370 }
a35b6466 7371
68520796 7372 if (!se) {
7ea241af 7373 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
68520796
VD
7374 cfs_rq->last_h_load_update = now;
7375 }
7376
7377 while ((se = cfs_rq->h_load_next) != NULL) {
7378 load = cfs_rq->h_load;
7ea241af
YD
7379 load = div64_ul(load * se->avg.load_avg,
7380 cfs_rq_load_avg(cfs_rq) + 1);
68520796
VD
7381 cfs_rq = group_cfs_rq(se);
7382 cfs_rq->h_load = load;
7383 cfs_rq->last_h_load_update = now;
7384 }
9763b67f
PZ
7385}
7386
367456c7 7387static unsigned long task_h_load(struct task_struct *p)
230059de 7388{
367456c7 7389 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 7390
68520796 7391 update_cfs_rq_h_load(cfs_rq);
9d89c257 7392 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
7ea241af 7393 cfs_rq_load_avg(cfs_rq) + 1);
230059de
PZ
7394}
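/*
 * Standalone userspace sketch (not part of this file; all load_avg numbers
 * are made up): mirrors the top-down h_load propagation performed by
 * update_cfs_rq_h_load() above for a two-level cgroup hierarchy.
 */
#include <stdio.h>

int main(void)
{
	/* the root cfs_rq's h_load is simply its own load_avg */
	unsigned long h_load = 2048;

	/* per level, top down: group entity load_avg and its parent cfs_rq load_avg */
	unsigned long se_load[2] = { 512, 300 };
	unsigned long rq_load[2] = { 2048, 600 };

	for (int i = 0; i < 2; i++)
		h_load = h_load * se_load[i] / (rq_load[i] + 1);	/* same step as the div64_ul() above */

	/* task_h_load() then scales a task's own load_avg by h_load / (leaf load_avg + 1) */
	printf("leaf cfs_rq h_load = %lu\n", h_load);	/* prints 255 */
	return 0;
}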
7395#else
48a16753 7396static inline void update_blocked_averages(int cpu)
9e3081ca 7397{
6c1d47c0
VG
7398 struct rq *rq = cpu_rq(cpu);
7399 struct cfs_rq *cfs_rq = &rq->cfs;
8a8c69c3 7400 struct rq_flags rf;
6c1d47c0 7401
8a8c69c3 7402 rq_lock_irqsave(rq, &rf);
6c1d47c0 7403 update_rq_clock(rq);
3a123bbb 7404 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
371bf427 7405 update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
3727e0e1 7406 update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
91c27493 7407 update_irq_load_avg(rq, 0);
e022e0d3
PZ
7408#ifdef CONFIG_NO_HZ_COMMON
7409 rq->last_blocked_load_update_tick = jiffies;
91c27493 7410 if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
f643ea22 7411 rq->has_blocked_load = 0;
e022e0d3 7412#endif
8a8c69c3 7413 rq_unlock_irqrestore(rq, &rf);
9e3081ca
PZ
7414}
7415
367456c7 7416static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 7417{
9d89c257 7418 return p->se.avg.load_avg;
1e3c88bd 7419}
230059de 7420#endif
1e3c88bd 7421
1e3c88bd 7422/********** Helpers for find_busiest_group ************************/
caeb178c
RR
7423
7424enum group_type {
7425 group_other = 0,
7426 group_imbalanced,
7427 group_overloaded,
7428};
7429
1e3c88bd
PZ
7430/*
7431 * sg_lb_stats - stats of a sched_group required for load_balancing
7432 */
7433struct sg_lb_stats {
7434 unsigned long avg_load; /*Avg load across the CPUs of the group */
7435 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 7436 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 7437 unsigned long load_per_task;
63b2ca30 7438 unsigned long group_capacity;
9e91d61d 7439 unsigned long group_util; /* Total utilization of the group */
147c5fc2 7440 unsigned int sum_nr_running; /* Nr tasks running in the group */
147c5fc2
PZ
7441 unsigned int idle_cpus;
7442 unsigned int group_weight;
caeb178c 7443 enum group_type group_type;
ea67821b 7444 int group_no_capacity;
0ec8aa00
PZ
7445#ifdef CONFIG_NUMA_BALANCING
7446 unsigned int nr_numa_running;
7447 unsigned int nr_preferred_running;
7448#endif
1e3c88bd
PZ
7449};
7450
56cf515b
JK
7451/*
7452 * sd_lb_stats - Structure to store the statistics of a sched_domain
7453 * during load balancing.
7454 */
7455struct sd_lb_stats {
7456 struct sched_group *busiest; /* Busiest group in this sd */
7457 struct sched_group *local; /* Local group in this sd */
90001d67 7458 unsigned long total_running;
56cf515b 7459 unsigned long total_load; /* Total load of all groups in sd */
63b2ca30 7460 unsigned long total_capacity; /* Total capacity of all groups in sd */
56cf515b
JK
7461 unsigned long avg_load; /* Average load across all groups in sd */
7462
56cf515b 7463 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 7464 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
7465};
7466
147c5fc2
PZ
7467static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7468{
7469 /*
7470 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
7471 * local_stat because update_sg_lb_stats() does a full clear/assignment.
7472 * We must however clear busiest_stat::avg_load because
7473 * update_sd_pick_busiest() reads this before assignment.
7474 */
7475 *sds = (struct sd_lb_stats){
7476 .busiest = NULL,
7477 .local = NULL,
90001d67 7478 .total_running = 0UL,
147c5fc2 7479 .total_load = 0UL,
63b2ca30 7480 .total_capacity = 0UL,
147c5fc2
PZ
7481 .busiest_stat = {
7482 .avg_load = 0UL,
caeb178c
RR
7483 .sum_nr_running = 0,
7484 .group_type = group_other,
147c5fc2
PZ
7485 },
7486 };
7487}
7488
1e3c88bd
PZ
7489/**
7490 * get_sd_load_idx - Obtain the load index for a given sched domain.
7491 * @sd: The sched_domain whose load_idx is to be obtained.
ed1b7732 7492 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
e69f6186
YB
7493 *
7494 * Return: The load index.
1e3c88bd
PZ
7495 */
7496static inline int get_sd_load_idx(struct sched_domain *sd,
7497 enum cpu_idle_type idle)
7498{
7499 int load_idx;
7500
7501 switch (idle) {
7502 case CPU_NOT_IDLE:
7503 load_idx = sd->busy_idx;
7504 break;
7505
7506 case CPU_NEWLY_IDLE:
7507 load_idx = sd->newidle_idx;
7508 break;
7509 default:
7510 load_idx = sd->idle_idx;
7511 break;
7512 }
7513
7514 return load_idx;
7515}
7516
ced549fa 7517static unsigned long scale_rt_capacity(int cpu)
1e3c88bd
PZ
7518{
7519 struct rq *rq = cpu_rq(cpu);
523e979d
VG
7520 unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
7521 unsigned long used, free;
523e979d 7522 unsigned long irq;
b654f7de 7523
2e62c474 7524 irq = cpu_util_irq(rq);
cadefd3d 7525
523e979d
VG
7526 if (unlikely(irq >= max))
7527 return 1;
aa483808 7528
523e979d
VG
7529 used = READ_ONCE(rq->avg_rt.util_avg);
7530 used += READ_ONCE(rq->avg_dl.util_avg);
1e3c88bd 7531
523e979d
VG
7532 if (unlikely(used >= max))
7533 return 1;
1e3c88bd 7534
523e979d 7535 free = max - used;
2e62c474
VG
7536
7537 return scale_irq_capacity(free, irq, max);
1e3c88bd
PZ
7538}
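/*
 * Worked example with made-up numbers (illustrative only): with
 * arch_scale_cpu_capacity() = 1024, avg_irq.util_avg = 100,
 * avg_rt.util_avg = 200 and avg_dl.util_avg = 24, used = 224 and
 * free = 800.  Assuming scale_irq_capacity() scales the remainder by
 * (max - irq) / max, the capacity left for CFS is 800 * 924 / 1024 = 721
 * out of the original 1024.
 */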
7539
ced549fa 7540static void update_cpu_capacity(struct sched_domain *sd, int cpu)
1e3c88bd 7541{
523e979d 7542 unsigned long capacity = scale_rt_capacity(cpu);
1e3c88bd
PZ
7543 struct sched_group *sdg = sd->groups;
7544
523e979d 7545 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
1e3c88bd 7546
ced549fa
NP
7547 if (!capacity)
7548 capacity = 1;
1e3c88bd 7549
ced549fa
NP
7550 cpu_rq(cpu)->cpu_capacity = capacity;
7551 sdg->sgc->capacity = capacity;
bf475ce0 7552 sdg->sgc->min_capacity = capacity;
1e3c88bd
PZ
7553}
7554
63b2ca30 7555void update_group_capacity(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
7556{
7557 struct sched_domain *child = sd->child;
7558 struct sched_group *group, *sdg = sd->groups;
bf475ce0 7559 unsigned long capacity, min_capacity;
4ec4412e
VG
7560 unsigned long interval;
7561
7562 interval = msecs_to_jiffies(sd->balance_interval);
7563 interval = clamp(interval, 1UL, max_load_balance_interval);
63b2ca30 7564 sdg->sgc->next_update = jiffies + interval;
1e3c88bd
PZ
7565
7566 if (!child) {
ced549fa 7567 update_cpu_capacity(sd, cpu);
1e3c88bd
PZ
7568 return;
7569 }
7570
dc7ff76e 7571 capacity = 0;
bf475ce0 7572 min_capacity = ULONG_MAX;
1e3c88bd 7573
74a5ce20
PZ
7574 if (child->flags & SD_OVERLAP) {
7575 /*
7576 * SD_OVERLAP domains cannot assume that child groups
7577 * span the current group.
7578 */
7579
ae4df9d6 7580 for_each_cpu(cpu, sched_group_span(sdg)) {
63b2ca30 7581 struct sched_group_capacity *sgc;
9abf24d4 7582 struct rq *rq = cpu_rq(cpu);
863bffc8 7583
9abf24d4 7584 /*
63b2ca30 7585 * build_sched_domains() -> init_sched_groups_capacity()
9abf24d4
SD
7586 * gets here before we've attached the domains to the
7587 * runqueues.
7588 *
ced549fa
NP
7589 * Use capacity_of(), which is set irrespective of domains
7590 * in update_cpu_capacity().
9abf24d4 7591 *
dc7ff76e 7592 * This avoids capacity from being 0 and
9abf24d4 7593 * causing divide-by-zero issues on boot.
9abf24d4
SD
7594 */
7595 if (unlikely(!rq->sd)) {
ced549fa 7596 capacity += capacity_of(cpu);
bf475ce0
MR
7597 } else {
7598 sgc = rq->sd->groups->sgc;
7599 capacity += sgc->capacity;
9abf24d4 7600 }
863bffc8 7601
bf475ce0 7602 min_capacity = min(capacity, min_capacity);
863bffc8 7603 }
74a5ce20
PZ
7604 } else {
7605 /*
7606 * !SD_OVERLAP domains can assume that child groups
7607 * span the current group.
97a7142f 7608 */
74a5ce20
PZ
7609
7610 group = child->groups;
7611 do {
bf475ce0
MR
7612 struct sched_group_capacity *sgc = group->sgc;
7613
7614 capacity += sgc->capacity;
7615 min_capacity = min(sgc->min_capacity, min_capacity);
74a5ce20
PZ
7616 group = group->next;
7617 } while (group != child->groups);
7618 }
1e3c88bd 7619
63b2ca30 7620 sdg->sgc->capacity = capacity;
bf475ce0 7621 sdg->sgc->min_capacity = min_capacity;
1e3c88bd
PZ
7622}
7623
9d5efe05 7624/*
ea67821b
VG
7625 * Check whether the capacity of the rq has been noticeably reduced by side
7626 * activity. The imbalance_pct is used for the threshold.
7627 * Return true is the capacity is reduced
9d5efe05
SV
7628 */
7629static inline int
ea67821b 7630check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9d5efe05 7631{
ea67821b
VG
7632 return ((rq->cpu_capacity * sd->imbalance_pct) <
7633 (rq->cpu_capacity_orig * 100));
9d5efe05
SV
7634}
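/*
 * Worked example with made-up numbers (illustrative only): with
 * sd->imbalance_pct = 117, cpu_capacity_orig = 1024 and cpu_capacity
 * reduced to 800 by RT/DL/IRQ pressure, 800 * 117 = 93600 is less than
 * 1024 * 100 = 102400, so the capacity counts as noticeably reduced and
 * check_cpu_capacity() returns true.
 */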
7635
30ce5dab
PZ
7636/*
7637 * Group imbalance indicates (and tries to solve) the problem where balancing
0c98d344 7638 * groups is inadequate due to ->cpus_allowed constraints.
30ce5dab 7639 *
97fb7a0a
IM
7640 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
7641 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
30ce5dab
PZ
7642 * Something like:
7643 *
2b4d5b25
IM
7644 * { 0 1 2 3 } { 4 5 6 7 }
7645 * * * * *
30ce5dab
PZ
7646 *
7647 * If we were to balance group-wise we'd place two tasks in the first group and
7648 * two tasks in the second group. Clearly this is undesired as it will overload
97fb7a0a 7649 * cpu 3 and leave one of the CPUs in the second group unused.
30ce5dab
PZ
7650 *
7651 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
7652 * by noticing the lower domain failed to reach balance and had difficulty
7653 * moving tasks due to affinity constraints.
30ce5dab
PZ
7654 *
7655 * When this is so detected; this group becomes a candidate for busiest; see
ed1b7732 7656 * update_sd_pick_busiest(). And calculate_imbalance() and
6263322c 7657 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
7658 * to create an effective group imbalance.
7659 *
7660 * This is a somewhat tricky proposition since the next run might not find the
7661 * group imbalance and decide the groups need to be balanced again. A most
7662 * subtle and fragile situation.
7663 */
7664
6263322c 7665static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 7666{
63b2ca30 7667 return group->sgc->imbalance;
30ce5dab
PZ
7668}
7669
b37d9316 7670/*
ea67821b
VG
7671 * group_has_capacity returns true if the group has spare capacity that could
7672 * be used by some tasks.
7673 * We consider that a group has spare capacity if the * number of task is
9e91d61d
DE
7674 * smaller than the number of CPUs or if the utilization is lower than the
7675 * available capacity for CFS tasks.
ea67821b
VG
7676 * For the latter, we use a threshold to stabilize the state, to take into
7677 * account the variance of the tasks' load and to return true if the available
7678 * capacity in meaningful for the load balancer.
7679 * As an example, an available capacity of 1% can appear but it doesn't make
7680 * any benefit for the load balance.
b37d9316 7681 */
ea67821b
VG
7682static inline bool
7683group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
b37d9316 7684{
ea67821b
VG
7685 if (sgs->sum_nr_running < sgs->group_weight)
7686 return true;
c61037e9 7687
ea67821b 7688 if ((sgs->group_capacity * 100) >
9e91d61d 7689 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 7690 return true;
b37d9316 7691
ea67821b
VG
7692 return false;
7693}
7694
7695/*
7696 * group_is_overloaded returns true if the group has more tasks than it can
7697 * handle.
 7698 * group_is_overloaded is not equal to !group_has_capacity because a group
 7699 * with the exact right number of tasks has no more spare capacity but is not
7700 * overloaded so both group_has_capacity and group_is_overloaded return
7701 * false.
7702 */
7703static inline bool
7704group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
7705{
7706 if (sgs->sum_nr_running <= sgs->group_weight)
7707 return false;
b37d9316 7708
ea67821b 7709 if ((sgs->group_capacity * 100) <
9e91d61d 7710 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 7711 return true;
b37d9316 7712
ea67821b 7713 return false;
b37d9316
PZ
7714}
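/*
 * Worked example with made-up numbers (illustrative only), assuming
 * imbalance_pct = 117 for a group of 4 CPUs (group_weight = 4,
 * group_capacity = 4096):
 *
 *  - 3 running tasks: sum_nr_running < group_weight, so
 *    group_has_capacity() is true regardless of utilization.
 *  - 5 running tasks, group_util = 3600: 4096 * 100 = 409600 is less than
 *    3600 * 117 = 421200, so group_is_overloaded() is true.
 *  - 4 running tasks, group_util = 3700: 409600 < 3700 * 117 = 432900, so
 *    group_has_capacity() is false, yet sum_nr_running does not exceed
 *    group_weight, so group_is_overloaded() is false too: the "exact right
 *    number of tasks" case described above.
 */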
7715
9e0994c0
MR
7716/*
7717 * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
7718 * per-CPU capacity than sched_group ref.
7719 */
7720static inline bool
7721group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
7722{
7723 return sg->sgc->min_capacity * capacity_margin <
7724 ref->sgc->min_capacity * 1024;
7725}
7726
79a89f92
LY
7727static inline enum
7728group_type group_classify(struct sched_group *group,
7729 struct sg_lb_stats *sgs)
caeb178c 7730{
ea67821b 7731 if (sgs->group_no_capacity)
caeb178c
RR
7732 return group_overloaded;
7733
7734 if (sg_imbalanced(group))
7735 return group_imbalanced;
7736
7737 return group_other;
7738}
7739
63928384 7740static bool update_nohz_stats(struct rq *rq, bool force)
e022e0d3
PZ
7741{
7742#ifdef CONFIG_NO_HZ_COMMON
7743 unsigned int cpu = rq->cpu;
7744
f643ea22
VG
7745 if (!rq->has_blocked_load)
7746 return false;
7747
e022e0d3 7748 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
f643ea22 7749 return false;
e022e0d3 7750
63928384 7751 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
f643ea22 7752 return true;
e022e0d3
PZ
7753
7754 update_blocked_averages(cpu);
f643ea22
VG
7755
7756 return rq->has_blocked_load;
7757#else
7758 return false;
e022e0d3
PZ
7759#endif
7760}
7761
1e3c88bd
PZ
7762/**
7763 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 7764 * @env: The load balancing environment.
1e3c88bd 7765 * @group: sched_group whose statistics are to be updated.
1e3c88bd 7766 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 7767 * @local_group: Does group contain this_cpu.
1e3c88bd 7768 * @sgs: variable to hold the statistics for this group.
cd3bd4e6 7769 * @overload: Indicate more than one runnable task for any CPU.
1e3c88bd 7770 */
bd939f45
PZ
7771static inline void update_sg_lb_stats(struct lb_env *env,
7772 struct sched_group *group, int load_idx,
4486edd1
TC
7773 int local_group, struct sg_lb_stats *sgs,
7774 bool *overload)
1e3c88bd 7775{
30ce5dab 7776 unsigned long load;
a426f99c 7777 int i, nr_running;
1e3c88bd 7778
b72ff13c
PZ
7779 memset(sgs, 0, sizeof(*sgs));
7780
ae4df9d6 7781 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
1e3c88bd
PZ
7782 struct rq *rq = cpu_rq(i);
7783
63928384 7784 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
f643ea22 7785 env->flags |= LBF_NOHZ_AGAIN;
e022e0d3 7786
97fb7a0a 7787 /* Bias balancing toward CPUs of our domain: */
6263322c 7788 if (local_group)
04f733b4 7789 load = target_load(i, load_idx);
6263322c 7790 else
1e3c88bd 7791 load = source_load(i, load_idx);
1e3c88bd
PZ
7792
7793 sgs->group_load += load;
9e91d61d 7794 sgs->group_util += cpu_util(i);
65fdac08 7795 sgs->sum_nr_running += rq->cfs.h_nr_running;
4486edd1 7796
a426f99c
WL
7797 nr_running = rq->nr_running;
7798 if (nr_running > 1)
4486edd1
TC
7799 *overload = true;
7800
0ec8aa00
PZ
7801#ifdef CONFIG_NUMA_BALANCING
7802 sgs->nr_numa_running += rq->nr_numa_running;
7803 sgs->nr_preferred_running += rq->nr_preferred_running;
7804#endif
c7132dd6 7805 sgs->sum_weighted_load += weighted_cpuload(rq);
a426f99c
WL
7806 /*
7807 * No need to call idle_cpu() if nr_running is not 0
7808 */
7809 if (!nr_running && idle_cpu(i))
aae6d3dd 7810 sgs->idle_cpus++;
1e3c88bd
PZ
7811 }
7812
63b2ca30
NP
7813 /* Adjust by relative CPU capacity of the group */
7814 sgs->group_capacity = group->sgc->capacity;
ca8ce3d0 7815 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
1e3c88bd 7816
dd5feea1 7817 if (sgs->sum_nr_running)
38d0f770 7818 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 7819
aae6d3dd 7820 sgs->group_weight = group->group_weight;
b37d9316 7821
ea67821b 7822 sgs->group_no_capacity = group_is_overloaded(env, sgs);
79a89f92 7823 sgs->group_type = group_classify(group, sgs);
1e3c88bd
PZ
7824}
7825
532cb4c4
MN
7826/**
7827 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 7828 * @env: The load balancing environment.
532cb4c4
MN
7829 * @sds: sched_domain statistics
7830 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 7831 * @sgs: sched_group statistics
532cb4c4
MN
7832 *
7833 * Determine if @sg is a busier group than the previously selected
7834 * busiest group.
e69f6186
YB
7835 *
7836 * Return: %true if @sg is a busier group than the previously selected
7837 * busiest group. %false otherwise.
532cb4c4 7838 */
bd939f45 7839static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
7840 struct sd_lb_stats *sds,
7841 struct sched_group *sg,
bd939f45 7842 struct sg_lb_stats *sgs)
532cb4c4 7843{
caeb178c 7844 struct sg_lb_stats *busiest = &sds->busiest_stat;
532cb4c4 7845
caeb178c 7846 if (sgs->group_type > busiest->group_type)
532cb4c4
MN
7847 return true;
7848
caeb178c
RR
7849 if (sgs->group_type < busiest->group_type)
7850 return false;
7851
7852 if (sgs->avg_load <= busiest->avg_load)
7853 return false;
7854
9e0994c0
MR
7855 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
7856 goto asym_packing;
7857
7858 /*
7859 * Candidate sg has no more than one task per CPU and
7860 * has higher per-CPU capacity. Migrating tasks to less
7861 * capable CPUs may harm throughput. Maximize throughput,
7862 * power/energy consequences are not considered.
7863 */
7864 if (sgs->sum_nr_running <= sgs->group_weight &&
7865 group_smaller_cpu_capacity(sds->local, sg))
7866 return false;
7867
7868asym_packing:
caeb178c
RR
7869 /* This is the busiest node in its class. */
7870 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
7871 return true;
7872
97fb7a0a 7873 /* No ASYM_PACKING if target CPU is already busy */
1f621e02
SD
7874 if (env->idle == CPU_NOT_IDLE)
7875 return true;
532cb4c4 7876 /*
afe06efd
TC
7877 * ASYM_PACKING needs to move all the work to the highest
 7878 * priority CPUs in the group, therefore mark all groups
7879 * of lower priority than ourself as busy.
532cb4c4 7880 */
afe06efd
TC
7881 if (sgs->sum_nr_running &&
7882 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
532cb4c4
MN
7883 if (!sds->busiest)
7884 return true;
7885
97fb7a0a 7886 /* Prefer to move from lowest priority CPU's work */
afe06efd
TC
7887 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
7888 sg->asym_prefer_cpu))
532cb4c4
MN
7889 return true;
7890 }
7891
7892 return false;
7893}
7894
0ec8aa00
PZ
7895#ifdef CONFIG_NUMA_BALANCING
7896static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7897{
7898 if (sgs->sum_nr_running > sgs->nr_numa_running)
7899 return regular;
7900 if (sgs->sum_nr_running > sgs->nr_preferred_running)
7901 return remote;
7902 return all;
7903}
7904
7905static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7906{
7907 if (rq->nr_running > rq->nr_numa_running)
7908 return regular;
7909 if (rq->nr_running > rq->nr_preferred_running)
7910 return remote;
7911 return all;
7912}
7913#else
7914static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7915{
7916 return all;
7917}
7918
7919static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7920{
7921 return regular;
7922}
7923#endif /* CONFIG_NUMA_BALANCING */
7924
1e3c88bd 7925/**
461819ac 7926 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 7927 * @env: The load balancing environment.
1e3c88bd
PZ
7928 * @sds: variable to hold the statistics for this sched_domain.
7929 */
0ec8aa00 7930static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 7931{
bd939f45
PZ
7932 struct sched_domain *child = env->sd->child;
7933 struct sched_group *sg = env->sd->groups;
05b40e05 7934 struct sg_lb_stats *local = &sds->local_stat;
56cf515b 7935 struct sg_lb_stats tmp_sgs;
1e3c88bd 7936 int load_idx, prefer_sibling = 0;
4486edd1 7937 bool overload = false;
1e3c88bd
PZ
7938
7939 if (child && child->flags & SD_PREFER_SIBLING)
7940 prefer_sibling = 1;
7941
e022e0d3 7942#ifdef CONFIG_NO_HZ_COMMON
f643ea22 7943 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
e022e0d3 7944 env->flags |= LBF_NOHZ_STATS;
e022e0d3
PZ
7945#endif
7946
bd939f45 7947 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
7948
7949 do {
56cf515b 7950 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
7951 int local_group;
7952
ae4df9d6 7953 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
56cf515b
JK
7954 if (local_group) {
7955 sds->local = sg;
05b40e05 7956 sgs = local;
b72ff13c
PZ
7957
7958 if (env->idle != CPU_NEWLY_IDLE ||
63b2ca30
NP
7959 time_after_eq(jiffies, sg->sgc->next_update))
7960 update_group_capacity(env->sd, env->dst_cpu);
56cf515b 7961 }
1e3c88bd 7962
4486edd1
TC
7963 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7964 &overload);
1e3c88bd 7965
b72ff13c
PZ
7966 if (local_group)
7967 goto next_group;
7968
1e3c88bd
PZ
7969 /*
7970 * In case the child domain prefers tasks go to siblings
ea67821b 7971 * first, lower the sg capacity so that we'll try
75dd321d
NR
7972 * and move all the excess tasks away. We lower the capacity
7973 * of a group only if the local group has the capacity to fit
ea67821b
VG
7974 * these excess tasks. The extra check prevents the case where
7975 * you always pull from the heaviest group when it is already
7976 * under-utilized (possible with a large weight task outweighs
7977 * the tasks on the system).
1e3c88bd 7978 */
b72ff13c 7979 if (prefer_sibling && sds->local &&
05b40e05
SD
7980 group_has_capacity(env, local) &&
7981 (sgs->sum_nr_running > local->sum_nr_running + 1)) {
ea67821b 7982 sgs->group_no_capacity = 1;
79a89f92 7983 sgs->group_type = group_classify(sg, sgs);
cb0b9f24 7984 }
1e3c88bd 7985
b72ff13c 7986 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 7987 sds->busiest = sg;
56cf515b 7988 sds->busiest_stat = *sgs;
1e3c88bd
PZ
7989 }
7990
b72ff13c
PZ
7991next_group:
7992 /* Now, start updating sd_lb_stats */
90001d67 7993 sds->total_running += sgs->sum_nr_running;
b72ff13c 7994 sds->total_load += sgs->group_load;
63b2ca30 7995 sds->total_capacity += sgs->group_capacity;
b72ff13c 7996
532cb4c4 7997 sg = sg->next;
bd939f45 7998 } while (sg != env->sd->groups);
0ec8aa00 7999
f643ea22
VG
8000#ifdef CONFIG_NO_HZ_COMMON
8001 if ((env->flags & LBF_NOHZ_AGAIN) &&
8002 cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
8003
8004 WRITE_ONCE(nohz.next_blocked,
8005 jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
8006 }
8007#endif
8008
0ec8aa00
PZ
8009 if (env->sd->flags & SD_NUMA)
8010 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
4486edd1
TC
8011
8012 if (!env->sd->parent) {
8013 /* update overload indicator if we are at root domain */
8014 if (env->dst_rq->rd->overload != overload)
8015 env->dst_rq->rd->overload = overload;
8016 }
532cb4c4
MN
8017}
8018
532cb4c4
MN
8019/**
8020 * check_asym_packing - Check to see if the group is packed into the
0ba42a59 8021 * sched domain.
532cb4c4
MN
8022 *
 8023 * This is primarily intended to be used at the sibling level. Some
8024 * cores like POWER7 prefer to use lower numbered SMT threads. In the
8025 * case of POWER7, it can move to lower SMT modes only when higher
8026 * threads are idle. When in lower SMT modes, the threads will
8027 * perform better since they share less core resources. Hence when we
8028 * have idle threads, we want them to be the higher ones.
8029 *
8030 * This packing function is run on idle threads. It checks to see if
8031 * the busiest CPU in this domain (core in the P7 case) has a higher
8032 * CPU number than the packing function is being run on. Here we are
 8033 * assuming a lower CPU number will be equivalent to a lower SMT thread
8034 * number.
8035 *
e69f6186 8036 * Return: 1 when packing is required and a task should be moved to
46123355 8037 * this CPU. The amount of the imbalance is returned in env->imbalance.
b6b12294 8038 *
cd96891d 8039 * @env: The load balancing environment.
532cb4c4 8040 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 8041 */
bd939f45 8042static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
8043{
8044 int busiest_cpu;
8045
bd939f45 8046 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
8047 return 0;
8048
1f621e02
SD
8049 if (env->idle == CPU_NOT_IDLE)
8050 return 0;
8051
532cb4c4
MN
8052 if (!sds->busiest)
8053 return 0;
8054
afe06efd
TC
8055 busiest_cpu = sds->busiest->asym_prefer_cpu;
8056 if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
532cb4c4
MN
8057 return 0;
8058
bd939f45 8059 env->imbalance = DIV_ROUND_CLOSEST(
63b2ca30 8060 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
ca8ce3d0 8061 SCHED_CAPACITY_SCALE);
bd939f45 8062
532cb4c4 8063 return 1;
1e3c88bd
PZ
8064}
8065
8066/**
8067 * fix_small_imbalance - Calculate the minor imbalance that exists
8068 * amongst the groups of a sched_domain, during
8069 * load balancing.
cd96891d 8070 * @env: The load balancing environment.
1e3c88bd 8071 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 8072 */
bd939f45
PZ
8073static inline
8074void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 8075{
63b2ca30 8076 unsigned long tmp, capa_now = 0, capa_move = 0;
1e3c88bd 8077 unsigned int imbn = 2;
dd5feea1 8078 unsigned long scaled_busy_load_per_task;
56cf515b 8079 struct sg_lb_stats *local, *busiest;
1e3c88bd 8080
56cf515b
JK
8081 local = &sds->local_stat;
8082 busiest = &sds->busiest_stat;
1e3c88bd 8083
56cf515b
JK
8084 if (!local->sum_nr_running)
8085 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
8086 else if (busiest->load_per_task > local->load_per_task)
8087 imbn = 1;
dd5feea1 8088
56cf515b 8089 scaled_busy_load_per_task =
ca8ce3d0 8090 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 8091 busiest->group_capacity;
56cf515b 8092
3029ede3
VD
8093 if (busiest->avg_load + scaled_busy_load_per_task >=
8094 local->avg_load + (scaled_busy_load_per_task * imbn)) {
56cf515b 8095 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
8096 return;
8097 }
8098
8099 /*
8100 * OK, we don't have enough imbalance to justify moving tasks,
ced549fa 8101 * however we may be able to increase total CPU capacity used by
1e3c88bd
PZ
8102 * moving them.
8103 */
8104
63b2ca30 8105 capa_now += busiest->group_capacity *
56cf515b 8106 min(busiest->load_per_task, busiest->avg_load);
63b2ca30 8107 capa_now += local->group_capacity *
56cf515b 8108 min(local->load_per_task, local->avg_load);
ca8ce3d0 8109 capa_now /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
8110
8111 /* Amount of load we'd subtract */
a2cd4260 8112 if (busiest->avg_load > scaled_busy_load_per_task) {
63b2ca30 8113 capa_move += busiest->group_capacity *
56cf515b 8114 min(busiest->load_per_task,
a2cd4260 8115 busiest->avg_load - scaled_busy_load_per_task);
56cf515b 8116 }
1e3c88bd
PZ
8117
8118 /* Amount of load we'd add */
63b2ca30 8119 if (busiest->avg_load * busiest->group_capacity <
ca8ce3d0 8120 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
63b2ca30
NP
8121 tmp = (busiest->avg_load * busiest->group_capacity) /
8122 local->group_capacity;
56cf515b 8123 } else {
ca8ce3d0 8124 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 8125 local->group_capacity;
56cf515b 8126 }
63b2ca30 8127 capa_move += local->group_capacity *
3ae11c90 8128 min(local->load_per_task, local->avg_load + tmp);
ca8ce3d0 8129 capa_move /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
8130
8131 /* Move if we gain throughput */
63b2ca30 8132 if (capa_move > capa_now)
56cf515b 8133 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
8134}
8135
8136/**
8137 * calculate_imbalance - Calculate the amount of imbalance present within the
8138 * groups of a given sched_domain during load balance.
bd939f45 8139 * @env: load balance environment
1e3c88bd 8140 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 8141 */
bd939f45 8142static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 8143{
dd5feea1 8144 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
8145 struct sg_lb_stats *local, *busiest;
8146
8147 local = &sds->local_stat;
56cf515b 8148 busiest = &sds->busiest_stat;
dd5feea1 8149
caeb178c 8150 if (busiest->group_type == group_imbalanced) {
30ce5dab
PZ
8151 /*
8152 * In the group_imb case we cannot rely on group-wide averages
97fb7a0a 8153 * to ensure CPU-load equilibrium, look at wider averages. XXX
30ce5dab 8154 */
56cf515b
JK
8155 busiest->load_per_task =
8156 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
8157 }
8158
1e3c88bd 8159 /*
885e542c
DE
8160 * Avg load of busiest sg can be less and avg load of local sg can
8161 * be greater than avg load across all sgs of sd because avg load
8162 * factors in sg capacity and sgs with smaller group_type are
8163 * skipped when updating the busiest sg:
1e3c88bd 8164 */
b1885550
VD
8165 if (busiest->avg_load <= sds->avg_load ||
8166 local->avg_load >= sds->avg_load) {
bd939f45
PZ
8167 env->imbalance = 0;
8168 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
8169 }
8170
9a5d9ba6 8171 /*
97fb7a0a 8172 * If there aren't any idle CPUs, avoid creating some.
9a5d9ba6
PZ
8173 */
8174 if (busiest->group_type == group_overloaded &&
8175 local->group_type == group_overloaded) {
1be0eb2a 8176 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
cfa10334 8177 if (load_above_capacity > busiest->group_capacity) {
ea67821b 8178 load_above_capacity -= busiest->group_capacity;
26656215 8179 load_above_capacity *= scale_load_down(NICE_0_LOAD);
cfa10334
MR
8180 load_above_capacity /= busiest->group_capacity;
8181 } else
ea67821b 8182 load_above_capacity = ~0UL;
dd5feea1
SS
8183 }
8184
8185 /*
97fb7a0a 8186 * We're trying to get all the CPUs to the average_load, so we don't
dd5feea1 8187 * want to push ourselves above the average load, nor do we wish to
97fb7a0a 8188 * reduce the max loaded CPU below the average load. At the same time,
0a9b23ce
DE
8189 * we also don't want to reduce the group load below the group
8190 * capacity. Thus we look for the minimum possible imbalance.
dd5feea1 8191 */
30ce5dab 8192 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
8193
8194 /* How much load to actually move to equalise the imbalance */
56cf515b 8195 env->imbalance = min(
63b2ca30
NP
8196 max_pull * busiest->group_capacity,
8197 (sds->avg_load - local->avg_load) * local->group_capacity
ca8ce3d0 8198 ) / SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
8199
8200 /*
8201 * if *imbalance is less than the average load per runnable task
25985edc 8202 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
8203 * a think about bumping its value to force at least one task to be
8204 * moved
8205 */
56cf515b 8206 if (env->imbalance < busiest->load_per_task)
bd939f45 8207 return fix_small_imbalance(env, sds);
1e3c88bd 8208}
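/*
 * Worked example with made-up numbers (illustrative only): with
 * sds->avg_load = 1200, busiest->avg_load = 1600, local->avg_load = 800,
 * both group capacities at SCHED_CAPACITY_SCALE (1024) and
 * load_above_capacity left at ~0UL, max_pull = min(1600 - 1200, ~0UL) = 400
 * and env->imbalance = min(400 * 1024, 400 * 1024) / 1024 = 400, i.e. about
 * 400 units of weighted load should be moved to pull both groups towards
 * the domain average.
 */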
fab47622 8209
1e3c88bd
PZ
8210/******* find_busiest_group() helpers end here *********************/
8211
8212/**
8213 * find_busiest_group - Returns the busiest group within the sched_domain
0a9b23ce 8214 * if there is an imbalance.
1e3c88bd
PZ
8215 *
8216 * Also calculates the amount of weighted load which should be moved
8217 * to restore balance.
8218 *
cd96891d 8219 * @env: The load balancing environment.
1e3c88bd 8220 *
e69f6186 8221 * Return: - The busiest group if imbalance exists.
1e3c88bd 8222 */
56cf515b 8223static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 8224{
56cf515b 8225 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
8226 struct sd_lb_stats sds;
8227
147c5fc2 8228 init_sd_lb_stats(&sds);
1e3c88bd
PZ
8229
8230 /*
 8231 * Compute the various statistics relevant for load balancing at
8232 * this level.
8233 */
23f0d209 8234 update_sd_lb_stats(env, &sds);
56cf515b
JK
8235 local = &sds.local_stat;
8236 busiest = &sds.busiest_stat;
1e3c88bd 8237
ea67821b 8238 /* ASYM feature bypasses nice load balance check */
1f621e02 8239 if (check_asym_packing(env, &sds))
532cb4c4
MN
8240 return sds.busiest;
8241
cc57aa8f 8242 /* There is no busy sibling group to pull tasks from */
56cf515b 8243 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
8244 goto out_balanced;
8245
90001d67 8246 /* XXX broken for overlapping NUMA groups */
ca8ce3d0
NP
8247 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
8248 / sds.total_capacity;
b0432d8f 8249
866ab43e
PZ
8250 /*
8251 * If the busiest group is imbalanced the below checks don't
30ce5dab 8252 * work because they assume all things are equal, which typically
866ab43e
PZ
8253 * isn't true due to cpus_allowed constraints and the like.
8254 */
caeb178c 8255 if (busiest->group_type == group_imbalanced)
866ab43e
PZ
8256 goto force_balance;
8257
583ffd99
BJ
8258 /*
8259 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
8260 * capacities from resulting in underutilization due to avg_load.
8261 */
8262 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
ea67821b 8263 busiest->group_no_capacity)
fab47622
NR
8264 goto force_balance;
8265
cc57aa8f 8266 /*
9c58c79a 8267 * If the local group is busier than the selected busiest group
cc57aa8f
PZ
8268	 * don't try to pull any tasks.
8269 */
56cf515b 8270 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
8271 goto out_balanced;
8272
cc57aa8f
PZ
8273 /*
8274 * Don't pull any tasks if this group is already above the domain
8275 * average load.
8276 */
56cf515b 8277 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
8278 goto out_balanced;
8279
bd939f45 8280 if (env->idle == CPU_IDLE) {
aae6d3dd 8281 /*
97fb7a0a 8282 * This CPU is idle. If the busiest group is not overloaded
43f4d666 8283 * and there is no imbalance between this and busiest group
97fb7a0a 8284 * wrt idle CPUs, it is balanced. The imbalance becomes
43f4d666
VG
8285	 * significant if the diff is greater than 1, otherwise we
8286	 * might end up just moving the imbalance to another group.
aae6d3dd 8287 */
43f4d666
VG
8288 if ((busiest->group_type != group_overloaded) &&
8289 (local->idle_cpus <= (busiest->idle_cpus + 1)))
aae6d3dd 8290 goto out_balanced;
c186fafe
PZ
8291 } else {
8292 /*
8293 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
8294 * imbalance_pct to be conservative.
8295 */
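		/*
		 * For example (imbalance_pct is topology dependent, typically in
		 * the 110-125 range): with imbalance_pct == 125 the busiest group
		 * must carry more than 125% of the local group's avg_load before
		 * the domain is treated as unbalanced here.
		 */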
56cf515b
JK
8296 if (100 * busiest->avg_load <=
8297 env->sd->imbalance_pct * local->avg_load)
c186fafe 8298 goto out_balanced;
aae6d3dd 8299 }
1e3c88bd 8300
fab47622 8301force_balance:
1e3c88bd 8302 /* Looks like there is an imbalance. Compute it */
bd939f45 8303 calculate_imbalance(env, &sds);
1e3c88bd
PZ
8304 return sds.busiest;
8305
8306out_balanced:
bd939f45 8307 env->imbalance = 0;
1e3c88bd
PZ
8308 return NULL;
8309}
8310
8311/*
97fb7a0a 8312 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
1e3c88bd 8313 */
bd939f45 8314static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 8315 struct sched_group *group)
1e3c88bd
PZ
8316{
8317 struct rq *busiest = NULL, *rq;
ced549fa 8318 unsigned long busiest_load = 0, busiest_capacity = 1;
1e3c88bd
PZ
8319 int i;
8320
ae4df9d6 8321 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
ea67821b 8322 unsigned long capacity, wl;
0ec8aa00
PZ
8323 enum fbq_type rt;
8324
8325 rq = cpu_rq(i);
8326 rt = fbq_classify_rq(rq);
1e3c88bd 8327
0ec8aa00
PZ
8328 /*
8329 * We classify groups/runqueues into three groups:
8330 * - regular: there are !numa tasks
8331 * - remote: there are numa tasks that run on the 'wrong' node
8332 * - all: there is no distinction
8333 *
8334 * In order to avoid migrating ideally placed numa tasks,
8335	 * ignore those when there are better options.
8336 *
8337 * If we ignore the actual busiest queue to migrate another
8338 * task, the next balance pass can still reduce the busiest
8339 * queue by moving tasks around inside the node.
8340 *
8341 * If we cannot move enough load due to this classification
8342 * the next pass will adjust the group classification and
8343 * allow migration of more tasks.
8344 *
8345 * Both cases only affect the total convergence complexity.
8346 */
8347 if (rt > env->fbq_type)
8348 continue;
8349
ced549fa 8350 capacity = capacity_of(i);
9d5efe05 8351
c7132dd6 8352 wl = weighted_cpuload(rq);
1e3c88bd 8353
6e40f5bb
TG
8354 /*
8355 * When comparing with imbalance, use weighted_cpuload()
97fb7a0a 8356 * which is not scaled with the CPU capacity.
6e40f5bb 8357 */
ea67821b
VG
8358
8359 if (rq->nr_running == 1 && wl > env->imbalance &&
8360 !check_cpu_capacity(rq, env->sd))
1e3c88bd
PZ
8361 continue;
8362
6e40f5bb 8363 /*
97fb7a0a
IM
8364	 * For the load comparisons with the other CPUs, consider
8365 * the weighted_cpuload() scaled with the CPU capacity, so
8366 * that the load can be moved away from the CPU that is
ced549fa 8367 * potentially running at a lower capacity.
95a79b80 8368 *
ced549fa 8369 * Thus we're looking for max(wl_i / capacity_i), crosswise
95a79b80 8370 * multiplication to rid ourselves of the division works out
ced549fa
NP
8371 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
8372 * our previous maximum.
6e40f5bb 8373 */
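		/*
		 * Illustrative numbers: a candidate with wl == 600 on capacity 512
		 * replaces a current best of wl == 800 on capacity 1024, since
		 * 600 * 1024 > 800 * 512, i.e. the smaller CPU is relatively busier.
		 */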
ced549fa 8374 if (wl * busiest_capacity > busiest_load * capacity) {
95a79b80 8375 busiest_load = wl;
ced549fa 8376 busiest_capacity = capacity;
1e3c88bd
PZ
8377 busiest = rq;
8378 }
8379 }
8380
8381 return busiest;
8382}
8383
8384/*
8385 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
8386 * so long as it is large enough.
8387 */
8388#define MAX_PINNED_INTERVAL 512
8389
bd939f45 8390static int need_active_balance(struct lb_env *env)
1af3ed3d 8391{
bd939f45
PZ
8392 struct sched_domain *sd = env->sd;
8393
8394 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
8395
8396 /*
8397 * ASYM_PACKING needs to force migrate tasks from busy but
afe06efd
TC
8398 * lower priority CPUs in order to pack all tasks in the
8399 * highest priority CPUs.
532cb4c4 8400 */
afe06efd
TC
8401 if ((sd->flags & SD_ASYM_PACKING) &&
8402 sched_asym_prefer(env->dst_cpu, env->src_cpu))
532cb4c4 8403 return 1;
1af3ed3d
PZ
8404 }
8405
1aaf90a4
VG
8406 /*
8407	 * The dst_cpu is idle and the src CPU has only 1 CFS task.
8408	 * It's worth migrating the task if the src_cpu's capacity is reduced
8409	 * because of other sched_class tasks or IRQs, provided more capacity
8410	 * stays available on dst_cpu.
8411 */
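	/*
	 * Example (imbalance_pct is topology dependent, e.g. 125): the check
	 * below fires only when src_cpu's remaining capacity has dropped
	 * below 100/125 == 80% of dst_cpu's capacity.
	 */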
8412 if ((env->idle != CPU_NOT_IDLE) &&
8413 (env->src_rq->cfs.h_nr_running == 1)) {
8414 if ((check_cpu_capacity(env->src_rq, sd)) &&
8415 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
8416 return 1;
8417 }
8418
1af3ed3d
PZ
8419 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
8420}
8421
969c7921
TH
8422static int active_load_balance_cpu_stop(void *data);
8423
23f0d209
JK
8424static int should_we_balance(struct lb_env *env)
8425{
8426 struct sched_group *sg = env->sd->groups;
23f0d209
JK
8427 int cpu, balance_cpu = -1;
8428
024c9d2f
PZ
8429 /*
8430	 * Ensure the balancing environment is consistent; this can happen
8431 * when the softirq triggers 'during' hotplug.
8432 */
8433 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
8434 return 0;
8435
23f0d209 8436 /*
97fb7a0a 8437 * In the newly idle case, we will allow all the CPUs
23f0d209
JK
8438 * to do the newly idle load balance.
8439 */
8440 if (env->idle == CPU_NEWLY_IDLE)
8441 return 1;
8442
97fb7a0a 8443 /* Try to find first idle CPU */
e5c14b1f 8444 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
af218122 8445 if (!idle_cpu(cpu))
23f0d209
JK
8446 continue;
8447
8448 balance_cpu = cpu;
8449 break;
8450 }
8451
8452 if (balance_cpu == -1)
8453 balance_cpu = group_balance_cpu(sg);
8454
8455 /*
97fb7a0a 8456	 * The first idle CPU, or the first (busiest) CPU in this sched group,
23f0d209
JK
8457	 * is eligible for doing load balancing at this domain and above.
8458 */
b0cff9d8 8459 return balance_cpu == env->dst_cpu;
23f0d209
JK
8460}
8461
1e3c88bd
PZ
8462/*
8463 * Check this_cpu to ensure it is balanced within domain. Attempt to move
8464 * tasks if there is an imbalance.
8465 */
8466static int load_balance(int this_cpu, struct rq *this_rq,
8467 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 8468 int *continue_balancing)
1e3c88bd 8469{
88b8dac0 8470 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 8471 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 8472 struct sched_group *group;
1e3c88bd 8473 struct rq *busiest;
8a8c69c3 8474 struct rq_flags rf;
4ba29684 8475 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
1e3c88bd 8476
8e45cb54
PZ
8477 struct lb_env env = {
8478 .sd = sd,
ddcdf6e7
PZ
8479 .dst_cpu = this_cpu,
8480 .dst_rq = this_rq,
ae4df9d6 8481 .dst_grpmask = sched_group_span(sd->groups),
8e45cb54 8482 .idle = idle,
eb95308e 8483 .loop_break = sched_nr_migrate_break,
b9403130 8484 .cpus = cpus,
0ec8aa00 8485 .fbq_type = all,
163122b7 8486 .tasks = LIST_HEAD_INIT(env.tasks),
8e45cb54
PZ
8487 };
8488
65a4433a 8489 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
1e3c88bd 8490
ae92882e 8491 schedstat_inc(sd->lb_count[idle]);
1e3c88bd
PZ
8492
8493redo:
23f0d209
JK
8494 if (!should_we_balance(&env)) {
8495 *continue_balancing = 0;
1e3c88bd 8496 goto out_balanced;
23f0d209 8497 }
1e3c88bd 8498
23f0d209 8499 group = find_busiest_group(&env);
1e3c88bd 8500 if (!group) {
ae92882e 8501 schedstat_inc(sd->lb_nobusyg[idle]);
1e3c88bd
PZ
8502 goto out_balanced;
8503 }
8504
b9403130 8505 busiest = find_busiest_queue(&env, group);
1e3c88bd 8506 if (!busiest) {
ae92882e 8507 schedstat_inc(sd->lb_nobusyq[idle]);
1e3c88bd
PZ
8508 goto out_balanced;
8509 }
8510
78feefc5 8511 BUG_ON(busiest == env.dst_rq);
1e3c88bd 8512
ae92882e 8513 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
1e3c88bd 8514
1aaf90a4
VG
8515 env.src_cpu = busiest->cpu;
8516 env.src_rq = busiest;
8517
1e3c88bd
PZ
8518 ld_moved = 0;
8519 if (busiest->nr_running > 1) {
8520 /*
8521 * Attempt to move tasks. If find_busiest_group has found
8522 * an imbalance but busiest->nr_running <= 1, the group is
8523 * still unbalanced. ld_moved simply stays zero, so it is
8524 * correctly treated as an imbalance.
8525 */
8e45cb54 8526 env.flags |= LBF_ALL_PINNED;
c82513e5 8527 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 8528
5d6523eb 8529more_balance:
8a8c69c3 8530 rq_lock_irqsave(busiest, &rf);
3bed5e21 8531 update_rq_clock(busiest);
88b8dac0
SV
8532
8533 /*
8534 * cur_ld_moved - load moved in current iteration
8535 * ld_moved - cumulative load moved across iterations
8536 */
163122b7 8537 cur_ld_moved = detach_tasks(&env);
1e3c88bd
PZ
8538
8539 /*
163122b7
KT
8540 * We've detached some tasks from busiest_rq. Every
8541 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
8542 * unlock busiest->lock, and we are able to be sure
8543 * that nobody can manipulate the tasks in parallel.
8544 * See task_rq_lock() family for the details.
1e3c88bd 8545 */
163122b7 8546
8a8c69c3 8547 rq_unlock(busiest, &rf);
163122b7
KT
8548
8549 if (cur_ld_moved) {
8550 attach_tasks(&env);
8551 ld_moved += cur_ld_moved;
8552 }
8553
8a8c69c3 8554 local_irq_restore(rf.flags);
88b8dac0 8555
f1cd0858
JK
8556 if (env.flags & LBF_NEED_BREAK) {
8557 env.flags &= ~LBF_NEED_BREAK;
8558 goto more_balance;
8559 }
8560
88b8dac0
SV
8561 /*
8562 * Revisit (affine) tasks on src_cpu that couldn't be moved to
8563 * us and move them to an alternate dst_cpu in our sched_group
8564 * where they can run. The upper limit on how many times we
97fb7a0a 8565	 * iterate on the same src_cpu depends on the number of CPUs in our
88b8dac0
SV
8566 * sched_group.
8567 *
8568 * This changes load balance semantics a bit on who can move
8569 * load to a given_cpu. In addition to the given_cpu itself
8570	 * (or an ilb_cpu acting on its behalf where given_cpu is
8571 * nohz-idle), we now have balance_cpu in a position to move
8572 * load to given_cpu. In rare situations, this may cause
8573 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
8574 * _independently_ and at _same_ time to move some load to
8575	 * given_cpu) causing excess load to be moved to given_cpu.
8576 * This however should not happen so much in practice and
8577 * moreover subsequent load balance cycles should correct the
8578 * excess load moved.
8579 */
6263322c 8580 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 8581
97fb7a0a 8582 /* Prevent to re-select dst_cpu via env's CPUs */
7aff2e3a
VD
8583 cpumask_clear_cpu(env.dst_cpu, env.cpus);
8584
78feefc5 8585 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 8586 env.dst_cpu = env.new_dst_cpu;
6263322c 8587 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
8588 env.loop = 0;
8589 env.loop_break = sched_nr_migrate_break;
e02e60c1 8590
88b8dac0
SV
8591 /*
8592 * Go back to "more_balance" rather than "redo" since we
8593 * need to continue with same src_cpu.
8594 */
8595 goto more_balance;
8596 }
1e3c88bd 8597
6263322c
PZ
8598 /*
8599 * We failed to reach balance because of affinity.
8600 */
8601 if (sd_parent) {
63b2ca30 8602 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
6263322c 8603
afdeee05 8604 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
6263322c 8605 *group_imbalance = 1;
6263322c
PZ
8606 }
8607
1e3c88bd 8608 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 8609 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 8610 cpumask_clear_cpu(cpu_of(busiest), cpus);
65a4433a
JH
8611 /*
8612 * Attempting to continue load balancing at the current
8613 * sched_domain level only makes sense if there are
8614 * active CPUs remaining as possible busiest CPUs to
8615 * pull load from which are not contained within the
8616 * destination group that is receiving any migrated
8617 * load.
8618 */
8619 if (!cpumask_subset(cpus, env.dst_grpmask)) {
bbf18b19
PN
8620 env.loop = 0;
8621 env.loop_break = sched_nr_migrate_break;
1e3c88bd 8622 goto redo;
bbf18b19 8623 }
afdeee05 8624 goto out_all_pinned;
1e3c88bd
PZ
8625 }
8626 }
8627
8628 if (!ld_moved) {
ae92882e 8629 schedstat_inc(sd->lb_failed[idle]);
58b26c4c
VP
8630 /*
8631 * Increment the failure counter only on periodic balance.
8632 * We do not want newidle balance, which can be very
8633	 * frequent, to pollute the failure counter, causing
8634 * excessive cache_hot migrations and active balances.
8635 */
8636 if (idle != CPU_NEWLY_IDLE)
8637 sd->nr_balance_failed++;
1e3c88bd 8638
bd939f45 8639 if (need_active_balance(&env)) {
8a8c69c3
PZ
8640 unsigned long flags;
8641
1e3c88bd
PZ
8642 raw_spin_lock_irqsave(&busiest->lock, flags);
8643
97fb7a0a
IM
8644 /*
8645 * Don't kick the active_load_balance_cpu_stop,
8646 * if the curr task on busiest CPU can't be
8647 * moved to this_cpu:
1e3c88bd 8648 */
0c98d344 8649 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
1e3c88bd
PZ
8650 raw_spin_unlock_irqrestore(&busiest->lock,
8651 flags);
8e45cb54 8652 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
8653 goto out_one_pinned;
8654 }
8655
969c7921
TH
8656 /*
8657 * ->active_balance synchronizes accesses to
8658 * ->active_balance_work. Once set, it's cleared
8659 * only after active load balance is finished.
8660 */
1e3c88bd
PZ
8661 if (!busiest->active_balance) {
8662 busiest->active_balance = 1;
8663 busiest->push_cpu = this_cpu;
8664 active_balance = 1;
8665 }
8666 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 8667
bd939f45 8668 if (active_balance) {
969c7921
TH
8669 stop_one_cpu_nowait(cpu_of(busiest),
8670 active_load_balance_cpu_stop, busiest,
8671 &busiest->active_balance_work);
bd939f45 8672 }
1e3c88bd 8673
d02c0711 8674 /* We've kicked active balancing, force task migration. */
1e3c88bd
PZ
8675 sd->nr_balance_failed = sd->cache_nice_tries+1;
8676 }
8677 } else
8678 sd->nr_balance_failed = 0;
8679
8680 if (likely(!active_balance)) {
8681 /* We were unbalanced, so reset the balancing interval */
8682 sd->balance_interval = sd->min_interval;
8683 } else {
8684 /*
8685 * If we've begun active balancing, start to back off. This
8686 * case may not be covered by the all_pinned logic if there
8687 * is only 1 task on the busy runqueue (because we don't call
163122b7 8688 * detach_tasks).
1e3c88bd
PZ
8689 */
8690 if (sd->balance_interval < sd->max_interval)
8691 sd->balance_interval *= 2;
8692 }
8693
1e3c88bd
PZ
8694 goto out;
8695
8696out_balanced:
afdeee05
VG
8697 /*
8698 * We reach balance although we may have faced some affinity
8699 * constraints. Clear the imbalance flag if it was set.
8700 */
8701 if (sd_parent) {
8702 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8703
8704 if (*group_imbalance)
8705 *group_imbalance = 0;
8706 }
8707
8708out_all_pinned:
8709 /*
8710 * We reach balance because all tasks are pinned at this level so
8711	 * we can't migrate them. Leave the imbalance flag set so the parent level
8712 * can try to migrate them.
8713 */
ae92882e 8714 schedstat_inc(sd->lb_balanced[idle]);
1e3c88bd
PZ
8715
8716 sd->nr_balance_failed = 0;
8717
8718out_one_pinned:
8719 /* tune up the balancing interval */
8e45cb54 8720 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 8721 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
8722 (sd->balance_interval < sd->max_interval))
8723 sd->balance_interval *= 2;
8724
46e49b38 8725 ld_moved = 0;
1e3c88bd 8726out:
1e3c88bd
PZ
8727 return ld_moved;
8728}
8729
52a08ef1
JL
8730static inline unsigned long
8731get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
8732{
8733 unsigned long interval = sd->balance_interval;
8734
8735 if (cpu_busy)
8736 interval *= sd->busy_factor;
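	/*
	 * Illustration (made-up values): a balance_interval of 8 ms with a
	 * busy_factor of 32 stretches the interval for a busy CPU to 256 ms,
	 * subject to the clamp against max_load_balance_interval below.
	 */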
8737
8738 /* scale ms to jiffies */
8739 interval = msecs_to_jiffies(interval);
8740 interval = clamp(interval, 1UL, max_load_balance_interval);
8741
8742 return interval;
8743}
8744
8745static inline void
31851a98 8746update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
52a08ef1
JL
8747{
8748 unsigned long interval, next;
8749
31851a98
LY
8750 /* used by idle balance, so cpu_busy = 0 */
8751 interval = get_sd_balance_interval(sd, 0);
52a08ef1
JL
8752 next = sd->last_balance + interval;
8753
8754 if (time_after(*next_balance, next))
8755 *next_balance = next;
8756}
8757
1e3c88bd 8758/*
97fb7a0a 8759 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
969c7921
TH
8760 * running tasks off the busiest CPU onto idle CPUs. It requires at
8761 * least 1 task to be running on each physical CPU where possible, and
8762 * avoids physical / logical imbalances.
1e3c88bd 8763 */
969c7921 8764static int active_load_balance_cpu_stop(void *data)
1e3c88bd 8765{
969c7921
TH
8766 struct rq *busiest_rq = data;
8767 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 8768 int target_cpu = busiest_rq->push_cpu;
969c7921 8769 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 8770 struct sched_domain *sd;
e5673f28 8771 struct task_struct *p = NULL;
8a8c69c3 8772 struct rq_flags rf;
969c7921 8773
8a8c69c3 8774 rq_lock_irq(busiest_rq, &rf);
edd8e41d
PZ
8775 /*
8776 * Between queueing the stop-work and running it is a hole in which
8777 * CPUs can become inactive. We should not move tasks from or to
8778 * inactive CPUs.
8779 */
8780 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
8781 goto out_unlock;
969c7921 8782
97fb7a0a 8783 /* Make sure the requested CPU hasn't gone down in the meantime: */
969c7921
TH
8784 if (unlikely(busiest_cpu != smp_processor_id() ||
8785 !busiest_rq->active_balance))
8786 goto out_unlock;
1e3c88bd
PZ
8787
8788 /* Is there any task to move? */
8789 if (busiest_rq->nr_running <= 1)
969c7921 8790 goto out_unlock;
1e3c88bd
PZ
8791
8792 /*
8793	 * This condition is "impossible"; if it occurs
8794 * we need to fix it. Originally reported by
97fb7a0a 8795 * Bjorn Helgaas on a 128-CPU setup.
1e3c88bd
PZ
8796 */
8797 BUG_ON(busiest_rq == target_rq);
8798
1e3c88bd 8799 /* Search for an sd spanning us and the target CPU. */
dce840a0 8800 rcu_read_lock();
1e3c88bd
PZ
8801 for_each_domain(target_cpu, sd) {
8802 if ((sd->flags & SD_LOAD_BALANCE) &&
8803 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8804 break;
8805 }
8806
8807 if (likely(sd)) {
8e45cb54
PZ
8808 struct lb_env env = {
8809 .sd = sd,
ddcdf6e7
PZ
8810 .dst_cpu = target_cpu,
8811 .dst_rq = target_rq,
8812 .src_cpu = busiest_rq->cpu,
8813 .src_rq = busiest_rq,
8e45cb54 8814 .idle = CPU_IDLE,
65a4433a
JH
8815 /*
8816 * can_migrate_task() doesn't need to compute new_dst_cpu
8817	 * for active balancing. Since we have CPU_IDLE but no
8818	 * @dst_grpmask, we need to make that test go away by lying
8819 * about DST_PINNED.
8820 */
8821 .flags = LBF_DST_PINNED,
8e45cb54
PZ
8822 };
8823
ae92882e 8824 schedstat_inc(sd->alb_count);
3bed5e21 8825 update_rq_clock(busiest_rq);
1e3c88bd 8826
e5673f28 8827 p = detach_one_task(&env);
d02c0711 8828 if (p) {
ae92882e 8829 schedstat_inc(sd->alb_pushed);
d02c0711
SD
8830 /* Active balancing done, reset the failure counter. */
8831 sd->nr_balance_failed = 0;
8832 } else {
ae92882e 8833 schedstat_inc(sd->alb_failed);
d02c0711 8834 }
1e3c88bd 8835 }
dce840a0 8836 rcu_read_unlock();
969c7921
TH
8837out_unlock:
8838 busiest_rq->active_balance = 0;
8a8c69c3 8839 rq_unlock(busiest_rq, &rf);
e5673f28
KT
8840
8841 if (p)
8842 attach_one_task(target_rq, p);
8843
8844 local_irq_enable();
8845
969c7921 8846 return 0;
1e3c88bd
PZ
8847}
8848
af3fe03c
PZ
8849static DEFINE_SPINLOCK(balancing);
8850
8851/*
8852 * Scale the max load_balance interval with the number of CPUs in the system.
8853 * This trades load-balance latency on larger machines for less cross talk.
8854 */
8855void update_max_interval(void)
8856{
8857 max_load_balance_interval = HZ*num_online_cpus()/10;
8858}
8859
8860/*
8861 * It checks each scheduling domain to see if it is due to be balanced,
8862 * and initiates a balancing operation if so.
8863 *
8864 * Balancing parameters are set up in init_sched_domains.
8865 */
8866static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8867{
8868 int continue_balancing = 1;
8869 int cpu = rq->cpu;
8870 unsigned long interval;
8871 struct sched_domain *sd;
8872 /* Earliest time when we have to do rebalance again */
8873 unsigned long next_balance = jiffies + 60*HZ;
8874 int update_next_balance = 0;
8875 int need_serialize, need_decay = 0;
8876 u64 max_cost = 0;
8877
8878 rcu_read_lock();
8879 for_each_domain(cpu, sd) {
8880 /*
8881 * Decay the newidle max times here because this is a regular
8882 * visit to all the domains. Decay ~1% per second.
8883 */
8884 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8885 sd->max_newidle_lb_cost =
8886 (sd->max_newidle_lb_cost * 253) / 256;
8887 sd->next_decay_max_lb_cost = jiffies + HZ;
8888 need_decay = 1;
8889 }
8890 max_cost += sd->max_newidle_lb_cost;
8891
8892 if (!(sd->flags & SD_LOAD_BALANCE))
8893 continue;
8894
8895 /*
8896 * Stop the load balance at this level. There is another
8897 * CPU in our sched group which is doing load balancing more
8898 * actively.
8899 */
8900 if (!continue_balancing) {
8901 if (need_decay)
8902 continue;
8903 break;
8904 }
8905
8906 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8907
8908 need_serialize = sd->flags & SD_SERIALIZE;
8909 if (need_serialize) {
8910 if (!spin_trylock(&balancing))
8911 goto out;
8912 }
8913
8914 if (time_after_eq(jiffies, sd->last_balance + interval)) {
8915 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
8916 /*
8917 * The LBF_DST_PINNED logic could have changed
8918 * env->dst_cpu, so we can't know our idle
8919 * state even if we migrated tasks. Update it.
8920 */
8921 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
8922 }
8923 sd->last_balance = jiffies;
8924 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8925 }
8926 if (need_serialize)
8927 spin_unlock(&balancing);
8928out:
8929 if (time_after(next_balance, sd->last_balance + interval)) {
8930 next_balance = sd->last_balance + interval;
8931 update_next_balance = 1;
8932 }
8933 }
8934 if (need_decay) {
8935 /*
8936 * Ensure the rq-wide value also decays but keep it at a
8937 * reasonable floor to avoid funnies with rq->avg_idle.
8938 */
8939 rq->max_idle_balance_cost =
8940 max((u64)sysctl_sched_migration_cost, max_cost);
8941 }
8942 rcu_read_unlock();
8943
8944 /*
8945 * next_balance will be updated only when there is a need.
8946	 * When the CPU is attached to a null domain, for example, it will not be
8947 * updated.
8948 */
8949 if (likely(update_next_balance)) {
8950 rq->next_balance = next_balance;
8951
8952#ifdef CONFIG_NO_HZ_COMMON
8953 /*
8954		 * If this CPU has been elected to perform the nohz idle
8955		 * balance, the other idle CPUs have already rebalanced with
8956		 * nohz_idle_balance() and nohz.next_balance has been
8957		 * updated accordingly. This CPU is now running the idle load
8958		 * balance for itself and needs to update
8959		 * nohz.next_balance accordingly.
8960 */
8961 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8962 nohz.next_balance = rq->next_balance;
8963#endif
8964 }
8965}
8966
d987fc7f
MG
8967static inline int on_null_domain(struct rq *rq)
8968{
8969 return unlikely(!rcu_dereference_sched(rq->sd));
8970}
8971
3451d024 8972#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
8973/*
8974 * idle load balancing details
83cd4fe2
VP
8975 * - When one of the busy CPUs notices that there may be an idle rebalancing
8976 *   needed, it will kick the idle load balancer, which then does idle
8977 * load balancing for all the idle CPUs.
8978 */
1e3c88bd 8979
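/*
 * Pick the first CPU from nohz.idle_cpus_mask that is still idle as the idle
 * load balancer. Returns nr_cpu_ids when no such CPU exists, which kick_ilb()
 * treats as "nobody to kick".
 */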
3dd0337d 8980static inline int find_new_ilb(void)
1e3c88bd 8981{
0b005cf5 8982 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 8983
786d6dc7
SS
8984 if (ilb < nr_cpu_ids && idle_cpu(ilb))
8985 return ilb;
8986
8987 return nr_cpu_ids;
1e3c88bd 8988}
1e3c88bd 8989
83cd4fe2
VP
8990/*
8991 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8992 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
8993 * CPU (if there is one).
8994 */
a4064fb6 8995static void kick_ilb(unsigned int flags)
83cd4fe2
VP
8996{
8997 int ilb_cpu;
8998
8999 nohz.next_balance++;
9000
3dd0337d 9001 ilb_cpu = find_new_ilb();
83cd4fe2 9002
0b005cf5
SS
9003 if (ilb_cpu >= nr_cpu_ids)
9004 return;
83cd4fe2 9005
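	/*
	 * atomic_fetch_or() returns the previous flag word; if a kick was
	 * already pending on ilb_cpu, the IPI below has been (or is being)
	 * sent by someone else, so there is nothing more to do.
	 */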
a4064fb6 9006 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
b7031a02 9007 if (flags & NOHZ_KICK_MASK)
1c792db7 9008 return;
4550487a 9009
1c792db7
SS
9010 /*
9011 * Use smp_send_reschedule() instead of resched_cpu().
97fb7a0a 9012 * This way we generate a sched IPI on the target CPU which
1c792db7
SS
9013 * is idle. And the softirq performing nohz idle load balance
9014 * will be run before returning from the IPI.
9015 */
9016 smp_send_reschedule(ilb_cpu);
4550487a
PZ
9017}
9018
9019/*
9020 * Current heuristic for kicking the idle load balancer in the presence
9021 * of an idle CPU in the system.
9022 * - This rq has more than one task.
9023 * - This rq has at least one CFS task and the capacity of the CPU is
9024 * significantly reduced because of RT tasks or IRQs.
9025 * - At the parent of the LLC scheduler domain level, this CPU's scheduler
9026 *   group has multiple busy CPUs.
9027 * - For SD_ASYM_PACKING, if the lower-numbered CPUs in the scheduler
9028 * domain span are idle.
9029 */
9030static void nohz_balancer_kick(struct rq *rq)
9031{
9032 unsigned long now = jiffies;
9033 struct sched_domain_shared *sds;
9034 struct sched_domain *sd;
9035 int nr_busy, i, cpu = rq->cpu;
a4064fb6 9036 unsigned int flags = 0;
4550487a
PZ
9037
9038 if (unlikely(rq->idle_balance))
9039 return;
9040
9041 /*
9042	 * We may have recently been in ticked or tickless idle mode. At the first
9043 * busy tick after returning from idle, we will update the busy stats.
9044 */
00357f5e 9045 nohz_balance_exit_idle(rq);
4550487a
PZ
9046
9047 /*
9048 * None are in tickless mode and hence no need for NOHZ idle load
9049 * balancing.
9050 */
9051 if (likely(!atomic_read(&nohz.nr_cpus)))
9052 return;
9053
f643ea22
VG
9054 if (READ_ONCE(nohz.has_blocked) &&
9055 time_after(now, READ_ONCE(nohz.next_blocked)))
a4064fb6
PZ
9056 flags = NOHZ_STATS_KICK;
9057
4550487a 9058 if (time_before(now, nohz.next_balance))
a4064fb6 9059 goto out;
4550487a
PZ
9060
9061 if (rq->nr_running >= 2) {
a4064fb6 9062 flags = NOHZ_KICK_MASK;
4550487a
PZ
9063 goto out;
9064 }
9065
9066 rcu_read_lock();
9067 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
9068 if (sds) {
9069 /*
9070 * XXX: write a coherent comment on why we do this.
9071 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
9072 */
9073 nr_busy = atomic_read(&sds->nr_busy_cpus);
9074 if (nr_busy > 1) {
a4064fb6 9075 flags = NOHZ_KICK_MASK;
4550487a
PZ
9076 goto unlock;
9077 }
9078
9079 }
9080
9081 sd = rcu_dereference(rq->sd);
9082 if (sd) {
9083 if ((rq->cfs.h_nr_running >= 1) &&
9084 check_cpu_capacity(rq, sd)) {
a4064fb6 9085 flags = NOHZ_KICK_MASK;
4550487a
PZ
9086 goto unlock;
9087 }
9088 }
9089
9090 sd = rcu_dereference(per_cpu(sd_asym, cpu));
9091 if (sd) {
9092 for_each_cpu(i, sched_domain_span(sd)) {
9093 if (i == cpu ||
9094 !cpumask_test_cpu(i, nohz.idle_cpus_mask))
9095 continue;
9096
9097 if (sched_asym_prefer(i, cpu)) {
a4064fb6 9098 flags = NOHZ_KICK_MASK;
4550487a
PZ
9099 goto unlock;
9100 }
9101 }
9102 }
9103unlock:
9104 rcu_read_unlock();
9105out:
a4064fb6
PZ
9106 if (flags)
9107 kick_ilb(flags);
83cd4fe2
VP
9108}
9109
00357f5e 9110static void set_cpu_sd_state_busy(int cpu)
71325960 9111{
00357f5e 9112 struct sched_domain *sd;
a22e47a4 9113
00357f5e
PZ
9114 rcu_read_lock();
9115 sd = rcu_dereference(per_cpu(sd_llc, cpu));
a22e47a4 9116
00357f5e
PZ
9117 if (!sd || !sd->nohz_idle)
9118 goto unlock;
9119 sd->nohz_idle = 0;
9120
9121 atomic_inc(&sd->shared->nr_busy_cpus);
9122unlock:
9123 rcu_read_unlock();
71325960
SS
9124}
9125
00357f5e
PZ
9126void nohz_balance_exit_idle(struct rq *rq)
9127{
9128 SCHED_WARN_ON(rq != this_rq());
9129
9130 if (likely(!rq->nohz_tick_stopped))
9131 return;
9132
9133 rq->nohz_tick_stopped = 0;
9134 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
9135 atomic_dec(&nohz.nr_cpus);
9136
9137 set_cpu_sd_state_busy(rq->cpu);
9138}
9139
9140static void set_cpu_sd_state_idle(int cpu)
69e1e811
SS
9141{
9142 struct sched_domain *sd;
69e1e811 9143
69e1e811 9144 rcu_read_lock();
0e369d75 9145 sd = rcu_dereference(per_cpu(sd_llc, cpu));
25f55d9d
VG
9146
9147 if (!sd || sd->nohz_idle)
9148 goto unlock;
9149 sd->nohz_idle = 1;
9150
0e369d75 9151 atomic_dec(&sd->shared->nr_busy_cpus);
25f55d9d 9152unlock:
69e1e811
SS
9153 rcu_read_unlock();
9154}
9155
1e3c88bd 9156/*
97fb7a0a 9157 * This routine will record that the CPU is going idle with tick stopped.
0b005cf5 9158 * This info will be used in performing idle load balancing in the future.
1e3c88bd 9159 */
c1cc017c 9160void nohz_balance_enter_idle(int cpu)
1e3c88bd 9161{
00357f5e
PZ
9162 struct rq *rq = cpu_rq(cpu);
9163
9164 SCHED_WARN_ON(cpu != smp_processor_id());
9165
97fb7a0a 9166 /* If this CPU is going down, then nothing needs to be done: */
71325960
SS
9167 if (!cpu_active(cpu))
9168 return;
9169
387bc8b5 9170 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
de201559 9171 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
387bc8b5
FW
9172 return;
9173
f643ea22
VG
9174 /*
9175	 * Can be set safely without rq->lock held.
9176	 * If a clear happens, it will have evaluated the last additions, because
9177	 * rq->lock is held during the check and the clear.
9178 */
9179 rq->has_blocked_load = 1;
9180
9181 /*
9182 * The tick is still stopped but load could have been added in the
9183	 * meantime. We set the nohz.has_blocked flag to trigger a check of the
9184	 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
9185	 * of nohz.has_blocked can only happen after checking the new load.
9186 */
00357f5e 9187 if (rq->nohz_tick_stopped)
f643ea22 9188 goto out;
1e3c88bd 9189
97fb7a0a 9190 /* If we're a completely isolated CPU, we don't play: */
00357f5e 9191 if (on_null_domain(rq))
d987fc7f
MG
9192 return;
9193
00357f5e
PZ
9194 rq->nohz_tick_stopped = 1;
9195
c1cc017c
AS
9196 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
9197 atomic_inc(&nohz.nr_cpus);
00357f5e 9198
f643ea22
VG
9199 /*
9200 * Ensures that if nohz_idle_balance() fails to observe our
9201 * @idle_cpus_mask store, it must observe the @has_blocked
9202 * store.
9203 */
9204 smp_mb__after_atomic();
9205
00357f5e 9206 set_cpu_sd_state_idle(cpu);
f643ea22
VG
9207
9208out:
9209 /*
9210	 * Each time a CPU enters idle, we assume that it has blocked load and
9211	 * enable the periodic update of the load of idle CPUs.
9212 */
9213 WRITE_ONCE(nohz.has_blocked, 1);
1e3c88bd 9214}
1e3c88bd 9215
1e3c88bd 9216/*
31e77c93
VG
9217 * Internal function that runs load balance for all idle CPUs. The load balance
9218 * can be a simple update of blocked load or a complete load balance with
9219 * task movement, depending on the flags.
9220 * The function returns false if the loop has stopped before running
9221 * through all idle CPUs.
1e3c88bd 9222 */
31e77c93
VG
9223static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
9224 enum cpu_idle_type idle)
83cd4fe2 9225{
c5afb6a8 9226 /* Earliest time when we have to do rebalance again */
a4064fb6
PZ
9227 unsigned long now = jiffies;
9228 unsigned long next_balance = now + 60*HZ;
f643ea22 9229 bool has_blocked_load = false;
c5afb6a8 9230 int update_next_balance = 0;
b7031a02 9231 int this_cpu = this_rq->cpu;
b7031a02 9232 int balance_cpu;
31e77c93 9233 int ret = false;
b7031a02 9234 struct rq *rq;
83cd4fe2 9235
b7031a02 9236 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
83cd4fe2 9237
f643ea22
VG
9238 /*
9239 * We assume there will be no idle load after this update and clear
9240	 * the has_blocked flag. If a CPU enters idle in the meantime, it will
9241	 * set the has_blocked flag and trigger another update of idle load.
9242	 * Because a CPU that becomes idle is added to idle_cpus_mask before
9243	 * setting the flag, we are sure to not clear the state and not
9244	 * check the load of an idle CPU.
9245 */
9246 WRITE_ONCE(nohz.has_blocked, 0);
9247
9248 /*
9249 * Ensures that if we miss the CPU, we must see the has_blocked
9250 * store from nohz_balance_enter_idle().
9251 */
9252 smp_mb();
9253
83cd4fe2 9254 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 9255 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
9256 continue;
9257
9258 /*
97fb7a0a
IM
9259 * If this CPU gets work to do, stop the load balancing
9260 * work being done for other CPUs. Next load
83cd4fe2
VP
9261 * balancing owner will pick it up.
9262 */
f643ea22
VG
9263 if (need_resched()) {
9264 has_blocked_load = true;
9265 goto abort;
9266 }
83cd4fe2 9267
5ed4f1d9
VG
9268 rq = cpu_rq(balance_cpu);
9269
63928384 9270 has_blocked_load |= update_nohz_stats(rq, true);
f643ea22 9271
ed61bbc6
TC
9272 /*
9273 * If time for next balance is due,
9274 * do the balance.
9275 */
9276 if (time_after_eq(jiffies, rq->next_balance)) {
8a8c69c3
PZ
9277 struct rq_flags rf;
9278
31e77c93 9279 rq_lock_irqsave(rq, &rf);
ed61bbc6 9280 update_rq_clock(rq);
cee1afce 9281 cpu_load_update_idle(rq);
31e77c93 9282 rq_unlock_irqrestore(rq, &rf);
8a8c69c3 9283
b7031a02
PZ
9284 if (flags & NOHZ_BALANCE_KICK)
9285 rebalance_domains(rq, CPU_IDLE);
ed61bbc6 9286 }
83cd4fe2 9287
c5afb6a8
VG
9288 if (time_after(next_balance, rq->next_balance)) {
9289 next_balance = rq->next_balance;
9290 update_next_balance = 1;
9291 }
83cd4fe2 9292 }
c5afb6a8 9293
31e77c93
VG
9294 /* Newly idle CPU doesn't need an update */
9295 if (idle != CPU_NEWLY_IDLE) {
9296 update_blocked_averages(this_cpu);
9297 has_blocked_load |= this_rq->has_blocked_load;
9298 }
9299
b7031a02
PZ
9300 if (flags & NOHZ_BALANCE_KICK)
9301 rebalance_domains(this_rq, CPU_IDLE);
9302
f643ea22
VG
9303 WRITE_ONCE(nohz.next_blocked,
9304 now + msecs_to_jiffies(LOAD_AVG_PERIOD));
9305
31e77c93
VG
9306 /* The full idle balance loop has been done */
9307 ret = true;
9308
f643ea22
VG
9309abort:
9310 /* There is still blocked load, enable periodic update */
9311 if (has_blocked_load)
9312 WRITE_ONCE(nohz.has_blocked, 1);
a4064fb6 9313
c5afb6a8
VG
9314 /*
9315 * next_balance will be updated only when there is a need.
9316	 * When the CPU is attached to a null domain, for example, it will not be
9317 * updated.
9318 */
9319 if (likely(update_next_balance))
9320 nohz.next_balance = next_balance;
b7031a02 9321
31e77c93
VG
9322 return ret;
9323}
9324
9325/*
9326 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
9327 * rebalancing for all the CPUs for which scheduler ticks are stopped.
9328 */
9329static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
9330{
9331 int this_cpu = this_rq->cpu;
9332 unsigned int flags;
9333
9334 if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
9335 return false;
9336
9337 if (idle != CPU_IDLE) {
9338 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
9339 return false;
9340 }
9341
9342 /*
9343 * barrier, pairs with nohz_balance_enter_idle(), ensures ...
9344 */
9345 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
9346 if (!(flags & NOHZ_KICK_MASK))
9347 return false;
9348
9349 _nohz_idle_balance(this_rq, flags, idle);
9350
b7031a02 9351 return true;
83cd4fe2 9352}
31e77c93
VG
9353
9354static void nohz_newidle_balance(struct rq *this_rq)
9355{
9356 int this_cpu = this_rq->cpu;
9357
9358 /*
9359 * This CPU doesn't want to be disturbed by scheduler
9360 * housekeeping
9361 */
9362 if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
9363 return;
9364
9365	/* Will wake up very soon. No time for doing anything else */
9366 if (this_rq->avg_idle < sysctl_sched_migration_cost)
9367 return;
9368
9369	/* Don't need to update blocked load of idle CPUs */
9370 if (!READ_ONCE(nohz.has_blocked) ||
9371 time_before(jiffies, READ_ONCE(nohz.next_blocked)))
9372 return;
9373
9374 raw_spin_unlock(&this_rq->lock);
9375 /*
9376	 * This CPU is going to be idle and the blocked load of idle CPUs
9377	 * needs to be updated. Run the ilb locally as it is a good
9378	 * candidate for ilb instead of waking up another idle CPU.
9379	 * Kick a normal ilb if we failed to do the update.
9380 */
9381 if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
9382 kick_ilb(NOHZ_STATS_KICK);
9383 raw_spin_lock(&this_rq->lock);
9384}
9385
dd707247
PZ
9386#else /* !CONFIG_NO_HZ_COMMON */
9387static inline void nohz_balancer_kick(struct rq *rq) { }
9388
31e77c93 9389static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
b7031a02
PZ
9390{
9391 return false;
9392}
31e77c93
VG
9393
9394static inline void nohz_newidle_balance(struct rq *this_rq) { }
dd707247 9395#endif /* CONFIG_NO_HZ_COMMON */
83cd4fe2 9396
47ea5412
PZ
9397/*
9398 * idle_balance is called by schedule() if this_cpu is about to become
9399 * idle. Attempts to pull tasks from other CPUs.
9400 */
9401static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
9402{
9403 unsigned long next_balance = jiffies + HZ;
9404 int this_cpu = this_rq->cpu;
9405 struct sched_domain *sd;
9406 int pulled_task = 0;
9407 u64 curr_cost = 0;
9408
9409 /*
9410 * We must set idle_stamp _before_ calling idle_balance(), such that we
9411 * measure the duration of idle_balance() as idle time.
9412 */
9413 this_rq->idle_stamp = rq_clock(this_rq);
9414
9415 /*
9416 * Do not pull tasks towards !active CPUs...
9417 */
9418 if (!cpu_active(this_cpu))
9419 return 0;
9420
9421 /*
9422 * This is OK, because current is on_cpu, which avoids it being picked
9423	 * for load-balance; preemption/IRQs are still disabled, avoiding
9424	 * further scheduler activity on it; and we're being very careful to
9425 * re-start the picking loop.
9426 */
9427 rq_unpin_lock(this_rq, rf);
9428
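	/*
	 * Skip the relatively expensive newidle balance when recent idle
	 * periods have been shorter than sysctl_sched_migration_cost
	 * (0.5 ms by default) or when no runqueue in the root domain is
	 * overloaded; just refresh next_balance and the nohz stats.
	 */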
9429 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
9430 !this_rq->rd->overload) {
31e77c93 9431
47ea5412
PZ
9432 rcu_read_lock();
9433 sd = rcu_dereference_check_sched_domain(this_rq->sd);
9434 if (sd)
9435 update_next_balance(sd, &next_balance);
9436 rcu_read_unlock();
9437
31e77c93
VG
9438 nohz_newidle_balance(this_rq);
9439
47ea5412
PZ
9440 goto out;
9441 }
9442
9443 raw_spin_unlock(&this_rq->lock);
9444
9445 update_blocked_averages(this_cpu);
9446 rcu_read_lock();
9447 for_each_domain(this_cpu, sd) {
9448 int continue_balancing = 1;
9449 u64 t0, domain_cost;
9450
9451 if (!(sd->flags & SD_LOAD_BALANCE))
9452 continue;
9453
9454 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
9455 update_next_balance(sd, &next_balance);
9456 break;
9457 }
9458
9459 if (sd->flags & SD_BALANCE_NEWIDLE) {
9460 t0 = sched_clock_cpu(this_cpu);
9461
9462 pulled_task = load_balance(this_cpu, this_rq,
9463 sd, CPU_NEWLY_IDLE,
9464 &continue_balancing);
9465
9466 domain_cost = sched_clock_cpu(this_cpu) - t0;
9467 if (domain_cost > sd->max_newidle_lb_cost)
9468 sd->max_newidle_lb_cost = domain_cost;
9469
9470 curr_cost += domain_cost;
9471 }
9472
9473 update_next_balance(sd, &next_balance);
9474
9475 /*
9476 * Stop searching for tasks to pull if there are
9477 * now runnable tasks on this rq.
9478 */
9479 if (pulled_task || this_rq->nr_running > 0)
9480 break;
9481 }
9482 rcu_read_unlock();
9483
9484 raw_spin_lock(&this_rq->lock);
9485
9486 if (curr_cost > this_rq->max_idle_balance_cost)
9487 this_rq->max_idle_balance_cost = curr_cost;
9488
457be908 9489out:
47ea5412
PZ
9490 /*
9491	 * While browsing the domains, we released the rq lock; a task could
9492 * have been enqueued in the meantime. Since we're not going idle,
9493 * pretend we pulled a task.
9494 */
9495 if (this_rq->cfs.h_nr_running && !pulled_task)
9496 pulled_task = 1;
9497
47ea5412
PZ
9498 /* Move the next balance forward */
9499 if (time_after(this_rq->next_balance, next_balance))
9500 this_rq->next_balance = next_balance;
9501
9502 /* Is there a task of a high priority class? */
9503 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
9504 pulled_task = -1;
9505
9506 if (pulled_task)
9507 this_rq->idle_stamp = 0;
9508
9509 rq_repin_lock(this_rq, rf);
9510
9511 return pulled_task;
9512}
9513
83cd4fe2
VP
9514/*
9515 * run_rebalance_domains is triggered when needed from the scheduler tick.
9516 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
9517 */
0766f788 9518static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
1e3c88bd 9519{
208cb16b 9520 struct rq *this_rq = this_rq();
6eb57e0d 9521 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
9522 CPU_IDLE : CPU_NOT_IDLE;
9523
1e3c88bd 9524 /*
97fb7a0a
IM
9525 * If this CPU has a pending nohz_balance_kick, then do the
9526 * balancing on behalf of the other idle CPUs whose ticks are
d4573c3e 9527 * stopped. Do nohz_idle_balance *before* rebalance_domains to
97fb7a0a 9528 * give the idle CPUs a chance to load balance. Else we may
d4573c3e
PM
9529 * load balance only within the local sched_domain hierarchy
9530 * and abort nohz_idle_balance altogether if we pull some load.
1e3c88bd 9531 */
b7031a02
PZ
9532 if (nohz_idle_balance(this_rq, idle))
9533 return;
9534
9535 /* normal load balance */
9536 update_blocked_averages(this_rq->cpu);
d4573c3e 9537 rebalance_domains(this_rq, idle);
1e3c88bd
PZ
9538}
9539
1e3c88bd
PZ
9540/*
9541 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 9542 */
7caff66f 9543void trigger_load_balance(struct rq *rq)
1e3c88bd 9544{
1e3c88bd 9545 /* Don't need to rebalance while attached to NULL domain */
c726099e
DL
9546 if (unlikely(on_null_domain(rq)))
9547 return;
9548
9549 if (time_after_eq(jiffies, rq->next_balance))
1e3c88bd 9550 raise_softirq(SCHED_SOFTIRQ);
4550487a
PZ
9551
9552 nohz_balancer_kick(rq);
1e3c88bd
PZ
9553}
9554
0bcdcf28
CE
9555static void rq_online_fair(struct rq *rq)
9556{
9557 update_sysctl();
0e59bdae
KT
9558
9559 update_runtime_enabled(rq);
0bcdcf28
CE
9560}
9561
9562static void rq_offline_fair(struct rq *rq)
9563{
9564 update_sysctl();
a4c96ae3
PB
9565
9566 /* Ensure any throttled groups are reachable by pick_next_task */
9567 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
9568}
9569
55e12e5e 9570#endif /* CONFIG_SMP */
e1d1484f 9571
bf0f6f24 9572/*
d84b3131
FW
9573 * scheduler tick hitting a task of our scheduling class.
9574 *
9575 * NOTE: This function can be called remotely by the tick offload that
9576 * goes along full dynticks. Therefore no local assumption can be made
9577 * and everything must be accessed through the @rq and @curr passed in
9578 * parameters.
bf0f6f24 9579 */
8f4d37ec 9580static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
9581{
9582 struct cfs_rq *cfs_rq;
9583 struct sched_entity *se = &curr->se;
9584
9585 for_each_sched_entity(se) {
9586 cfs_rq = cfs_rq_of(se);
8f4d37ec 9587 entity_tick(cfs_rq, se, queued);
bf0f6f24 9588 }
18bf2805 9589
b52da86e 9590 if (static_branch_unlikely(&sched_numa_balancing))
cbee9f88 9591 task_tick_numa(rq, curr);
bf0f6f24
IM
9592}
9593
9594/*
cd29fe6f
PZ
9595 * called on fork with the child task as argument from the parent's context
9596 * - child not yet on the tasklist
9597 * - preemption disabled
bf0f6f24 9598 */
cd29fe6f 9599static void task_fork_fair(struct task_struct *p)
bf0f6f24 9600{
4fc420c9
DN
9601 struct cfs_rq *cfs_rq;
9602 struct sched_entity *se = &p->se, *curr;
cd29fe6f 9603 struct rq *rq = this_rq();
8a8c69c3 9604 struct rq_flags rf;
bf0f6f24 9605
8a8c69c3 9606 rq_lock(rq, &rf);
861d034e
PZ
9607 update_rq_clock(rq);
9608
4fc420c9
DN
9609 cfs_rq = task_cfs_rq(current);
9610 curr = cfs_rq->curr;
e210bffd
PZ
9611 if (curr) {
9612 update_curr(cfs_rq);
b5d9d734 9613 se->vruntime = curr->vruntime;
e210bffd 9614 }
aeb73b04 9615 place_entity(cfs_rq, se, 1);
4d78e7b6 9616
cd29fe6f 9617 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 9618 /*
edcb60a3
IM
9619 * Upon rescheduling, sched_class::put_prev_task() will place
9620 * 'current' within the tree based on its new key value.
9621 */
4d78e7b6 9622 swap(curr->vruntime, se->vruntime);
8875125e 9623 resched_curr(rq);
4d78e7b6 9624 }
bf0f6f24 9625
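	/*
	 * Make the child's vruntime relative to this cfs_rq's min_vruntime;
	 * it is added back against the destination cfs_rq when the task is
	 * enqueued from wake_up_new_task(), which may happen on another CPU.
	 */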
88ec22d3 9626 se->vruntime -= cfs_rq->min_vruntime;
8a8c69c3 9627 rq_unlock(rq, &rf);
bf0f6f24
IM
9628}
9629
cb469845
SR
9630/*
9631 * Priority of the task has changed. Check to see if we preempt
9632 * the current task.
9633 */
da7a735e
PZ
9634static void
9635prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 9636{
da0c1e65 9637 if (!task_on_rq_queued(p))
da7a735e
PZ
9638 return;
9639
cb469845
SR
9640 /*
9641 * Reschedule if we are currently running on this runqueue and
9642 * our priority decreased, or if we are not currently running on
9643 * this runqueue and our priority is higher than the current's
9644 */
da7a735e 9645 if (rq->curr == p) {
cb469845 9646 if (p->prio > oldprio)
8875125e 9647 resched_curr(rq);
cb469845 9648 } else
15afe09b 9649 check_preempt_curr(rq, p, 0);
cb469845
SR
9650}
9651
daa59407 9652static inline bool vruntime_normalized(struct task_struct *p)
da7a735e
PZ
9653{
9654 struct sched_entity *se = &p->se;
da7a735e
PZ
9655
9656 /*
daa59407
BP
9657 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
9658 * the dequeue_entity(.flags=0) will already have normalized the
9659 * vruntime.
9660 */
9661 if (p->on_rq)
9662 return true;
9663
9664 /*
9665 * When !on_rq, vruntime of the task has usually NOT been normalized.
9666 * But there are some cases where it has already been normalized:
da7a735e 9667 *
daa59407
BP
9668	 * - A forked child which is waiting to be woken up by
9669	 *   wake_up_new_task().
9670	 * - A task which has been woken up by try_to_wake_up() and is
9671	 *   waiting to actually be woken up by sched_ttwu_pending().
da7a735e 9672 */
daa59407
BP
9673 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
9674 return true;
9675
9676 return false;
9677}
9678
09a43ace
VG
9679#ifdef CONFIG_FAIR_GROUP_SCHED
9680/*
9681 * Propagate the changes of the sched_entity across the tg tree to make them
9682 * visible to the root.
9683 */
9684static void propagate_entity_cfs_rq(struct sched_entity *se)
9685{
9686 struct cfs_rq *cfs_rq;
9687
9688 /* Start to propagate at parent */
9689 se = se->parent;
9690
9691 for_each_sched_entity(se) {
9692 cfs_rq = cfs_rq_of(se);
9693
9694 if (cfs_rq_throttled(cfs_rq))
9695 break;
9696
88c0616e 9697 update_load_avg(cfs_rq, se, UPDATE_TG);
09a43ace
VG
9698 }
9699}
9700#else
9701static void propagate_entity_cfs_rq(struct sched_entity *se) { }
9702#endif
9703
df217913 9704static void detach_entity_cfs_rq(struct sched_entity *se)
daa59407 9705{
daa59407
BP
9706 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9707
9d89c257 9708 /* Catch up with the cfs_rq and remove our load when we leave */
88c0616e 9709 update_load_avg(cfs_rq, se, 0);
a05e8c51 9710 detach_entity_load_avg(cfs_rq, se);
7c3edd2c 9711 update_tg_load_avg(cfs_rq, false);
09a43ace 9712 propagate_entity_cfs_rq(se);
da7a735e
PZ
9713}
9714
df217913 9715static void attach_entity_cfs_rq(struct sched_entity *se)
cb469845 9716{
daa59407 9717 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7855a35a
BP
9718
9719#ifdef CONFIG_FAIR_GROUP_SCHED
eb7a59b2
M
9720 /*
9721 * Since the real-depth could have been changed (only FAIR
9722	 * class maintains the depth value), reset the depth properly.
9723 */
9724 se->depth = se->parent ? se->parent->depth + 1 : 0;
9725#endif
7855a35a 9726
df217913 9727 /* Synchronize entity with its cfs_rq */
88c0616e 9728 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
ea14b57e 9729 attach_entity_load_avg(cfs_rq, se, 0);
7c3edd2c 9730 update_tg_load_avg(cfs_rq, false);
09a43ace 9731 propagate_entity_cfs_rq(se);
df217913
VG
9732}
9733
9734static void detach_task_cfs_rq(struct task_struct *p)
9735{
9736 struct sched_entity *se = &p->se;
9737 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9738
9739 if (!vruntime_normalized(p)) {
9740 /*
9741 * Fix up our vruntime so that the current sleep doesn't
9742 * cause 'unlimited' sleep bonus.
9743 */
9744 place_entity(cfs_rq, se, 0);
9745 se->vruntime -= cfs_rq->min_vruntime;
9746 }
9747
9748 detach_entity_cfs_rq(se);
9749}
9750
9751static void attach_task_cfs_rq(struct task_struct *p)
9752{
9753 struct sched_entity *se = &p->se;
9754 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9755
9756 attach_entity_cfs_rq(se);
daa59407
BP
9757
9758 if (!vruntime_normalized(p))
9759 se->vruntime += cfs_rq->min_vruntime;
9760}
6efdb105 9761
daa59407
BP
9762static void switched_from_fair(struct rq *rq, struct task_struct *p)
9763{
9764 detach_task_cfs_rq(p);
9765}
9766
9767static void switched_to_fair(struct rq *rq, struct task_struct *p)
9768{
9769 attach_task_cfs_rq(p);
7855a35a 9770
daa59407 9771 if (task_on_rq_queued(p)) {
7855a35a 9772 /*
daa59407
BP
9773 * We were most likely switched from sched_rt, so
9774 * kick off the schedule if running, otherwise just see
9775 * if we can still preempt the current task.
7855a35a 9776 */
daa59407
BP
9777 if (rq->curr == p)
9778 resched_curr(rq);
9779 else
9780 check_preempt_curr(rq, p, 0);
7855a35a 9781 }
cb469845
SR
9782}
9783
83b699ed
SV
9784/* Account for a task changing its policy or group.
9785 *
9786 * This routine is mostly called to set cfs_rq->curr field when a task
9787 * migrates between groups/classes.
9788 */
9789static void set_curr_task_fair(struct rq *rq)
9790{
9791 struct sched_entity *se = &rq->curr->se;
9792
ec12cb7f
PT
9793 for_each_sched_entity(se) {
9794 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9795
9796 set_next_entity(cfs_rq, se);
9797 /* ensure bandwidth has been allocated on our new cfs_rq */
9798 account_cfs_rq_runtime(cfs_rq, 0);
9799 }
83b699ed
SV
9800}
9801
029632fb
PZ
9802void init_cfs_rq(struct cfs_rq *cfs_rq)
9803{
bfb06889 9804 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
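	/* start close to the u64 wrap point so vruntime overflow handling is exercised early */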
029632fb
PZ
9805 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
9806#ifndef CONFIG_64BIT
9807 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
9808#endif
141965c7 9809#ifdef CONFIG_SMP
2a2f5d4e 9810 raw_spin_lock_init(&cfs_rq->removed.lock);
9ee474f5 9811#endif
029632fb
PZ
9812}
9813
810b3817 9814#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
9815static void task_set_group_fair(struct task_struct *p)
9816{
9817 struct sched_entity *se = &p->se;
9818
9819 set_task_rq(p, task_cpu(p));
9820 se->depth = se->parent ? se->parent->depth + 1 : 0;
9821}
9822
bc54da21 9823static void task_move_group_fair(struct task_struct *p)
810b3817 9824{
daa59407 9825 detach_task_cfs_rq(p);
b2b5ce02 9826 set_task_rq(p, task_cpu(p));
6efdb105
BP
9827
9828#ifdef CONFIG_SMP
9829 /* Tell se's cfs_rq has been changed -- migrated */
9830 p->se.avg.last_update_time = 0;
9831#endif
daa59407 9832 attach_task_cfs_rq(p);
810b3817 9833}
029632fb 9834
ea86cb4b
VG
9835static void task_change_group_fair(struct task_struct *p, int type)
9836{
9837 switch (type) {
9838 case TASK_SET_GROUP:
9839 task_set_group_fair(p);
9840 break;
9841
9842 case TASK_MOVE_GROUP:
9843 task_move_group_fair(p);
9844 break;
9845 }
9846}
9847
029632fb
PZ
9848void free_fair_sched_group(struct task_group *tg)
9849{
9850 int i;
9851
9852 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
9853
9854 for_each_possible_cpu(i) {
9855 if (tg->cfs_rq)
9856 kfree(tg->cfs_rq[i]);
6fe1f348 9857 if (tg->se)
029632fb
PZ
9858 kfree(tg->se[i]);
9859 }
9860
9861 kfree(tg->cfs_rq);
9862 kfree(tg->se);
9863}
9864
9865int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9866{
029632fb 9867 struct sched_entity *se;
b7fa30c9 9868 struct cfs_rq *cfs_rq;
029632fb
PZ
9869 int i;
9870
6396bb22 9871 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
029632fb
PZ
9872 if (!tg->cfs_rq)
9873 goto err;
6396bb22 9874 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
029632fb
PZ
9875 if (!tg->se)
9876 goto err;
9877
9878 tg->shares = NICE_0_LOAD;
9879
9880 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
9881
9882 for_each_possible_cpu(i) {
9883 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9884 GFP_KERNEL, cpu_to_node(i));
9885 if (!cfs_rq)
9886 goto err;
9887
9888 se = kzalloc_node(sizeof(struct sched_entity),
9889 GFP_KERNEL, cpu_to_node(i));
9890 if (!se)
9891 goto err_free_rq;
9892
9893 init_cfs_rq(cfs_rq);
9894 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
540247fb 9895 init_entity_runnable_average(se);
029632fb
PZ
9896 }
9897
9898 return 1;
9899
9900err_free_rq:
9901 kfree(cfs_rq);
9902err:
9903 return 0;
9904}
9905
8663e24d
PZ
9906void online_fair_sched_group(struct task_group *tg)
9907{
9908 struct sched_entity *se;
9909 struct rq *rq;
9910 int i;
9911
9912 for_each_possible_cpu(i) {
9913 rq = cpu_rq(i);
9914 se = tg->se[i];
9915
9916 raw_spin_lock_irq(&rq->lock);
4126bad6 9917 update_rq_clock(rq);
d0326691 9918 attach_entity_cfs_rq(se);
55e16d30 9919 sync_throttle(tg, i);
8663e24d
PZ
9920 raw_spin_unlock_irq(&rq->lock);
9921 }
9922}
9923
6fe1f348 9924void unregister_fair_sched_group(struct task_group *tg)
029632fb 9925{
029632fb 9926 unsigned long flags;
6fe1f348
PZ
9927 struct rq *rq;
9928 int cpu;
029632fb 9929
6fe1f348
PZ
9930 for_each_possible_cpu(cpu) {
9931 if (tg->se[cpu])
9932 remove_entity_load_avg(tg->se[cpu]);
029632fb 9933
6fe1f348
PZ
9934 /*
9935 * Only empty task groups can be destroyed; so we can speculatively
9936 * check on_list without danger of it being re-added.
9937 */
9938 if (!tg->cfs_rq[cpu]->on_list)
9939 continue;
9940
9941 rq = cpu_rq(cpu);
9942
9943 raw_spin_lock_irqsave(&rq->lock, flags);
9944 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
9945 raw_spin_unlock_irqrestore(&rq->lock, flags);
9946 }
029632fb
PZ
9947}
9948
9949void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
9950 struct sched_entity *se, int cpu,
9951 struct sched_entity *parent)
9952{
9953 struct rq *rq = cpu_rq(cpu);
9954
9955 cfs_rq->tg = tg;
9956 cfs_rq->rq = rq;
029632fb
PZ
9957 init_cfs_rq_runtime(cfs_rq);
9958
9959 tg->cfs_rq[cpu] = cfs_rq;
9960 tg->se[cpu] = se;
9961
9962 /* se could be NULL for root_task_group */
9963 if (!se)
9964 return;
9965
fed14d45 9966 if (!parent) {
029632fb 9967 se->cfs_rq = &rq->cfs;
fed14d45
PZ
9968 se->depth = 0;
9969 } else {
029632fb 9970 se->cfs_rq = parent->my_q;
fed14d45
PZ
9971 se->depth = parent->depth + 1;
9972 }
029632fb
PZ
9973
9974 se->my_q = cfs_rq;
0ac9b1c2
PT
9975 /* guarantee group entities always have weight */
9976 update_load_set(&se->load, NICE_0_LOAD);
029632fb
PZ
9977 se->parent = parent;
9978}
9979
9980static DEFINE_MUTEX(shares_mutex);
9981
9982int sched_group_set_shares(struct task_group *tg, unsigned long shares)
9983{
9984 int i;
029632fb
PZ
9985
9986 /*
9987 * We can't change the weight of the root cgroup.
9988 */
9989 if (!tg->se[0])
9990 return -EINVAL;
9991
9992 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
9993
9994 mutex_lock(&shares_mutex);
9995 if (tg->shares == shares)
9996 goto done;
9997
9998 tg->shares = shares;
9999 for_each_possible_cpu(i) {
10000 struct rq *rq = cpu_rq(i);
8a8c69c3
PZ
10001 struct sched_entity *se = tg->se[i];
10002 struct rq_flags rf;
029632fb 10003
029632fb 10004 /* Propagate contribution to hierarchy */
8a8c69c3 10005 rq_lock_irqsave(rq, &rf);
71b1da46 10006 update_rq_clock(rq);
89ee048f 10007 for_each_sched_entity(se) {
88c0616e 10008 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
1ea6c46a 10009 update_cfs_group(se);
89ee048f 10010 }
8a8c69c3 10011 rq_unlock_irqrestore(rq, &rf);
029632fb
PZ
10012 }
10013
10014done:
10015 mutex_unlock(&shares_mutex);
10016 return 0;
10017}
10018#else /* CONFIG_FAIR_GROUP_SCHED */
10019
10020void free_fair_sched_group(struct task_group *tg) { }
10021
10022int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
10023{
10024 return 1;
10025}
10026
8663e24d
PZ
10027void online_fair_sched_group(struct task_group *tg) { }
10028
6fe1f348 10029void unregister_fair_sched_group(struct task_group *tg) { }
029632fb
PZ
10030
10031#endif /* CONFIG_FAIR_GROUP_SCHED */
10032
810b3817 10033
6d686f45 10034static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
10035{
10036 struct sched_entity *se = &task->se;
0d721cea
PW
10037 unsigned int rr_interval = 0;
10038
10039 /*
10040 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
10041 * idle runqueue:
10042 */
0d721cea 10043 if (rq->cfs.load.weight)
a59f4e07 10044 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
10045
10046 return rr_interval;
10047}
10048
bf0f6f24
IM
10049/*
10050 * All the scheduling class methods:
10051 */
029632fb 10052const struct sched_class fair_sched_class = {
5522d5d5 10053 .next = &idle_sched_class,
bf0f6f24
IM
10054 .enqueue_task = enqueue_task_fair,
10055 .dequeue_task = dequeue_task_fair,
10056 .yield_task = yield_task_fair,
d95f4122 10057 .yield_to_task = yield_to_task_fair,
bf0f6f24 10058
2e09bf55 10059 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
10060
10061 .pick_next_task = pick_next_task_fair,
10062 .put_prev_task = put_prev_task_fair,
10063
681f3e68 10064#ifdef CONFIG_SMP
4ce72a2c 10065 .select_task_rq = select_task_rq_fair,
0a74bef8 10066 .migrate_task_rq = migrate_task_rq_fair,
141965c7 10067
0bcdcf28
CE
10068 .rq_online = rq_online_fair,
10069 .rq_offline = rq_offline_fair,
88ec22d3 10070
12695578 10071 .task_dead = task_dead_fair,
c5b28038 10072 .set_cpus_allowed = set_cpus_allowed_common,
681f3e68 10073#endif
bf0f6f24 10074
83b699ed 10075 .set_curr_task = set_curr_task_fair,
bf0f6f24 10076 .task_tick = task_tick_fair,
cd29fe6f 10077 .task_fork = task_fork_fair,
cb469845
SR
10078
10079 .prio_changed = prio_changed_fair,
da7a735e 10080 .switched_from = switched_from_fair,
cb469845 10081 .switched_to = switched_to_fair,
810b3817 10082
0d721cea
PW
10083 .get_rr_interval = get_rr_interval_fair,
10084
6e998916
SG
10085 .update_curr = update_curr_fair,
10086
810b3817 10087#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b 10088 .task_change_group = task_change_group_fair,
810b3817 10089#endif
bf0f6f24
IM
10090};
10091
10092#ifdef CONFIG_SCHED_DEBUG
029632fb 10093void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 10094{
a9e7f654 10095 struct cfs_rq *cfs_rq, *pos;
bf0f6f24 10096
5973e5b9 10097 rcu_read_lock();
a9e7f654 10098 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
5cef9eca 10099 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 10100 rcu_read_unlock();
bf0f6f24 10101}
397f2378
SD
10102
10103#ifdef CONFIG_NUMA_BALANCING
10104void show_numa_stats(struct task_struct *p, struct seq_file *m)
10105{
10106 int node;
10107 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
10108
10109 for_each_online_node(node) {
10110 if (p->numa_faults) {
10111 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
10112 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
10113 }
10114 if (p->numa_group) {
10115 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
10116 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
10117 }
10118 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
10119 }
10120}
10121#endif /* CONFIG_NUMA_BALANCING */
10122#endif /* CONFIG_SCHED_DEBUG */
029632fb
PZ
10123
10124__init void init_sched_fair_class(void)
10125{
10126#ifdef CONFIG_SMP
10127 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
10128
3451d024 10129#ifdef CONFIG_NO_HZ_COMMON
554cecaf 10130 nohz.next_balance = jiffies;
f643ea22 10131 nohz.next_blocked = jiffies;
029632fb 10132 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
029632fb
PZ
10133#endif
10134#endif /* SMP */
10135
10136}