]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - kernel/sched/fair.c
sched/fair: Increase PELT accuracy for small tasks
[mirror_ubuntu-jammy-kernel.git] / kernel / sched / fair.c
1 /*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21 */
22
23 #include <linux/sched/mm.h>
24 #include <linux/sched/topology.h>
25
26 #include <linux/latencytop.h>
27 #include <linux/cpumask.h>
28 #include <linux/cpuidle.h>
29 #include <linux/slab.h>
30 #include <linux/profile.h>
31 #include <linux/interrupt.h>
32 #include <linux/mempolicy.h>
33 #include <linux/migrate.h>
34 #include <linux/task_work.h>
35
36 #include <trace/events/sched.h>
37
38 #include "sched.h"
39
40 /*
41 * Targeted preemption latency for CPU-bound tasks:
42 *
43 * NOTE: this latency value is not the same as the concept of
44 * 'timeslice length' - timeslices in CFS are of variable length
45 * and have no persistent notion like in traditional, time-slice
46 * based scheduling concepts.
47 *
48 * (to see the precise effective timeslice length of your workload,
49 * run vmstat and monitor the context-switches (cs) field)
50 *
51 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
52 */
53 unsigned int sysctl_sched_latency = 6000000ULL;
54 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
55
56 /*
57 * The initial- and re-scaling of tunables is configurable
58 *
59 * Options are:
60 *
61 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
62 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
63 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
64 *
65 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
66 */
67 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
68
69 /*
70 * Minimal preemption granularity for CPU-bound tasks:
71 *
72 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
73 */
74 unsigned int sysctl_sched_min_granularity = 750000ULL;
75 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
76
77 /*
78 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
79 */
80 static unsigned int sched_nr_latency = 8;
81
82 /*
83 * After fork, child runs first. If set to 0 (default) then
84 * parent will (try to) run first.
85 */
86 unsigned int sysctl_sched_child_runs_first __read_mostly;
87
88 /*
89 * SCHED_OTHER wake-up granularity.
90 *
91 * This option delays the preemption effects of decoupled workloads
92 * and reduces their over-scheduling. Synchronous workloads will still
93 * have immediate wakeup/sleep latencies.
94 *
95 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
96 */
97 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
98 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
99
100 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
101
102 #ifdef CONFIG_SMP
103 /*
104 * For asym packing, by default the lower numbered cpu has higher priority.
105 */
106 int __weak arch_asym_cpu_priority(int cpu)
107 {
108 return -cpu;
109 }
110 #endif
111
112 #ifdef CONFIG_CFS_BANDWIDTH
113 /*
114 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
115 * each time a cfs_rq requests quota.
116 *
117 * Note: in the case that the slice exceeds the runtime remaining (either due
118 * to consumption or the quota being specified to be smaller than the slice)
119 * we will always only issue the remaining available time.
120 *
121 * (default: 5 msec, units: microseconds)
122 */
123 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
124 #endif
125
126 /*
127 * The margin used when comparing utilization with CPU capacity:
128 * util * margin < capacity * 1024
129 *
130 * (default: ~20%)
131 */
132 unsigned int capacity_margin = 1280;
133
134 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
135 {
136 lw->weight += inc;
137 lw->inv_weight = 0;
138 }
139
140 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
141 {
142 lw->weight -= dec;
143 lw->inv_weight = 0;
144 }
145
146 static inline void update_load_set(struct load_weight *lw, unsigned long w)
147 {
148 lw->weight = w;
149 lw->inv_weight = 0;
150 }
151
152 /*
153 * Increase the granularity value when there are more CPUs,
154 * because with more CPUs the 'effective latency' as visible
155 * to users decreases. But the relationship is not linear,
156 * so pick a second-best guess by going with the log2 of the
157 * number of CPUs.
158 *
159 * This idea comes from the SD scheduler of Con Kolivas:
160 */
161 static unsigned int get_update_sysctl_factor(void)
162 {
163 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
164 unsigned int factor;
165
166 switch (sysctl_sched_tunable_scaling) {
167 case SCHED_TUNABLESCALING_NONE:
168 factor = 1;
169 break;
170 case SCHED_TUNABLESCALING_LINEAR:
171 factor = cpus;
172 break;
173 case SCHED_TUNABLESCALING_LOG:
174 default:
175 factor = 1 + ilog2(cpus);
176 break;
177 }
178
179 return factor;
180 }
181
182 static void update_sysctl(void)
183 {
184 unsigned int factor = get_update_sysctl_factor();
185
186 #define SET_SYSCTL(name) \
187 (sysctl_##name = (factor) * normalized_sysctl_##name)
188 SET_SYSCTL(sched_min_granularity);
189 SET_SYSCTL(sched_latency);
190 SET_SYSCTL(sched_wakeup_granularity);
191 #undef SET_SYSCTL
192 }
193
194 void sched_init_granularity(void)
195 {
196 update_sysctl();
197 }
198
199 #define WMULT_CONST (~0U)
200 #define WMULT_SHIFT 32
201
202 static void __update_inv_weight(struct load_weight *lw)
203 {
204 unsigned long w;
205
206 if (likely(lw->inv_weight))
207 return;
208
209 w = scale_load_down(lw->weight);
210
211 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
212 lw->inv_weight = 1;
213 else if (unlikely(!w))
214 lw->inv_weight = WMULT_CONST;
215 else
216 lw->inv_weight = WMULT_CONST / w;
217 }
218
219 /*
220 * delta_exec * weight / lw.weight
221 * OR
222 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
223 *
224 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
225 * we're guaranteed shift stays positive because inv_weight is guaranteed to
226 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
227 *
228 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
229 * weight/lw.weight <= 1, and therefore our shift will also be positive.
230 */
231 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
232 {
233 u64 fact = scale_load_down(weight);
234 int shift = WMULT_SHIFT;
235
236 __update_inv_weight(lw);
237
238 if (unlikely(fact >> 32)) {
239 while (fact >> 32) {
240 fact >>= 1;
241 shift--;
242 }
243 }
244
245 /* hint to use a 32x32->64 mul */
246 fact = (u64)(u32)fact * lw->inv_weight;
247
248 while (fact >> 32) {
249 fact >>= 1;
250 shift--;
251 }
252
253 return mul_u64_u32_shr(delta_exec, fact, shift);
254 }
255
256
257 const struct sched_class fair_sched_class;
258
259 /**************************************************************
260 * CFS operations on generic schedulable entities:
261 */
262
263 #ifdef CONFIG_FAIR_GROUP_SCHED
264
265 /* cpu runqueue to which this cfs_rq is attached */
266 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
267 {
268 return cfs_rq->rq;
269 }
270
271 /* An entity is a task if it doesn't "own" a runqueue */
272 #define entity_is_task(se) (!se->my_q)
273
274 static inline struct task_struct *task_of(struct sched_entity *se)
275 {
276 SCHED_WARN_ON(!entity_is_task(se));
277 return container_of(se, struct task_struct, se);
278 }
279
280 /* Walk up scheduling entities hierarchy */
281 #define for_each_sched_entity(se) \
282 for (; se; se = se->parent)
283
284 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
285 {
286 return p->se.cfs_rq;
287 }
288
289 /* runqueue on which this entity is (to be) queued */
290 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
291 {
292 return se->cfs_rq;
293 }
294
295 /* runqueue "owned" by this group */
296 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
297 {
298 return grp->my_q;
299 }
300
301 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
302 {
303 if (!cfs_rq->on_list) {
304 struct rq *rq = rq_of(cfs_rq);
305 int cpu = cpu_of(rq);
306 /*
307 * Ensure we either appear before our parent (if already
308 * enqueued) or force our parent to appear after us when it is
309 * enqueued. The fact that we always enqueue bottom-up
310 * reduces this to two cases and a special case for the root
311 * cfs_rq. Furthermore, it also means that we will always reset
312 * tmp_alone_branch either when the branch is connected
313 * to a tree or when we reach the beg of the tree
314 */
315 if (cfs_rq->tg->parent &&
316 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
317 /*
318 * If parent is already on the list, we add the child
319 * just before. Thanks to circular linked property of
320 * the list, this means to put the child at the tail
321 * of the list that starts by parent.
322 */
323 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
324 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
325 /*
326 * The branch is now connected to its tree so we can
327 * reset tmp_alone_branch to the beginning of the
328 * list.
329 */
330 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
331 } else if (!cfs_rq->tg->parent) {
332 /*
333 * cfs rq without parent should be put
334 * at the tail of the list.
335 */
336 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
337 &rq->leaf_cfs_rq_list);
338 /*
339 * We have reach the beg of a tree so we can reset
340 * tmp_alone_branch to the beginning of the list.
341 */
342 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
343 } else {
344 /*
345 * The parent has not already been added so we want to
346 * make sure that it will be put after us.
347 * tmp_alone_branch points to the beg of the branch
348 * where we will add parent.
349 */
350 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
351 rq->tmp_alone_branch);
352 /*
353 * update tmp_alone_branch to points to the new beg
354 * of the branch
355 */
356 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
357 }
358
359 cfs_rq->on_list = 1;
360 }
361 }
362
363 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
364 {
365 if (cfs_rq->on_list) {
366 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
367 cfs_rq->on_list = 0;
368 }
369 }
370
371 /* Iterate thr' all leaf cfs_rq's on a runqueue */
372 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
373 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
374
375 /* Do the two (enqueued) entities belong to the same group ? */
376 static inline struct cfs_rq *
377 is_same_group(struct sched_entity *se, struct sched_entity *pse)
378 {
379 if (se->cfs_rq == pse->cfs_rq)
380 return se->cfs_rq;
381
382 return NULL;
383 }
384
385 static inline struct sched_entity *parent_entity(struct sched_entity *se)
386 {
387 return se->parent;
388 }
389
390 static void
391 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
392 {
393 int se_depth, pse_depth;
394
395 /*
396 * preemption test can be made between sibling entities who are in the
397 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
398 * both tasks until we find their ancestors who are siblings of common
399 * parent.
400 */
401
402 /* First walk up until both entities are at same depth */
403 se_depth = (*se)->depth;
404 pse_depth = (*pse)->depth;
405
406 while (se_depth > pse_depth) {
407 se_depth--;
408 *se = parent_entity(*se);
409 }
410
411 while (pse_depth > se_depth) {
412 pse_depth--;
413 *pse = parent_entity(*pse);
414 }
415
416 while (!is_same_group(*se, *pse)) {
417 *se = parent_entity(*se);
418 *pse = parent_entity(*pse);
419 }
420 }
421
422 #else /* !CONFIG_FAIR_GROUP_SCHED */
423
424 static inline struct task_struct *task_of(struct sched_entity *se)
425 {
426 return container_of(se, struct task_struct, se);
427 }
428
429 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
430 {
431 return container_of(cfs_rq, struct rq, cfs);
432 }
433
434 #define entity_is_task(se) 1
435
436 #define for_each_sched_entity(se) \
437 for (; se; se = NULL)
438
439 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
440 {
441 return &task_rq(p)->cfs;
442 }
443
444 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
445 {
446 struct task_struct *p = task_of(se);
447 struct rq *rq = task_rq(p);
448
449 return &rq->cfs;
450 }
451
452 /* runqueue "owned" by this group */
453 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
454 {
455 return NULL;
456 }
457
458 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
459 {
460 }
461
462 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
463 {
464 }
465
466 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
467 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
468
469 static inline struct sched_entity *parent_entity(struct sched_entity *se)
470 {
471 return NULL;
472 }
473
474 static inline void
475 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
476 {
477 }
478
479 #endif /* CONFIG_FAIR_GROUP_SCHED */
480
481 static __always_inline
482 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
483
484 /**************************************************************
485 * Scheduling class tree data structure manipulation methods:
486 */
487
488 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
489 {
490 s64 delta = (s64)(vruntime - max_vruntime);
491 if (delta > 0)
492 max_vruntime = vruntime;
493
494 return max_vruntime;
495 }
496
497 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
498 {
499 s64 delta = (s64)(vruntime - min_vruntime);
500 if (delta < 0)
501 min_vruntime = vruntime;
502
503 return min_vruntime;
504 }
505
506 static inline int entity_before(struct sched_entity *a,
507 struct sched_entity *b)
508 {
509 return (s64)(a->vruntime - b->vruntime) < 0;
510 }
511
512 static void update_min_vruntime(struct cfs_rq *cfs_rq)
513 {
514 struct sched_entity *curr = cfs_rq->curr;
515
516 u64 vruntime = cfs_rq->min_vruntime;
517
518 if (curr) {
519 if (curr->on_rq)
520 vruntime = curr->vruntime;
521 else
522 curr = NULL;
523 }
524
525 if (cfs_rq->rb_leftmost) {
526 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
527 struct sched_entity,
528 run_node);
529
530 if (!curr)
531 vruntime = se->vruntime;
532 else
533 vruntime = min_vruntime(vruntime, se->vruntime);
534 }
535
536 /* ensure we never gain time by being placed backwards. */
537 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
538 #ifndef CONFIG_64BIT
539 smp_wmb();
540 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
541 #endif
542 }
543
544 /*
545 * Enqueue an entity into the rb-tree:
546 */
547 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
548 {
549 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
550 struct rb_node *parent = NULL;
551 struct sched_entity *entry;
552 int leftmost = 1;
553
554 /*
555 * Find the right place in the rbtree:
556 */
557 while (*link) {
558 parent = *link;
559 entry = rb_entry(parent, struct sched_entity, run_node);
560 /*
561 * We dont care about collisions. Nodes with
562 * the same key stay together.
563 */
564 if (entity_before(se, entry)) {
565 link = &parent->rb_left;
566 } else {
567 link = &parent->rb_right;
568 leftmost = 0;
569 }
570 }
571
572 /*
573 * Maintain a cache of leftmost tree entries (it is frequently
574 * used):
575 */
576 if (leftmost)
577 cfs_rq->rb_leftmost = &se->run_node;
578
579 rb_link_node(&se->run_node, parent, link);
580 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
581 }
582
583 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
584 {
585 if (cfs_rq->rb_leftmost == &se->run_node) {
586 struct rb_node *next_node;
587
588 next_node = rb_next(&se->run_node);
589 cfs_rq->rb_leftmost = next_node;
590 }
591
592 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
593 }
594
595 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
596 {
597 struct rb_node *left = cfs_rq->rb_leftmost;
598
599 if (!left)
600 return NULL;
601
602 return rb_entry(left, struct sched_entity, run_node);
603 }
604
605 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
606 {
607 struct rb_node *next = rb_next(&se->run_node);
608
609 if (!next)
610 return NULL;
611
612 return rb_entry(next, struct sched_entity, run_node);
613 }
614
615 #ifdef CONFIG_SCHED_DEBUG
616 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
617 {
618 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
619
620 if (!last)
621 return NULL;
622
623 return rb_entry(last, struct sched_entity, run_node);
624 }
625
626 /**************************************************************
627 * Scheduling class statistics methods:
628 */
629
630 int sched_proc_update_handler(struct ctl_table *table, int write,
631 void __user *buffer, size_t *lenp,
632 loff_t *ppos)
633 {
634 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
635 unsigned int factor = get_update_sysctl_factor();
636
637 if (ret || !write)
638 return ret;
639
640 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
641 sysctl_sched_min_granularity);
642
643 #define WRT_SYSCTL(name) \
644 (normalized_sysctl_##name = sysctl_##name / (factor))
645 WRT_SYSCTL(sched_min_granularity);
646 WRT_SYSCTL(sched_latency);
647 WRT_SYSCTL(sched_wakeup_granularity);
648 #undef WRT_SYSCTL
649
650 return 0;
651 }
652 #endif
653
654 /*
655 * delta /= w
656 */
657 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
658 {
659 if (unlikely(se->load.weight != NICE_0_LOAD))
660 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
661
662 return delta;
663 }
664
665 /*
666 * The idea is to set a period in which each task runs once.
667 *
668 * When there are too many tasks (sched_nr_latency) we have to stretch
669 * this period because otherwise the slices get too small.
670 *
671 * p = (nr <= nl) ? l : l*nr/nl
672 */
673 static u64 __sched_period(unsigned long nr_running)
674 {
675 if (unlikely(nr_running > sched_nr_latency))
676 return nr_running * sysctl_sched_min_granularity;
677 else
678 return sysctl_sched_latency;
679 }
680
681 /*
682 * We calculate the wall-time slice from the period by taking a part
683 * proportional to the weight.
684 *
685 * s = p*P[w/rw]
686 */
687 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
688 {
689 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
690
691 for_each_sched_entity(se) {
692 struct load_weight *load;
693 struct load_weight lw;
694
695 cfs_rq = cfs_rq_of(se);
696 load = &cfs_rq->load;
697
698 if (unlikely(!se->on_rq)) {
699 lw = cfs_rq->load;
700
701 update_load_add(&lw, se->load.weight);
702 load = &lw;
703 }
704 slice = __calc_delta(slice, se->load.weight, load);
705 }
706 return slice;
707 }
708
709 /*
710 * We calculate the vruntime slice of a to-be-inserted task.
711 *
712 * vs = s/w
713 */
714 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
715 {
716 return calc_delta_fair(sched_slice(cfs_rq, se), se);
717 }
718
719 #ifdef CONFIG_SMP
720 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
721 static unsigned long task_h_load(struct task_struct *p);
722
723 /*
724 * We choose a half-life close to 1 scheduling period.
725 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
726 * dependent on this value.
727 */
728 #define LOAD_AVG_PERIOD 32
729 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
730
731 /* Give new sched_entity start runnable values to heavy its load in infant time */
732 void init_entity_runnable_average(struct sched_entity *se)
733 {
734 struct sched_avg *sa = &se->avg;
735
736 sa->last_update_time = 0;
737 /*
738 * sched_avg's period_contrib should be strictly less then 1024, so
739 * we give it 1023 to make sure it is almost a period (1024us), and
740 * will definitely be update (after enqueue).
741 */
742 sa->period_contrib = 1023;
743 /*
744 * Tasks are intialized with full load to be seen as heavy tasks until
745 * they get a chance to stabilize to their real load level.
746 * Group entities are intialized with zero load to reflect the fact that
747 * nothing has been attached to the task group yet.
748 */
749 if (entity_is_task(se))
750 sa->load_avg = scale_load_down(se->load.weight);
751 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
752 /*
753 * At this point, util_avg won't be used in select_task_rq_fair anyway
754 */
755 sa->util_avg = 0;
756 sa->util_sum = 0;
757 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
758 }
759
760 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
761 static void attach_entity_cfs_rq(struct sched_entity *se);
762
763 /*
764 * With new tasks being created, their initial util_avgs are extrapolated
765 * based on the cfs_rq's current util_avg:
766 *
767 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
768 *
769 * However, in many cases, the above util_avg does not give a desired
770 * value. Moreover, the sum of the util_avgs may be divergent, such
771 * as when the series is a harmonic series.
772 *
773 * To solve this problem, we also cap the util_avg of successive tasks to
774 * only 1/2 of the left utilization budget:
775 *
776 * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
777 *
778 * where n denotes the nth task.
779 *
780 * For example, a simplest series from the beginning would be like:
781 *
782 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
783 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
784 *
785 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
786 * if util_avg > util_avg_cap.
787 */
788 void post_init_entity_util_avg(struct sched_entity *se)
789 {
790 struct cfs_rq *cfs_rq = cfs_rq_of(se);
791 struct sched_avg *sa = &se->avg;
792 long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
793
794 if (cap > 0) {
795 if (cfs_rq->avg.util_avg != 0) {
796 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
797 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
798
799 if (sa->util_avg > cap)
800 sa->util_avg = cap;
801 } else {
802 sa->util_avg = cap;
803 }
804 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
805 }
806
807 if (entity_is_task(se)) {
808 struct task_struct *p = task_of(se);
809 if (p->sched_class != &fair_sched_class) {
810 /*
811 * For !fair tasks do:
812 *
813 update_cfs_rq_load_avg(now, cfs_rq, false);
814 attach_entity_load_avg(cfs_rq, se);
815 switched_from_fair(rq, p);
816 *
817 * such that the next switched_to_fair() has the
818 * expected state.
819 */
820 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
821 return;
822 }
823 }
824
825 attach_entity_cfs_rq(se);
826 }
827
828 #else /* !CONFIG_SMP */
829 void init_entity_runnable_average(struct sched_entity *se)
830 {
831 }
832 void post_init_entity_util_avg(struct sched_entity *se)
833 {
834 }
835 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
836 {
837 }
838 #endif /* CONFIG_SMP */
839
840 /*
841 * Update the current task's runtime statistics.
842 */
843 static void update_curr(struct cfs_rq *cfs_rq)
844 {
845 struct sched_entity *curr = cfs_rq->curr;
846 u64 now = rq_clock_task(rq_of(cfs_rq));
847 u64 delta_exec;
848
849 if (unlikely(!curr))
850 return;
851
852 delta_exec = now - curr->exec_start;
853 if (unlikely((s64)delta_exec <= 0))
854 return;
855
856 curr->exec_start = now;
857
858 schedstat_set(curr->statistics.exec_max,
859 max(delta_exec, curr->statistics.exec_max));
860
861 curr->sum_exec_runtime += delta_exec;
862 schedstat_add(cfs_rq->exec_clock, delta_exec);
863
864 curr->vruntime += calc_delta_fair(delta_exec, curr);
865 update_min_vruntime(cfs_rq);
866
867 if (entity_is_task(curr)) {
868 struct task_struct *curtask = task_of(curr);
869
870 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
871 cpuacct_charge(curtask, delta_exec);
872 account_group_exec_runtime(curtask, delta_exec);
873 }
874
875 account_cfs_rq_runtime(cfs_rq, delta_exec);
876 }
877
878 static void update_curr_fair(struct rq *rq)
879 {
880 update_curr(cfs_rq_of(&rq->curr->se));
881 }
882
883 static inline void
884 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
885 {
886 u64 wait_start, prev_wait_start;
887
888 if (!schedstat_enabled())
889 return;
890
891 wait_start = rq_clock(rq_of(cfs_rq));
892 prev_wait_start = schedstat_val(se->statistics.wait_start);
893
894 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
895 likely(wait_start > prev_wait_start))
896 wait_start -= prev_wait_start;
897
898 schedstat_set(se->statistics.wait_start, wait_start);
899 }
900
901 static inline void
902 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
903 {
904 struct task_struct *p;
905 u64 delta;
906
907 if (!schedstat_enabled())
908 return;
909
910 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
911
912 if (entity_is_task(se)) {
913 p = task_of(se);
914 if (task_on_rq_migrating(p)) {
915 /*
916 * Preserve migrating task's wait time so wait_start
917 * time stamp can be adjusted to accumulate wait time
918 * prior to migration.
919 */
920 schedstat_set(se->statistics.wait_start, delta);
921 return;
922 }
923 trace_sched_stat_wait(p, delta);
924 }
925
926 schedstat_set(se->statistics.wait_max,
927 max(schedstat_val(se->statistics.wait_max), delta));
928 schedstat_inc(se->statistics.wait_count);
929 schedstat_add(se->statistics.wait_sum, delta);
930 schedstat_set(se->statistics.wait_start, 0);
931 }
932
933 static inline void
934 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
935 {
936 struct task_struct *tsk = NULL;
937 u64 sleep_start, block_start;
938
939 if (!schedstat_enabled())
940 return;
941
942 sleep_start = schedstat_val(se->statistics.sleep_start);
943 block_start = schedstat_val(se->statistics.block_start);
944
945 if (entity_is_task(se))
946 tsk = task_of(se);
947
948 if (sleep_start) {
949 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
950
951 if ((s64)delta < 0)
952 delta = 0;
953
954 if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
955 schedstat_set(se->statistics.sleep_max, delta);
956
957 schedstat_set(se->statistics.sleep_start, 0);
958 schedstat_add(se->statistics.sum_sleep_runtime, delta);
959
960 if (tsk) {
961 account_scheduler_latency(tsk, delta >> 10, 1);
962 trace_sched_stat_sleep(tsk, delta);
963 }
964 }
965 if (block_start) {
966 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
967
968 if ((s64)delta < 0)
969 delta = 0;
970
971 if (unlikely(delta > schedstat_val(se->statistics.block_max)))
972 schedstat_set(se->statistics.block_max, delta);
973
974 schedstat_set(se->statistics.block_start, 0);
975 schedstat_add(se->statistics.sum_sleep_runtime, delta);
976
977 if (tsk) {
978 if (tsk->in_iowait) {
979 schedstat_add(se->statistics.iowait_sum, delta);
980 schedstat_inc(se->statistics.iowait_count);
981 trace_sched_stat_iowait(tsk, delta);
982 }
983
984 trace_sched_stat_blocked(tsk, delta);
985
986 /*
987 * Blocking time is in units of nanosecs, so shift by
988 * 20 to get a milliseconds-range estimation of the
989 * amount of time that the task spent sleeping:
990 */
991 if (unlikely(prof_on == SLEEP_PROFILING)) {
992 profile_hits(SLEEP_PROFILING,
993 (void *)get_wchan(tsk),
994 delta >> 20);
995 }
996 account_scheduler_latency(tsk, delta >> 10, 0);
997 }
998 }
999 }
1000
1001 /*
1002 * Task is being enqueued - update stats:
1003 */
1004 static inline void
1005 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1006 {
1007 if (!schedstat_enabled())
1008 return;
1009
1010 /*
1011 * Are we enqueueing a waiting task? (for current tasks
1012 * a dequeue/enqueue event is a NOP)
1013 */
1014 if (se != cfs_rq->curr)
1015 update_stats_wait_start(cfs_rq, se);
1016
1017 if (flags & ENQUEUE_WAKEUP)
1018 update_stats_enqueue_sleeper(cfs_rq, se);
1019 }
1020
1021 static inline void
1022 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1023 {
1024
1025 if (!schedstat_enabled())
1026 return;
1027
1028 /*
1029 * Mark the end of the wait period if dequeueing a
1030 * waiting task:
1031 */
1032 if (se != cfs_rq->curr)
1033 update_stats_wait_end(cfs_rq, se);
1034
1035 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1036 struct task_struct *tsk = task_of(se);
1037
1038 if (tsk->state & TASK_INTERRUPTIBLE)
1039 schedstat_set(se->statistics.sleep_start,
1040 rq_clock(rq_of(cfs_rq)));
1041 if (tsk->state & TASK_UNINTERRUPTIBLE)
1042 schedstat_set(se->statistics.block_start,
1043 rq_clock(rq_of(cfs_rq)));
1044 }
1045 }
1046
1047 /*
1048 * We are picking a new current task - update its stats:
1049 */
1050 static inline void
1051 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1052 {
1053 /*
1054 * We are starting a new run period:
1055 */
1056 se->exec_start = rq_clock_task(rq_of(cfs_rq));
1057 }
1058
1059 /**************************************************
1060 * Scheduling class queueing methods:
1061 */
1062
1063 #ifdef CONFIG_NUMA_BALANCING
1064 /*
1065 * Approximate time to scan a full NUMA task in ms. The task scan period is
1066 * calculated based on the tasks virtual memory size and
1067 * numa_balancing_scan_size.
1068 */
1069 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1070 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1071
1072 /* Portion of address space to scan in MB */
1073 unsigned int sysctl_numa_balancing_scan_size = 256;
1074
1075 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1076 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1077
1078 static unsigned int task_nr_scan_windows(struct task_struct *p)
1079 {
1080 unsigned long rss = 0;
1081 unsigned long nr_scan_pages;
1082
1083 /*
1084 * Calculations based on RSS as non-present and empty pages are skipped
1085 * by the PTE scanner and NUMA hinting faults should be trapped based
1086 * on resident pages
1087 */
1088 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1089 rss = get_mm_rss(p->mm);
1090 if (!rss)
1091 rss = nr_scan_pages;
1092
1093 rss = round_up(rss, nr_scan_pages);
1094 return rss / nr_scan_pages;
1095 }
1096
1097 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1098 #define MAX_SCAN_WINDOW 2560
1099
1100 static unsigned int task_scan_min(struct task_struct *p)
1101 {
1102 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1103 unsigned int scan, floor;
1104 unsigned int windows = 1;
1105
1106 if (scan_size < MAX_SCAN_WINDOW)
1107 windows = MAX_SCAN_WINDOW / scan_size;
1108 floor = 1000 / windows;
1109
1110 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1111 return max_t(unsigned int, floor, scan);
1112 }
1113
1114 static unsigned int task_scan_max(struct task_struct *p)
1115 {
1116 unsigned int smin = task_scan_min(p);
1117 unsigned int smax;
1118
1119 /* Watch for min being lower than max due to floor calculations */
1120 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1121 return max(smin, smax);
1122 }
1123
1124 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1125 {
1126 rq->nr_numa_running += (p->numa_preferred_nid != -1);
1127 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1128 }
1129
1130 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1131 {
1132 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1133 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1134 }
1135
1136 struct numa_group {
1137 atomic_t refcount;
1138
1139 spinlock_t lock; /* nr_tasks, tasks */
1140 int nr_tasks;
1141 pid_t gid;
1142 int active_nodes;
1143
1144 struct rcu_head rcu;
1145 unsigned long total_faults;
1146 unsigned long max_faults_cpu;
1147 /*
1148 * Faults_cpu is used to decide whether memory should move
1149 * towards the CPU. As a consequence, these stats are weighted
1150 * more by CPU use than by memory faults.
1151 */
1152 unsigned long *faults_cpu;
1153 unsigned long faults[0];
1154 };
1155
1156 /* Shared or private faults. */
1157 #define NR_NUMA_HINT_FAULT_TYPES 2
1158
1159 /* Memory and CPU locality */
1160 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1161
1162 /* Averaged statistics, and temporary buffers. */
1163 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1164
1165 pid_t task_numa_group_id(struct task_struct *p)
1166 {
1167 return p->numa_group ? p->numa_group->gid : 0;
1168 }
1169
1170 /*
1171 * The averaged statistics, shared & private, memory & cpu,
1172 * occupy the first half of the array. The second half of the
1173 * array is for current counters, which are averaged into the
1174 * first set by task_numa_placement.
1175 */
1176 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1177 {
1178 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1179 }
1180
1181 static inline unsigned long task_faults(struct task_struct *p, int nid)
1182 {
1183 if (!p->numa_faults)
1184 return 0;
1185
1186 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1187 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1188 }
1189
1190 static inline unsigned long group_faults(struct task_struct *p, int nid)
1191 {
1192 if (!p->numa_group)
1193 return 0;
1194
1195 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1196 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1197 }
1198
1199 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1200 {
1201 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1202 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1203 }
1204
1205 /*
1206 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1207 * considered part of a numa group's pseudo-interleaving set. Migrations
1208 * between these nodes are slowed down, to allow things to settle down.
1209 */
1210 #define ACTIVE_NODE_FRACTION 3
1211
1212 static bool numa_is_active_node(int nid, struct numa_group *ng)
1213 {
1214 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1215 }
1216
1217 /* Handle placement on systems where not all nodes are directly connected. */
1218 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1219 int maxdist, bool task)
1220 {
1221 unsigned long score = 0;
1222 int node;
1223
1224 /*
1225 * All nodes are directly connected, and the same distance
1226 * from each other. No need for fancy placement algorithms.
1227 */
1228 if (sched_numa_topology_type == NUMA_DIRECT)
1229 return 0;
1230
1231 /*
1232 * This code is called for each node, introducing N^2 complexity,
1233 * which should be ok given the number of nodes rarely exceeds 8.
1234 */
1235 for_each_online_node(node) {
1236 unsigned long faults;
1237 int dist = node_distance(nid, node);
1238
1239 /*
1240 * The furthest away nodes in the system are not interesting
1241 * for placement; nid was already counted.
1242 */
1243 if (dist == sched_max_numa_distance || node == nid)
1244 continue;
1245
1246 /*
1247 * On systems with a backplane NUMA topology, compare groups
1248 * of nodes, and move tasks towards the group with the most
1249 * memory accesses. When comparing two nodes at distance
1250 * "hoplimit", only nodes closer by than "hoplimit" are part
1251 * of each group. Skip other nodes.
1252 */
1253 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1254 dist > maxdist)
1255 continue;
1256
1257 /* Add up the faults from nearby nodes. */
1258 if (task)
1259 faults = task_faults(p, node);
1260 else
1261 faults = group_faults(p, node);
1262
1263 /*
1264 * On systems with a glueless mesh NUMA topology, there are
1265 * no fixed "groups of nodes". Instead, nodes that are not
1266 * directly connected bounce traffic through intermediate
1267 * nodes; a numa_group can occupy any set of nodes.
1268 * The further away a node is, the less the faults count.
1269 * This seems to result in good task placement.
1270 */
1271 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1272 faults *= (sched_max_numa_distance - dist);
1273 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1274 }
1275
1276 score += faults;
1277 }
1278
1279 return score;
1280 }
1281
1282 /*
1283 * These return the fraction of accesses done by a particular task, or
1284 * task group, on a particular numa node. The group weight is given a
1285 * larger multiplier, in order to group tasks together that are almost
1286 * evenly spread out between numa nodes.
1287 */
1288 static inline unsigned long task_weight(struct task_struct *p, int nid,
1289 int dist)
1290 {
1291 unsigned long faults, total_faults;
1292
1293 if (!p->numa_faults)
1294 return 0;
1295
1296 total_faults = p->total_numa_faults;
1297
1298 if (!total_faults)
1299 return 0;
1300
1301 faults = task_faults(p, nid);
1302 faults += score_nearby_nodes(p, nid, dist, true);
1303
1304 return 1000 * faults / total_faults;
1305 }
1306
1307 static inline unsigned long group_weight(struct task_struct *p, int nid,
1308 int dist)
1309 {
1310 unsigned long faults, total_faults;
1311
1312 if (!p->numa_group)
1313 return 0;
1314
1315 total_faults = p->numa_group->total_faults;
1316
1317 if (!total_faults)
1318 return 0;
1319
1320 faults = group_faults(p, nid);
1321 faults += score_nearby_nodes(p, nid, dist, false);
1322
1323 return 1000 * faults / total_faults;
1324 }
1325
1326 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1327 int src_nid, int dst_cpu)
1328 {
1329 struct numa_group *ng = p->numa_group;
1330 int dst_nid = cpu_to_node(dst_cpu);
1331 int last_cpupid, this_cpupid;
1332
1333 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1334
1335 /*
1336 * Multi-stage node selection is used in conjunction with a periodic
1337 * migration fault to build a temporal task<->page relation. By using
1338 * a two-stage filter we remove short/unlikely relations.
1339 *
1340 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1341 * a task's usage of a particular page (n_p) per total usage of this
1342 * page (n_t) (in a given time-span) to a probability.
1343 *
1344 * Our periodic faults will sample this probability and getting the
1345 * same result twice in a row, given these samples are fully
1346 * independent, is then given by P(n)^2, provided our sample period
1347 * is sufficiently short compared to the usage pattern.
1348 *
1349 * This quadric squishes small probabilities, making it less likely we
1350 * act on an unlikely task<->page relation.
1351 */
1352 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1353 if (!cpupid_pid_unset(last_cpupid) &&
1354 cpupid_to_nid(last_cpupid) != dst_nid)
1355 return false;
1356
1357 /* Always allow migrate on private faults */
1358 if (cpupid_match_pid(p, last_cpupid))
1359 return true;
1360
1361 /* A shared fault, but p->numa_group has not been set up yet. */
1362 if (!ng)
1363 return true;
1364
1365 /*
1366 * Destination node is much more heavily used than the source
1367 * node? Allow migration.
1368 */
1369 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1370 ACTIVE_NODE_FRACTION)
1371 return true;
1372
1373 /*
1374 * Distribute memory according to CPU & memory use on each node,
1375 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1376 *
1377 * faults_cpu(dst) 3 faults_cpu(src)
1378 * --------------- * - > ---------------
1379 * faults_mem(dst) 4 faults_mem(src)
1380 */
1381 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1382 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1383 }
1384
1385 static unsigned long weighted_cpuload(const int cpu);
1386 static unsigned long source_load(int cpu, int type);
1387 static unsigned long target_load(int cpu, int type);
1388 static unsigned long capacity_of(int cpu);
1389 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1390
1391 /* Cached statistics for all CPUs within a node */
1392 struct numa_stats {
1393 unsigned long nr_running;
1394 unsigned long load;
1395
1396 /* Total compute capacity of CPUs on a node */
1397 unsigned long compute_capacity;
1398
1399 /* Approximate capacity in terms of runnable tasks on a node */
1400 unsigned long task_capacity;
1401 int has_free_capacity;
1402 };
1403
1404 /*
1405 * XXX borrowed from update_sg_lb_stats
1406 */
1407 static void update_numa_stats(struct numa_stats *ns, int nid)
1408 {
1409 int smt, cpu, cpus = 0;
1410 unsigned long capacity;
1411
1412 memset(ns, 0, sizeof(*ns));
1413 for_each_cpu(cpu, cpumask_of_node(nid)) {
1414 struct rq *rq = cpu_rq(cpu);
1415
1416 ns->nr_running += rq->nr_running;
1417 ns->load += weighted_cpuload(cpu);
1418 ns->compute_capacity += capacity_of(cpu);
1419
1420 cpus++;
1421 }
1422
1423 /*
1424 * If we raced with hotplug and there are no CPUs left in our mask
1425 * the @ns structure is NULL'ed and task_numa_compare() will
1426 * not find this node attractive.
1427 *
1428 * We'll either bail at !has_free_capacity, or we'll detect a huge
1429 * imbalance and bail there.
1430 */
1431 if (!cpus)
1432 return;
1433
1434 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1435 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1436 capacity = cpus / smt; /* cores */
1437
1438 ns->task_capacity = min_t(unsigned, capacity,
1439 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1440 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1441 }
1442
1443 struct task_numa_env {
1444 struct task_struct *p;
1445
1446 int src_cpu, src_nid;
1447 int dst_cpu, dst_nid;
1448
1449 struct numa_stats src_stats, dst_stats;
1450
1451 int imbalance_pct;
1452 int dist;
1453
1454 struct task_struct *best_task;
1455 long best_imp;
1456 int best_cpu;
1457 };
1458
1459 static void task_numa_assign(struct task_numa_env *env,
1460 struct task_struct *p, long imp)
1461 {
1462 if (env->best_task)
1463 put_task_struct(env->best_task);
1464 if (p)
1465 get_task_struct(p);
1466
1467 env->best_task = p;
1468 env->best_imp = imp;
1469 env->best_cpu = env->dst_cpu;
1470 }
1471
1472 static bool load_too_imbalanced(long src_load, long dst_load,
1473 struct task_numa_env *env)
1474 {
1475 long imb, old_imb;
1476 long orig_src_load, orig_dst_load;
1477 long src_capacity, dst_capacity;
1478
1479 /*
1480 * The load is corrected for the CPU capacity available on each node.
1481 *
1482 * src_load dst_load
1483 * ------------ vs ---------
1484 * src_capacity dst_capacity
1485 */
1486 src_capacity = env->src_stats.compute_capacity;
1487 dst_capacity = env->dst_stats.compute_capacity;
1488
1489 /* We care about the slope of the imbalance, not the direction. */
1490 if (dst_load < src_load)
1491 swap(dst_load, src_load);
1492
1493 /* Is the difference below the threshold? */
1494 imb = dst_load * src_capacity * 100 -
1495 src_load * dst_capacity * env->imbalance_pct;
1496 if (imb <= 0)
1497 return false;
1498
1499 /*
1500 * The imbalance is above the allowed threshold.
1501 * Compare it with the old imbalance.
1502 */
1503 orig_src_load = env->src_stats.load;
1504 orig_dst_load = env->dst_stats.load;
1505
1506 if (orig_dst_load < orig_src_load)
1507 swap(orig_dst_load, orig_src_load);
1508
1509 old_imb = orig_dst_load * src_capacity * 100 -
1510 orig_src_load * dst_capacity * env->imbalance_pct;
1511
1512 /* Would this change make things worse? */
1513 return (imb > old_imb);
1514 }
1515
1516 /*
1517 * This checks if the overall compute and NUMA accesses of the system would
1518 * be improved if the source tasks was migrated to the target dst_cpu taking
1519 * into account that it might be best if task running on the dst_cpu should
1520 * be exchanged with the source task
1521 */
1522 static void task_numa_compare(struct task_numa_env *env,
1523 long taskimp, long groupimp)
1524 {
1525 struct rq *src_rq = cpu_rq(env->src_cpu);
1526 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1527 struct task_struct *cur;
1528 long src_load, dst_load;
1529 long load;
1530 long imp = env->p->numa_group ? groupimp : taskimp;
1531 long moveimp = imp;
1532 int dist = env->dist;
1533
1534 rcu_read_lock();
1535 cur = task_rcu_dereference(&dst_rq->curr);
1536 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1537 cur = NULL;
1538
1539 /*
1540 * Because we have preemption enabled we can get migrated around and
1541 * end try selecting ourselves (current == env->p) as a swap candidate.
1542 */
1543 if (cur == env->p)
1544 goto unlock;
1545
1546 /*
1547 * "imp" is the fault differential for the source task between the
1548 * source and destination node. Calculate the total differential for
1549 * the source task and potential destination task. The more negative
1550 * the value is, the more rmeote accesses that would be expected to
1551 * be incurred if the tasks were swapped.
1552 */
1553 if (cur) {
1554 /* Skip this swap candidate if cannot move to the source cpu */
1555 if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
1556 goto unlock;
1557
1558 /*
1559 * If dst and source tasks are in the same NUMA group, or not
1560 * in any group then look only at task weights.
1561 */
1562 if (cur->numa_group == env->p->numa_group) {
1563 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1564 task_weight(cur, env->dst_nid, dist);
1565 /*
1566 * Add some hysteresis to prevent swapping the
1567 * tasks within a group over tiny differences.
1568 */
1569 if (cur->numa_group)
1570 imp -= imp/16;
1571 } else {
1572 /*
1573 * Compare the group weights. If a task is all by
1574 * itself (not part of a group), use the task weight
1575 * instead.
1576 */
1577 if (cur->numa_group)
1578 imp += group_weight(cur, env->src_nid, dist) -
1579 group_weight(cur, env->dst_nid, dist);
1580 else
1581 imp += task_weight(cur, env->src_nid, dist) -
1582 task_weight(cur, env->dst_nid, dist);
1583 }
1584 }
1585
1586 if (imp <= env->best_imp && moveimp <= env->best_imp)
1587 goto unlock;
1588
1589 if (!cur) {
1590 /* Is there capacity at our destination? */
1591 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1592 !env->dst_stats.has_free_capacity)
1593 goto unlock;
1594
1595 goto balance;
1596 }
1597
1598 /* Balance doesn't matter much if we're running a task per cpu */
1599 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1600 dst_rq->nr_running == 1)
1601 goto assign;
1602
1603 /*
1604 * In the overloaded case, try and keep the load balanced.
1605 */
1606 balance:
1607 load = task_h_load(env->p);
1608 dst_load = env->dst_stats.load + load;
1609 src_load = env->src_stats.load - load;
1610
1611 if (moveimp > imp && moveimp > env->best_imp) {
1612 /*
1613 * If the improvement from just moving env->p direction is
1614 * better than swapping tasks around, check if a move is
1615 * possible. Store a slightly smaller score than moveimp,
1616 * so an actually idle CPU will win.
1617 */
1618 if (!load_too_imbalanced(src_load, dst_load, env)) {
1619 imp = moveimp - 1;
1620 cur = NULL;
1621 goto assign;
1622 }
1623 }
1624
1625 if (imp <= env->best_imp)
1626 goto unlock;
1627
1628 if (cur) {
1629 load = task_h_load(cur);
1630 dst_load -= load;
1631 src_load += load;
1632 }
1633
1634 if (load_too_imbalanced(src_load, dst_load, env))
1635 goto unlock;
1636
1637 /*
1638 * One idle CPU per node is evaluated for a task numa move.
1639 * Call select_idle_sibling to maybe find a better one.
1640 */
1641 if (!cur) {
1642 /*
1643 * select_idle_siblings() uses an per-cpu cpumask that
1644 * can be used from IRQ context.
1645 */
1646 local_irq_disable();
1647 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1648 env->dst_cpu);
1649 local_irq_enable();
1650 }
1651
1652 assign:
1653 task_numa_assign(env, cur, imp);
1654 unlock:
1655 rcu_read_unlock();
1656 }
1657
1658 static void task_numa_find_cpu(struct task_numa_env *env,
1659 long taskimp, long groupimp)
1660 {
1661 int cpu;
1662
1663 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1664 /* Skip this CPU if the source task cannot migrate */
1665 if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
1666 continue;
1667
1668 env->dst_cpu = cpu;
1669 task_numa_compare(env, taskimp, groupimp);
1670 }
1671 }
1672
1673 /* Only move tasks to a NUMA node less busy than the current node. */
1674 static bool numa_has_capacity(struct task_numa_env *env)
1675 {
1676 struct numa_stats *src = &env->src_stats;
1677 struct numa_stats *dst = &env->dst_stats;
1678
1679 if (src->has_free_capacity && !dst->has_free_capacity)
1680 return false;
1681
1682 /*
1683 * Only consider a task move if the source has a higher load
1684 * than the destination, corrected for CPU capacity on each node.
1685 *
1686 * src->load dst->load
1687 * --------------------- vs ---------------------
1688 * src->compute_capacity dst->compute_capacity
1689 */
1690 if (src->load * dst->compute_capacity * env->imbalance_pct >
1691
1692 dst->load * src->compute_capacity * 100)
1693 return true;
1694
1695 return false;
1696 }
1697
1698 static int task_numa_migrate(struct task_struct *p)
1699 {
1700 struct task_numa_env env = {
1701 .p = p,
1702
1703 .src_cpu = task_cpu(p),
1704 .src_nid = task_node(p),
1705
1706 .imbalance_pct = 112,
1707
1708 .best_task = NULL,
1709 .best_imp = 0,
1710 .best_cpu = -1,
1711 };
1712 struct sched_domain *sd;
1713 unsigned long taskweight, groupweight;
1714 int nid, ret, dist;
1715 long taskimp, groupimp;
1716
1717 /*
1718 * Pick the lowest SD_NUMA domain, as that would have the smallest
1719 * imbalance and would be the first to start moving tasks about.
1720 *
1721 * And we want to avoid any moving of tasks about, as that would create
1722 * random movement of tasks -- counter the numa conditions we're trying
1723 * to satisfy here.
1724 */
1725 rcu_read_lock();
1726 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1727 if (sd)
1728 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1729 rcu_read_unlock();
1730
1731 /*
1732 * Cpusets can break the scheduler domain tree into smaller
1733 * balance domains, some of which do not cross NUMA boundaries.
1734 * Tasks that are "trapped" in such domains cannot be migrated
1735 * elsewhere, so there is no point in (re)trying.
1736 */
1737 if (unlikely(!sd)) {
1738 p->numa_preferred_nid = task_node(p);
1739 return -EINVAL;
1740 }
1741
1742 env.dst_nid = p->numa_preferred_nid;
1743 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1744 taskweight = task_weight(p, env.src_nid, dist);
1745 groupweight = group_weight(p, env.src_nid, dist);
1746 update_numa_stats(&env.src_stats, env.src_nid);
1747 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1748 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1749 update_numa_stats(&env.dst_stats, env.dst_nid);
1750
1751 /* Try to find a spot on the preferred nid. */
1752 if (numa_has_capacity(&env))
1753 task_numa_find_cpu(&env, taskimp, groupimp);
1754
1755 /*
1756 * Look at other nodes in these cases:
1757 * - there is no space available on the preferred_nid
1758 * - the task is part of a numa_group that is interleaved across
1759 * multiple NUMA nodes; in order to better consolidate the group,
1760 * we need to check other locations.
1761 */
1762 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1763 for_each_online_node(nid) {
1764 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1765 continue;
1766
1767 dist = node_distance(env.src_nid, env.dst_nid);
1768 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1769 dist != env.dist) {
1770 taskweight = task_weight(p, env.src_nid, dist);
1771 groupweight = group_weight(p, env.src_nid, dist);
1772 }
1773
1774 /* Only consider nodes where both task and groups benefit */
1775 taskimp = task_weight(p, nid, dist) - taskweight;
1776 groupimp = group_weight(p, nid, dist) - groupweight;
1777 if (taskimp < 0 && groupimp < 0)
1778 continue;
1779
1780 env.dist = dist;
1781 env.dst_nid = nid;
1782 update_numa_stats(&env.dst_stats, env.dst_nid);
1783 if (numa_has_capacity(&env))
1784 task_numa_find_cpu(&env, taskimp, groupimp);
1785 }
1786 }
1787
1788 /*
1789 * If the task is part of a workload that spans multiple NUMA nodes,
1790 * and is migrating into one of the workload's active nodes, remember
1791 * this node as the task's preferred numa node, so the workload can
1792 * settle down.
1793 * A task that migrated to a second choice node will be better off
1794 * trying for a better one later. Do not set the preferred node here.
1795 */
1796 if (p->numa_group) {
1797 struct numa_group *ng = p->numa_group;
1798
1799 if (env.best_cpu == -1)
1800 nid = env.src_nid;
1801 else
1802 nid = env.dst_nid;
1803
1804 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1805 sched_setnuma(p, env.dst_nid);
1806 }
1807
1808 /* No better CPU than the current one was found. */
1809 if (env.best_cpu == -1)
1810 return -EAGAIN;
1811
1812 /*
1813 * Reset the scan period if the task is being rescheduled on an
1814 * alternative node to recheck if the tasks is now properly placed.
1815 */
1816 p->numa_scan_period = task_scan_min(p);
1817
1818 if (env.best_task == NULL) {
1819 ret = migrate_task_to(p, env.best_cpu);
1820 if (ret != 0)
1821 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1822 return ret;
1823 }
1824
1825 ret = migrate_swap(p, env.best_task);
1826 if (ret != 0)
1827 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1828 put_task_struct(env.best_task);
1829 return ret;
1830 }
1831
1832 /* Attempt to migrate a task to a CPU on the preferred node. */
1833 static void numa_migrate_preferred(struct task_struct *p)
1834 {
1835 unsigned long interval = HZ;
1836
1837 /* This task has no NUMA fault statistics yet */
1838 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1839 return;
1840
1841 /* Periodically retry migrating the task to the preferred node */
1842 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1843 p->numa_migrate_retry = jiffies + interval;
1844
1845 /* Success if task is already running on preferred CPU */
1846 if (task_node(p) == p->numa_preferred_nid)
1847 return;
1848
1849 /* Otherwise, try migrate to a CPU on the preferred node */
1850 task_numa_migrate(p);
1851 }
1852
1853 /*
1854 * Find out how many nodes on the workload is actively running on. Do this by
1855 * tracking the nodes from which NUMA hinting faults are triggered. This can
1856 * be different from the set of nodes where the workload's memory is currently
1857 * located.
1858 */
1859 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1860 {
1861 unsigned long faults, max_faults = 0;
1862 int nid, active_nodes = 0;
1863
1864 for_each_online_node(nid) {
1865 faults = group_faults_cpu(numa_group, nid);
1866 if (faults > max_faults)
1867 max_faults = faults;
1868 }
1869
1870 for_each_online_node(nid) {
1871 faults = group_faults_cpu(numa_group, nid);
1872 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1873 active_nodes++;
1874 }
1875
1876 numa_group->max_faults_cpu = max_faults;
1877 numa_group->active_nodes = active_nodes;
1878 }
1879
1880 /*
1881 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1882 * increments. The more local the fault statistics are, the higher the scan
1883 * period will be for the next scan window. If local/(local+remote) ratio is
1884 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1885 * the scan period will decrease. Aim for 70% local accesses.
1886 */
1887 #define NUMA_PERIOD_SLOTS 10
1888 #define NUMA_PERIOD_THRESHOLD 7
1889
1890 /*
1891 * Increase the scan period (slow down scanning) if the majority of
1892 * our memory is already on our local node, or if the majority of
1893 * the page accesses are shared with other processes.
1894 * Otherwise, decrease the scan period.
1895 */
1896 static void update_task_scan_period(struct task_struct *p,
1897 unsigned long shared, unsigned long private)
1898 {
1899 unsigned int period_slot;
1900 int ratio;
1901 int diff;
1902
1903 unsigned long remote = p->numa_faults_locality[0];
1904 unsigned long local = p->numa_faults_locality[1];
1905
1906 /*
1907 * If there were no record hinting faults then either the task is
1908 * completely idle or all activity is areas that are not of interest
1909 * to automatic numa balancing. Related to that, if there were failed
1910 * migration then it implies we are migrating too quickly or the local
1911 * node is overloaded. In either case, scan slower
1912 */
1913 if (local + shared == 0 || p->numa_faults_locality[2]) {
1914 p->numa_scan_period = min(p->numa_scan_period_max,
1915 p->numa_scan_period << 1);
1916
1917 p->mm->numa_next_scan = jiffies +
1918 msecs_to_jiffies(p->numa_scan_period);
1919
1920 return;
1921 }
1922
1923 /*
1924 * Prepare to scale scan period relative to the current period.
1925 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1926 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1927 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1928 */
1929 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1930 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1931 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1932 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1933 if (!slot)
1934 slot = 1;
1935 diff = slot * period_slot;
1936 } else {
1937 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1938
1939 /*
1940 * Scale scan rate increases based on sharing. There is an
1941 * inverse relationship between the degree of sharing and
1942 * the adjustment made to the scanning period. Broadly
1943 * speaking the intent is that there is little point
1944 * scanning faster if shared accesses dominate as it may
1945 * simply bounce migrations uselessly
1946 */
1947 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1948 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1949 }
1950
1951 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1952 task_scan_min(p), task_scan_max(p));
1953 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1954 }
1955
1956 /*
1957 * Get the fraction of time the task has been running since the last
1958 * NUMA placement cycle. The scheduler keeps similar statistics, but
1959 * decays those on a 32ms period, which is orders of magnitude off
1960 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1961 * stats only if the task is so new there are no NUMA statistics yet.
1962 */
1963 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1964 {
1965 u64 runtime, delta, now;
1966 /* Use the start of this time slice to avoid calculations. */
1967 now = p->se.exec_start;
1968 runtime = p->se.sum_exec_runtime;
1969
1970 if (p->last_task_numa_placement) {
1971 delta = runtime - p->last_sum_exec_runtime;
1972 *period = now - p->last_task_numa_placement;
1973 } else {
1974 delta = p->se.avg.load_sum / p->se.load.weight;
1975 *period = LOAD_AVG_MAX;
1976 }
1977
1978 p->last_sum_exec_runtime = runtime;
1979 p->last_task_numa_placement = now;
1980
1981 return delta;
1982 }
1983
1984 /*
1985 * Determine the preferred nid for a task in a numa_group. This needs to
1986 * be done in a way that produces consistent results with group_weight,
1987 * otherwise workloads might not converge.
1988 */
1989 static int preferred_group_nid(struct task_struct *p, int nid)
1990 {
1991 nodemask_t nodes;
1992 int dist;
1993
1994 /* Direct connections between all NUMA nodes. */
1995 if (sched_numa_topology_type == NUMA_DIRECT)
1996 return nid;
1997
1998 /*
1999 * On a system with glueless mesh NUMA topology, group_weight
2000 * scores nodes according to the number of NUMA hinting faults on
2001 * both the node itself, and on nearby nodes.
2002 */
2003 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2004 unsigned long score, max_score = 0;
2005 int node, max_node = nid;
2006
2007 dist = sched_max_numa_distance;
2008
2009 for_each_online_node(node) {
2010 score = group_weight(p, node, dist);
2011 if (score > max_score) {
2012 max_score = score;
2013 max_node = node;
2014 }
2015 }
2016 return max_node;
2017 }
2018
2019 /*
2020 * Finding the preferred nid in a system with NUMA backplane
2021 * interconnect topology is more involved. The goal is to locate
2022 * tasks from numa_groups near each other in the system, and
2023 * untangle workloads from different sides of the system. This requires
2024 * searching down the hierarchy of node groups, recursively searching
2025 * inside the highest scoring group of nodes. The nodemask tricks
2026 * keep the complexity of the search down.
2027 */
2028 nodes = node_online_map;
2029 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2030 unsigned long max_faults = 0;
2031 nodemask_t max_group = NODE_MASK_NONE;
2032 int a, b;
2033
2034 /* Are there nodes at this distance from each other? */
2035 if (!find_numa_distance(dist))
2036 continue;
2037
2038 for_each_node_mask(a, nodes) {
2039 unsigned long faults = 0;
2040 nodemask_t this_group;
2041 nodes_clear(this_group);
2042
2043 /* Sum group's NUMA faults; includes a==b case. */
2044 for_each_node_mask(b, nodes) {
2045 if (node_distance(a, b) < dist) {
2046 faults += group_faults(p, b);
2047 node_set(b, this_group);
2048 node_clear(b, nodes);
2049 }
2050 }
2051
2052 /* Remember the top group. */
2053 if (faults > max_faults) {
2054 max_faults = faults;
2055 max_group = this_group;
2056 /*
2057 * subtle: at the smallest distance there is
2058 * just one node left in each "group", the
2059 * winner is the preferred nid.
2060 */
2061 nid = a;
2062 }
2063 }
2064 /* Next round, evaluate the nodes within max_group. */
2065 if (!max_faults)
2066 break;
2067 nodes = max_group;
2068 }
2069 return nid;
2070 }
2071
2072 static void task_numa_placement(struct task_struct *p)
2073 {
2074 int seq, nid, max_nid = -1, max_group_nid = -1;
2075 unsigned long max_faults = 0, max_group_faults = 0;
2076 unsigned long fault_types[2] = { 0, 0 };
2077 unsigned long total_faults;
2078 u64 runtime, period;
2079 spinlock_t *group_lock = NULL;
2080
2081 /*
2082 * The p->mm->numa_scan_seq field gets updated without
2083 * exclusive access. Use READ_ONCE() here to ensure
2084 * that the field is read in a single access:
2085 */
2086 seq = READ_ONCE(p->mm->numa_scan_seq);
2087 if (p->numa_scan_seq == seq)
2088 return;
2089 p->numa_scan_seq = seq;
2090 p->numa_scan_period_max = task_scan_max(p);
2091
2092 total_faults = p->numa_faults_locality[0] +
2093 p->numa_faults_locality[1];
2094 runtime = numa_get_avg_runtime(p, &period);
2095
2096 /* If the task is part of a group prevent parallel updates to group stats */
2097 if (p->numa_group) {
2098 group_lock = &p->numa_group->lock;
2099 spin_lock_irq(group_lock);
2100 }
2101
2102 /* Find the node with the highest number of faults */
2103 for_each_online_node(nid) {
2104 /* Keep track of the offsets in numa_faults array */
2105 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2106 unsigned long faults = 0, group_faults = 0;
2107 int priv;
2108
2109 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2110 long diff, f_diff, f_weight;
2111
2112 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2113 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2114 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2115 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2116
2117 /* Decay existing window, copy faults since last scan */
2118 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2119 fault_types[priv] += p->numa_faults[membuf_idx];
2120 p->numa_faults[membuf_idx] = 0;
2121
2122 /*
2123 * Normalize the faults_from, so all tasks in a group
2124 * count according to CPU use, instead of by the raw
2125 * number of faults. Tasks with little runtime have
2126 * little over-all impact on throughput, and thus their
2127 * faults are less important.
2128 */
2129 f_weight = div64_u64(runtime << 16, period + 1);
2130 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2131 (total_faults + 1);
2132 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2133 p->numa_faults[cpubuf_idx] = 0;
2134
2135 p->numa_faults[mem_idx] += diff;
2136 p->numa_faults[cpu_idx] += f_diff;
2137 faults += p->numa_faults[mem_idx];
2138 p->total_numa_faults += diff;
2139 if (p->numa_group) {
2140 /*
2141 * safe because we can only change our own group
2142 *
2143 * mem_idx represents the offset for a given
2144 * nid and priv in a specific region because it
2145 * is at the beginning of the numa_faults array.
2146 */
2147 p->numa_group->faults[mem_idx] += diff;
2148 p->numa_group->faults_cpu[mem_idx] += f_diff;
2149 p->numa_group->total_faults += diff;
2150 group_faults += p->numa_group->faults[mem_idx];
2151 }
2152 }
2153
2154 if (faults > max_faults) {
2155 max_faults = faults;
2156 max_nid = nid;
2157 }
2158
2159 if (group_faults > max_group_faults) {
2160 max_group_faults = group_faults;
2161 max_group_nid = nid;
2162 }
2163 }
2164
2165 update_task_scan_period(p, fault_types[0], fault_types[1]);
2166
2167 if (p->numa_group) {
2168 numa_group_count_active_nodes(p->numa_group);
2169 spin_unlock_irq(group_lock);
2170 max_nid = preferred_group_nid(p, max_group_nid);
2171 }
2172
2173 if (max_faults) {
2174 /* Set the new preferred node */
2175 if (max_nid != p->numa_preferred_nid)
2176 sched_setnuma(p, max_nid);
2177
2178 if (task_node(p) != p->numa_preferred_nid)
2179 numa_migrate_preferred(p);
2180 }
2181 }
2182
2183 static inline int get_numa_group(struct numa_group *grp)
2184 {
2185 return atomic_inc_not_zero(&grp->refcount);
2186 }
2187
2188 static inline void put_numa_group(struct numa_group *grp)
2189 {
2190 if (atomic_dec_and_test(&grp->refcount))
2191 kfree_rcu(grp, rcu);
2192 }
2193
2194 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2195 int *priv)
2196 {
2197 struct numa_group *grp, *my_grp;
2198 struct task_struct *tsk;
2199 bool join = false;
2200 int cpu = cpupid_to_cpu(cpupid);
2201 int i;
2202
2203 if (unlikely(!p->numa_group)) {
2204 unsigned int size = sizeof(struct numa_group) +
2205 4*nr_node_ids*sizeof(unsigned long);
2206
2207 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2208 if (!grp)
2209 return;
2210
2211 atomic_set(&grp->refcount, 1);
2212 grp->active_nodes = 1;
2213 grp->max_faults_cpu = 0;
2214 spin_lock_init(&grp->lock);
2215 grp->gid = p->pid;
2216 /* Second half of the array tracks nids where faults happen */
2217 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2218 nr_node_ids;
2219
2220 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2221 grp->faults[i] = p->numa_faults[i];
2222
2223 grp->total_faults = p->total_numa_faults;
2224
2225 grp->nr_tasks++;
2226 rcu_assign_pointer(p->numa_group, grp);
2227 }
2228
2229 rcu_read_lock();
2230 tsk = READ_ONCE(cpu_rq(cpu)->curr);
2231
2232 if (!cpupid_match_pid(tsk, cpupid))
2233 goto no_join;
2234
2235 grp = rcu_dereference(tsk->numa_group);
2236 if (!grp)
2237 goto no_join;
2238
2239 my_grp = p->numa_group;
2240 if (grp == my_grp)
2241 goto no_join;
2242
2243 /*
2244 * Only join the other group if its bigger; if we're the bigger group,
2245 * the other task will join us.
2246 */
2247 if (my_grp->nr_tasks > grp->nr_tasks)
2248 goto no_join;
2249
2250 /*
2251 * Tie-break on the grp address.
2252 */
2253 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2254 goto no_join;
2255
2256 /* Always join threads in the same process. */
2257 if (tsk->mm == current->mm)
2258 join = true;
2259
2260 /* Simple filter to avoid false positives due to PID collisions */
2261 if (flags & TNF_SHARED)
2262 join = true;
2263
2264 /* Update priv based on whether false sharing was detected */
2265 *priv = !join;
2266
2267 if (join && !get_numa_group(grp))
2268 goto no_join;
2269
2270 rcu_read_unlock();
2271
2272 if (!join)
2273 return;
2274
2275 BUG_ON(irqs_disabled());
2276 double_lock_irq(&my_grp->lock, &grp->lock);
2277
2278 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2279 my_grp->faults[i] -= p->numa_faults[i];
2280 grp->faults[i] += p->numa_faults[i];
2281 }
2282 my_grp->total_faults -= p->total_numa_faults;
2283 grp->total_faults += p->total_numa_faults;
2284
2285 my_grp->nr_tasks--;
2286 grp->nr_tasks++;
2287
2288 spin_unlock(&my_grp->lock);
2289 spin_unlock_irq(&grp->lock);
2290
2291 rcu_assign_pointer(p->numa_group, grp);
2292
2293 put_numa_group(my_grp);
2294 return;
2295
2296 no_join:
2297 rcu_read_unlock();
2298 return;
2299 }
2300
2301 void task_numa_free(struct task_struct *p)
2302 {
2303 struct numa_group *grp = p->numa_group;
2304 void *numa_faults = p->numa_faults;
2305 unsigned long flags;
2306 int i;
2307
2308 if (grp) {
2309 spin_lock_irqsave(&grp->lock, flags);
2310 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2311 grp->faults[i] -= p->numa_faults[i];
2312 grp->total_faults -= p->total_numa_faults;
2313
2314 grp->nr_tasks--;
2315 spin_unlock_irqrestore(&grp->lock, flags);
2316 RCU_INIT_POINTER(p->numa_group, NULL);
2317 put_numa_group(grp);
2318 }
2319
2320 p->numa_faults = NULL;
2321 kfree(numa_faults);
2322 }
2323
2324 /*
2325 * Got a PROT_NONE fault for a page on @node.
2326 */
2327 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2328 {
2329 struct task_struct *p = current;
2330 bool migrated = flags & TNF_MIGRATED;
2331 int cpu_node = task_node(current);
2332 int local = !!(flags & TNF_FAULT_LOCAL);
2333 struct numa_group *ng;
2334 int priv;
2335
2336 if (!static_branch_likely(&sched_numa_balancing))
2337 return;
2338
2339 /* for example, ksmd faulting in a user's mm */
2340 if (!p->mm)
2341 return;
2342
2343 /* Allocate buffer to track faults on a per-node basis */
2344 if (unlikely(!p->numa_faults)) {
2345 int size = sizeof(*p->numa_faults) *
2346 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2347
2348 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2349 if (!p->numa_faults)
2350 return;
2351
2352 p->total_numa_faults = 0;
2353 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2354 }
2355
2356 /*
2357 * First accesses are treated as private, otherwise consider accesses
2358 * to be private if the accessing pid has not changed
2359 */
2360 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2361 priv = 1;
2362 } else {
2363 priv = cpupid_match_pid(p, last_cpupid);
2364 if (!priv && !(flags & TNF_NO_GROUP))
2365 task_numa_group(p, last_cpupid, flags, &priv);
2366 }
2367
2368 /*
2369 * If a workload spans multiple NUMA nodes, a shared fault that
2370 * occurs wholly within the set of nodes that the workload is
2371 * actively using should be counted as local. This allows the
2372 * scan rate to slow down when a workload has settled down.
2373 */
2374 ng = p->numa_group;
2375 if (!priv && !local && ng && ng->active_nodes > 1 &&
2376 numa_is_active_node(cpu_node, ng) &&
2377 numa_is_active_node(mem_node, ng))
2378 local = 1;
2379
2380 task_numa_placement(p);
2381
2382 /*
2383 * Retry task to preferred node migration periodically, in case it
2384 * case it previously failed, or the scheduler moved us.
2385 */
2386 if (time_after(jiffies, p->numa_migrate_retry))
2387 numa_migrate_preferred(p);
2388
2389 if (migrated)
2390 p->numa_pages_migrated += pages;
2391 if (flags & TNF_MIGRATE_FAIL)
2392 p->numa_faults_locality[2] += pages;
2393
2394 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2395 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2396 p->numa_faults_locality[local] += pages;
2397 }
2398
2399 static void reset_ptenuma_scan(struct task_struct *p)
2400 {
2401 /*
2402 * We only did a read acquisition of the mmap sem, so
2403 * p->mm->numa_scan_seq is written to without exclusive access
2404 * and the update is not guaranteed to be atomic. That's not
2405 * much of an issue though, since this is just used for
2406 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2407 * expensive, to avoid any form of compiler optimizations:
2408 */
2409 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2410 p->mm->numa_scan_offset = 0;
2411 }
2412
2413 /*
2414 * The expensive part of numa migration is done from task_work context.
2415 * Triggered from task_tick_numa().
2416 */
2417 void task_numa_work(struct callback_head *work)
2418 {
2419 unsigned long migrate, next_scan, now = jiffies;
2420 struct task_struct *p = current;
2421 struct mm_struct *mm = p->mm;
2422 u64 runtime = p->se.sum_exec_runtime;
2423 struct vm_area_struct *vma;
2424 unsigned long start, end;
2425 unsigned long nr_pte_updates = 0;
2426 long pages, virtpages;
2427
2428 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2429
2430 work->next = work; /* protect against double add */
2431 /*
2432 * Who cares about NUMA placement when they're dying.
2433 *
2434 * NOTE: make sure not to dereference p->mm before this check,
2435 * exit_task_work() happens _after_ exit_mm() so we could be called
2436 * without p->mm even though we still had it when we enqueued this
2437 * work.
2438 */
2439 if (p->flags & PF_EXITING)
2440 return;
2441
2442 if (!mm->numa_next_scan) {
2443 mm->numa_next_scan = now +
2444 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2445 }
2446
2447 /*
2448 * Enforce maximal scan/migration frequency..
2449 */
2450 migrate = mm->numa_next_scan;
2451 if (time_before(now, migrate))
2452 return;
2453
2454 if (p->numa_scan_period == 0) {
2455 p->numa_scan_period_max = task_scan_max(p);
2456 p->numa_scan_period = task_scan_min(p);
2457 }
2458
2459 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2460 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2461 return;
2462
2463 /*
2464 * Delay this task enough that another task of this mm will likely win
2465 * the next time around.
2466 */
2467 p->node_stamp += 2 * TICK_NSEC;
2468
2469 start = mm->numa_scan_offset;
2470 pages = sysctl_numa_balancing_scan_size;
2471 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2472 virtpages = pages * 8; /* Scan up to this much virtual space */
2473 if (!pages)
2474 return;
2475
2476
2477 down_read(&mm->mmap_sem);
2478 vma = find_vma(mm, start);
2479 if (!vma) {
2480 reset_ptenuma_scan(p);
2481 start = 0;
2482 vma = mm->mmap;
2483 }
2484 for (; vma; vma = vma->vm_next) {
2485 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2486 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2487 continue;
2488 }
2489
2490 /*
2491 * Shared library pages mapped by multiple processes are not
2492 * migrated as it is expected they are cache replicated. Avoid
2493 * hinting faults in read-only file-backed mappings or the vdso
2494 * as migrating the pages will be of marginal benefit.
2495 */
2496 if (!vma->vm_mm ||
2497 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2498 continue;
2499
2500 /*
2501 * Skip inaccessible VMAs to avoid any confusion between
2502 * PROT_NONE and NUMA hinting ptes
2503 */
2504 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2505 continue;
2506
2507 do {
2508 start = max(start, vma->vm_start);
2509 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2510 end = min(end, vma->vm_end);
2511 nr_pte_updates = change_prot_numa(vma, start, end);
2512
2513 /*
2514 * Try to scan sysctl_numa_balancing_size worth of
2515 * hpages that have at least one present PTE that
2516 * is not already pte-numa. If the VMA contains
2517 * areas that are unused or already full of prot_numa
2518 * PTEs, scan up to virtpages, to skip through those
2519 * areas faster.
2520 */
2521 if (nr_pte_updates)
2522 pages -= (end - start) >> PAGE_SHIFT;
2523 virtpages -= (end - start) >> PAGE_SHIFT;
2524
2525 start = end;
2526 if (pages <= 0 || virtpages <= 0)
2527 goto out;
2528
2529 cond_resched();
2530 } while (end != vma->vm_end);
2531 }
2532
2533 out:
2534 /*
2535 * It is possible to reach the end of the VMA list but the last few
2536 * VMAs are not guaranteed to the vma_migratable. If they are not, we
2537 * would find the !migratable VMA on the next scan but not reset the
2538 * scanner to the start so check it now.
2539 */
2540 if (vma)
2541 mm->numa_scan_offset = start;
2542 else
2543 reset_ptenuma_scan(p);
2544 up_read(&mm->mmap_sem);
2545
2546 /*
2547 * Make sure tasks use at least 32x as much time to run other code
2548 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2549 * Usually update_task_scan_period slows down scanning enough; on an
2550 * overloaded system we need to limit overhead on a per task basis.
2551 */
2552 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2553 u64 diff = p->se.sum_exec_runtime - runtime;
2554 p->node_stamp += 32 * diff;
2555 }
2556 }
2557
2558 /*
2559 * Drive the periodic memory faults..
2560 */
2561 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2562 {
2563 struct callback_head *work = &curr->numa_work;
2564 u64 period, now;
2565
2566 /*
2567 * We don't care about NUMA placement if we don't have memory.
2568 */
2569 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2570 return;
2571
2572 /*
2573 * Using runtime rather than walltime has the dual advantage that
2574 * we (mostly) drive the selection from busy threads and that the
2575 * task needs to have done some actual work before we bother with
2576 * NUMA placement.
2577 */
2578 now = curr->se.sum_exec_runtime;
2579 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2580
2581 if (now > curr->node_stamp + period) {
2582 if (!curr->node_stamp)
2583 curr->numa_scan_period = task_scan_min(curr);
2584 curr->node_stamp += period;
2585
2586 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2587 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2588 task_work_add(curr, work, true);
2589 }
2590 }
2591 }
2592 #else
2593 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2594 {
2595 }
2596
2597 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2598 {
2599 }
2600
2601 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2602 {
2603 }
2604 #endif /* CONFIG_NUMA_BALANCING */
2605
2606 static void
2607 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2608 {
2609 update_load_add(&cfs_rq->load, se->load.weight);
2610 if (!parent_entity(se))
2611 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2612 #ifdef CONFIG_SMP
2613 if (entity_is_task(se)) {
2614 struct rq *rq = rq_of(cfs_rq);
2615
2616 account_numa_enqueue(rq, task_of(se));
2617 list_add(&se->group_node, &rq->cfs_tasks);
2618 }
2619 #endif
2620 cfs_rq->nr_running++;
2621 }
2622
2623 static void
2624 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2625 {
2626 update_load_sub(&cfs_rq->load, se->load.weight);
2627 if (!parent_entity(se))
2628 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2629 #ifdef CONFIG_SMP
2630 if (entity_is_task(se)) {
2631 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2632 list_del_init(&se->group_node);
2633 }
2634 #endif
2635 cfs_rq->nr_running--;
2636 }
2637
2638 #ifdef CONFIG_FAIR_GROUP_SCHED
2639 # ifdef CONFIG_SMP
2640 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2641 {
2642 long tg_weight, load, shares;
2643
2644 /*
2645 * This really should be: cfs_rq->avg.load_avg, but instead we use
2646 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2647 * the shares for small weight interactive tasks.
2648 */
2649 load = scale_load_down(cfs_rq->load.weight);
2650
2651 tg_weight = atomic_long_read(&tg->load_avg);
2652
2653 /* Ensure tg_weight >= load */
2654 tg_weight -= cfs_rq->tg_load_avg_contrib;
2655 tg_weight += load;
2656
2657 shares = (tg->shares * load);
2658 if (tg_weight)
2659 shares /= tg_weight;
2660
2661 /*
2662 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
2663 * of a group with small tg->shares value. It is a floor value which is
2664 * assigned as a minimum load.weight to the sched_entity representing
2665 * the group on a CPU.
2666 *
2667 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
2668 * on an 8-core system with 8 tasks each runnable on one CPU shares has
2669 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
2670 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
2671 * instead of 0.
2672 */
2673 if (shares < MIN_SHARES)
2674 shares = MIN_SHARES;
2675 if (shares > tg->shares)
2676 shares = tg->shares;
2677
2678 return shares;
2679 }
2680 # else /* CONFIG_SMP */
2681 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2682 {
2683 return tg->shares;
2684 }
2685 # endif /* CONFIG_SMP */
2686
2687 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2688 unsigned long weight)
2689 {
2690 if (se->on_rq) {
2691 /* commit outstanding execution time */
2692 if (cfs_rq->curr == se)
2693 update_curr(cfs_rq);
2694 account_entity_dequeue(cfs_rq, se);
2695 }
2696
2697 update_load_set(&se->load, weight);
2698
2699 if (se->on_rq)
2700 account_entity_enqueue(cfs_rq, se);
2701 }
2702
2703 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2704
2705 static void update_cfs_shares(struct sched_entity *se)
2706 {
2707 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2708 struct task_group *tg;
2709 long shares;
2710
2711 if (!cfs_rq)
2712 return;
2713
2714 if (throttled_hierarchy(cfs_rq))
2715 return;
2716
2717 tg = cfs_rq->tg;
2718
2719 #ifndef CONFIG_SMP
2720 if (likely(se->load.weight == tg->shares))
2721 return;
2722 #endif
2723 shares = calc_cfs_shares(cfs_rq, tg);
2724
2725 reweight_entity(cfs_rq_of(se), se, shares);
2726 }
2727
2728 #else /* CONFIG_FAIR_GROUP_SCHED */
2729 static inline void update_cfs_shares(struct sched_entity *se)
2730 {
2731 }
2732 #endif /* CONFIG_FAIR_GROUP_SCHED */
2733
2734 #ifdef CONFIG_SMP
2735 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2736 static const u32 runnable_avg_yN_inv[] = {
2737 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2738 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2739 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2740 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2741 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2742 0x85aac367, 0x82cd8698,
2743 };
2744
2745 /*
2746 * Approximate:
2747 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2748 */
2749 static u64 decay_load(u64 val, u64 n)
2750 {
2751 unsigned int local_n;
2752
2753 if (unlikely(n > LOAD_AVG_PERIOD * 63))
2754 return 0;
2755
2756 /* after bounds checking we can collapse to 32-bit */
2757 local_n = n;
2758
2759 /*
2760 * As y^PERIOD = 1/2, we can combine
2761 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2762 * With a look-up table which covers y^n (n<PERIOD)
2763 *
2764 * To achieve constant time decay_load.
2765 */
2766 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2767 val >>= local_n / LOAD_AVG_PERIOD;
2768 local_n %= LOAD_AVG_PERIOD;
2769 }
2770
2771 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2772 return val;
2773 }
2774
2775 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
2776 {
2777 u32 c1, c2, c3 = d3; /* y^0 == 1 */
2778
2779 /*
2780 * c1 = d1 y^p
2781 */
2782 c1 = decay_load((u64)d1, periods);
2783
2784 /*
2785 * p-1
2786 * c2 = 1024 \Sum y^n
2787 * n=1
2788 *
2789 * inf inf
2790 * = 1024 ( \Sum y^n - \Sum y^n - y^0 )
2791 * n=0 n=p
2792 */
2793 c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
2794
2795 return c1 + c2 + c3;
2796 }
2797
2798 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2799
2800 /*
2801 * Accumulate the three separate parts of the sum; d1 the remainder
2802 * of the last (incomplete) period, d2 the span of full periods and d3
2803 * the remainder of the (incomplete) current period.
2804 *
2805 * d1 d2 d3
2806 * ^ ^ ^
2807 * | | |
2808 * |<->|<----------------->|<--->|
2809 * ... |---x---|------| ... |------|-----x (now)
2810 *
2811 * p-1
2812 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
2813 * n=1
2814 *
2815 * = u y^p + (Step 1)
2816 *
2817 * p-1
2818 * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
2819 * n=1
2820 */
2821 static __always_inline u32
2822 accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
2823 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2824 {
2825 unsigned long scale_freq, scale_cpu;
2826 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
2827 u64 periods;
2828
2829 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2830 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2831
2832 delta += sa->period_contrib;
2833 periods = delta / 1024; /* A period is 1024us (~1ms) */
2834
2835 /*
2836 * Step 1: decay old *_sum if we crossed period boundaries.
2837 */
2838 if (periods) {
2839 sa->load_sum = decay_load(sa->load_sum, periods);
2840 if (cfs_rq) {
2841 cfs_rq->runnable_load_sum =
2842 decay_load(cfs_rq->runnable_load_sum, periods);
2843 }
2844 sa->util_sum = decay_load((u64)(sa->util_sum), periods);
2845
2846 /*
2847 * Step 2
2848 */
2849 delta %= 1024;
2850 contrib = __accumulate_pelt_segments(periods,
2851 1024 - sa->period_contrib, delta);
2852 }
2853 sa->period_contrib = delta;
2854
2855 contrib = cap_scale(contrib, scale_freq);
2856 if (weight) {
2857 sa->load_sum += weight * contrib;
2858 if (cfs_rq)
2859 cfs_rq->runnable_load_sum += weight * contrib;
2860 }
2861 if (running)
2862 sa->util_sum += contrib * scale_cpu;
2863
2864 return periods;
2865 }
2866
2867 /*
2868 * We can represent the historical contribution to runnable average as the
2869 * coefficients of a geometric series. To do this we sub-divide our runnable
2870 * history into segments of approximately 1ms (1024us); label the segment that
2871 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2872 *
2873 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2874 * p0 p1 p2
2875 * (now) (~1ms ago) (~2ms ago)
2876 *
2877 * Let u_i denote the fraction of p_i that the entity was runnable.
2878 *
2879 * We then designate the fractions u_i as our co-efficients, yielding the
2880 * following representation of historical load:
2881 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2882 *
2883 * We choose y based on the with of a reasonably scheduling period, fixing:
2884 * y^32 = 0.5
2885 *
2886 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2887 * approximately half as much as the contribution to load within the last ms
2888 * (u_0).
2889 *
2890 * When a period "rolls over" and we have new u_0`, multiplying the previous
2891 * sum again by y is sufficient to update:
2892 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2893 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2894 */
2895 static __always_inline int
2896 ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2897 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2898 {
2899 u64 delta;
2900
2901 delta = now - sa->last_update_time;
2902 /*
2903 * This should only happen when time goes backwards, which it
2904 * unfortunately does during sched clock init when we swap over to TSC.
2905 */
2906 if ((s64)delta < 0) {
2907 sa->last_update_time = now;
2908 return 0;
2909 }
2910
2911 /*
2912 * Use 1024ns as the unit of measurement since it's a reasonable
2913 * approximation of 1us and fast to compute.
2914 */
2915 delta >>= 10;
2916 if (!delta)
2917 return 0;
2918
2919 sa->last_update_time += delta << 10;
2920
2921 /*
2922 * Now we know we crossed measurement unit boundaries. The *_avg
2923 * accrues by two steps:
2924 *
2925 * Step 1: accumulate *_sum since last_update_time. If we haven't
2926 * crossed period boundaries, finish.
2927 */
2928 if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq))
2929 return 0;
2930
2931 /*
2932 * Step 2: update *_avg.
2933 */
2934 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2935 if (cfs_rq) {
2936 cfs_rq->runnable_load_avg =
2937 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2938 }
2939 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2940
2941 return 1;
2942 }
2943
2944 static int
2945 __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
2946 {
2947 return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
2948 }
2949
2950 static int
2951 __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
2952 {
2953 return ___update_load_avg(now, cpu, &se->avg,
2954 se->on_rq * scale_load_down(se->load.weight),
2955 cfs_rq->curr == se, NULL);
2956 }
2957
2958 static int
2959 __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
2960 {
2961 return ___update_load_avg(now, cpu, &cfs_rq->avg,
2962 scale_load_down(cfs_rq->load.weight),
2963 cfs_rq->curr != NULL, cfs_rq);
2964 }
2965
2966 /*
2967 * Signed add and clamp on underflow.
2968 *
2969 * Explicitly do a load-store to ensure the intermediate value never hits
2970 * memory. This allows lockless observations without ever seeing the negative
2971 * values.
2972 */
2973 #define add_positive(_ptr, _val) do { \
2974 typeof(_ptr) ptr = (_ptr); \
2975 typeof(_val) val = (_val); \
2976 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2977 \
2978 res = var + val; \
2979 \
2980 if (val < 0 && res > var) \
2981 res = 0; \
2982 \
2983 WRITE_ONCE(*ptr, res); \
2984 } while (0)
2985
2986 #ifdef CONFIG_FAIR_GROUP_SCHED
2987 /**
2988 * update_tg_load_avg - update the tg's load avg
2989 * @cfs_rq: the cfs_rq whose avg changed
2990 * @force: update regardless of how small the difference
2991 *
2992 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
2993 * However, because tg->load_avg is a global value there are performance
2994 * considerations.
2995 *
2996 * In order to avoid having to look at the other cfs_rq's, we use a
2997 * differential update where we store the last value we propagated. This in
2998 * turn allows skipping updates if the differential is 'small'.
2999 *
3000 * Updating tg's load_avg is necessary before update_cfs_share() (which is
3001 * done) and effective_load() (which is not done because it is too costly).
3002 */
3003 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
3004 {
3005 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3006
3007 /*
3008 * No need to update load_avg for root_task_group as it is not used.
3009 */
3010 if (cfs_rq->tg == &root_task_group)
3011 return;
3012
3013 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3014 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3015 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3016 }
3017 }
3018
3019 /*
3020 * Called within set_task_rq() right before setting a task's cpu. The
3021 * caller only guarantees p->pi_lock is held; no other assumptions,
3022 * including the state of rq->lock, should be made.
3023 */
3024 void set_task_rq_fair(struct sched_entity *se,
3025 struct cfs_rq *prev, struct cfs_rq *next)
3026 {
3027 u64 p_last_update_time;
3028 u64 n_last_update_time;
3029
3030 if (!sched_feat(ATTACH_AGE_LOAD))
3031 return;
3032
3033 /*
3034 * We are supposed to update the task to "current" time, then its up to
3035 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3036 * getting what current time is, so simply throw away the out-of-date
3037 * time. This will result in the wakee task is less decayed, but giving
3038 * the wakee more load sounds not bad.
3039 */
3040 if (!(se->avg.last_update_time && prev))
3041 return;
3042
3043 #ifndef CONFIG_64BIT
3044 {
3045 u64 p_last_update_time_copy;
3046 u64 n_last_update_time_copy;
3047
3048 do {
3049 p_last_update_time_copy = prev->load_last_update_time_copy;
3050 n_last_update_time_copy = next->load_last_update_time_copy;
3051
3052 smp_rmb();
3053
3054 p_last_update_time = prev->avg.last_update_time;
3055 n_last_update_time = next->avg.last_update_time;
3056
3057 } while (p_last_update_time != p_last_update_time_copy ||
3058 n_last_update_time != n_last_update_time_copy);
3059 }
3060 #else
3061 p_last_update_time = prev->avg.last_update_time;
3062 n_last_update_time = next->avg.last_update_time;
3063 #endif
3064 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
3065 se->avg.last_update_time = n_last_update_time;
3066 }
3067
3068 /* Take into account change of utilization of a child task group */
3069 static inline void
3070 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
3071 {
3072 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3073 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3074
3075 /* Nothing to update */
3076 if (!delta)
3077 return;
3078
3079 /* Set new sched_entity's utilization */
3080 se->avg.util_avg = gcfs_rq->avg.util_avg;
3081 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
3082
3083 /* Update parent cfs_rq utilization */
3084 add_positive(&cfs_rq->avg.util_avg, delta);
3085 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
3086 }
3087
3088 /* Take into account change of load of a child task group */
3089 static inline void
3090 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
3091 {
3092 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3093 long delta, load = gcfs_rq->avg.load_avg;
3094
3095 /*
3096 * If the load of group cfs_rq is null, the load of the
3097 * sched_entity will also be null so we can skip the formula
3098 */
3099 if (load) {
3100 long tg_load;
3101
3102 /* Get tg's load and ensure tg_load > 0 */
3103 tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
3104
3105 /* Ensure tg_load >= load and updated with current load*/
3106 tg_load -= gcfs_rq->tg_load_avg_contrib;
3107 tg_load += load;
3108
3109 /*
3110 * We need to compute a correction term in the case that the
3111 * task group is consuming more CPU than a task of equal
3112 * weight. A task with a weight equals to tg->shares will have
3113 * a load less or equal to scale_load_down(tg->shares).
3114 * Similarly, the sched_entities that represent the task group
3115 * at parent level, can't have a load higher than
3116 * scale_load_down(tg->shares). And the Sum of sched_entities'
3117 * load must be <= scale_load_down(tg->shares).
3118 */
3119 if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
3120 /* scale gcfs_rq's load into tg's shares*/
3121 load *= scale_load_down(gcfs_rq->tg->shares);
3122 load /= tg_load;
3123 }
3124 }
3125
3126 delta = load - se->avg.load_avg;
3127
3128 /* Nothing to update */
3129 if (!delta)
3130 return;
3131
3132 /* Set new sched_entity's load */
3133 se->avg.load_avg = load;
3134 se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
3135
3136 /* Update parent cfs_rq load */
3137 add_positive(&cfs_rq->avg.load_avg, delta);
3138 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
3139
3140 /*
3141 * If the sched_entity is already enqueued, we also have to update the
3142 * runnable load avg.
3143 */
3144 if (se->on_rq) {
3145 /* Update parent cfs_rq runnable_load_avg */
3146 add_positive(&cfs_rq->runnable_load_avg, delta);
3147 cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
3148 }
3149 }
3150
3151 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
3152 {
3153 cfs_rq->propagate_avg = 1;
3154 }
3155
3156 static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
3157 {
3158 struct cfs_rq *cfs_rq = group_cfs_rq(se);
3159
3160 if (!cfs_rq->propagate_avg)
3161 return 0;
3162
3163 cfs_rq->propagate_avg = 0;
3164 return 1;
3165 }
3166
3167 /* Update task and its cfs_rq load average */
3168 static inline int propagate_entity_load_avg(struct sched_entity *se)
3169 {
3170 struct cfs_rq *cfs_rq;
3171
3172 if (entity_is_task(se))
3173 return 0;
3174
3175 if (!test_and_clear_tg_cfs_propagate(se))
3176 return 0;
3177
3178 cfs_rq = cfs_rq_of(se);
3179
3180 set_tg_cfs_propagate(cfs_rq);
3181
3182 update_tg_cfs_util(cfs_rq, se);
3183 update_tg_cfs_load(cfs_rq, se);
3184
3185 return 1;
3186 }
3187
3188 /*
3189 * Check if we need to update the load and the utilization of a blocked
3190 * group_entity:
3191 */
3192 static inline bool skip_blocked_update(struct sched_entity *se)
3193 {
3194 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3195
3196 /*
3197 * If sched_entity still have not zero load or utilization, we have to
3198 * decay it:
3199 */
3200 if (se->avg.load_avg || se->avg.util_avg)
3201 return false;
3202
3203 /*
3204 * If there is a pending propagation, we have to update the load and
3205 * the utilization of the sched_entity:
3206 */
3207 if (gcfs_rq->propagate_avg)
3208 return false;
3209
3210 /*
3211 * Otherwise, the load and the utilization of the sched_entity is
3212 * already zero and there is no pending propagation, so it will be a
3213 * waste of time to try to decay it:
3214 */
3215 return true;
3216 }
3217
3218 #else /* CONFIG_FAIR_GROUP_SCHED */
3219
3220 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
3221
3222 static inline int propagate_entity_load_avg(struct sched_entity *se)
3223 {
3224 return 0;
3225 }
3226
3227 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
3228
3229 #endif /* CONFIG_FAIR_GROUP_SCHED */
3230
3231 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
3232 {
3233 if (&this_rq()->cfs == cfs_rq) {
3234 /*
3235 * There are a few boundary cases this might miss but it should
3236 * get called often enough that that should (hopefully) not be
3237 * a real problem -- added to that it only calls on the local
3238 * CPU, so if we enqueue remotely we'll miss an update, but
3239 * the next tick/schedule should update.
3240 *
3241 * It will not get called when we go idle, because the idle
3242 * thread is a different class (!fair), nor will the utilization
3243 * number include things like RT tasks.
3244 *
3245 * As is, the util number is not freq-invariant (we'd have to
3246 * implement arch_scale_freq_capacity() for that).
3247 *
3248 * See cpu_util().
3249 */
3250 cpufreq_update_util(rq_of(cfs_rq), 0);
3251 }
3252 }
3253
3254 /*
3255 * Unsigned subtract and clamp on underflow.
3256 *
3257 * Explicitly do a load-store to ensure the intermediate value never hits
3258 * memory. This allows lockless observations without ever seeing the negative
3259 * values.
3260 */
3261 #define sub_positive(_ptr, _val) do { \
3262 typeof(_ptr) ptr = (_ptr); \
3263 typeof(*ptr) val = (_val); \
3264 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3265 res = var - val; \
3266 if (res > var) \
3267 res = 0; \
3268 WRITE_ONCE(*ptr, res); \
3269 } while (0)
3270
3271 /**
3272 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3273 * @now: current time, as per cfs_rq_clock_task()
3274 * @cfs_rq: cfs_rq to update
3275 * @update_freq: should we call cfs_rq_util_change() or will the call do so
3276 *
3277 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3278 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3279 * post_init_entity_util_avg().
3280 *
3281 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3282 *
3283 * Returns true if the load decayed or we removed load.
3284 *
3285 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3286 * call update_tg_load_avg() when this function returns true.
3287 */
3288 static inline int
3289 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3290 {
3291 struct sched_avg *sa = &cfs_rq->avg;
3292 int decayed, removed_load = 0, removed_util = 0;
3293
3294 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
3295 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
3296 sub_positive(&sa->load_avg, r);
3297 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
3298 removed_load = 1;
3299 set_tg_cfs_propagate(cfs_rq);
3300 }
3301
3302 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3303 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
3304 sub_positive(&sa->util_avg, r);
3305 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
3306 removed_util = 1;
3307 set_tg_cfs_propagate(cfs_rq);
3308 }
3309
3310 decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
3311
3312 #ifndef CONFIG_64BIT
3313 smp_wmb();
3314 cfs_rq->load_last_update_time_copy = sa->last_update_time;
3315 #endif
3316
3317 if (update_freq && (decayed || removed_util))
3318 cfs_rq_util_change(cfs_rq);
3319
3320 return decayed || removed_load;
3321 }
3322
3323 /*
3324 * Optional action to be done while updating the load average
3325 */
3326 #define UPDATE_TG 0x1
3327 #define SKIP_AGE_LOAD 0x2
3328
3329 /* Update task and its cfs_rq load average */
3330 static inline void update_load_avg(struct sched_entity *se, int flags)
3331 {
3332 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3333 u64 now = cfs_rq_clock_task(cfs_rq);
3334 struct rq *rq = rq_of(cfs_rq);
3335 int cpu = cpu_of(rq);
3336 int decayed;
3337
3338 /*
3339 * Track task load average for carrying it to new CPU after migrated, and
3340 * track group sched_entity load average for task_h_load calc in migration
3341 */
3342 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
3343 __update_load_avg_se(now, cpu, cfs_rq, se);
3344
3345 decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
3346 decayed |= propagate_entity_load_avg(se);
3347
3348 if (decayed && (flags & UPDATE_TG))
3349 update_tg_load_avg(cfs_rq, 0);
3350 }
3351
3352 /**
3353 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3354 * @cfs_rq: cfs_rq to attach to
3355 * @se: sched_entity to attach
3356 *
3357 * Must call update_cfs_rq_load_avg() before this, since we rely on
3358 * cfs_rq->avg.last_update_time being current.
3359 */
3360 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3361 {
3362 se->avg.last_update_time = cfs_rq->avg.last_update_time;
3363 cfs_rq->avg.load_avg += se->avg.load_avg;
3364 cfs_rq->avg.load_sum += se->avg.load_sum;
3365 cfs_rq->avg.util_avg += se->avg.util_avg;
3366 cfs_rq->avg.util_sum += se->avg.util_sum;
3367 set_tg_cfs_propagate(cfs_rq);
3368
3369 cfs_rq_util_change(cfs_rq);
3370 }
3371
3372 /**
3373 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3374 * @cfs_rq: cfs_rq to detach from
3375 * @se: sched_entity to detach
3376 *
3377 * Must call update_cfs_rq_load_avg() before this, since we rely on
3378 * cfs_rq->avg.last_update_time being current.
3379 */
3380 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3381 {
3382
3383 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3384 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3385 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3386 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3387 set_tg_cfs_propagate(cfs_rq);
3388
3389 cfs_rq_util_change(cfs_rq);
3390 }
3391
3392 /* Add the load generated by se into cfs_rq's load average */
3393 static inline void
3394 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3395 {
3396 struct sched_avg *sa = &se->avg;
3397
3398 cfs_rq->runnable_load_avg += sa->load_avg;
3399 cfs_rq->runnable_load_sum += sa->load_sum;
3400
3401 if (!sa->last_update_time) {
3402 attach_entity_load_avg(cfs_rq, se);
3403 update_tg_load_avg(cfs_rq, 0);
3404 }
3405 }
3406
3407 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
3408 static inline void
3409 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3410 {
3411 cfs_rq->runnable_load_avg =
3412 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3413 cfs_rq->runnable_load_sum =
3414 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
3415 }
3416
3417 #ifndef CONFIG_64BIT
3418 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3419 {
3420 u64 last_update_time_copy;
3421 u64 last_update_time;
3422
3423 do {
3424 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3425 smp_rmb();
3426 last_update_time = cfs_rq->avg.last_update_time;
3427 } while (last_update_time != last_update_time_copy);
3428
3429 return last_update_time;
3430 }
3431 #else
3432 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3433 {
3434 return cfs_rq->avg.last_update_time;
3435 }
3436 #endif
3437
3438 /*
3439 * Synchronize entity load avg of dequeued entity without locking
3440 * the previous rq.
3441 */
3442 void sync_entity_load_avg(struct sched_entity *se)
3443 {
3444 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3445 u64 last_update_time;
3446
3447 last_update_time = cfs_rq_last_update_time(cfs_rq);
3448 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
3449 }
3450
3451 /*
3452 * Task first catches up with cfs_rq, and then subtract
3453 * itself from the cfs_rq (task must be off the queue now).
3454 */
3455 void remove_entity_load_avg(struct sched_entity *se)
3456 {
3457 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3458
3459 /*
3460 * tasks cannot exit without having gone through wake_up_new_task() ->
3461 * post_init_entity_util_avg() which will have added things to the
3462 * cfs_rq, so we can remove unconditionally.
3463 *
3464 * Similarly for groups, they will have passed through
3465 * post_init_entity_util_avg() before unregister_sched_fair_group()
3466 * calls this.
3467 */
3468
3469 sync_entity_load_avg(se);
3470 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3471 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3472 }
3473
3474 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3475 {
3476 return cfs_rq->runnable_load_avg;
3477 }
3478
3479 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3480 {
3481 return cfs_rq->avg.load_avg;
3482 }
3483
3484 static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
3485
3486 #else /* CONFIG_SMP */
3487
3488 static inline int
3489 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3490 {
3491 return 0;
3492 }
3493
3494 #define UPDATE_TG 0x0
3495 #define SKIP_AGE_LOAD 0x0
3496
3497 static inline void update_load_avg(struct sched_entity *se, int not_used1)
3498 {
3499 cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
3500 }
3501
3502 static inline void
3503 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3504 static inline void
3505 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3506 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3507
3508 static inline void
3509 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3510 static inline void
3511 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3512
3513 static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
3514 {
3515 return 0;
3516 }
3517
3518 #endif /* CONFIG_SMP */
3519
3520 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3521 {
3522 #ifdef CONFIG_SCHED_DEBUG
3523 s64 d = se->vruntime - cfs_rq->min_vruntime;
3524
3525 if (d < 0)
3526 d = -d;
3527
3528 if (d > 3*sysctl_sched_latency)
3529 schedstat_inc(cfs_rq->nr_spread_over);
3530 #endif
3531 }
3532
3533 static void
3534 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3535 {
3536 u64 vruntime = cfs_rq->min_vruntime;
3537
3538 /*
3539 * The 'current' period is already promised to the current tasks,
3540 * however the extra weight of the new task will slow them down a
3541 * little, place the new task so that it fits in the slot that
3542 * stays open at the end.
3543 */
3544 if (initial && sched_feat(START_DEBIT))
3545 vruntime += sched_vslice(cfs_rq, se);
3546
3547 /* sleeps up to a single latency don't count. */
3548 if (!initial) {
3549 unsigned long thresh = sysctl_sched_latency;
3550
3551 /*
3552 * Halve their sleep time's effect, to allow
3553 * for a gentler effect of sleepers:
3554 */
3555 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3556 thresh >>= 1;
3557
3558 vruntime -= thresh;
3559 }
3560
3561 /* ensure we never gain time by being placed backwards. */
3562 se->vruntime = max_vruntime(se->vruntime, vruntime);
3563 }
3564
3565 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3566
3567 static inline void check_schedstat_required(void)
3568 {
3569 #ifdef CONFIG_SCHEDSTATS
3570 if (schedstat_enabled())
3571 return;
3572
3573 /* Force schedstat enabled if a dependent tracepoint is active */
3574 if (trace_sched_stat_wait_enabled() ||
3575 trace_sched_stat_sleep_enabled() ||
3576 trace_sched_stat_iowait_enabled() ||
3577 trace_sched_stat_blocked_enabled() ||
3578 trace_sched_stat_runtime_enabled()) {
3579 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3580 "stat_blocked and stat_runtime require the "
3581 "kernel parameter schedstats=enabled or "
3582 "kernel.sched_schedstats=1\n");
3583 }
3584 #endif
3585 }
3586
3587
3588 /*
3589 * MIGRATION
3590 *
3591 * dequeue
3592 * update_curr()
3593 * update_min_vruntime()
3594 * vruntime -= min_vruntime
3595 *
3596 * enqueue
3597 * update_curr()
3598 * update_min_vruntime()
3599 * vruntime += min_vruntime
3600 *
3601 * this way the vruntime transition between RQs is done when both
3602 * min_vruntime are up-to-date.
3603 *
3604 * WAKEUP (remote)
3605 *
3606 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
3607 * vruntime -= min_vruntime
3608 *
3609 * enqueue
3610 * update_curr()
3611 * update_min_vruntime()
3612 * vruntime += min_vruntime
3613 *
3614 * this way we don't have the most up-to-date min_vruntime on the originating
3615 * CPU and an up-to-date min_vruntime on the destination CPU.
3616 */
3617
3618 static void
3619 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3620 {
3621 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3622 bool curr = cfs_rq->curr == se;
3623
3624 /*
3625 * If we're the current task, we must renormalise before calling
3626 * update_curr().
3627 */
3628 if (renorm && curr)
3629 se->vruntime += cfs_rq->min_vruntime;
3630
3631 update_curr(cfs_rq);
3632
3633 /*
3634 * Otherwise, renormalise after, such that we're placed at the current
3635 * moment in time, instead of some random moment in the past. Being
3636 * placed in the past could significantly boost this task to the
3637 * fairness detriment of existing tasks.
3638 */
3639 if (renorm && !curr)
3640 se->vruntime += cfs_rq->min_vruntime;
3641
3642 /*
3643 * When enqueuing a sched_entity, we must:
3644 * - Update loads to have both entity and cfs_rq synced with now.
3645 * - Add its load to cfs_rq->runnable_avg
3646 * - For group_entity, update its weight to reflect the new share of
3647 * its group cfs_rq
3648 * - Add its new weight to cfs_rq->load.weight
3649 */
3650 update_load_avg(se, UPDATE_TG);
3651 enqueue_entity_load_avg(cfs_rq, se);
3652 update_cfs_shares(se);
3653 account_entity_enqueue(cfs_rq, se);
3654
3655 if (flags & ENQUEUE_WAKEUP)
3656 place_entity(cfs_rq, se, 0);
3657
3658 check_schedstat_required();
3659 update_stats_enqueue(cfs_rq, se, flags);
3660 check_spread(cfs_rq, se);
3661 if (!curr)
3662 __enqueue_entity(cfs_rq, se);
3663 se->on_rq = 1;
3664
3665 if (cfs_rq->nr_running == 1) {
3666 list_add_leaf_cfs_rq(cfs_rq);
3667 check_enqueue_throttle(cfs_rq);
3668 }
3669 }
3670
3671 static void __clear_buddies_last(struct sched_entity *se)
3672 {
3673 for_each_sched_entity(se) {
3674 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3675 if (cfs_rq->last != se)
3676 break;
3677
3678 cfs_rq->last = NULL;
3679 }
3680 }
3681
3682 static void __clear_buddies_next(struct sched_entity *se)
3683 {
3684 for_each_sched_entity(se) {
3685 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3686 if (cfs_rq->next != se)
3687 break;
3688
3689 cfs_rq->next = NULL;
3690 }
3691 }
3692
3693 static void __clear_buddies_skip(struct sched_entity *se)
3694 {
3695 for_each_sched_entity(se) {
3696 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3697 if (cfs_rq->skip != se)
3698 break;
3699
3700 cfs_rq->skip = NULL;
3701 }
3702 }
3703
3704 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3705 {
3706 if (cfs_rq->last == se)
3707 __clear_buddies_last(se);
3708
3709 if (cfs_rq->next == se)
3710 __clear_buddies_next(se);
3711
3712 if (cfs_rq->skip == se)
3713 __clear_buddies_skip(se);
3714 }
3715
3716 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3717
3718 static void
3719 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3720 {
3721 /*
3722 * Update run-time statistics of the 'current'.
3723 */
3724 update_curr(cfs_rq);
3725
3726 /*
3727 * When dequeuing a sched_entity, we must:
3728 * - Update loads to have both entity and cfs_rq synced with now.
3729 * - Substract its load from the cfs_rq->runnable_avg.
3730 * - Substract its previous weight from cfs_rq->load.weight.
3731 * - For group entity, update its weight to reflect the new share
3732 * of its group cfs_rq.
3733 */
3734 update_load_avg(se, UPDATE_TG);
3735 dequeue_entity_load_avg(cfs_rq, se);
3736
3737 update_stats_dequeue(cfs_rq, se, flags);
3738
3739 clear_buddies(cfs_rq, se);
3740
3741 if (se != cfs_rq->curr)
3742 __dequeue_entity(cfs_rq, se);
3743 se->on_rq = 0;
3744 account_entity_dequeue(cfs_rq, se);
3745
3746 /*
3747 * Normalize after update_curr(); which will also have moved
3748 * min_vruntime if @se is the one holding it back. But before doing
3749 * update_min_vruntime() again, which will discount @se's position and
3750 * can move min_vruntime forward still more.
3751 */
3752 if (!(flags & DEQUEUE_SLEEP))
3753 se->vruntime -= cfs_rq->min_vruntime;
3754
3755 /* return excess runtime on last dequeue */
3756 return_cfs_rq_runtime(cfs_rq);
3757
3758 update_cfs_shares(se);
3759
3760 /*
3761 * Now advance min_vruntime if @se was the entity holding it back,
3762 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3763 * put back on, and if we advance min_vruntime, we'll be placed back
3764 * further than we started -- ie. we'll be penalized.
3765 */
3766 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3767 update_min_vruntime(cfs_rq);
3768 }
3769
3770 /*
3771 * Preempt the current task with a newly woken task if needed:
3772 */
3773 static void
3774 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3775 {
3776 unsigned long ideal_runtime, delta_exec;
3777 struct sched_entity *se;
3778 s64 delta;
3779
3780 ideal_runtime = sched_slice(cfs_rq, curr);
3781 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3782 if (delta_exec > ideal_runtime) {
3783 resched_curr(rq_of(cfs_rq));
3784 /*
3785 * The current task ran long enough, ensure it doesn't get
3786 * re-elected due to buddy favours.
3787 */
3788 clear_buddies(cfs_rq, curr);
3789 return;
3790 }
3791
3792 /*
3793 * Ensure that a task that missed wakeup preemption by a
3794 * narrow margin doesn't have to wait for a full slice.
3795 * This also mitigates buddy induced latencies under load.
3796 */
3797 if (delta_exec < sysctl_sched_min_granularity)
3798 return;
3799
3800 se = __pick_first_entity(cfs_rq);
3801 delta = curr->vruntime - se->vruntime;
3802
3803 if (delta < 0)
3804 return;
3805
3806 if (delta > ideal_runtime)
3807 resched_curr(rq_of(cfs_rq));
3808 }
3809
3810 static void
3811 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3812 {
3813 /* 'current' is not kept within the tree. */
3814 if (se->on_rq) {
3815 /*
3816 * Any task has to be enqueued before it get to execute on
3817 * a CPU. So account for the time it spent waiting on the
3818 * runqueue.
3819 */
3820 update_stats_wait_end(cfs_rq, se);
3821 __dequeue_entity(cfs_rq, se);
3822 update_load_avg(se, UPDATE_TG);
3823 }
3824
3825 update_stats_curr_start(cfs_rq, se);
3826 cfs_rq->curr = se;
3827
3828 /*
3829 * Track our maximum slice length, if the CPU's load is at
3830 * least twice that of our own weight (i.e. dont track it
3831 * when there are only lesser-weight tasks around):
3832 */
3833 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3834 schedstat_set(se->statistics.slice_max,
3835 max((u64)schedstat_val(se->statistics.slice_max),
3836 se->sum_exec_runtime - se->prev_sum_exec_runtime));
3837 }
3838
3839 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3840 }
3841
3842 static int
3843 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3844
3845 /*
3846 * Pick the next process, keeping these things in mind, in this order:
3847 * 1) keep things fair between processes/task groups
3848 * 2) pick the "next" process, since someone really wants that to run
3849 * 3) pick the "last" process, for cache locality
3850 * 4) do not run the "skip" process, if something else is available
3851 */
3852 static struct sched_entity *
3853 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3854 {
3855 struct sched_entity *left = __pick_first_entity(cfs_rq);
3856 struct sched_entity *se;
3857
3858 /*
3859 * If curr is set we have to see if its left of the leftmost entity
3860 * still in the tree, provided there was anything in the tree at all.
3861 */
3862 if (!left || (curr && entity_before(curr, left)))
3863 left = curr;
3864
3865 se = left; /* ideally we run the leftmost entity */
3866
3867 /*
3868 * Avoid running the skip buddy, if running something else can
3869 * be done without getting too unfair.
3870 */
3871 if (cfs_rq->skip == se) {
3872 struct sched_entity *second;
3873
3874 if (se == curr) {
3875 second = __pick_first_entity(cfs_rq);
3876 } else {
3877 second = __pick_next_entity(se);
3878 if (!second || (curr && entity_before(curr, second)))
3879 second = curr;
3880 }
3881
3882 if (second && wakeup_preempt_entity(second, left) < 1)
3883 se = second;
3884 }
3885
3886 /*
3887 * Prefer last buddy, try to return the CPU to a preempted task.
3888 */
3889 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3890 se = cfs_rq->last;
3891
3892 /*
3893 * Someone really wants this to run. If it's not unfair, run it.
3894 */
3895 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3896 se = cfs_rq->next;
3897
3898 clear_buddies(cfs_rq, se);
3899
3900 return se;
3901 }
3902
3903 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3904
3905 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3906 {
3907 /*
3908 * If still on the runqueue then deactivate_task()
3909 * was not called and update_curr() has to be done:
3910 */
3911 if (prev->on_rq)
3912 update_curr(cfs_rq);
3913
3914 /* throttle cfs_rqs exceeding runtime */
3915 check_cfs_rq_runtime(cfs_rq);
3916
3917 check_spread(cfs_rq, prev);
3918
3919 if (prev->on_rq) {
3920 update_stats_wait_start(cfs_rq, prev);
3921 /* Put 'current' back into the tree. */
3922 __enqueue_entity(cfs_rq, prev);
3923 /* in !on_rq case, update occurred at dequeue */
3924 update_load_avg(prev, 0);
3925 }
3926 cfs_rq->curr = NULL;
3927 }
3928
3929 static void
3930 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3931 {
3932 /*
3933 * Update run-time statistics of the 'current'.
3934 */
3935 update_curr(cfs_rq);
3936
3937 /*
3938 * Ensure that runnable average is periodically updated.
3939 */
3940 update_load_avg(curr, UPDATE_TG);
3941 update_cfs_shares(curr);
3942
3943 #ifdef CONFIG_SCHED_HRTICK
3944 /*
3945 * queued ticks are scheduled to match the slice, so don't bother
3946 * validating it and just reschedule.
3947 */
3948 if (queued) {
3949 resched_curr(rq_of(cfs_rq));
3950 return;
3951 }
3952 /*
3953 * don't let the period tick interfere with the hrtick preemption
3954 */
3955 if (!sched_feat(DOUBLE_TICK) &&
3956 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3957 return;
3958 #endif
3959
3960 if (cfs_rq->nr_running > 1)
3961 check_preempt_tick(cfs_rq, curr);
3962 }
3963
3964
3965 /**************************************************
3966 * CFS bandwidth control machinery
3967 */
3968
3969 #ifdef CONFIG_CFS_BANDWIDTH
3970
3971 #ifdef HAVE_JUMP_LABEL
3972 static struct static_key __cfs_bandwidth_used;
3973
3974 static inline bool cfs_bandwidth_used(void)
3975 {
3976 return static_key_false(&__cfs_bandwidth_used);
3977 }
3978
3979 void cfs_bandwidth_usage_inc(void)
3980 {
3981 static_key_slow_inc(&__cfs_bandwidth_used);
3982 }
3983
3984 void cfs_bandwidth_usage_dec(void)
3985 {
3986 static_key_slow_dec(&__cfs_bandwidth_used);
3987 }
3988 #else /* HAVE_JUMP_LABEL */
3989 static bool cfs_bandwidth_used(void)
3990 {
3991 return true;
3992 }
3993
3994 void cfs_bandwidth_usage_inc(void) {}
3995 void cfs_bandwidth_usage_dec(void) {}
3996 #endif /* HAVE_JUMP_LABEL */
3997
3998 /*
3999 * default period for cfs group bandwidth.
4000 * default: 0.1s, units: nanoseconds
4001 */
4002 static inline u64 default_cfs_period(void)
4003 {
4004 return 100000000ULL;
4005 }
4006
4007 static inline u64 sched_cfs_bandwidth_slice(void)
4008 {
4009 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4010 }
4011
4012 /*
4013 * Replenish runtime according to assigned quota and update expiration time.
4014 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
4015 * additional synchronization around rq->lock.
4016 *
4017 * requires cfs_b->lock
4018 */
4019 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
4020 {
4021 u64 now;
4022
4023 if (cfs_b->quota == RUNTIME_INF)
4024 return;
4025
4026 now = sched_clock_cpu(smp_processor_id());
4027 cfs_b->runtime = cfs_b->quota;
4028 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
4029 }
4030
4031 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4032 {
4033 return &tg->cfs_bandwidth;
4034 }
4035
4036 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
4037 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4038 {
4039 if (unlikely(cfs_rq->throttle_count))
4040 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
4041
4042 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
4043 }
4044
4045 /* returns 0 on failure to allocate runtime */
4046 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4047 {
4048 struct task_group *tg = cfs_rq->tg;
4049 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
4050 u64 amount = 0, min_amount, expires;
4051
4052 /* note: this is a positive sum as runtime_remaining <= 0 */
4053 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
4054
4055 raw_spin_lock(&cfs_b->lock);
4056 if (cfs_b->quota == RUNTIME_INF)
4057 amount = min_amount;
4058 else {
4059 start_cfs_bandwidth(cfs_b);
4060
4061 if (cfs_b->runtime > 0) {
4062 amount = min(cfs_b->runtime, min_amount);
4063 cfs_b->runtime -= amount;
4064 cfs_b->idle = 0;
4065 }
4066 }
4067 expires = cfs_b->runtime_expires;
4068 raw_spin_unlock(&cfs_b->lock);
4069
4070 cfs_rq->runtime_remaining += amount;
4071 /*
4072 * we may have advanced our local expiration to account for allowed
4073 * spread between our sched_clock and the one on which runtime was
4074 * issued.
4075 */
4076 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
4077 cfs_rq->runtime_expires = expires;
4078
4079 return cfs_rq->runtime_remaining > 0;
4080 }
4081
4082 /*
4083 * Note: This depends on the synchronization provided by sched_clock and the
4084 * fact that rq->clock snapshots this value.
4085 */
4086 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4087 {
4088 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4089
4090 /* if the deadline is ahead of our clock, nothing to do */
4091 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
4092 return;
4093
4094 if (cfs_rq->runtime_remaining < 0)
4095 return;
4096
4097 /*
4098 * If the local deadline has passed we have to consider the
4099 * possibility that our sched_clock is 'fast' and the global deadline
4100 * has not truly expired.
4101 *
4102 * Fortunately we can check determine whether this the case by checking
4103 * whether the global deadline has advanced. It is valid to compare
4104 * cfs_b->runtime_expires without any locks since we only care about
4105 * exact equality, so a partial write will still work.
4106 */
4107
4108 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
4109 /* extend local deadline, drift is bounded above by 2 ticks */
4110 cfs_rq->runtime_expires += TICK_NSEC;
4111 } else {
4112 /* global deadline is ahead, expiration has passed */
4113 cfs_rq->runtime_remaining = 0;
4114 }
4115 }
4116
4117 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4118 {
4119 /* dock delta_exec before expiring quota (as it could span periods) */
4120 cfs_rq->runtime_remaining -= delta_exec;
4121 expire_cfs_rq_runtime(cfs_rq);
4122
4123 if (likely(cfs_rq->runtime_remaining > 0))
4124 return;
4125
4126 /*
4127 * if we're unable to extend our runtime we resched so that the active
4128 * hierarchy can be throttled
4129 */
4130 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
4131 resched_curr(rq_of(cfs_rq));
4132 }
4133
4134 static __always_inline
4135 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4136 {
4137 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
4138 return;
4139
4140 __account_cfs_rq_runtime(cfs_rq, delta_exec);
4141 }
4142
4143 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4144 {
4145 return cfs_bandwidth_used() && cfs_rq->throttled;
4146 }
4147
4148 /* check whether cfs_rq, or any parent, is throttled */
4149 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4150 {
4151 return cfs_bandwidth_used() && cfs_rq->throttle_count;
4152 }
4153
4154 /*
4155 * Ensure that neither of the group entities corresponding to src_cpu or
4156 * dest_cpu are members of a throttled hierarchy when performing group
4157 * load-balance operations.
4158 */
4159 static inline int throttled_lb_pair(struct task_group *tg,
4160 int src_cpu, int dest_cpu)
4161 {
4162 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4163
4164 src_cfs_rq = tg->cfs_rq[src_cpu];
4165 dest_cfs_rq = tg->cfs_rq[dest_cpu];
4166
4167 return throttled_hierarchy(src_cfs_rq) ||
4168 throttled_hierarchy(dest_cfs_rq);
4169 }
4170
4171 /* updated child weight may affect parent so we have to do this bottom up */
4172 static int tg_unthrottle_up(struct task_group *tg, void *data)
4173 {
4174 struct rq *rq = data;
4175 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4176
4177 cfs_rq->throttle_count--;
4178 if (!cfs_rq->throttle_count) {
4179 /* adjust cfs_rq_clock_task() */
4180 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
4181 cfs_rq->throttled_clock_task;
4182 }
4183
4184 return 0;
4185 }
4186
4187 static int tg_throttle_down(struct task_group *tg, void *data)
4188 {
4189 struct rq *rq = data;
4190 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4191
4192 /* group is entering throttled state, stop time */
4193 if (!cfs_rq->throttle_count)
4194 cfs_rq->throttled_clock_task = rq_clock_task(rq);
4195 cfs_rq->throttle_count++;
4196
4197 return 0;
4198 }
4199
4200 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
4201 {
4202 struct rq *rq = rq_of(cfs_rq);
4203 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4204 struct sched_entity *se;
4205 long task_delta, dequeue = 1;
4206 bool empty;
4207
4208 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4209
4210 /* freeze hierarchy runnable averages while throttled */
4211 rcu_read_lock();
4212 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4213 rcu_read_unlock();
4214
4215 task_delta = cfs_rq->h_nr_running;
4216 for_each_sched_entity(se) {
4217 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4218 /* throttled entity or throttle-on-deactivate */
4219 if (!se->on_rq)
4220 break;
4221
4222 if (dequeue)
4223 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4224 qcfs_rq->h_nr_running -= task_delta;
4225
4226 if (qcfs_rq->load.weight)
4227 dequeue = 0;
4228 }
4229
4230 if (!se)
4231 sub_nr_running(rq, task_delta);
4232
4233 cfs_rq->throttled = 1;
4234 cfs_rq->throttled_clock = rq_clock(rq);
4235 raw_spin_lock(&cfs_b->lock);
4236 empty = list_empty(&cfs_b->throttled_cfs_rq);
4237
4238 /*
4239 * Add to the _head_ of the list, so that an already-started
4240 * distribute_cfs_runtime will not see us
4241 */
4242 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
4243
4244 /*
4245 * If we're the first throttled task, make sure the bandwidth
4246 * timer is running.
4247 */
4248 if (empty)
4249 start_cfs_bandwidth(cfs_b);
4250
4251 raw_spin_unlock(&cfs_b->lock);
4252 }
4253
4254 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
4255 {
4256 struct rq *rq = rq_of(cfs_rq);
4257 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4258 struct sched_entity *se;
4259 int enqueue = 1;
4260 long task_delta;
4261
4262 se = cfs_rq->tg->se[cpu_of(rq)];
4263
4264 cfs_rq->throttled = 0;
4265
4266 update_rq_clock(rq);
4267
4268 raw_spin_lock(&cfs_b->lock);
4269 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4270 list_del_rcu(&cfs_rq->throttled_list);
4271 raw_spin_unlock(&cfs_b->lock);
4272
4273 /* update hierarchical throttle state */
4274 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4275
4276 if (!cfs_rq->load.weight)
4277 return;
4278
4279 task_delta = cfs_rq->h_nr_running;
4280 for_each_sched_entity(se) {
4281 if (se->on_rq)
4282 enqueue = 0;
4283
4284 cfs_rq = cfs_rq_of(se);
4285 if (enqueue)
4286 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4287 cfs_rq->h_nr_running += task_delta;
4288
4289 if (cfs_rq_throttled(cfs_rq))
4290 break;
4291 }
4292
4293 if (!se)
4294 add_nr_running(rq, task_delta);
4295
4296 /* determine whether we need to wake up potentially idle cpu */
4297 if (rq->curr == rq->idle && rq->cfs.nr_running)
4298 resched_curr(rq);
4299 }
4300
4301 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4302 u64 remaining, u64 expires)
4303 {
4304 struct cfs_rq *cfs_rq;
4305 u64 runtime;
4306 u64 starting_runtime = remaining;
4307
4308 rcu_read_lock();
4309 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4310 throttled_list) {
4311 struct rq *rq = rq_of(cfs_rq);
4312 struct rq_flags rf;
4313
4314 rq_lock(rq, &rf);
4315 if (!cfs_rq_throttled(cfs_rq))
4316 goto next;
4317
4318 runtime = -cfs_rq->runtime_remaining + 1;
4319 if (runtime > remaining)
4320 runtime = remaining;
4321 remaining -= runtime;
4322
4323 cfs_rq->runtime_remaining += runtime;
4324 cfs_rq->runtime_expires = expires;
4325
4326 /* we check whether we're throttled above */
4327 if (cfs_rq->runtime_remaining > 0)
4328 unthrottle_cfs_rq(cfs_rq);
4329
4330 next:
4331 rq_unlock(rq, &rf);
4332
4333 if (!remaining)
4334 break;
4335 }
4336 rcu_read_unlock();
4337
4338 return starting_runtime - remaining;
4339 }
4340
4341 /*
4342 * Responsible for refilling a task_group's bandwidth and unthrottling its
4343 * cfs_rqs as appropriate. If there has been no activity within the last
4344 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4345 * used to track this state.
4346 */
4347 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4348 {
4349 u64 runtime, runtime_expires;
4350 int throttled;
4351
4352 /* no need to continue the timer with no bandwidth constraint */
4353 if (cfs_b->quota == RUNTIME_INF)
4354 goto out_deactivate;
4355
4356 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4357 cfs_b->nr_periods += overrun;
4358
4359 /*
4360 * idle depends on !throttled (for the case of a large deficit), and if
4361 * we're going inactive then everything else can be deferred
4362 */
4363 if (cfs_b->idle && !throttled)
4364 goto out_deactivate;
4365
4366 __refill_cfs_bandwidth_runtime(cfs_b);
4367
4368 if (!throttled) {
4369 /* mark as potentially idle for the upcoming period */
4370 cfs_b->idle = 1;
4371 return 0;
4372 }
4373
4374 /* account preceding periods in which throttling occurred */
4375 cfs_b->nr_throttled += overrun;
4376
4377 runtime_expires = cfs_b->runtime_expires;
4378
4379 /*
4380 * This check is repeated as we are holding onto the new bandwidth while
4381 * we unthrottle. This can potentially race with an unthrottled group
4382 * trying to acquire new bandwidth from the global pool. This can result
4383 * in us over-using our runtime if it is all used during this loop, but
4384 * only by limited amounts in that extreme case.
4385 */
4386 while (throttled && cfs_b->runtime > 0) {
4387 runtime = cfs_b->runtime;
4388 raw_spin_unlock(&cfs_b->lock);
4389 /* we can't nest cfs_b->lock while distributing bandwidth */
4390 runtime = distribute_cfs_runtime(cfs_b, runtime,
4391 runtime_expires);
4392 raw_spin_lock(&cfs_b->lock);
4393
4394 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4395
4396 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4397 }
4398
4399 /*
4400 * While we are ensured activity in the period following an
4401 * unthrottle, this also covers the case in which the new bandwidth is
4402 * insufficient to cover the existing bandwidth deficit. (Forcing the
4403 * timer to remain active while there are any throttled entities.)
4404 */
4405 cfs_b->idle = 0;
4406
4407 return 0;
4408
4409 out_deactivate:
4410 return 1;
4411 }
4412
4413 /* a cfs_rq won't donate quota below this amount */
4414 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4415 /* minimum remaining period time to redistribute slack quota */
4416 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4417 /* how long we wait to gather additional slack before distributing */
4418 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4419
4420 /*
4421 * Are we near the end of the current quota period?
4422 *
4423 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4424 * hrtimer base being cleared by hrtimer_start. In the case of
4425 * migrate_hrtimers, base is never cleared, so we are fine.
4426 */
4427 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4428 {
4429 struct hrtimer *refresh_timer = &cfs_b->period_timer;
4430 u64 remaining;
4431
4432 /* if the call-back is running a quota refresh is already occurring */
4433 if (hrtimer_callback_running(refresh_timer))
4434 return 1;
4435
4436 /* is a quota refresh about to occur? */
4437 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4438 if (remaining < min_expire)
4439 return 1;
4440
4441 return 0;
4442 }
4443
4444 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4445 {
4446 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4447
4448 /* if there's a quota refresh soon don't bother with slack */
4449 if (runtime_refresh_within(cfs_b, min_left))
4450 return;
4451
4452 hrtimer_start(&cfs_b->slack_timer,
4453 ns_to_ktime(cfs_bandwidth_slack_period),
4454 HRTIMER_MODE_REL);
4455 }
4456
4457 /* we know any runtime found here is valid as update_curr() precedes return */
4458 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4459 {
4460 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4461 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4462
4463 if (slack_runtime <= 0)
4464 return;
4465
4466 raw_spin_lock(&cfs_b->lock);
4467 if (cfs_b->quota != RUNTIME_INF &&
4468 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4469 cfs_b->runtime += slack_runtime;
4470
4471 /* we are under rq->lock, defer unthrottling using a timer */
4472 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4473 !list_empty(&cfs_b->throttled_cfs_rq))
4474 start_cfs_slack_bandwidth(cfs_b);
4475 }
4476 raw_spin_unlock(&cfs_b->lock);
4477
4478 /* even if it's not valid for return we don't want to try again */
4479 cfs_rq->runtime_remaining -= slack_runtime;
4480 }
4481
4482 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4483 {
4484 if (!cfs_bandwidth_used())
4485 return;
4486
4487 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4488 return;
4489
4490 __return_cfs_rq_runtime(cfs_rq);
4491 }
4492
4493 /*
4494 * This is done with a timer (instead of inline with bandwidth return) since
4495 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4496 */
4497 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4498 {
4499 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4500 u64 expires;
4501
4502 /* confirm we're still not at a refresh boundary */
4503 raw_spin_lock(&cfs_b->lock);
4504 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4505 raw_spin_unlock(&cfs_b->lock);
4506 return;
4507 }
4508
4509 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4510 runtime = cfs_b->runtime;
4511
4512 expires = cfs_b->runtime_expires;
4513 raw_spin_unlock(&cfs_b->lock);
4514
4515 if (!runtime)
4516 return;
4517
4518 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4519
4520 raw_spin_lock(&cfs_b->lock);
4521 if (expires == cfs_b->runtime_expires)
4522 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4523 raw_spin_unlock(&cfs_b->lock);
4524 }
4525
4526 /*
4527 * When a group wakes up we want to make sure that its quota is not already
4528 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4529 * runtime as update_curr() throttling can not not trigger until it's on-rq.
4530 */
4531 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4532 {
4533 if (!cfs_bandwidth_used())
4534 return;
4535
4536 /* an active group must be handled by the update_curr()->put() path */
4537 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4538 return;
4539
4540 /* ensure the group is not already throttled */
4541 if (cfs_rq_throttled(cfs_rq))
4542 return;
4543
4544 /* update runtime allocation */
4545 account_cfs_rq_runtime(cfs_rq, 0);
4546 if (cfs_rq->runtime_remaining <= 0)
4547 throttle_cfs_rq(cfs_rq);
4548 }
4549
4550 static void sync_throttle(struct task_group *tg, int cpu)
4551 {
4552 struct cfs_rq *pcfs_rq, *cfs_rq;
4553
4554 if (!cfs_bandwidth_used())
4555 return;
4556
4557 if (!tg->parent)
4558 return;
4559
4560 cfs_rq = tg->cfs_rq[cpu];
4561 pcfs_rq = tg->parent->cfs_rq[cpu];
4562
4563 cfs_rq->throttle_count = pcfs_rq->throttle_count;
4564 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4565 }
4566
4567 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4568 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4569 {
4570 if (!cfs_bandwidth_used())
4571 return false;
4572
4573 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4574 return false;
4575
4576 /*
4577 * it's possible for a throttled entity to be forced into a running
4578 * state (e.g. set_curr_task), in this case we're finished.
4579 */
4580 if (cfs_rq_throttled(cfs_rq))
4581 return true;
4582
4583 throttle_cfs_rq(cfs_rq);
4584 return true;
4585 }
4586
4587 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4588 {
4589 struct cfs_bandwidth *cfs_b =
4590 container_of(timer, struct cfs_bandwidth, slack_timer);
4591
4592 do_sched_cfs_slack_timer(cfs_b);
4593
4594 return HRTIMER_NORESTART;
4595 }
4596
4597 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4598 {
4599 struct cfs_bandwidth *cfs_b =
4600 container_of(timer, struct cfs_bandwidth, period_timer);
4601 int overrun;
4602 int idle = 0;
4603
4604 raw_spin_lock(&cfs_b->lock);
4605 for (;;) {
4606 overrun = hrtimer_forward_now(timer, cfs_b->period);
4607 if (!overrun)
4608 break;
4609
4610 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4611 }
4612 if (idle)
4613 cfs_b->period_active = 0;
4614 raw_spin_unlock(&cfs_b->lock);
4615
4616 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4617 }
4618
4619 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4620 {
4621 raw_spin_lock_init(&cfs_b->lock);
4622 cfs_b->runtime = 0;
4623 cfs_b->quota = RUNTIME_INF;
4624 cfs_b->period = ns_to_ktime(default_cfs_period());
4625
4626 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4627 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4628 cfs_b->period_timer.function = sched_cfs_period_timer;
4629 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4630 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4631 }
4632
4633 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4634 {
4635 cfs_rq->runtime_enabled = 0;
4636 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4637 }
4638
4639 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4640 {
4641 lockdep_assert_held(&cfs_b->lock);
4642
4643 if (!cfs_b->period_active) {
4644 cfs_b->period_active = 1;
4645 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4646 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4647 }
4648 }
4649
4650 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4651 {
4652 /* init_cfs_bandwidth() was not called */
4653 if (!cfs_b->throttled_cfs_rq.next)
4654 return;
4655
4656 hrtimer_cancel(&cfs_b->period_timer);
4657 hrtimer_cancel(&cfs_b->slack_timer);
4658 }
4659
4660 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4661 {
4662 struct cfs_rq *cfs_rq;
4663
4664 for_each_leaf_cfs_rq(rq, cfs_rq) {
4665 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4666
4667 raw_spin_lock(&cfs_b->lock);
4668 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4669 raw_spin_unlock(&cfs_b->lock);
4670 }
4671 }
4672
4673 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4674 {
4675 struct cfs_rq *cfs_rq;
4676
4677 for_each_leaf_cfs_rq(rq, cfs_rq) {
4678 if (!cfs_rq->runtime_enabled)
4679 continue;
4680
4681 /*
4682 * clock_task is not advancing so we just need to make sure
4683 * there's some valid quota amount
4684 */
4685 cfs_rq->runtime_remaining = 1;
4686 /*
4687 * Offline rq is schedulable till cpu is completely disabled
4688 * in take_cpu_down(), so we prevent new cfs throttling here.
4689 */
4690 cfs_rq->runtime_enabled = 0;
4691
4692 if (cfs_rq_throttled(cfs_rq))
4693 unthrottle_cfs_rq(cfs_rq);
4694 }
4695 }
4696
4697 #else /* CONFIG_CFS_BANDWIDTH */
4698 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4699 {
4700 return rq_clock_task(rq_of(cfs_rq));
4701 }
4702
4703 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4704 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4705 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4706 static inline void sync_throttle(struct task_group *tg, int cpu) {}
4707 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4708
4709 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4710 {
4711 return 0;
4712 }
4713
4714 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4715 {
4716 return 0;
4717 }
4718
4719 static inline int throttled_lb_pair(struct task_group *tg,
4720 int src_cpu, int dest_cpu)
4721 {
4722 return 0;
4723 }
4724
4725 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4726
4727 #ifdef CONFIG_FAIR_GROUP_SCHED
4728 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4729 #endif
4730
4731 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4732 {
4733 return NULL;
4734 }
4735 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4736 static inline void update_runtime_enabled(struct rq *rq) {}
4737 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4738
4739 #endif /* CONFIG_CFS_BANDWIDTH */
4740
4741 /**************************************************
4742 * CFS operations on tasks:
4743 */
4744
4745 #ifdef CONFIG_SCHED_HRTICK
4746 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4747 {
4748 struct sched_entity *se = &p->se;
4749 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4750
4751 SCHED_WARN_ON(task_rq(p) != rq);
4752
4753 if (rq->cfs.h_nr_running > 1) {
4754 u64 slice = sched_slice(cfs_rq, se);
4755 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4756 s64 delta = slice - ran;
4757
4758 if (delta < 0) {
4759 if (rq->curr == p)
4760 resched_curr(rq);
4761 return;
4762 }
4763 hrtick_start(rq, delta);
4764 }
4765 }
4766
4767 /*
4768 * called from enqueue/dequeue and updates the hrtick when the
4769 * current task is from our class and nr_running is low enough
4770 * to matter.
4771 */
4772 static void hrtick_update(struct rq *rq)
4773 {
4774 struct task_struct *curr = rq->curr;
4775
4776 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4777 return;
4778
4779 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4780 hrtick_start_fair(rq, curr);
4781 }
4782 #else /* !CONFIG_SCHED_HRTICK */
4783 static inline void
4784 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4785 {
4786 }
4787
4788 static inline void hrtick_update(struct rq *rq)
4789 {
4790 }
4791 #endif
4792
4793 /*
4794 * The enqueue_task method is called before nr_running is
4795 * increased. Here we update the fair scheduling stats and
4796 * then put the task into the rbtree:
4797 */
4798 static void
4799 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4800 {
4801 struct cfs_rq *cfs_rq;
4802 struct sched_entity *se = &p->se;
4803
4804 /*
4805 * If in_iowait is set, the code below may not trigger any cpufreq
4806 * utilization updates, so do it here explicitly with the IOWAIT flag
4807 * passed.
4808 */
4809 if (p->in_iowait)
4810 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4811
4812 for_each_sched_entity(se) {
4813 if (se->on_rq)
4814 break;
4815 cfs_rq = cfs_rq_of(se);
4816 enqueue_entity(cfs_rq, se, flags);
4817
4818 /*
4819 * end evaluation on encountering a throttled cfs_rq
4820 *
4821 * note: in the case of encountering a throttled cfs_rq we will
4822 * post the final h_nr_running increment below.
4823 */
4824 if (cfs_rq_throttled(cfs_rq))
4825 break;
4826 cfs_rq->h_nr_running++;
4827
4828 flags = ENQUEUE_WAKEUP;
4829 }
4830
4831 for_each_sched_entity(se) {
4832 cfs_rq = cfs_rq_of(se);
4833 cfs_rq->h_nr_running++;
4834
4835 if (cfs_rq_throttled(cfs_rq))
4836 break;
4837
4838 update_load_avg(se, UPDATE_TG);
4839 update_cfs_shares(se);
4840 }
4841
4842 if (!se)
4843 add_nr_running(rq, 1);
4844
4845 hrtick_update(rq);
4846 }
4847
4848 static void set_next_buddy(struct sched_entity *se);
4849
4850 /*
4851 * The dequeue_task method is called before nr_running is
4852 * decreased. We remove the task from the rbtree and
4853 * update the fair scheduling stats:
4854 */
4855 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4856 {
4857 struct cfs_rq *cfs_rq;
4858 struct sched_entity *se = &p->se;
4859 int task_sleep = flags & DEQUEUE_SLEEP;
4860
4861 for_each_sched_entity(se) {
4862 cfs_rq = cfs_rq_of(se);
4863 dequeue_entity(cfs_rq, se, flags);
4864
4865 /*
4866 * end evaluation on encountering a throttled cfs_rq
4867 *
4868 * note: in the case of encountering a throttled cfs_rq we will
4869 * post the final h_nr_running decrement below.
4870 */
4871 if (cfs_rq_throttled(cfs_rq))
4872 break;
4873 cfs_rq->h_nr_running--;
4874
4875 /* Don't dequeue parent if it has other entities besides us */
4876 if (cfs_rq->load.weight) {
4877 /* Avoid re-evaluating load for this entity: */
4878 se = parent_entity(se);
4879 /*
4880 * Bias pick_next to pick a task from this cfs_rq, as
4881 * p is sleeping when it is within its sched_slice.
4882 */
4883 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4884 set_next_buddy(se);
4885 break;
4886 }
4887 flags |= DEQUEUE_SLEEP;
4888 }
4889
4890 for_each_sched_entity(se) {
4891 cfs_rq = cfs_rq_of(se);
4892 cfs_rq->h_nr_running--;
4893
4894 if (cfs_rq_throttled(cfs_rq))
4895 break;
4896
4897 update_load_avg(se, UPDATE_TG);
4898 update_cfs_shares(se);
4899 }
4900
4901 if (!se)
4902 sub_nr_running(rq, 1);
4903
4904 hrtick_update(rq);
4905 }
4906
4907 #ifdef CONFIG_SMP
4908
4909 /* Working cpumask for: load_balance, load_balance_newidle. */
4910 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4911 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4912
4913 #ifdef CONFIG_NO_HZ_COMMON
4914 /*
4915 * per rq 'load' arrray crap; XXX kill this.
4916 */
4917
4918 /*
4919 * The exact cpuload calculated at every tick would be:
4920 *
4921 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4922 *
4923 * If a cpu misses updates for n ticks (as it was idle) and update gets
4924 * called on the n+1-th tick when cpu may be busy, then we have:
4925 *
4926 * load_n = (1 - 1/2^i)^n * load_0
4927 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
4928 *
4929 * decay_load_missed() below does efficient calculation of
4930 *
4931 * load' = (1 - 1/2^i)^n * load
4932 *
4933 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4934 * This allows us to precompute the above in said factors, thereby allowing the
4935 * reduction of an arbitrary n in O(log_2 n) steps. (See also
4936 * fixed_power_int())
4937 *
4938 * The calculation is approximated on a 128 point scale.
4939 */
4940 #define DEGRADE_SHIFT 7
4941
4942 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4943 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4944 { 0, 0, 0, 0, 0, 0, 0, 0 },
4945 { 64, 32, 8, 0, 0, 0, 0, 0 },
4946 { 96, 72, 40, 12, 1, 0, 0, 0 },
4947 { 112, 98, 75, 43, 15, 1, 0, 0 },
4948 { 120, 112, 98, 76, 45, 16, 2, 0 }
4949 };
4950
4951 /*
4952 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4953 * would be when CPU is idle and so we just decay the old load without
4954 * adding any new load.
4955 */
4956 static unsigned long
4957 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4958 {
4959 int j = 0;
4960
4961 if (!missed_updates)
4962 return load;
4963
4964 if (missed_updates >= degrade_zero_ticks[idx])
4965 return 0;
4966
4967 if (idx == 1)
4968 return load >> missed_updates;
4969
4970 while (missed_updates) {
4971 if (missed_updates % 2)
4972 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4973
4974 missed_updates >>= 1;
4975 j++;
4976 }
4977 return load;
4978 }
4979 #endif /* CONFIG_NO_HZ_COMMON */
4980
4981 /**
4982 * __cpu_load_update - update the rq->cpu_load[] statistics
4983 * @this_rq: The rq to update statistics for
4984 * @this_load: The current load
4985 * @pending_updates: The number of missed updates
4986 *
4987 * Update rq->cpu_load[] statistics. This function is usually called every
4988 * scheduler tick (TICK_NSEC).
4989 *
4990 * This function computes a decaying average:
4991 *
4992 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4993 *
4994 * Because of NOHZ it might not get called on every tick which gives need for
4995 * the @pending_updates argument.
4996 *
4997 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4998 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4999 * = A * (A * load[i]_n-2 + B) + B
5000 * = A * (A * (A * load[i]_n-3 + B) + B) + B
5001 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
5002 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
5003 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
5004 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
5005 *
5006 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
5007 * any change in load would have resulted in the tick being turned back on.
5008 *
5009 * For regular NOHZ, this reduces to:
5010 *
5011 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
5012 *
5013 * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
5014 * term.
5015 */
5016 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
5017 unsigned long pending_updates)
5018 {
5019 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
5020 int i, scale;
5021
5022 this_rq->nr_load_updates++;
5023
5024 /* Update our load: */
5025 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
5026 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
5027 unsigned long old_load, new_load;
5028
5029 /* scale is effectively 1 << i now, and >> i divides by scale */
5030
5031 old_load = this_rq->cpu_load[i];
5032 #ifdef CONFIG_NO_HZ_COMMON
5033 old_load = decay_load_missed(old_load, pending_updates - 1, i);
5034 if (tickless_load) {
5035 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
5036 /*
5037 * old_load can never be a negative value because a
5038 * decayed tickless_load cannot be greater than the
5039 * original tickless_load.
5040 */
5041 old_load += tickless_load;
5042 }
5043 #endif
5044 new_load = this_load;
5045 /*
5046 * Round up the averaging division if load is increasing. This
5047 * prevents us from getting stuck on 9 if the load is 10, for
5048 * example.
5049 */
5050 if (new_load > old_load)
5051 new_load += scale - 1;
5052
5053 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
5054 }
5055
5056 sched_avg_update(this_rq);
5057 }
5058
5059 /* Used instead of source_load when we know the type == 0 */
5060 static unsigned long weighted_cpuload(const int cpu)
5061 {
5062 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
5063 }
5064
5065 #ifdef CONFIG_NO_HZ_COMMON
5066 /*
5067 * There is no sane way to deal with nohz on smp when using jiffies because the
5068 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
5069 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
5070 *
5071 * Therefore we need to avoid the delta approach from the regular tick when
5072 * possible since that would seriously skew the load calculation. This is why we
5073 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
5074 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
5075 * loop exit, nohz_idle_balance, nohz full exit...)
5076 *
5077 * This means we might still be one tick off for nohz periods.
5078 */
5079
5080 static void cpu_load_update_nohz(struct rq *this_rq,
5081 unsigned long curr_jiffies,
5082 unsigned long load)
5083 {
5084 unsigned long pending_updates;
5085
5086 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
5087 if (pending_updates) {
5088 this_rq->last_load_update_tick = curr_jiffies;
5089 /*
5090 * In the regular NOHZ case, we were idle, this means load 0.
5091 * In the NOHZ_FULL case, we were non-idle, we should consider
5092 * its weighted load.
5093 */
5094 cpu_load_update(this_rq, load, pending_updates);
5095 }
5096 }
5097
5098 /*
5099 * Called from nohz_idle_balance() to update the load ratings before doing the
5100 * idle balance.
5101 */
5102 static void cpu_load_update_idle(struct rq *this_rq)
5103 {
5104 /*
5105 * bail if there's load or we're actually up-to-date.
5106 */
5107 if (weighted_cpuload(cpu_of(this_rq)))
5108 return;
5109
5110 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
5111 }
5112
5113 /*
5114 * Record CPU load on nohz entry so we know the tickless load to account
5115 * on nohz exit. cpu_load[0] happens then to be updated more frequently
5116 * than other cpu_load[idx] but it should be fine as cpu_load readers
5117 * shouldn't rely into synchronized cpu_load[*] updates.
5118 */
5119 void cpu_load_update_nohz_start(void)
5120 {
5121 struct rq *this_rq = this_rq();
5122
5123 /*
5124 * This is all lockless but should be fine. If weighted_cpuload changes
5125 * concurrently we'll exit nohz. And cpu_load write can race with
5126 * cpu_load_update_idle() but both updater would be writing the same.
5127 */
5128 this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
5129 }
5130
5131 /*
5132 * Account the tickless load in the end of a nohz frame.
5133 */
5134 void cpu_load_update_nohz_stop(void)
5135 {
5136 unsigned long curr_jiffies = READ_ONCE(jiffies);
5137 struct rq *this_rq = this_rq();
5138 unsigned long load;
5139 struct rq_flags rf;
5140
5141 if (curr_jiffies == this_rq->last_load_update_tick)
5142 return;
5143
5144 load = weighted_cpuload(cpu_of(this_rq));
5145 rq_lock(this_rq, &rf);
5146 update_rq_clock(this_rq);
5147 cpu_load_update_nohz(this_rq, curr_jiffies, load);
5148 rq_unlock(this_rq, &rf);
5149 }
5150 #else /* !CONFIG_NO_HZ_COMMON */
5151 static inline void cpu_load_update_nohz(struct rq *this_rq,
5152 unsigned long curr_jiffies,
5153 unsigned long load) { }
5154 #endif /* CONFIG_NO_HZ_COMMON */
5155
5156 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
5157 {
5158 #ifdef CONFIG_NO_HZ_COMMON
5159 /* See the mess around cpu_load_update_nohz(). */
5160 this_rq->last_load_update_tick = READ_ONCE(jiffies);
5161 #endif
5162 cpu_load_update(this_rq, load, 1);
5163 }
5164
5165 /*
5166 * Called from scheduler_tick()
5167 */
5168 void cpu_load_update_active(struct rq *this_rq)
5169 {
5170 unsigned long load = weighted_cpuload(cpu_of(this_rq));
5171
5172 if (tick_nohz_tick_stopped())
5173 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
5174 else
5175 cpu_load_update_periodic(this_rq, load);
5176 }
5177
5178 /*
5179 * Return a low guess at the load of a migration-source cpu weighted
5180 * according to the scheduling class and "nice" value.
5181 *
5182 * We want to under-estimate the load of migration sources, to
5183 * balance conservatively.
5184 */
5185 static unsigned long source_load(int cpu, int type)
5186 {
5187 struct rq *rq = cpu_rq(cpu);
5188 unsigned long total = weighted_cpuload(cpu);
5189
5190 if (type == 0 || !sched_feat(LB_BIAS))
5191 return total;
5192
5193 return min(rq->cpu_load[type-1], total);
5194 }
5195
5196 /*
5197 * Return a high guess at the load of a migration-target cpu weighted
5198 * according to the scheduling class and "nice" value.
5199 */
5200 static unsigned long target_load(int cpu, int type)
5201 {
5202 struct rq *rq = cpu_rq(cpu);
5203 unsigned long total = weighted_cpuload(cpu);
5204
5205 if (type == 0 || !sched_feat(LB_BIAS))
5206 return total;
5207
5208 return max(rq->cpu_load[type-1], total);
5209 }
5210
5211 static unsigned long capacity_of(int cpu)
5212 {
5213 return cpu_rq(cpu)->cpu_capacity;
5214 }
5215
5216 static unsigned long capacity_orig_of(int cpu)
5217 {
5218 return cpu_rq(cpu)->cpu_capacity_orig;
5219 }
5220
5221 static unsigned long cpu_avg_load_per_task(int cpu)
5222 {
5223 struct rq *rq = cpu_rq(cpu);
5224 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
5225 unsigned long load_avg = weighted_cpuload(cpu);
5226
5227 if (nr_running)
5228 return load_avg / nr_running;
5229
5230 return 0;
5231 }
5232
5233 #ifdef CONFIG_FAIR_GROUP_SCHED
5234 /*
5235 * effective_load() calculates the load change as seen from the root_task_group
5236 *
5237 * Adding load to a group doesn't make a group heavier, but can cause movement
5238 * of group shares between cpus. Assuming the shares were perfectly aligned one
5239 * can calculate the shift in shares.
5240 *
5241 * Calculate the effective load difference if @wl is added (subtracted) to @tg
5242 * on this @cpu and results in a total addition (subtraction) of @wg to the
5243 * total group weight.
5244 *
5245 * Given a runqueue weight distribution (rw_i) we can compute a shares
5246 * distribution (s_i) using:
5247 *
5248 * s_i = rw_i / \Sum rw_j (1)
5249 *
5250 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
5251 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
5252 * shares distribution (s_i):
5253 *
5254 * rw_i = { 2, 4, 1, 0 }
5255 * s_i = { 2/7, 4/7, 1/7, 0 }
5256 *
5257 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
5258 * task used to run on and the CPU the waker is running on), we need to
5259 * compute the effect of waking a task on either CPU and, in case of a sync
5260 * wakeup, compute the effect of the current task going to sleep.
5261 *
5262 * So for a change of @wl to the local @cpu with an overall group weight change
5263 * of @wl we can compute the new shares distribution (s'_i) using:
5264 *
5265 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
5266 *
5267 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
5268 * differences in waking a task to CPU 0. The additional task changes the
5269 * weight and shares distributions like:
5270 *
5271 * rw'_i = { 3, 4, 1, 0 }
5272 * s'_i = { 3/8, 4/8, 1/8, 0 }
5273 *
5274 * We can then compute the difference in effective weight by using:
5275 *
5276 * dw_i = S * (s'_i - s_i) (3)
5277 *
5278 * Where 'S' is the group weight as seen by its parent.
5279 *
5280 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5281 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5282 * 4/7) times the weight of the group.
5283 */
5284 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5285 {
5286 struct sched_entity *se = tg->se[cpu];
5287
5288 if (!tg->parent) /* the trivial, non-cgroup case */
5289 return wl;
5290
5291 for_each_sched_entity(se) {
5292 struct cfs_rq *cfs_rq = se->my_q;
5293 long W, w = cfs_rq_load_avg(cfs_rq);
5294
5295 tg = cfs_rq->tg;
5296
5297 /*
5298 * W = @wg + \Sum rw_j
5299 */
5300 W = wg + atomic_long_read(&tg->load_avg);
5301
5302 /* Ensure \Sum rw_j >= rw_i */
5303 W -= cfs_rq->tg_load_avg_contrib;
5304 W += w;
5305
5306 /*
5307 * w = rw_i + @wl
5308 */
5309 w += wl;
5310
5311 /*
5312 * wl = S * s'_i; see (2)
5313 */
5314 if (W > 0 && w < W)
5315 wl = (w * (long)scale_load_down(tg->shares)) / W;
5316 else
5317 wl = scale_load_down(tg->shares);
5318
5319 /*
5320 * Per the above, wl is the new se->load.weight value; since
5321 * those are clipped to [MIN_SHARES, ...) do so now. See
5322 * calc_cfs_shares().
5323 */
5324 if (wl < MIN_SHARES)
5325 wl = MIN_SHARES;
5326
5327 /*
5328 * wl = dw_i = S * (s'_i - s_i); see (3)
5329 */
5330 wl -= se->avg.load_avg;
5331
5332 /*
5333 * Recursively apply this logic to all parent groups to compute
5334 * the final effective load change on the root group. Since
5335 * only the @tg group gets extra weight, all parent groups can
5336 * only redistribute existing shares. @wl is the shift in shares
5337 * resulting from this level per the above.
5338 */
5339 wg = 0;
5340 }
5341
5342 return wl;
5343 }
5344 #else
5345
5346 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5347 {
5348 return wl;
5349 }
5350
5351 #endif
5352
5353 static void record_wakee(struct task_struct *p)
5354 {
5355 /*
5356 * Only decay a single time; tasks that have less then 1 wakeup per
5357 * jiffy will not have built up many flips.
5358 */
5359 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5360 current->wakee_flips >>= 1;
5361 current->wakee_flip_decay_ts = jiffies;
5362 }
5363
5364 if (current->last_wakee != p) {
5365 current->last_wakee = p;
5366 current->wakee_flips++;
5367 }
5368 }
5369
5370 /*
5371 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5372 *
5373 * A waker of many should wake a different task than the one last awakened
5374 * at a frequency roughly N times higher than one of its wakees.
5375 *
5376 * In order to determine whether we should let the load spread vs consolidating
5377 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5378 * partner, and a factor of lls_size higher frequency in the other.
5379 *
5380 * With both conditions met, we can be relatively sure that the relationship is
5381 * non-monogamous, with partner count exceeding socket size.
5382 *
5383 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5384 * whatever is irrelevant, spread criteria is apparent partner count exceeds
5385 * socket size.
5386 */
5387 static int wake_wide(struct task_struct *p)
5388 {
5389 unsigned int master = current->wakee_flips;
5390 unsigned int slave = p->wakee_flips;
5391 int factor = this_cpu_read(sd_llc_size);
5392
5393 if (master < slave)
5394 swap(master, slave);
5395 if (slave < factor || master < slave * factor)
5396 return 0;
5397 return 1;
5398 }
5399
5400 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5401 int prev_cpu, int sync)
5402 {
5403 s64 this_load, load;
5404 s64 this_eff_load, prev_eff_load;
5405 int idx, this_cpu;
5406 struct task_group *tg;
5407 unsigned long weight;
5408 int balanced;
5409
5410 idx = sd->wake_idx;
5411 this_cpu = smp_processor_id();
5412 load = source_load(prev_cpu, idx);
5413 this_load = target_load(this_cpu, idx);
5414
5415 /*
5416 * If sync wakeup then subtract the (maximum possible)
5417 * effect of the currently running task from the load
5418 * of the current CPU:
5419 */
5420 if (sync) {
5421 tg = task_group(current);
5422 weight = current->se.avg.load_avg;
5423
5424 this_load += effective_load(tg, this_cpu, -weight, -weight);
5425 load += effective_load(tg, prev_cpu, 0, -weight);
5426 }
5427
5428 tg = task_group(p);
5429 weight = p->se.avg.load_avg;
5430
5431 /*
5432 * In low-load situations, where prev_cpu is idle and this_cpu is idle
5433 * due to the sync cause above having dropped this_load to 0, we'll
5434 * always have an imbalance, but there's really nothing you can do
5435 * about that, so that's good too.
5436 *
5437 * Otherwise check if either cpus are near enough in load to allow this
5438 * task to be woken on this_cpu.
5439 */
5440 this_eff_load = 100;
5441 this_eff_load *= capacity_of(prev_cpu);
5442
5443 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5444 prev_eff_load *= capacity_of(this_cpu);
5445
5446 if (this_load > 0) {
5447 this_eff_load *= this_load +
5448 effective_load(tg, this_cpu, weight, weight);
5449
5450 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
5451 }
5452
5453 balanced = this_eff_load <= prev_eff_load;
5454
5455 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5456
5457 if (!balanced)
5458 return 0;
5459
5460 schedstat_inc(sd->ttwu_move_affine);
5461 schedstat_inc(p->se.statistics.nr_wakeups_affine);
5462
5463 return 1;
5464 }
5465
5466 static inline int task_util(struct task_struct *p);
5467 static int cpu_util_wake(int cpu, struct task_struct *p);
5468
5469 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
5470 {
5471 return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
5472 }
5473
5474 /*
5475 * find_idlest_group finds and returns the least busy CPU group within the
5476 * domain.
5477 */
5478 static struct sched_group *
5479 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5480 int this_cpu, int sd_flag)
5481 {
5482 struct sched_group *idlest = NULL, *group = sd->groups;
5483 struct sched_group *most_spare_sg = NULL;
5484 unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0;
5485 unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0;
5486 unsigned long most_spare = 0, this_spare = 0;
5487 int load_idx = sd->forkexec_idx;
5488 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
5489 unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
5490 (sd->imbalance_pct-100) / 100;
5491
5492 if (sd_flag & SD_BALANCE_WAKE)
5493 load_idx = sd->wake_idx;
5494
5495 do {
5496 unsigned long load, avg_load, runnable_load;
5497 unsigned long spare_cap, max_spare_cap;
5498 int local_group;
5499 int i;
5500
5501 /* Skip over this group if it has no CPUs allowed */
5502 if (!cpumask_intersects(sched_group_cpus(group),
5503 &p->cpus_allowed))
5504 continue;
5505
5506 local_group = cpumask_test_cpu(this_cpu,
5507 sched_group_cpus(group));
5508
5509 /*
5510 * Tally up the load of all CPUs in the group and find
5511 * the group containing the CPU with most spare capacity.
5512 */
5513 avg_load = 0;
5514 runnable_load = 0;
5515 max_spare_cap = 0;
5516
5517 for_each_cpu(i, sched_group_cpus(group)) {
5518 /* Bias balancing toward cpus of our domain */
5519 if (local_group)
5520 load = source_load(i, load_idx);
5521 else
5522 load = target_load(i, load_idx);
5523
5524 runnable_load += load;
5525
5526 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
5527
5528 spare_cap = capacity_spare_wake(i, p);
5529
5530 if (spare_cap > max_spare_cap)
5531 max_spare_cap = spare_cap;
5532 }
5533
5534 /* Adjust by relative CPU capacity of the group */
5535 avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
5536 group->sgc->capacity;
5537 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
5538 group->sgc->capacity;
5539
5540 if (local_group) {
5541 this_runnable_load = runnable_load;
5542 this_avg_load = avg_load;
5543 this_spare = max_spare_cap;
5544 } else {
5545 if (min_runnable_load > (runnable_load + imbalance)) {
5546 /*
5547 * The runnable load is significantly smaller
5548 * so we can pick this new cpu
5549 */
5550 min_runnable_load = runnable_load;
5551 min_avg_load = avg_load;
5552 idlest = group;
5553 } else if ((runnable_load < (min_runnable_load + imbalance)) &&
5554 (100*min_avg_load > imbalance_scale*avg_load)) {
5555 /*
5556 * The runnable loads are close so take the
5557 * blocked load into account through avg_load.
5558 */
5559 min_avg_load = avg_load;
5560 idlest = group;
5561 }
5562
5563 if (most_spare < max_spare_cap) {
5564 most_spare = max_spare_cap;
5565 most_spare_sg = group;
5566 }
5567 }
5568 } while (group = group->next, group != sd->groups);
5569
5570 /*
5571 * The cross-over point between using spare capacity or least load
5572 * is too conservative for high utilization tasks on partially
5573 * utilized systems if we require spare_capacity > task_util(p),
5574 * so we allow for some task stuffing by using
5575 * spare_capacity > task_util(p)/2.
5576 *
5577 * Spare capacity can't be used for fork because the utilization has
5578 * not been set yet, we must first select a rq to compute the initial
5579 * utilization.
5580 */
5581 if (sd_flag & SD_BALANCE_FORK)
5582 goto skip_spare;
5583
5584 if (this_spare > task_util(p) / 2 &&
5585 imbalance_scale*this_spare > 100*most_spare)
5586 return NULL;
5587
5588 if (most_spare > task_util(p) / 2)
5589 return most_spare_sg;
5590
5591 skip_spare:
5592 if (!idlest)
5593 return NULL;
5594
5595 if (min_runnable_load > (this_runnable_load + imbalance))
5596 return NULL;
5597
5598 if ((this_runnable_load < (min_runnable_load + imbalance)) &&
5599 (100*this_avg_load < imbalance_scale*min_avg_load))
5600 return NULL;
5601
5602 return idlest;
5603 }
5604
5605 /*
5606 * find_idlest_cpu - find the idlest cpu among the cpus in group.
5607 */
5608 static int
5609 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5610 {
5611 unsigned long load, min_load = ULONG_MAX;
5612 unsigned int min_exit_latency = UINT_MAX;
5613 u64 latest_idle_timestamp = 0;
5614 int least_loaded_cpu = this_cpu;
5615 int shallowest_idle_cpu = -1;
5616 int i;
5617
5618 /* Check if we have any choice: */
5619 if (group->group_weight == 1)
5620 return cpumask_first(sched_group_cpus(group));
5621
5622 /* Traverse only the allowed CPUs */
5623 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
5624 if (idle_cpu(i)) {
5625 struct rq *rq = cpu_rq(i);
5626 struct cpuidle_state *idle = idle_get_state(rq);
5627 if (idle && idle->exit_latency < min_exit_latency) {
5628 /*
5629 * We give priority to a CPU whose idle state
5630 * has the smallest exit latency irrespective
5631 * of any idle timestamp.
5632 */
5633 min_exit_latency = idle->exit_latency;
5634 latest_idle_timestamp = rq->idle_stamp;
5635 shallowest_idle_cpu = i;
5636 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5637 rq->idle_stamp > latest_idle_timestamp) {
5638 /*
5639 * If equal or no active idle state, then
5640 * the most recently idled CPU might have
5641 * a warmer cache.
5642 */
5643 latest_idle_timestamp = rq->idle_stamp;
5644 shallowest_idle_cpu = i;
5645 }
5646 } else if (shallowest_idle_cpu == -1) {
5647 load = weighted_cpuload(i);
5648 if (load < min_load || (load == min_load && i == this_cpu)) {
5649 min_load = load;
5650 least_loaded_cpu = i;
5651 }
5652 }
5653 }
5654
5655 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5656 }
5657
5658 /*
5659 * Implement a for_each_cpu() variant that starts the scan at a given cpu
5660 * (@start), and wraps around.
5661 *
5662 * This is used to scan for idle CPUs; such that not all CPUs looking for an
5663 * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5664 * through the LLC domain.
5665 *
5666 * Especially tbench is found sensitive to this.
5667 */
5668
5669 static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5670 {
5671 int next;
5672
5673 again:
5674 next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5675
5676 if (*wrapped) {
5677 if (next >= start)
5678 return nr_cpumask_bits;
5679 } else {
5680 if (next >= nr_cpumask_bits) {
5681 *wrapped = 1;
5682 n = -1;
5683 goto again;
5684 }
5685 }
5686
5687 return next;
5688 }
5689
5690 #define for_each_cpu_wrap(cpu, mask, start, wrap) \
5691 for ((wrap) = 0, (cpu) = (start)-1; \
5692 (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
5693 (cpu) < nr_cpumask_bits; )
5694
5695 #ifdef CONFIG_SCHED_SMT
5696
5697 static inline void set_idle_cores(int cpu, int val)
5698 {
5699 struct sched_domain_shared *sds;
5700
5701 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5702 if (sds)
5703 WRITE_ONCE(sds->has_idle_cores, val);
5704 }
5705
5706 static inline bool test_idle_cores(int cpu, bool def)
5707 {
5708 struct sched_domain_shared *sds;
5709
5710 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5711 if (sds)
5712 return READ_ONCE(sds->has_idle_cores);
5713
5714 return def;
5715 }
5716
5717 /*
5718 * Scans the local SMT mask to see if the entire core is idle, and records this
5719 * information in sd_llc_shared->has_idle_cores.
5720 *
5721 * Since SMT siblings share all cache levels, inspecting this limited remote
5722 * state should be fairly cheap.
5723 */
5724 void __update_idle_core(struct rq *rq)
5725 {
5726 int core = cpu_of(rq);
5727 int cpu;
5728
5729 rcu_read_lock();
5730 if (test_idle_cores(core, true))
5731 goto unlock;
5732
5733 for_each_cpu(cpu, cpu_smt_mask(core)) {
5734 if (cpu == core)
5735 continue;
5736
5737 if (!idle_cpu(cpu))
5738 goto unlock;
5739 }
5740
5741 set_idle_cores(core, 1);
5742 unlock:
5743 rcu_read_unlock();
5744 }
5745
5746 /*
5747 * Scan the entire LLC domain for idle cores; this dynamically switches off if
5748 * there are no idle cores left in the system; tracked through
5749 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5750 */
5751 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5752 {
5753 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5754 int core, cpu, wrap;
5755
5756 if (!static_branch_likely(&sched_smt_present))
5757 return -1;
5758
5759 if (!test_idle_cores(target, false))
5760 return -1;
5761
5762 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
5763
5764 for_each_cpu_wrap(core, cpus, target, wrap) {
5765 bool idle = true;
5766
5767 for_each_cpu(cpu, cpu_smt_mask(core)) {
5768 cpumask_clear_cpu(cpu, cpus);
5769 if (!idle_cpu(cpu))
5770 idle = false;
5771 }
5772
5773 if (idle)
5774 return core;
5775 }
5776
5777 /*
5778 * Failed to find an idle core; stop looking for one.
5779 */
5780 set_idle_cores(target, 0);
5781
5782 return -1;
5783 }
5784
5785 /*
5786 * Scan the local SMT mask for idle CPUs.
5787 */
5788 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5789 {
5790 int cpu;
5791
5792 if (!static_branch_likely(&sched_smt_present))
5793 return -1;
5794
5795 for_each_cpu(cpu, cpu_smt_mask(target)) {
5796 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
5797 continue;
5798 if (idle_cpu(cpu))
5799 return cpu;
5800 }
5801
5802 return -1;
5803 }
5804
5805 #else /* CONFIG_SCHED_SMT */
5806
5807 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5808 {
5809 return -1;
5810 }
5811
5812 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5813 {
5814 return -1;
5815 }
5816
5817 #endif /* CONFIG_SCHED_SMT */
5818
5819 /*
5820 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5821 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5822 * average idle time for this rq (as found in rq->avg_idle).
5823 */
5824 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5825 {
5826 struct sched_domain *this_sd;
5827 u64 avg_cost, avg_idle = this_rq()->avg_idle;
5828 u64 time, cost;
5829 s64 delta;
5830 int cpu, wrap;
5831
5832 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5833 if (!this_sd)
5834 return -1;
5835
5836 avg_cost = this_sd->avg_scan_cost;
5837
5838 /*
5839 * Due to large variance we need a large fuzz factor; hackbench in
5840 * particularly is sensitive here.
5841 */
5842 if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
5843 return -1;
5844
5845 time = local_clock();
5846
5847 for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5848 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
5849 continue;
5850 if (idle_cpu(cpu))
5851 break;
5852 }
5853
5854 time = local_clock() - time;
5855 cost = this_sd->avg_scan_cost;
5856 delta = (s64)(time - cost) / 8;
5857 this_sd->avg_scan_cost += delta;
5858
5859 return cpu;
5860 }
5861
5862 /*
5863 * Try and locate an idle core/thread in the LLC cache domain.
5864 */
5865 static int select_idle_sibling(struct task_struct *p, int prev, int target)
5866 {
5867 struct sched_domain *sd;
5868 int i;
5869
5870 if (idle_cpu(target))
5871 return target;
5872
5873 /*
5874 * If the previous cpu is cache affine and idle, don't be stupid.
5875 */
5876 if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5877 return prev;
5878
5879 sd = rcu_dereference(per_cpu(sd_llc, target));
5880 if (!sd)
5881 return target;
5882
5883 i = select_idle_core(p, sd, target);
5884 if ((unsigned)i < nr_cpumask_bits)
5885 return i;
5886
5887 i = select_idle_cpu(p, sd, target);
5888 if ((unsigned)i < nr_cpumask_bits)
5889 return i;
5890
5891 i = select_idle_smt(p, sd, target);
5892 if ((unsigned)i < nr_cpumask_bits)
5893 return i;
5894
5895 return target;
5896 }
5897
5898 /*
5899 * cpu_util returns the amount of capacity of a CPU that is used by CFS
5900 * tasks. The unit of the return value must be the one of capacity so we can
5901 * compare the utilization with the capacity of the CPU that is available for
5902 * CFS task (ie cpu_capacity).
5903 *
5904 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5905 * recent utilization of currently non-runnable tasks on a CPU. It represents
5906 * the amount of utilization of a CPU in the range [0..capacity_orig] where
5907 * capacity_orig is the cpu_capacity available at the highest frequency
5908 * (arch_scale_freq_capacity()).
5909 * The utilization of a CPU converges towards a sum equal to or less than the
5910 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5911 * the running time on this CPU scaled by capacity_curr.
5912 *
5913 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5914 * higher than capacity_orig because of unfortunate rounding in
5915 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5916 * the average stabilizes with the new running time. We need to check that the
5917 * utilization stays within the range of [0..capacity_orig] and cap it if
5918 * necessary. Without utilization capping, a group could be seen as overloaded
5919 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5920 * available capacity. We allow utilization to overshoot capacity_curr (but not
5921 * capacity_orig) as it useful for predicting the capacity required after task
5922 * migrations (scheduler-driven DVFS).
5923 */
5924 static int cpu_util(int cpu)
5925 {
5926 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5927 unsigned long capacity = capacity_orig_of(cpu);
5928
5929 return (util >= capacity) ? capacity : util;
5930 }
5931
5932 static inline int task_util(struct task_struct *p)
5933 {
5934 return p->se.avg.util_avg;
5935 }
5936
5937 /*
5938 * cpu_util_wake: Compute cpu utilization with any contributions from
5939 * the waking task p removed.
5940 */
5941 static int cpu_util_wake(int cpu, struct task_struct *p)
5942 {
5943 unsigned long util, capacity;
5944
5945 /* Task has no contribution or is new */
5946 if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
5947 return cpu_util(cpu);
5948
5949 capacity = capacity_orig_of(cpu);
5950 util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);
5951
5952 return (util >= capacity) ? capacity : util;
5953 }
5954
5955 /*
5956 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5957 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5958 *
5959 * In that case WAKE_AFFINE doesn't make sense and we'll let
5960 * BALANCE_WAKE sort things out.
5961 */
5962 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
5963 {
5964 long min_cap, max_cap;
5965
5966 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
5967 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
5968
5969 /* Minimum capacity is close to max, no need to abort wake_affine */
5970 if (max_cap - min_cap < max_cap >> 3)
5971 return 0;
5972
5973 /* Bring task utilization in sync with prev_cpu */
5974 sync_entity_load_avg(&p->se);
5975
5976 return min_cap * 1024 < task_util(p) * capacity_margin;
5977 }
5978
5979 /*
5980 * select_task_rq_fair: Select target runqueue for the waking task in domains
5981 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5982 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
5983 *
5984 * Balances load by selecting the idlest cpu in the idlest group, or under
5985 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
5986 *
5987 * Returns the target cpu number.
5988 *
5989 * preempt must be disabled.
5990 */
5991 static int
5992 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5993 {
5994 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5995 int cpu = smp_processor_id();
5996 int new_cpu = prev_cpu;
5997 int want_affine = 0;
5998 int sync = wake_flags & WF_SYNC;
5999
6000 if (sd_flag & SD_BALANCE_WAKE) {
6001 record_wakee(p);
6002 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
6003 && cpumask_test_cpu(cpu, &p->cpus_allowed);
6004 }
6005
6006 rcu_read_lock();
6007 for_each_domain(cpu, tmp) {
6008 if (!(tmp->flags & SD_LOAD_BALANCE))
6009 break;
6010
6011 /*
6012 * If both cpu and prev_cpu are part of this domain,
6013 * cpu is a valid SD_WAKE_AFFINE target.
6014 */
6015 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
6016 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
6017 affine_sd = tmp;
6018 break;
6019 }
6020
6021 if (tmp->flags & sd_flag)
6022 sd = tmp;
6023 else if (!want_affine)
6024 break;
6025 }
6026
6027 if (affine_sd) {
6028 sd = NULL; /* Prefer wake_affine over balance flags */
6029 if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
6030 new_cpu = cpu;
6031 }
6032
6033 if (!sd) {
6034 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
6035 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
6036
6037 } else while (sd) {
6038 struct sched_group *group;
6039 int weight;
6040
6041 if (!(sd->flags & sd_flag)) {
6042 sd = sd->child;
6043 continue;
6044 }
6045
6046 group = find_idlest_group(sd, p, cpu, sd_flag);
6047 if (!group) {
6048 sd = sd->child;
6049 continue;
6050 }
6051
6052 new_cpu = find_idlest_cpu(group, p, cpu);
6053 if (new_cpu == -1 || new_cpu == cpu) {
6054 /* Now try balancing at a lower domain level of cpu */
6055 sd = sd->child;
6056 continue;
6057 }
6058
6059 /* Now try balancing at a lower domain level of new_cpu */
6060 cpu = new_cpu;
6061 weight = sd->span_weight;
6062 sd = NULL;
6063 for_each_domain(cpu, tmp) {
6064 if (weight <= tmp->span_weight)
6065 break;
6066 if (tmp->flags & sd_flag)
6067 sd = tmp;
6068 }
6069 /* while loop will break here if sd == NULL */
6070 }
6071 rcu_read_unlock();
6072
6073 return new_cpu;
6074 }
6075
6076 /*
6077 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
6078 * cfs_rq_of(p) references at time of call are still valid and identify the
6079 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
6080 */
6081 static void migrate_task_rq_fair(struct task_struct *p)
6082 {
6083 /*
6084 * As blocked tasks retain absolute vruntime the migration needs to
6085 * deal with this by subtracting the old and adding the new
6086 * min_vruntime -- the latter is done by enqueue_entity() when placing
6087 * the task on the new runqueue.
6088 */
6089 if (p->state == TASK_WAKING) {
6090 struct sched_entity *se = &p->se;
6091 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6092 u64 min_vruntime;
6093
6094 #ifndef CONFIG_64BIT
6095 u64 min_vruntime_copy;
6096
6097 do {
6098 min_vruntime_copy = cfs_rq->min_vruntime_copy;
6099 smp_rmb();
6100 min_vruntime = cfs_rq->min_vruntime;
6101 } while (min_vruntime != min_vruntime_copy);
6102 #else
6103 min_vruntime = cfs_rq->min_vruntime;
6104 #endif
6105
6106 se->vruntime -= min_vruntime;
6107 }
6108
6109 /*
6110 * We are supposed to update the task to "current" time, then its up to date
6111 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
6112 * what current time is, so simply throw away the out-of-date time. This
6113 * will result in the wakee task is less decayed, but giving the wakee more
6114 * load sounds not bad.
6115 */
6116 remove_entity_load_avg(&p->se);
6117
6118 /* Tell new CPU we are migrated */
6119 p->se.avg.last_update_time = 0;
6120
6121 /* We have migrated, no longer consider this task hot */
6122 p->se.exec_start = 0;
6123 }
6124
6125 static void task_dead_fair(struct task_struct *p)
6126 {
6127 remove_entity_load_avg(&p->se);
6128 }
6129 #endif /* CONFIG_SMP */
6130
6131 static unsigned long
6132 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
6133 {
6134 unsigned long gran = sysctl_sched_wakeup_granularity;
6135
6136 /*
6137 * Since its curr running now, convert the gran from real-time
6138 * to virtual-time in his units.
6139 *
6140 * By using 'se' instead of 'curr' we penalize light tasks, so
6141 * they get preempted easier. That is, if 'se' < 'curr' then
6142 * the resulting gran will be larger, therefore penalizing the
6143 * lighter, if otoh 'se' > 'curr' then the resulting gran will
6144 * be smaller, again penalizing the lighter task.
6145 *
6146 * This is especially important for buddies when the leftmost
6147 * task is higher priority than the buddy.
6148 */
6149 return calc_delta_fair(gran, se);
6150 }
6151
6152 /*
6153 * Should 'se' preempt 'curr'.
6154 *
6155 * |s1
6156 * |s2
6157 * |s3
6158 * g
6159 * |<--->|c
6160 *
6161 * w(c, s1) = -1
6162 * w(c, s2) = 0
6163 * w(c, s3) = 1
6164 *
6165 */
6166 static int
6167 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
6168 {
6169 s64 gran, vdiff = curr->vruntime - se->vruntime;
6170
6171 if (vdiff <= 0)
6172 return -1;
6173
6174 gran = wakeup_gran(curr, se);
6175 if (vdiff > gran)
6176 return 1;
6177
6178 return 0;
6179 }
6180
6181 static void set_last_buddy(struct sched_entity *se)
6182 {
6183 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6184 return;
6185
6186 for_each_sched_entity(se)
6187 cfs_rq_of(se)->last = se;
6188 }
6189
6190 static void set_next_buddy(struct sched_entity *se)
6191 {
6192 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6193 return;
6194
6195 for_each_sched_entity(se)
6196 cfs_rq_of(se)->next = se;
6197 }
6198
6199 static void set_skip_buddy(struct sched_entity *se)
6200 {
6201 for_each_sched_entity(se)
6202 cfs_rq_of(se)->skip = se;
6203 }
6204
6205 /*
6206 * Preempt the current task with a newly woken task if needed:
6207 */
6208 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
6209 {
6210 struct task_struct *curr = rq->curr;
6211 struct sched_entity *se = &curr->se, *pse = &p->se;
6212 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6213 int scale = cfs_rq->nr_running >= sched_nr_latency;
6214 int next_buddy_marked = 0;
6215
6216 if (unlikely(se == pse))
6217 return;
6218
6219 /*
6220 * This is possible from callers such as attach_tasks(), in which we
6221 * unconditionally check_prempt_curr() after an enqueue (which may have
6222 * lead to a throttle). This both saves work and prevents false
6223 * next-buddy nomination below.
6224 */
6225 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
6226 return;
6227
6228 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
6229 set_next_buddy(pse);
6230 next_buddy_marked = 1;
6231 }
6232
6233 /*
6234 * We can come here with TIF_NEED_RESCHED already set from new task
6235 * wake up path.
6236 *
6237 * Note: this also catches the edge-case of curr being in a throttled
6238 * group (e.g. via set_curr_task), since update_curr() (in the
6239 * enqueue of curr) will have resulted in resched being set. This
6240 * prevents us from potentially nominating it as a false LAST_BUDDY
6241 * below.
6242 */
6243 if (test_tsk_need_resched(curr))
6244 return;
6245
6246 /* Idle tasks are by definition preempted by non-idle tasks. */
6247 if (unlikely(curr->policy == SCHED_IDLE) &&
6248 likely(p->policy != SCHED_IDLE))
6249 goto preempt;
6250
6251 /*
6252 * Batch and idle tasks do not preempt non-idle tasks (their preemption
6253 * is driven by the tick):
6254 */
6255 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
6256 return;
6257
6258 find_matching_se(&se, &pse);
6259 update_curr(cfs_rq_of(se));
6260 BUG_ON(!pse);
6261 if (wakeup_preempt_entity(se, pse) == 1) {
6262 /*
6263 * Bias pick_next to pick the sched entity that is
6264 * triggering this preemption.
6265 */
6266 if (!next_buddy_marked)
6267 set_next_buddy(pse);
6268 goto preempt;
6269 }
6270
6271 return;
6272
6273 preempt:
6274 resched_curr(rq);
6275 /*
6276 * Only set the backward buddy when the current task is still
6277 * on the rq. This can happen when a wakeup gets interleaved
6278 * with schedule on the ->pre_schedule() or idle_balance()
6279 * point, either of which can * drop the rq lock.
6280 *
6281 * Also, during early boot the idle thread is in the fair class,
6282 * for obvious reasons its a bad idea to schedule back to it.
6283 */
6284 if (unlikely(!se->on_rq || curr == rq->idle))
6285 return;
6286
6287 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
6288 set_last_buddy(se);
6289 }
6290
6291 static struct task_struct *
6292 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6293 {
6294 struct cfs_rq *cfs_rq = &rq->cfs;
6295 struct sched_entity *se;
6296 struct task_struct *p;
6297 int new_tasks;
6298
6299 again:
6300 #ifdef CONFIG_FAIR_GROUP_SCHED
6301 if (!cfs_rq->nr_running)
6302 goto idle;
6303
6304 if (prev->sched_class != &fair_sched_class)
6305 goto simple;
6306
6307 /*
6308 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
6309 * likely that a next task is from the same cgroup as the current.
6310 *
6311 * Therefore attempt to avoid putting and setting the entire cgroup
6312 * hierarchy, only change the part that actually changes.
6313 */
6314
6315 do {
6316 struct sched_entity *curr = cfs_rq->curr;
6317
6318 /*
6319 * Since we got here without doing put_prev_entity() we also
6320 * have to consider cfs_rq->curr. If it is still a runnable
6321 * entity, update_curr() will update its vruntime, otherwise
6322 * forget we've ever seen it.
6323 */
6324 if (curr) {
6325 if (curr->on_rq)
6326 update_curr(cfs_rq);
6327 else
6328 curr = NULL;
6329
6330 /*
6331 * This call to check_cfs_rq_runtime() will do the
6332 * throttle and dequeue its entity in the parent(s).
6333 * Therefore the 'simple' nr_running test will indeed
6334 * be correct.
6335 */
6336 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
6337 goto simple;
6338 }
6339
6340 se = pick_next_entity(cfs_rq, curr);
6341 cfs_rq = group_cfs_rq(se);
6342 } while (cfs_rq);
6343
6344 p = task_of(se);
6345
6346 /*
6347 * Since we haven't yet done put_prev_entity and if the selected task
6348 * is a different task than we started out with, try and touch the
6349 * least amount of cfs_rqs.
6350 */
6351 if (prev != p) {
6352 struct sched_entity *pse = &prev->se;
6353
6354 while (!(cfs_rq = is_same_group(se, pse))) {
6355 int se_depth = se->depth;
6356 int pse_depth = pse->depth;
6357
6358 if (se_depth <= pse_depth) {
6359 put_prev_entity(cfs_rq_of(pse), pse);
6360 pse = parent_entity(pse);
6361 }
6362 if (se_depth >= pse_depth) {
6363 set_next_entity(cfs_rq_of(se), se);
6364 se = parent_entity(se);
6365 }
6366 }
6367
6368 put_prev_entity(cfs_rq, pse);
6369 set_next_entity(cfs_rq, se);
6370 }
6371
6372 if (hrtick_enabled(rq))
6373 hrtick_start_fair(rq, p);
6374
6375 return p;
6376 simple:
6377 cfs_rq = &rq->cfs;
6378 #endif
6379
6380 if (!cfs_rq->nr_running)
6381 goto idle;
6382
6383 put_prev_task(rq, prev);
6384
6385 do {
6386 se = pick_next_entity(cfs_rq, NULL);
6387 set_next_entity(cfs_rq, se);
6388 cfs_rq = group_cfs_rq(se);
6389 } while (cfs_rq);
6390
6391 p = task_of(se);
6392
6393 if (hrtick_enabled(rq))
6394 hrtick_start_fair(rq, p);
6395
6396 return p;
6397
6398 idle:
6399 new_tasks = idle_balance(rq, rf);
6400
6401 /*
6402 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6403 * possible for any higher priority task to appear. In that case we
6404 * must re-start the pick_next_entity() loop.
6405 */
6406 if (new_tasks < 0)
6407 return RETRY_TASK;
6408
6409 if (new_tasks > 0)
6410 goto again;
6411
6412 return NULL;
6413 }
6414
6415 /*
6416 * Account for a descheduled task:
6417 */
6418 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6419 {
6420 struct sched_entity *se = &prev->se;
6421 struct cfs_rq *cfs_rq;
6422
6423 for_each_sched_entity(se) {
6424 cfs_rq = cfs_rq_of(se);
6425 put_prev_entity(cfs_rq, se);
6426 }
6427 }
6428
6429 /*
6430 * sched_yield() is very simple
6431 *
6432 * The magic of dealing with the ->skip buddy is in pick_next_entity.
6433 */
6434 static void yield_task_fair(struct rq *rq)
6435 {
6436 struct task_struct *curr = rq->curr;
6437 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6438 struct sched_entity *se = &curr->se;
6439
6440 /*
6441 * Are we the only task in the tree?
6442 */
6443 if (unlikely(rq->nr_running == 1))
6444 return;
6445
6446 clear_buddies(cfs_rq, se);
6447
6448 if (curr->policy != SCHED_BATCH) {
6449 update_rq_clock(rq);
6450 /*
6451 * Update run-time statistics of the 'current'.
6452 */
6453 update_curr(cfs_rq);
6454 /*
6455 * Tell update_rq_clock() that we've just updated,
6456 * so we don't do microscopic update in schedule()
6457 * and double the fastpath cost.
6458 */
6459 rq_clock_skip_update(rq, true);
6460 }
6461
6462 set_skip_buddy(se);
6463 }
6464
6465 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6466 {
6467 struct sched_entity *se = &p->se;
6468
6469 /* throttled hierarchies are not runnable */
6470 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
6471 return false;
6472
6473 /* Tell the scheduler that we'd really like pse to run next. */
6474 set_next_buddy(se);
6475
6476 yield_task_fair(rq);
6477
6478 return true;
6479 }
6480
6481 #ifdef CONFIG_SMP
6482 /**************************************************
6483 * Fair scheduling class load-balancing methods.
6484 *
6485 * BASICS
6486 *
6487 * The purpose of load-balancing is to achieve the same basic fairness the
6488 * per-cpu scheduler provides, namely provide a proportional amount of compute
6489 * time to each task. This is expressed in the following equation:
6490 *
6491 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
6492 *
6493 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6494 * W_i,0 is defined as:
6495 *
6496 * W_i,0 = \Sum_j w_i,j (2)
6497 *
6498 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
6499 * is derived from the nice value as per sched_prio_to_weight[].
6500 *
6501 * The weight average is an exponential decay average of the instantaneous
6502 * weight:
6503 *
6504 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
6505 *
6506 * C_i is the compute capacity of cpu i, typically it is the
6507 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6508 * can also include other factors [XXX].
6509 *
6510 * To achieve this balance we define a measure of imbalance which follows
6511 * directly from (1):
6512 *
6513 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
6514 *
6515 * We them move tasks around to minimize the imbalance. In the continuous
6516 * function space it is obvious this converges, in the discrete case we get
6517 * a few fun cases generally called infeasible weight scenarios.
6518 *
6519 * [XXX expand on:
6520 * - infeasible weights;
6521 * - local vs global optima in the discrete case. ]
6522 *
6523 *
6524 * SCHED DOMAINS
6525 *
6526 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6527 * for all i,j solution, we create a tree of cpus that follows the hardware
6528 * topology where each level pairs two lower groups (or better). This results
6529 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6530 * tree to only the first of the previous level and we decrease the frequency
6531 * of load-balance at each level inv. proportional to the number of cpus in
6532 * the groups.
6533 *
6534 * This yields:
6535 *
6536 * log_2 n 1 n
6537 * \Sum { --- * --- * 2^i } = O(n) (5)
6538 * i = 0 2^i 2^i
6539 * `- size of each group
6540 * | | `- number of cpus doing load-balance
6541 * | `- freq
6542 * `- sum over all levels
6543 *
6544 * Coupled with a limit on how many tasks we can migrate every balance pass,
6545 * this makes (5) the runtime complexity of the balancer.
6546 *
6547 * An important property here is that each CPU is still (indirectly) connected
6548 * to every other cpu in at most O(log n) steps:
6549 *
6550 * The adjacency matrix of the resulting graph is given by:
6551 *
6552 * log_2 n
6553 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
6554 * k = 0
6555 *
6556 * And you'll find that:
6557 *
6558 * A^(log_2 n)_i,j != 0 for all i,j (7)
6559 *
6560 * Showing there's indeed a path between every cpu in at most O(log n) steps.
6561 * The task movement gives a factor of O(m), giving a convergence complexity
6562 * of:
6563 *
6564 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
6565 *
6566 *
6567 * WORK CONSERVING
6568 *
6569 * In order to avoid CPUs going idle while there's still work to do, new idle
6570 * balancing is more aggressive and has the newly idle cpu iterate up the domain
6571 * tree itself instead of relying on other CPUs to bring it work.
6572 *
6573 * This adds some complexity to both (5) and (8) but it reduces the total idle
6574 * time.
6575 *
6576 * [XXX more?]
6577 *
6578 *
6579 * CGROUPS
6580 *
6581 * Cgroups make a horror show out of (2), instead of a simple sum we get:
6582 *
6583 * s_k,i
6584 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
6585 * S_k
6586 *
6587 * Where
6588 *
6589 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
6590 *
6591 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6592 *
6593 * The big problem is S_k, its a global sum needed to compute a local (W_i)
6594 * property.
6595 *
6596 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6597 * rewrite all of this once again.]
6598 */
6599
6600 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6601
6602 enum fbq_type { regular, remote, all };
6603
6604 #define LBF_ALL_PINNED 0x01
6605 #define LBF_NEED_BREAK 0x02
6606 #define LBF_DST_PINNED 0x04
6607 #define LBF_SOME_PINNED 0x08
6608
6609 struct lb_env {
6610 struct sched_domain *sd;
6611
6612 struct rq *src_rq;
6613 int src_cpu;
6614
6615 int dst_cpu;
6616 struct rq *dst_rq;
6617
6618 struct cpumask *dst_grpmask;
6619 int new_dst_cpu;
6620 enum cpu_idle_type idle;
6621 long imbalance;
6622 /* The set of CPUs under consideration for load-balancing */
6623 struct cpumask *cpus;
6624
6625 unsigned int flags;
6626
6627 unsigned int loop;
6628 unsigned int loop_break;
6629 unsigned int loop_max;
6630
6631 enum fbq_type fbq_type;
6632 struct list_head tasks;
6633 };
6634
6635 /*
6636 * Is this task likely cache-hot:
6637 */
6638 static int task_hot(struct task_struct *p, struct lb_env *env)
6639 {
6640 s64 delta;
6641
6642 lockdep_assert_held(&env->src_rq->lock);
6643
6644 if (p->sched_class != &fair_sched_class)
6645 return 0;
6646
6647 if (unlikely(p->policy == SCHED_IDLE))
6648 return 0;
6649
6650 /*
6651 * Buddy candidates are cache hot:
6652 */
6653 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
6654 (&p->se == cfs_rq_of(&p->se)->next ||
6655 &p->se == cfs_rq_of(&p->se)->last))
6656 return 1;
6657
6658 if (sysctl_sched_migration_cost == -1)
6659 return 1;
6660 if (sysctl_sched_migration_cost == 0)
6661 return 0;
6662
6663 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
6664
6665 return delta < (s64)sysctl_sched_migration_cost;
6666 }
6667
6668 #ifdef CONFIG_NUMA_BALANCING
6669 /*
6670 * Returns 1, if task migration degrades locality
6671 * Returns 0, if task migration improves locality i.e migration preferred.
6672 * Returns -1, if task migration is not affected by locality.
6673 */
6674 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
6675 {
6676 struct numa_group *numa_group = rcu_dereference(p->numa_group);
6677 unsigned long src_faults, dst_faults;
6678 int src_nid, dst_nid;
6679
6680 if (!static_branch_likely(&sched_numa_balancing))
6681 return -1;
6682
6683 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
6684 return -1;
6685
6686 src_nid = cpu_to_node(env->src_cpu);
6687 dst_nid = cpu_to_node(env->dst_cpu);
6688
6689 if (src_nid == dst_nid)
6690 return -1;
6691
6692 /* Migrating away from the preferred node is always bad. */
6693 if (src_nid == p->numa_preferred_nid) {
6694 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6695 return 1;
6696 else
6697 return -1;
6698 }
6699
6700 /* Encourage migration to the preferred node. */
6701 if (dst_nid == p->numa_preferred_nid)
6702 return 0;
6703
6704 if (numa_group) {
6705 src_faults = group_faults(p, src_nid);
6706 dst_faults = group_faults(p, dst_nid);
6707 } else {
6708 src_faults = task_faults(p, src_nid);
6709 dst_faults = task_faults(p, dst_nid);
6710 }
6711
6712 return dst_faults < src_faults;
6713 }
6714
6715 #else
6716 static inline int migrate_degrades_locality(struct task_struct *p,
6717 struct lb_env *env)
6718 {
6719 return -1;
6720 }
6721 #endif
6722
6723 /*
6724 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6725 */
6726 static
6727 int can_migrate_task(struct task_struct *p, struct lb_env *env)
6728 {
6729 int tsk_cache_hot;
6730
6731 lockdep_assert_held(&env->src_rq->lock);
6732
6733 /*
6734 * We do not migrate tasks that are:
6735 * 1) throttled_lb_pair, or
6736 * 2) cannot be migrated to this CPU due to cpus_allowed, or
6737 * 3) running (obviously), or
6738 * 4) are cache-hot on their current CPU.
6739 */
6740 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6741 return 0;
6742
6743 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
6744 int cpu;
6745
6746 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
6747
6748 env->flags |= LBF_SOME_PINNED;
6749
6750 /*
6751 * Remember if this task can be migrated to any other cpu in
6752 * our sched_group. We may want to revisit it if we couldn't
6753 * meet load balance goals by pulling other tasks on src_cpu.
6754 *
6755 * Also avoid computing new_dst_cpu if we have already computed
6756 * one in current iteration.
6757 */
6758 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
6759 return 0;
6760
6761 /* Prevent to re-select dst_cpu via env's cpus */
6762 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6763 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
6764 env->flags |= LBF_DST_PINNED;
6765 env->new_dst_cpu = cpu;
6766 break;
6767 }
6768 }
6769
6770 return 0;
6771 }
6772
6773 /* Record that we found atleast one task that could run on dst_cpu */
6774 env->flags &= ~LBF_ALL_PINNED;
6775
6776 if (task_running(env->src_rq, p)) {
6777 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
6778 return 0;
6779 }
6780
6781 /*
6782 * Aggressive migration if:
6783 * 1) destination numa is preferred
6784 * 2) task is cache cold, or
6785 * 3) too many balance attempts have failed.
6786 */
6787 tsk_cache_hot = migrate_degrades_locality(p, env);
6788 if (tsk_cache_hot == -1)
6789 tsk_cache_hot = task_hot(p, env);
6790
6791 if (tsk_cache_hot <= 0 ||
6792 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
6793 if (tsk_cache_hot == 1) {
6794 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6795 schedstat_inc(p->se.statistics.nr_forced_migrations);
6796 }
6797 return 1;
6798 }
6799
6800 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
6801 return 0;
6802 }
6803
6804 /*
6805 * detach_task() -- detach the task for the migration specified in env
6806 */
6807 static void detach_task(struct task_struct *p, struct lb_env *env)
6808 {
6809 lockdep_assert_held(&env->src_rq->lock);
6810
6811 p->on_rq = TASK_ON_RQ_MIGRATING;
6812 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
6813 set_task_cpu(p, env->dst_cpu);
6814 }
6815
6816 /*
6817 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
6818 * part of active balancing operations within "domain".
6819 *
6820 * Returns a task if successful and NULL otherwise.
6821 */
6822 static struct task_struct *detach_one_task(struct lb_env *env)
6823 {
6824 struct task_struct *p, *n;
6825
6826 lockdep_assert_held(&env->src_rq->lock);
6827
6828 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
6829 if (!can_migrate_task(p, env))
6830 continue;
6831
6832 detach_task(p, env);
6833
6834 /*
6835 * Right now, this is only the second place where
6836 * lb_gained[env->idle] is updated (other is detach_tasks)
6837 * so we can safely collect stats here rather than
6838 * inside detach_tasks().
6839 */
6840 schedstat_inc(env->sd->lb_gained[env->idle]);
6841 return p;
6842 }
6843 return NULL;
6844 }
6845
6846 static const unsigned int sched_nr_migrate_break = 32;
6847
6848 /*
6849 * detach_tasks() -- tries to detach up to imbalance weighted load from
6850 * busiest_rq, as part of a balancing operation within domain "sd".
6851 *
6852 * Returns number of detached tasks if successful and 0 otherwise.
6853 */
6854 static int detach_tasks(struct lb_env *env)
6855 {
6856 struct list_head *tasks = &env->src_rq->cfs_tasks;
6857 struct task_struct *p;
6858 unsigned long load;
6859 int detached = 0;
6860
6861 lockdep_assert_held(&env->src_rq->lock);
6862
6863 if (env->imbalance <= 0)
6864 return 0;
6865
6866 while (!list_empty(tasks)) {
6867 /*
6868 * We don't want to steal all, otherwise we may be treated likewise,
6869 * which could at worst lead to a livelock crash.
6870 */
6871 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6872 break;
6873
6874 p = list_first_entry(tasks, struct task_struct, se.group_node);
6875
6876 env->loop++;
6877 /* We've more or less seen every task there is, call it quits */
6878 if (env->loop > env->loop_max)
6879 break;
6880
6881 /* take a breather every nr_migrate tasks */
6882 if (env->loop > env->loop_break) {
6883 env->loop_break += sched_nr_migrate_break;
6884 env->flags |= LBF_NEED_BREAK;
6885 break;
6886 }
6887
6888 if (!can_migrate_task(p, env))
6889 goto next;
6890
6891 load = task_h_load(p);
6892
6893 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6894 goto next;
6895
6896 if ((load / 2) > env->imbalance)
6897 goto next;
6898
6899 detach_task(p, env);
6900 list_add(&p->se.group_node, &env->tasks);
6901
6902 detached++;
6903 env->imbalance -= load;
6904
6905 #ifdef CONFIG_PREEMPT
6906 /*
6907 * NEWIDLE balancing is a source of latency, so preemptible
6908 * kernels will stop after the first task is detached to minimize
6909 * the critical section.
6910 */
6911 if (env->idle == CPU_NEWLY_IDLE)
6912 break;
6913 #endif
6914
6915 /*
6916 * We only want to steal up to the prescribed amount of
6917 * weighted load.
6918 */
6919 if (env->imbalance <= 0)
6920 break;
6921
6922 continue;
6923 next:
6924 list_move_tail(&p->se.group_node, tasks);
6925 }
6926
6927 /*
6928 * Right now, this is one of only two places we collect this stat
6929 * so we can safely collect detach_one_task() stats here rather
6930 * than inside detach_one_task().
6931 */
6932 schedstat_add(env->sd->lb_gained[env->idle], detached);
6933
6934 return detached;
6935 }
6936
6937 /*
6938 * attach_task() -- attach the task detached by detach_task() to its new rq.
6939 */
6940 static void attach_task(struct rq *rq, struct task_struct *p)
6941 {
6942 lockdep_assert_held(&rq->lock);
6943
6944 BUG_ON(task_rq(p) != rq);
6945 activate_task(rq, p, ENQUEUE_NOCLOCK);
6946 p->on_rq = TASK_ON_RQ_QUEUED;
6947 check_preempt_curr(rq, p, 0);
6948 }
6949
6950 /*
6951 * attach_one_task() -- attaches the task returned from detach_one_task() to
6952 * its new rq.
6953 */
6954 static void attach_one_task(struct rq *rq, struct task_struct *p)
6955 {
6956 struct rq_flags rf;
6957
6958 rq_lock(rq, &rf);
6959 update_rq_clock(rq);
6960 attach_task(rq, p);
6961 rq_unlock(rq, &rf);
6962 }
6963
6964 /*
6965 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6966 * new rq.
6967 */
6968 static void attach_tasks(struct lb_env *env)
6969 {
6970 struct list_head *tasks = &env->tasks;
6971 struct task_struct *p;
6972 struct rq_flags rf;
6973
6974 rq_lock(env->dst_rq, &rf);
6975 update_rq_clock(env->dst_rq);
6976
6977 while (!list_empty(tasks)) {
6978 p = list_first_entry(tasks, struct task_struct, se.group_node);
6979 list_del_init(&p->se.group_node);
6980
6981 attach_task(env->dst_rq, p);
6982 }
6983
6984 rq_unlock(env->dst_rq, &rf);
6985 }
6986
6987 #ifdef CONFIG_FAIR_GROUP_SCHED
6988 static void update_blocked_averages(int cpu)
6989 {
6990 struct rq *rq = cpu_rq(cpu);
6991 struct cfs_rq *cfs_rq;
6992 struct rq_flags rf;
6993
6994 rq_lock_irqsave(rq, &rf);
6995 update_rq_clock(rq);
6996
6997 /*
6998 * Iterates the task_group tree in a bottom up fashion, see
6999 * list_add_leaf_cfs_rq() for details.
7000 */
7001 for_each_leaf_cfs_rq(rq, cfs_rq) {
7002 struct sched_entity *se;
7003
7004 /* throttled entities do not contribute to load */
7005 if (throttled_hierarchy(cfs_rq))
7006 continue;
7007
7008 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
7009 update_tg_load_avg(cfs_rq, 0);
7010
7011 /* Propagate pending load changes to the parent, if any: */
7012 se = cfs_rq->tg->se[cpu];
7013 if (se && !skip_blocked_update(se))
7014 update_load_avg(se, 0);
7015 }
7016 rq_unlock_irqrestore(rq, &rf);
7017 }
7018
7019 /*
7020 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
7021 * This needs to be done in a top-down fashion because the load of a child
7022 * group is a fraction of its parents load.
7023 */
7024 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
7025 {
7026 struct rq *rq = rq_of(cfs_rq);
7027 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
7028 unsigned long now = jiffies;
7029 unsigned long load;
7030
7031 if (cfs_rq->last_h_load_update == now)
7032 return;
7033
7034 cfs_rq->h_load_next = NULL;
7035 for_each_sched_entity(se) {
7036 cfs_rq = cfs_rq_of(se);
7037 cfs_rq->h_load_next = se;
7038 if (cfs_rq->last_h_load_update == now)
7039 break;
7040 }
7041
7042 if (!se) {
7043 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
7044 cfs_rq->last_h_load_update = now;
7045 }
7046
7047 while ((se = cfs_rq->h_load_next) != NULL) {
7048 load = cfs_rq->h_load;
7049 load = div64_ul(load * se->avg.load_avg,
7050 cfs_rq_load_avg(cfs_rq) + 1);
7051 cfs_rq = group_cfs_rq(se);
7052 cfs_rq->h_load = load;
7053 cfs_rq->last_h_load_update = now;
7054 }
7055 }
7056
7057 static unsigned long task_h_load(struct task_struct *p)
7058 {
7059 struct cfs_rq *cfs_rq = task_cfs_rq(p);
7060
7061 update_cfs_rq_h_load(cfs_rq);
7062 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
7063 cfs_rq_load_avg(cfs_rq) + 1);
7064 }
7065 #else
7066 static inline void update_blocked_averages(int cpu)
7067 {
7068 struct rq *rq = cpu_rq(cpu);
7069 struct cfs_rq *cfs_rq = &rq->cfs;
7070 struct rq_flags rf;
7071
7072 rq_lock_irqsave(rq, &rf);
7073 update_rq_clock(rq);
7074 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
7075 rq_unlock_irqrestore(rq, &rf);
7076 }
7077
7078 static unsigned long task_h_load(struct task_struct *p)
7079 {
7080 return p->se.avg.load_avg;
7081 }
7082 #endif
7083
7084 /********** Helpers for find_busiest_group ************************/
7085
7086 enum group_type {
7087 group_other = 0,
7088 group_imbalanced,
7089 group_overloaded,
7090 };
7091
7092 /*
7093 * sg_lb_stats - stats of a sched_group required for load_balancing
7094 */
7095 struct sg_lb_stats {
7096 unsigned long avg_load; /*Avg load across the CPUs of the group */
7097 unsigned long group_load; /* Total load over the CPUs of the group */
7098 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
7099 unsigned long load_per_task;
7100 unsigned long group_capacity;
7101 unsigned long group_util; /* Total utilization of the group */
7102 unsigned int sum_nr_running; /* Nr tasks running in the group */
7103 unsigned int idle_cpus;
7104 unsigned int group_weight;
7105 enum group_type group_type;
7106 int group_no_capacity;
7107 #ifdef CONFIG_NUMA_BALANCING
7108 unsigned int nr_numa_running;
7109 unsigned int nr_preferred_running;
7110 #endif
7111 };
7112
7113 /*
7114 * sd_lb_stats - Structure to store the statistics of a sched_domain
7115 * during load balancing.
7116 */
7117 struct sd_lb_stats {
7118 struct sched_group *busiest; /* Busiest group in this sd */
7119 struct sched_group *local; /* Local group in this sd */
7120 unsigned long total_load; /* Total load of all groups in sd */
7121 unsigned long total_capacity; /* Total capacity of all groups in sd */
7122 unsigned long avg_load; /* Average load across all groups in sd */
7123
7124 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
7125 struct sg_lb_stats local_stat; /* Statistics of the local group */
7126 };
7127
7128 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7129 {
7130 /*
7131 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
7132 * local_stat because update_sg_lb_stats() does a full clear/assignment.
7133 * We must however clear busiest_stat::avg_load because
7134 * update_sd_pick_busiest() reads this before assignment.
7135 */
7136 *sds = (struct sd_lb_stats){
7137 .busiest = NULL,
7138 .local = NULL,
7139 .total_load = 0UL,
7140 .total_capacity = 0UL,
7141 .busiest_stat = {
7142 .avg_load = 0UL,
7143 .sum_nr_running = 0,
7144 .group_type = group_other,
7145 },
7146 };
7147 }
7148
7149 /**
7150 * get_sd_load_idx - Obtain the load index for a given sched domain.
7151 * @sd: The sched_domain whose load_idx is to be obtained.
7152 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
7153 *
7154 * Return: The load index.
7155 */
7156 static inline int get_sd_load_idx(struct sched_domain *sd,
7157 enum cpu_idle_type idle)
7158 {
7159 int load_idx;
7160
7161 switch (idle) {
7162 case CPU_NOT_IDLE:
7163 load_idx = sd->busy_idx;
7164 break;
7165
7166 case CPU_NEWLY_IDLE:
7167 load_idx = sd->newidle_idx;
7168 break;
7169 default:
7170 load_idx = sd->idle_idx;
7171 break;
7172 }
7173
7174 return load_idx;
7175 }
7176
7177 static unsigned long scale_rt_capacity(int cpu)
7178 {
7179 struct rq *rq = cpu_rq(cpu);
7180 u64 total, used, age_stamp, avg;
7181 s64 delta;
7182
7183 /*
7184 * Since we're reading these variables without serialization make sure
7185 * we read them once before doing sanity checks on them.
7186 */
7187 age_stamp = READ_ONCE(rq->age_stamp);
7188 avg = READ_ONCE(rq->rt_avg);
7189 delta = __rq_clock_broken(rq) - age_stamp;
7190
7191 if (unlikely(delta < 0))
7192 delta = 0;
7193
7194 total = sched_avg_period() + delta;
7195
7196 used = div_u64(avg, total);
7197
7198 if (likely(used < SCHED_CAPACITY_SCALE))
7199 return SCHED_CAPACITY_SCALE - used;
7200
7201 return 1;
7202 }
7203
7204 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
7205 {
7206 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
7207 struct sched_group *sdg = sd->groups;
7208
7209 cpu_rq(cpu)->cpu_capacity_orig = capacity;
7210
7211 capacity *= scale_rt_capacity(cpu);
7212 capacity >>= SCHED_CAPACITY_SHIFT;
7213
7214 if (!capacity)
7215 capacity = 1;
7216
7217 cpu_rq(cpu)->cpu_capacity = capacity;
7218 sdg->sgc->capacity = capacity;
7219 sdg->sgc->min_capacity = capacity;
7220 }
7221
7222 void update_group_capacity(struct sched_domain *sd, int cpu)
7223 {
7224 struct sched_domain *child = sd->child;
7225 struct sched_group *group, *sdg = sd->groups;
7226 unsigned long capacity, min_capacity;
7227 unsigned long interval;
7228
7229 interval = msecs_to_jiffies(sd->balance_interval);
7230 interval = clamp(interval, 1UL, max_load_balance_interval);
7231 sdg->sgc->next_update = jiffies + interval;
7232
7233 if (!child) {
7234 update_cpu_capacity(sd, cpu);
7235 return;
7236 }
7237
7238 capacity = 0;
7239 min_capacity = ULONG_MAX;
7240
7241 if (child->flags & SD_OVERLAP) {
7242 /*
7243 * SD_OVERLAP domains cannot assume that child groups
7244 * span the current group.
7245 */
7246
7247 for_each_cpu(cpu, sched_group_cpus(sdg)) {
7248 struct sched_group_capacity *sgc;
7249 struct rq *rq = cpu_rq(cpu);
7250
7251 /*
7252 * build_sched_domains() -> init_sched_groups_capacity()
7253 * gets here before we've attached the domains to the
7254 * runqueues.
7255 *
7256 * Use capacity_of(), which is set irrespective of domains
7257 * in update_cpu_capacity().
7258 *
7259 * This avoids capacity from being 0 and
7260 * causing divide-by-zero issues on boot.
7261 */
7262 if (unlikely(!rq->sd)) {
7263 capacity += capacity_of(cpu);
7264 } else {
7265 sgc = rq->sd->groups->sgc;
7266 capacity += sgc->capacity;
7267 }
7268
7269 min_capacity = min(capacity, min_capacity);
7270 }
7271 } else {
7272 /*
7273 * !SD_OVERLAP domains can assume that child groups
7274 * span the current group.
7275 */
7276
7277 group = child->groups;
7278 do {
7279 struct sched_group_capacity *sgc = group->sgc;
7280
7281 capacity += sgc->capacity;
7282 min_capacity = min(sgc->min_capacity, min_capacity);
7283 group = group->next;
7284 } while (group != child->groups);
7285 }
7286
7287 sdg->sgc->capacity = capacity;
7288 sdg->sgc->min_capacity = min_capacity;
7289 }
7290
7291 /*
7292 * Check whether the capacity of the rq has been noticeably reduced by side
7293 * activity. The imbalance_pct is used for the threshold.
7294 * Return true is the capacity is reduced
7295 */
7296 static inline int
7297 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
7298 {
7299 return ((rq->cpu_capacity * sd->imbalance_pct) <
7300 (rq->cpu_capacity_orig * 100));
7301 }
7302
7303 /*
7304 * Group imbalance indicates (and tries to solve) the problem where balancing
7305 * groups is inadequate due to ->cpus_allowed constraints.
7306 *
7307 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
7308 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
7309 * Something like:
7310 *
7311 * { 0 1 2 3 } { 4 5 6 7 }
7312 * * * * *
7313 *
7314 * If we were to balance group-wise we'd place two tasks in the first group and
7315 * two tasks in the second group. Clearly this is undesired as it will overload
7316 * cpu 3 and leave one of the cpus in the second group unused.
7317 *
7318 * The current solution to this issue is detecting the skew in the first group
7319 * by noticing the lower domain failed to reach balance and had difficulty
7320 * moving tasks due to affinity constraints.
7321 *
7322 * When this is so detected; this group becomes a candidate for busiest; see
7323 * update_sd_pick_busiest(). And calculate_imbalance() and
7324 * find_busiest_group() avoid some of the usual balance conditions to allow it
7325 * to create an effective group imbalance.
7326 *
7327 * This is a somewhat tricky proposition since the next run might not find the
7328 * group imbalance and decide the groups need to be balanced again. A most
7329 * subtle and fragile situation.
7330 */
7331
7332 static inline int sg_imbalanced(struct sched_group *group)
7333 {
7334 return group->sgc->imbalance;
7335 }
7336
7337 /*
7338 * group_has_capacity returns true if the group has spare capacity that could
7339 * be used by some tasks.
7340 * We consider that a group has spare capacity if the * number of task is
7341 * smaller than the number of CPUs or if the utilization is lower than the
7342 * available capacity for CFS tasks.
7343 * For the latter, we use a threshold to stabilize the state, to take into
7344 * account the variance of the tasks' load and to return true if the available
7345 * capacity in meaningful for the load balancer.
7346 * As an example, an available capacity of 1% can appear but it doesn't make
7347 * any benefit for the load balance.
7348 */
7349 static inline bool
7350 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
7351 {
7352 if (sgs->sum_nr_running < sgs->group_weight)
7353 return true;
7354
7355 if ((sgs->group_capacity * 100) >
7356 (sgs->group_util * env->sd->imbalance_pct))
7357 return true;
7358
7359 return false;
7360 }
7361
7362 /*
7363 * group_is_overloaded returns true if the group has more tasks than it can
7364 * handle.
7365 * group_is_overloaded is not equals to !group_has_capacity because a group
7366 * with the exact right number of tasks, has no more spare capacity but is not
7367 * overloaded so both group_has_capacity and group_is_overloaded return
7368 * false.
7369 */
7370 static inline bool
7371 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
7372 {
7373 if (sgs->sum_nr_running <= sgs->group_weight)
7374 return false;
7375
7376 if ((sgs->group_capacity * 100) <
7377 (sgs->group_util * env->sd->imbalance_pct))
7378 return true;
7379
7380 return false;
7381 }
7382
7383 /*
7384 * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
7385 * per-CPU capacity than sched_group ref.
7386 */
7387 static inline bool
7388 group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
7389 {
7390 return sg->sgc->min_capacity * capacity_margin <
7391 ref->sgc->min_capacity * 1024;
7392 }
7393
7394 static inline enum
7395 group_type group_classify(struct sched_group *group,
7396 struct sg_lb_stats *sgs)
7397 {
7398 if (sgs->group_no_capacity)
7399 return group_overloaded;
7400
7401 if (sg_imbalanced(group))
7402 return group_imbalanced;
7403
7404 return group_other;
7405 }
7406
7407 /**
7408 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
7409 * @env: The load balancing environment.
7410 * @group: sched_group whose statistics are to be updated.
7411 * @load_idx: Load index of sched_domain of this_cpu for load calc.
7412 * @local_group: Does group contain this_cpu.
7413 * @sgs: variable to hold the statistics for this group.
7414 * @overload: Indicate more than one runnable task for any CPU.
7415 */
7416 static inline void update_sg_lb_stats(struct lb_env *env,
7417 struct sched_group *group, int load_idx,
7418 int local_group, struct sg_lb_stats *sgs,
7419 bool *overload)
7420 {
7421 unsigned long load;
7422 int i, nr_running;
7423
7424 memset(sgs, 0, sizeof(*sgs));
7425
7426 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7427 struct rq *rq = cpu_rq(i);
7428
7429 /* Bias balancing toward cpus of our domain */
7430 if (local_group)
7431 load = target_load(i, load_idx);
7432 else
7433 load = source_load(i, load_idx);
7434
7435 sgs->group_load += load;
7436 sgs->group_util += cpu_util(i);
7437 sgs->sum_nr_running += rq->cfs.h_nr_running;
7438
7439 nr_running = rq->nr_running;
7440 if (nr_running > 1)
7441 *overload = true;
7442
7443 #ifdef CONFIG_NUMA_BALANCING
7444 sgs->nr_numa_running += rq->nr_numa_running;
7445 sgs->nr_preferred_running += rq->nr_preferred_running;
7446 #endif
7447 sgs->sum_weighted_load += weighted_cpuload(i);
7448 /*
7449 * No need to call idle_cpu() if nr_running is not 0
7450 */
7451 if (!nr_running && idle_cpu(i))
7452 sgs->idle_cpus++;
7453 }
7454
7455 /* Adjust by relative CPU capacity of the group */
7456 sgs->group_capacity = group->sgc->capacity;
7457 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
7458
7459 if (sgs->sum_nr_running)
7460 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
7461
7462 sgs->group_weight = group->group_weight;
7463
7464 sgs->group_no_capacity = group_is_overloaded(env, sgs);
7465 sgs->group_type = group_classify(group, sgs);
7466 }
7467
7468 /**
7469 * update_sd_pick_busiest - return 1 on busiest group
7470 * @env: The load balancing environment.
7471 * @sds: sched_domain statistics
7472 * @sg: sched_group candidate to be checked for being the busiest
7473 * @sgs: sched_group statistics
7474 *
7475 * Determine if @sg is a busier group than the previously selected
7476 * busiest group.
7477 *
7478 * Return: %true if @sg is a busier group than the previously selected
7479 * busiest group. %false otherwise.
7480 */
7481 static bool update_sd_pick_busiest(struct lb_env *env,
7482 struct sd_lb_stats *sds,
7483 struct sched_group *sg,
7484 struct sg_lb_stats *sgs)
7485 {
7486 struct sg_lb_stats *busiest = &sds->busiest_stat;
7487
7488 if (sgs->group_type > busiest->group_type)
7489 return true;
7490
7491 if (sgs->group_type < busiest->group_type)
7492 return false;
7493
7494 if (sgs->avg_load <= busiest->avg_load)
7495 return false;
7496
7497 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
7498 goto asym_packing;
7499
7500 /*
7501 * Candidate sg has no more than one task per CPU and
7502 * has higher per-CPU capacity. Migrating tasks to less
7503 * capable CPUs may harm throughput. Maximize throughput,
7504 * power/energy consequences are not considered.
7505 */
7506 if (sgs->sum_nr_running <= sgs->group_weight &&
7507 group_smaller_cpu_capacity(sds->local, sg))
7508 return false;
7509
7510 asym_packing:
7511 /* This is the busiest node in its class. */
7512 if (!(env->sd->flags & SD_ASYM_PACKING))
7513 return true;
7514
7515 /* No ASYM_PACKING if target cpu is already busy */
7516 if (env->idle == CPU_NOT_IDLE)
7517 return true;
7518 /*
7519 * ASYM_PACKING needs to move all the work to the highest
7520 * prority CPUs in the group, therefore mark all groups
7521 * of lower priority than ourself as busy.
7522 */
7523 if (sgs->sum_nr_running &&
7524 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
7525 if (!sds->busiest)
7526 return true;
7527
7528 /* Prefer to move from lowest priority cpu's work */
7529 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
7530 sg->asym_prefer_cpu))
7531 return true;
7532 }
7533
7534 return false;
7535 }
7536
7537 #ifdef CONFIG_NUMA_BALANCING
7538 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7539 {
7540 if (sgs->sum_nr_running > sgs->nr_numa_running)
7541 return regular;
7542 if (sgs->sum_nr_running > sgs->nr_preferred_running)
7543 return remote;
7544 return all;
7545 }
7546
7547 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7548 {
7549 if (rq->nr_running > rq->nr_numa_running)
7550 return regular;
7551 if (rq->nr_running > rq->nr_preferred_running)
7552 return remote;
7553 return all;
7554 }
7555 #else
7556 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7557 {
7558 return all;
7559 }
7560
7561 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7562 {
7563 return regular;
7564 }
7565 #endif /* CONFIG_NUMA_BALANCING */
7566
7567 /**
7568 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
7569 * @env: The load balancing environment.
7570 * @sds: variable to hold the statistics for this sched_domain.
7571 */
7572 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7573 {
7574 struct sched_domain *child = env->sd->child;
7575 struct sched_group *sg = env->sd->groups;
7576 struct sg_lb_stats *local = &sds->local_stat;
7577 struct sg_lb_stats tmp_sgs;
7578 int load_idx, prefer_sibling = 0;
7579 bool overload = false;
7580
7581 if (child && child->flags & SD_PREFER_SIBLING)
7582 prefer_sibling = 1;
7583
7584 load_idx = get_sd_load_idx(env->sd, env->idle);
7585
7586 do {
7587 struct sg_lb_stats *sgs = &tmp_sgs;
7588 int local_group;
7589
7590 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
7591 if (local_group) {
7592 sds->local = sg;
7593 sgs = local;
7594
7595 if (env->idle != CPU_NEWLY_IDLE ||
7596 time_after_eq(jiffies, sg->sgc->next_update))
7597 update_group_capacity(env->sd, env->dst_cpu);
7598 }
7599
7600 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7601 &overload);
7602
7603 if (local_group)
7604 goto next_group;
7605
7606 /*
7607 * In case the child domain prefers tasks go to siblings
7608 * first, lower the sg capacity so that we'll try
7609 * and move all the excess tasks away. We lower the capacity
7610 * of a group only if the local group has the capacity to fit
7611 * these excess tasks. The extra check prevents the case where
7612 * you always pull from the heaviest group when it is already
7613 * under-utilized (possible with a large weight task outweighs
7614 * the tasks on the system).
7615 */
7616 if (prefer_sibling && sds->local &&
7617 group_has_capacity(env, local) &&
7618 (sgs->sum_nr_running > local->sum_nr_running + 1)) {
7619 sgs->group_no_capacity = 1;
7620 sgs->group_type = group_classify(sg, sgs);
7621 }
7622
7623 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
7624 sds->busiest = sg;
7625 sds->busiest_stat = *sgs;
7626 }
7627
7628 next_group:
7629 /* Now, start updating sd_lb_stats */
7630 sds->total_load += sgs->group_load;
7631 sds->total_capacity += sgs->group_capacity;
7632
7633 sg = sg->next;
7634 } while (sg != env->sd->groups);
7635
7636 if (env->sd->flags & SD_NUMA)
7637 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
7638
7639 if (!env->sd->parent) {
7640 /* update overload indicator if we are at root domain */
7641 if (env->dst_rq->rd->overload != overload)
7642 env->dst_rq->rd->overload = overload;
7643 }
7644
7645 }
7646
7647 /**
7648 * check_asym_packing - Check to see if the group is packed into the
7649 * sched doman.
7650 *
7651 * This is primarily intended to used at the sibling level. Some
7652 * cores like POWER7 prefer to use lower numbered SMT threads. In the
7653 * case of POWER7, it can move to lower SMT modes only when higher
7654 * threads are idle. When in lower SMT modes, the threads will
7655 * perform better since they share less core resources. Hence when we
7656 * have idle threads, we want them to be the higher ones.
7657 *
7658 * This packing function is run on idle threads. It checks to see if
7659 * the busiest CPU in this domain (core in the P7 case) has a higher
7660 * CPU number than the packing function is being run on. Here we are
7661 * assuming lower CPU number will be equivalent to lower a SMT thread
7662 * number.
7663 *
7664 * Return: 1 when packing is required and a task should be moved to
7665 * this CPU. The amount of the imbalance is returned in *imbalance.
7666 *
7667 * @env: The load balancing environment.
7668 * @sds: Statistics of the sched_domain which is to be packed
7669 */
7670 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
7671 {
7672 int busiest_cpu;
7673
7674 if (!(env->sd->flags & SD_ASYM_PACKING))
7675 return 0;
7676
7677 if (env->idle == CPU_NOT_IDLE)
7678 return 0;
7679
7680 if (!sds->busiest)
7681 return 0;
7682
7683 busiest_cpu = sds->busiest->asym_prefer_cpu;
7684 if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
7685 return 0;
7686
7687 env->imbalance = DIV_ROUND_CLOSEST(
7688 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
7689 SCHED_CAPACITY_SCALE);
7690
7691 return 1;
7692 }
7693
7694 /**
7695 * fix_small_imbalance - Calculate the minor imbalance that exists
7696 * amongst the groups of a sched_domain, during
7697 * load balancing.
7698 * @env: The load balancing environment.
7699 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7700 */
7701 static inline
7702 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7703 {
7704 unsigned long tmp, capa_now = 0, capa_move = 0;
7705 unsigned int imbn = 2;
7706 unsigned long scaled_busy_load_per_task;
7707 struct sg_lb_stats *local, *busiest;
7708
7709 local = &sds->local_stat;
7710 busiest = &sds->busiest_stat;
7711
7712 if (!local->sum_nr_running)
7713 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7714 else if (busiest->load_per_task > local->load_per_task)
7715 imbn = 1;
7716
7717 scaled_busy_load_per_task =
7718 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7719 busiest->group_capacity;
7720
7721 if (busiest->avg_load + scaled_busy_load_per_task >=
7722 local->avg_load + (scaled_busy_load_per_task * imbn)) {
7723 env->imbalance = busiest->load_per_task;
7724 return;
7725 }
7726
7727 /*
7728 * OK, we don't have enough imbalance to justify moving tasks,
7729 * however we may be able to increase total CPU capacity used by
7730 * moving them.
7731 */
7732
7733 capa_now += busiest->group_capacity *
7734 min(busiest->load_per_task, busiest->avg_load);
7735 capa_now += local->group_capacity *
7736 min(local->load_per_task, local->avg_load);
7737 capa_now /= SCHED_CAPACITY_SCALE;
7738
7739 /* Amount of load we'd subtract */
7740 if (busiest->avg_load > scaled_busy_load_per_task) {
7741 capa_move += busiest->group_capacity *
7742 min(busiest->load_per_task,
7743 busiest->avg_load - scaled_busy_load_per_task);
7744 }
7745
7746 /* Amount of load we'd add */
7747 if (busiest->avg_load * busiest->group_capacity <
7748 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
7749 tmp = (busiest->avg_load * busiest->group_capacity) /
7750 local->group_capacity;
7751 } else {
7752 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7753 local->group_capacity;
7754 }
7755 capa_move += local->group_capacity *
7756 min(local->load_per_task, local->avg_load + tmp);
7757 capa_move /= SCHED_CAPACITY_SCALE;
7758
7759 /* Move if we gain throughput */
7760 if (capa_move > capa_now)
7761 env->imbalance = busiest->load_per_task;
7762 }
7763
7764 /**
7765 * calculate_imbalance - Calculate the amount of imbalance present within the
7766 * groups of a given sched_domain during load balance.
7767 * @env: load balance environment
7768 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
7769 */
7770 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7771 {
7772 unsigned long max_pull, load_above_capacity = ~0UL;
7773 struct sg_lb_stats *local, *busiest;
7774
7775 local = &sds->local_stat;
7776 busiest = &sds->busiest_stat;
7777
7778 if (busiest->group_type == group_imbalanced) {
7779 /*
7780 * In the group_imb case we cannot rely on group-wide averages
7781 * to ensure cpu-load equilibrium, look at wider averages. XXX
7782 */
7783 busiest->load_per_task =
7784 min(busiest->load_per_task, sds->avg_load);
7785 }
7786
7787 /*
7788 * Avg load of busiest sg can be less and avg load of local sg can
7789 * be greater than avg load across all sgs of sd because avg load
7790 * factors in sg capacity and sgs with smaller group_type are
7791 * skipped when updating the busiest sg:
7792 */
7793 if (busiest->avg_load <= sds->avg_load ||
7794 local->avg_load >= sds->avg_load) {
7795 env->imbalance = 0;
7796 return fix_small_imbalance(env, sds);
7797 }
7798
7799 /*
7800 * If there aren't any idle cpus, avoid creating some.
7801 */
7802 if (busiest->group_type == group_overloaded &&
7803 local->group_type == group_overloaded) {
7804 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
7805 if (load_above_capacity > busiest->group_capacity) {
7806 load_above_capacity -= busiest->group_capacity;
7807 load_above_capacity *= scale_load_down(NICE_0_LOAD);
7808 load_above_capacity /= busiest->group_capacity;
7809 } else
7810 load_above_capacity = ~0UL;
7811 }
7812
7813 /*
7814 * We're trying to get all the cpus to the average_load, so we don't
7815 * want to push ourselves above the average load, nor do we wish to
7816 * reduce the max loaded cpu below the average load. At the same time,
7817 * we also don't want to reduce the group load below the group
7818 * capacity. Thus we look for the minimum possible imbalance.
7819 */
7820 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
7821
7822 /* How much load to actually move to equalise the imbalance */
7823 env->imbalance = min(
7824 max_pull * busiest->group_capacity,
7825 (sds->avg_load - local->avg_load) * local->group_capacity
7826 ) / SCHED_CAPACITY_SCALE;
7827
7828 /*
7829 * if *imbalance is less than the average load per runnable task
7830 * there is no guarantee that any tasks will be moved so we'll have
7831 * a think about bumping its value to force at least one task to be
7832 * moved
7833 */
7834 if (env->imbalance < busiest->load_per_task)
7835 return fix_small_imbalance(env, sds);
7836 }
7837
7838 /******* find_busiest_group() helpers end here *********************/
7839
7840 /**
7841 * find_busiest_group - Returns the busiest group within the sched_domain
7842 * if there is an imbalance.
7843 *
7844 * Also calculates the amount of weighted load which should be moved
7845 * to restore balance.
7846 *
7847 * @env: The load balancing environment.
7848 *
7849 * Return: - The busiest group if imbalance exists.
7850 */
7851 static struct sched_group *find_busiest_group(struct lb_env *env)
7852 {
7853 struct sg_lb_stats *local, *busiest;
7854 struct sd_lb_stats sds;
7855
7856 init_sd_lb_stats(&sds);
7857
7858 /*
7859 * Compute the various statistics relavent for load balancing at
7860 * this level.
7861 */
7862 update_sd_lb_stats(env, &sds);
7863 local = &sds.local_stat;
7864 busiest = &sds.busiest_stat;
7865
7866 /* ASYM feature bypasses nice load balance check */
7867 if (check_asym_packing(env, &sds))
7868 return sds.busiest;
7869
7870 /* There is no busy sibling group to pull tasks from */
7871 if (!sds.busiest || busiest->sum_nr_running == 0)
7872 goto out_balanced;
7873
7874 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7875 / sds.total_capacity;
7876
7877 /*
7878 * If the busiest group is imbalanced the below checks don't
7879 * work because they assume all things are equal, which typically
7880 * isn't true due to cpus_allowed constraints and the like.
7881 */
7882 if (busiest->group_type == group_imbalanced)
7883 goto force_balance;
7884
7885 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
7886 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7887 busiest->group_no_capacity)
7888 goto force_balance;
7889
7890 /*
7891 * If the local group is busier than the selected busiest group
7892 * don't try and pull any tasks.
7893 */
7894 if (local->avg_load >= busiest->avg_load)
7895 goto out_balanced;
7896
7897 /*
7898 * Don't pull any tasks if this group is already above the domain
7899 * average load.
7900 */
7901 if (local->avg_load >= sds.avg_load)
7902 goto out_balanced;
7903
7904 if (env->idle == CPU_IDLE) {
7905 /*
7906 * This cpu is idle. If the busiest group is not overloaded
7907 * and there is no imbalance between this and busiest group
7908 * wrt idle cpus, it is balanced. The imbalance becomes
7909 * significant if the diff is greater than 1 otherwise we
7910 * might end up to just move the imbalance on another group
7911 */
7912 if ((busiest->group_type != group_overloaded) &&
7913 (local->idle_cpus <= (busiest->idle_cpus + 1)))
7914 goto out_balanced;
7915 } else {
7916 /*
7917 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7918 * imbalance_pct to be conservative.
7919 */
7920 if (100 * busiest->avg_load <=
7921 env->sd->imbalance_pct * local->avg_load)
7922 goto out_balanced;
7923 }
7924
7925 force_balance:
7926 /* Looks like there is an imbalance. Compute it */
7927 calculate_imbalance(env, &sds);
7928 return sds.busiest;
7929
7930 out_balanced:
7931 env->imbalance = 0;
7932 return NULL;
7933 }
7934
7935 /*
7936 * find_busiest_queue - find the busiest runqueue among the cpus in group.
7937 */
7938 static struct rq *find_busiest_queue(struct lb_env *env,
7939 struct sched_group *group)
7940 {
7941 struct rq *busiest = NULL, *rq;
7942 unsigned long busiest_load = 0, busiest_capacity = 1;
7943 int i;
7944
7945 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7946 unsigned long capacity, wl;
7947 enum fbq_type rt;
7948
7949 rq = cpu_rq(i);
7950 rt = fbq_classify_rq(rq);
7951
7952 /*
7953 * We classify groups/runqueues into three groups:
7954 * - regular: there are !numa tasks
7955 * - remote: there are numa tasks that run on the 'wrong' node
7956 * - all: there is no distinction
7957 *
7958 * In order to avoid migrating ideally placed numa tasks,
7959 * ignore those when there's better options.
7960 *
7961 * If we ignore the actual busiest queue to migrate another
7962 * task, the next balance pass can still reduce the busiest
7963 * queue by moving tasks around inside the node.
7964 *
7965 * If we cannot move enough load due to this classification
7966 * the next pass will adjust the group classification and
7967 * allow migration of more tasks.
7968 *
7969 * Both cases only affect the total convergence complexity.
7970 */
7971 if (rt > env->fbq_type)
7972 continue;
7973
7974 capacity = capacity_of(i);
7975
7976 wl = weighted_cpuload(i);
7977
7978 /*
7979 * When comparing with imbalance, use weighted_cpuload()
7980 * which is not scaled with the cpu capacity.
7981 */
7982
7983 if (rq->nr_running == 1 && wl > env->imbalance &&
7984 !check_cpu_capacity(rq, env->sd))
7985 continue;
7986
7987 /*
7988 * For the load comparisons with the other cpu's, consider
7989 * the weighted_cpuload() scaled with the cpu capacity, so
7990 * that the load can be moved away from the cpu that is
7991 * potentially running at a lower capacity.
7992 *
7993 * Thus we're looking for max(wl_i / capacity_i), crosswise
7994 * multiplication to rid ourselves of the division works out
7995 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
7996 * our previous maximum.
7997 */
7998 if (wl * busiest_capacity > busiest_load * capacity) {
7999 busiest_load = wl;
8000 busiest_capacity = capacity;
8001 busiest = rq;
8002 }
8003 }
8004
8005 return busiest;
8006 }
8007
8008 /*
8009 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
8010 * so long as it is large enough.
8011 */
8012 #define MAX_PINNED_INTERVAL 512
8013
8014 static int need_active_balance(struct lb_env *env)
8015 {
8016 struct sched_domain *sd = env->sd;
8017
8018 if (env->idle == CPU_NEWLY_IDLE) {
8019
8020 /*
8021 * ASYM_PACKING needs to force migrate tasks from busy but
8022 * lower priority CPUs in order to pack all tasks in the
8023 * highest priority CPUs.
8024 */
8025 if ((sd->flags & SD_ASYM_PACKING) &&
8026 sched_asym_prefer(env->dst_cpu, env->src_cpu))
8027 return 1;
8028 }
8029
8030 /*
8031 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
8032 * It's worth migrating the task if the src_cpu's capacity is reduced
8033 * because of other sched_class or IRQs if more capacity stays
8034 * available on dst_cpu.
8035 */
8036 if ((env->idle != CPU_NOT_IDLE) &&
8037 (env->src_rq->cfs.h_nr_running == 1)) {
8038 if ((check_cpu_capacity(env->src_rq, sd)) &&
8039 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
8040 return 1;
8041 }
8042
8043 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
8044 }
8045
8046 static int active_load_balance_cpu_stop(void *data);
8047
8048 static int should_we_balance(struct lb_env *env)
8049 {
8050 struct sched_group *sg = env->sd->groups;
8051 struct cpumask *sg_cpus, *sg_mask;
8052 int cpu, balance_cpu = -1;
8053
8054 /*
8055 * In the newly idle case, we will allow all the cpu's
8056 * to do the newly idle load balance.
8057 */
8058 if (env->idle == CPU_NEWLY_IDLE)
8059 return 1;
8060
8061 sg_cpus = sched_group_cpus(sg);
8062 sg_mask = sched_group_mask(sg);
8063 /* Try to find first idle cpu */
8064 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
8065 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
8066 continue;
8067
8068 balance_cpu = cpu;
8069 break;
8070 }
8071
8072 if (balance_cpu == -1)
8073 balance_cpu = group_balance_cpu(sg);
8074
8075 /*
8076 * First idle cpu or the first cpu(busiest) in this sched group
8077 * is eligible for doing load balancing at this and above domains.
8078 */
8079 return balance_cpu == env->dst_cpu;
8080 }
8081
8082 /*
8083 * Check this_cpu to ensure it is balanced within domain. Attempt to move
8084 * tasks if there is an imbalance.
8085 */
8086 static int load_balance(int this_cpu, struct rq *this_rq,
8087 struct sched_domain *sd, enum cpu_idle_type idle,
8088 int *continue_balancing)
8089 {
8090 int ld_moved, cur_ld_moved, active_balance = 0;
8091 struct sched_domain *sd_parent = sd->parent;
8092 struct sched_group *group;
8093 struct rq *busiest;
8094 struct rq_flags rf;
8095 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
8096
8097 struct lb_env env = {
8098 .sd = sd,
8099 .dst_cpu = this_cpu,
8100 .dst_rq = this_rq,
8101 .dst_grpmask = sched_group_cpus(sd->groups),
8102 .idle = idle,
8103 .loop_break = sched_nr_migrate_break,
8104 .cpus = cpus,
8105 .fbq_type = all,
8106 .tasks = LIST_HEAD_INIT(env.tasks),
8107 };
8108
8109 /*
8110 * For NEWLY_IDLE load_balancing, we don't need to consider
8111 * other cpus in our group
8112 */
8113 if (idle == CPU_NEWLY_IDLE)
8114 env.dst_grpmask = NULL;
8115
8116 cpumask_copy(cpus, cpu_active_mask);
8117
8118 schedstat_inc(sd->lb_count[idle]);
8119
8120 redo:
8121 if (!should_we_balance(&env)) {
8122 *continue_balancing = 0;
8123 goto out_balanced;
8124 }
8125
8126 group = find_busiest_group(&env);
8127 if (!group) {
8128 schedstat_inc(sd->lb_nobusyg[idle]);
8129 goto out_balanced;
8130 }
8131
8132 busiest = find_busiest_queue(&env, group);
8133 if (!busiest) {
8134 schedstat_inc(sd->lb_nobusyq[idle]);
8135 goto out_balanced;
8136 }
8137
8138 BUG_ON(busiest == env.dst_rq);
8139
8140 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
8141
8142 env.src_cpu = busiest->cpu;
8143 env.src_rq = busiest;
8144
8145 ld_moved = 0;
8146 if (busiest->nr_running > 1) {
8147 /*
8148 * Attempt to move tasks. If find_busiest_group has found
8149 * an imbalance but busiest->nr_running <= 1, the group is
8150 * still unbalanced. ld_moved simply stays zero, so it is
8151 * correctly treated as an imbalance.
8152 */
8153 env.flags |= LBF_ALL_PINNED;
8154 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8155
8156 more_balance:
8157 rq_lock_irqsave(busiest, &rf);
8158 update_rq_clock(busiest);
8159
8160 /*
8161 * cur_ld_moved - load moved in current iteration
8162 * ld_moved - cumulative load moved across iterations
8163 */
8164 cur_ld_moved = detach_tasks(&env);
8165
8166 /*
8167 * We've detached some tasks from busiest_rq. Every
8168 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
8169 * unlock busiest->lock, and we are able to be sure
8170 * that nobody can manipulate the tasks in parallel.
8171 * See task_rq_lock() family for the details.
8172 */
8173
8174 rq_unlock(busiest, &rf);
8175
8176 if (cur_ld_moved) {
8177 attach_tasks(&env);
8178 ld_moved += cur_ld_moved;
8179 }
8180
8181 local_irq_restore(rf.flags);
8182
8183 if (env.flags & LBF_NEED_BREAK) {
8184 env.flags &= ~LBF_NEED_BREAK;
8185 goto more_balance;
8186 }
8187
8188 /*
8189 * Revisit (affine) tasks on src_cpu that couldn't be moved to
8190 * us and move them to an alternate dst_cpu in our sched_group
8191 * where they can run. The upper limit on how many times we
8192 * iterate on same src_cpu is dependent on number of cpus in our
8193 * sched_group.
8194 *
8195 * This changes load balance semantics a bit on who can move
8196 * load to a given_cpu. In addition to the given_cpu itself
8197 * (or a ilb_cpu acting on its behalf where given_cpu is
8198 * nohz-idle), we now have balance_cpu in a position to move
8199 * load to given_cpu. In rare situations, this may cause
8200 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
8201 * _independently_ and at _same_ time to move some load to
8202 * given_cpu) causing exceess load to be moved to given_cpu.
8203 * This however should not happen so much in practice and
8204 * moreover subsequent load balance cycles should correct the
8205 * excess load moved.
8206 */
8207 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
8208
8209 /* Prevent to re-select dst_cpu via env's cpus */
8210 cpumask_clear_cpu(env.dst_cpu, env.cpus);
8211
8212 env.dst_rq = cpu_rq(env.new_dst_cpu);
8213 env.dst_cpu = env.new_dst_cpu;
8214 env.flags &= ~LBF_DST_PINNED;
8215 env.loop = 0;
8216 env.loop_break = sched_nr_migrate_break;
8217
8218 /*
8219 * Go back to "more_balance" rather than "redo" since we
8220 * need to continue with same src_cpu.
8221 */
8222 goto more_balance;
8223 }
8224
8225 /*
8226 * We failed to reach balance because of affinity.
8227 */
8228 if (sd_parent) {
8229 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8230
8231 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
8232 *group_imbalance = 1;
8233 }
8234
8235 /* All tasks on this runqueue were pinned by CPU affinity */
8236 if (unlikely(env.flags & LBF_ALL_PINNED)) {
8237 cpumask_clear_cpu(cpu_of(busiest), cpus);
8238 if (!cpumask_empty(cpus)) {
8239 env.loop = 0;
8240 env.loop_break = sched_nr_migrate_break;
8241 goto redo;
8242 }
8243 goto out_all_pinned;
8244 }
8245 }
8246
8247 if (!ld_moved) {
8248 schedstat_inc(sd->lb_failed[idle]);
8249 /*
8250 * Increment the failure counter only on periodic balance.
8251 * We do not want newidle balance, which can be very
8252 * frequent, pollute the failure counter causing
8253 * excessive cache_hot migrations and active balances.
8254 */
8255 if (idle != CPU_NEWLY_IDLE)
8256 sd->nr_balance_failed++;
8257
8258 if (need_active_balance(&env)) {
8259 unsigned long flags;
8260
8261 raw_spin_lock_irqsave(&busiest->lock, flags);
8262
8263 /* don't kick the active_load_balance_cpu_stop,
8264 * if the curr task on busiest cpu can't be
8265 * moved to this_cpu
8266 */
8267 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
8268 raw_spin_unlock_irqrestore(&busiest->lock,
8269 flags);
8270 env.flags |= LBF_ALL_PINNED;
8271 goto out_one_pinned;
8272 }
8273
8274 /*
8275 * ->active_balance synchronizes accesses to
8276 * ->active_balance_work. Once set, it's cleared
8277 * only after active load balance is finished.
8278 */
8279 if (!busiest->active_balance) {
8280 busiest->active_balance = 1;
8281 busiest->push_cpu = this_cpu;
8282 active_balance = 1;
8283 }
8284 raw_spin_unlock_irqrestore(&busiest->lock, flags);
8285
8286 if (active_balance) {
8287 stop_one_cpu_nowait(cpu_of(busiest),
8288 active_load_balance_cpu_stop, busiest,
8289 &busiest->active_balance_work);
8290 }
8291
8292 /* We've kicked active balancing, force task migration. */
8293 sd->nr_balance_failed = sd->cache_nice_tries+1;
8294 }
8295 } else
8296 sd->nr_balance_failed = 0;
8297
8298 if (likely(!active_balance)) {
8299 /* We were unbalanced, so reset the balancing interval */
8300 sd->balance_interval = sd->min_interval;
8301 } else {
8302 /*
8303 * If we've begun active balancing, start to back off. This
8304 * case may not be covered by the all_pinned logic if there
8305 * is only 1 task on the busy runqueue (because we don't call
8306 * detach_tasks).
8307 */
8308 if (sd->balance_interval < sd->max_interval)
8309 sd->balance_interval *= 2;
8310 }
8311
8312 goto out;
8313
8314 out_balanced:
8315 /*
8316 * We reach balance although we may have faced some affinity
8317 * constraints. Clear the imbalance flag if it was set.
8318 */
8319 if (sd_parent) {
8320 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8321
8322 if (*group_imbalance)
8323 *group_imbalance = 0;
8324 }
8325
8326 out_all_pinned:
8327 /*
8328 * We reach balance because all tasks are pinned at this level so
8329 * we can't migrate them. Let the imbalance flag set so parent level
8330 * can try to migrate them.
8331 */
8332 schedstat_inc(sd->lb_balanced[idle]);
8333
8334 sd->nr_balance_failed = 0;
8335
8336 out_one_pinned:
8337 /* tune up the balancing interval */
8338 if (((env.flags & LBF_ALL_PINNED) &&
8339 sd->balance_interval < MAX_PINNED_INTERVAL) ||
8340 (sd->balance_interval < sd->max_interval))
8341 sd->balance_interval *= 2;
8342
8343 ld_moved = 0;
8344 out:
8345 return ld_moved;
8346 }
8347
8348 static inline unsigned long
8349 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
8350 {
8351 unsigned long interval = sd->balance_interval;
8352
8353 if (cpu_busy)
8354 interval *= sd->busy_factor;
8355
8356 /* scale ms to jiffies */
8357 interval = msecs_to_jiffies(interval);
8358 interval = clamp(interval, 1UL, max_load_balance_interval);
8359
8360 return interval;
8361 }
8362
8363 static inline void
8364 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
8365 {
8366 unsigned long interval, next;
8367
8368 /* used by idle balance, so cpu_busy = 0 */
8369 interval = get_sd_balance_interval(sd, 0);
8370 next = sd->last_balance + interval;
8371
8372 if (time_after(*next_balance, next))
8373 *next_balance = next;
8374 }
8375
8376 /*
8377 * idle_balance is called by schedule() if this_cpu is about to become
8378 * idle. Attempts to pull tasks from other CPUs.
8379 */
8380 static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
8381 {
8382 unsigned long next_balance = jiffies + HZ;
8383 int this_cpu = this_rq->cpu;
8384 struct sched_domain *sd;
8385 int pulled_task = 0;
8386 u64 curr_cost = 0;
8387
8388 /*
8389 * We must set idle_stamp _before_ calling idle_balance(), such that we
8390 * measure the duration of idle_balance() as idle time.
8391 */
8392 this_rq->idle_stamp = rq_clock(this_rq);
8393
8394 /*
8395 * This is OK, because current is on_cpu, which avoids it being picked
8396 * for load-balance and preemption/IRQs are still disabled avoiding
8397 * further scheduler activity on it and we're being very careful to
8398 * re-start the picking loop.
8399 */
8400 rq_unpin_lock(this_rq, rf);
8401
8402 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
8403 !this_rq->rd->overload) {
8404 rcu_read_lock();
8405 sd = rcu_dereference_check_sched_domain(this_rq->sd);
8406 if (sd)
8407 update_next_balance(sd, &next_balance);
8408 rcu_read_unlock();
8409
8410 goto out;
8411 }
8412
8413 raw_spin_unlock(&this_rq->lock);
8414
8415 update_blocked_averages(this_cpu);
8416 rcu_read_lock();
8417 for_each_domain(this_cpu, sd) {
8418 int continue_balancing = 1;
8419 u64 t0, domain_cost;
8420
8421 if (!(sd->flags & SD_LOAD_BALANCE))
8422 continue;
8423
8424 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
8425 update_next_balance(sd, &next_balance);
8426 break;
8427 }
8428
8429 if (sd->flags & SD_BALANCE_NEWIDLE) {
8430 t0 = sched_clock_cpu(this_cpu);
8431
8432 pulled_task = load_balance(this_cpu, this_rq,
8433 sd, CPU_NEWLY_IDLE,
8434 &continue_balancing);
8435
8436 domain_cost = sched_clock_cpu(this_cpu) - t0;
8437 if (domain_cost > sd->max_newidle_lb_cost)
8438 sd->max_newidle_lb_cost = domain_cost;
8439
8440 curr_cost += domain_cost;
8441 }
8442
8443 update_next_balance(sd, &next_balance);
8444
8445 /*
8446 * Stop searching for tasks to pull if there are
8447 * now runnable tasks on this rq.
8448 */
8449 if (pulled_task || this_rq->nr_running > 0)
8450 break;
8451 }
8452 rcu_read_unlock();
8453
8454 raw_spin_lock(&this_rq->lock);
8455
8456 if (curr_cost > this_rq->max_idle_balance_cost)
8457 this_rq->max_idle_balance_cost = curr_cost;
8458
8459 /*
8460 * While browsing the domains, we released the rq lock, a task could
8461 * have been enqueued in the meantime. Since we're not going idle,
8462 * pretend we pulled a task.
8463 */
8464 if (this_rq->cfs.h_nr_running && !pulled_task)
8465 pulled_task = 1;
8466
8467 out:
8468 /* Move the next balance forward */
8469 if (time_after(this_rq->next_balance, next_balance))
8470 this_rq->next_balance = next_balance;
8471
8472 /* Is there a task of a high priority class? */
8473 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
8474 pulled_task = -1;
8475
8476 if (pulled_task)
8477 this_rq->idle_stamp = 0;
8478
8479 rq_repin_lock(this_rq, rf);
8480
8481 return pulled_task;
8482 }
8483
8484 /*
8485 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8486 * running tasks off the busiest CPU onto idle CPUs. It requires at
8487 * least 1 task to be running on each physical CPU where possible, and
8488 * avoids physical / logical imbalances.
8489 */
8490 static int active_load_balance_cpu_stop(void *data)
8491 {
8492 struct rq *busiest_rq = data;
8493 int busiest_cpu = cpu_of(busiest_rq);
8494 int target_cpu = busiest_rq->push_cpu;
8495 struct rq *target_rq = cpu_rq(target_cpu);
8496 struct sched_domain *sd;
8497 struct task_struct *p = NULL;
8498 struct rq_flags rf;
8499
8500 rq_lock_irq(busiest_rq, &rf);
8501
8502 /* make sure the requested cpu hasn't gone down in the meantime */
8503 if (unlikely(busiest_cpu != smp_processor_id() ||
8504 !busiest_rq->active_balance))
8505 goto out_unlock;
8506
8507 /* Is there any task to move? */
8508 if (busiest_rq->nr_running <= 1)
8509 goto out_unlock;
8510
8511 /*
8512 * This condition is "impossible", if it occurs
8513 * we need to fix it. Originally reported by
8514 * Bjorn Helgaas on a 128-cpu setup.
8515 */
8516 BUG_ON(busiest_rq == target_rq);
8517
8518 /* Search for an sd spanning us and the target CPU. */
8519 rcu_read_lock();
8520 for_each_domain(target_cpu, sd) {
8521 if ((sd->flags & SD_LOAD_BALANCE) &&
8522 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8523 break;
8524 }
8525
8526 if (likely(sd)) {
8527 struct lb_env env = {
8528 .sd = sd,
8529 .dst_cpu = target_cpu,
8530 .dst_rq = target_rq,
8531 .src_cpu = busiest_rq->cpu,
8532 .src_rq = busiest_rq,
8533 .idle = CPU_IDLE,
8534 };
8535
8536 schedstat_inc(sd->alb_count);
8537 update_rq_clock(busiest_rq);
8538
8539 p = detach_one_task(&env);
8540 if (p) {
8541 schedstat_inc(sd->alb_pushed);
8542 /* Active balancing done, reset the failure counter. */
8543 sd->nr_balance_failed = 0;
8544 } else {
8545 schedstat_inc(sd->alb_failed);
8546 }
8547 }
8548 rcu_read_unlock();
8549 out_unlock:
8550 busiest_rq->active_balance = 0;
8551 rq_unlock(busiest_rq, &rf);
8552
8553 if (p)
8554 attach_one_task(target_rq, p);
8555
8556 local_irq_enable();
8557
8558 return 0;
8559 }
8560
8561 static inline int on_null_domain(struct rq *rq)
8562 {
8563 return unlikely(!rcu_dereference_sched(rq->sd));
8564 }
8565
8566 #ifdef CONFIG_NO_HZ_COMMON
8567 /*
8568 * idle load balancing details
8569 * - When one of the busy CPUs notice that there may be an idle rebalancing
8570 * needed, they will kick the idle load balancer, which then does idle
8571 * load balancing for all the idle CPUs.
8572 */
8573 static struct {
8574 cpumask_var_t idle_cpus_mask;
8575 atomic_t nr_cpus;
8576 unsigned long next_balance; /* in jiffy units */
8577 } nohz ____cacheline_aligned;
8578
8579 static inline int find_new_ilb(void)
8580 {
8581 int ilb = cpumask_first(nohz.idle_cpus_mask);
8582
8583 if (ilb < nr_cpu_ids && idle_cpu(ilb))
8584 return ilb;
8585
8586 return nr_cpu_ids;
8587 }
8588
8589 /*
8590 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8591 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8592 * CPU (if there is one).
8593 */
8594 static void nohz_balancer_kick(void)
8595 {
8596 int ilb_cpu;
8597
8598 nohz.next_balance++;
8599
8600 ilb_cpu = find_new_ilb();
8601
8602 if (ilb_cpu >= nr_cpu_ids)
8603 return;
8604
8605 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
8606 return;
8607 /*
8608 * Use smp_send_reschedule() instead of resched_cpu().
8609 * This way we generate a sched IPI on the target cpu which
8610 * is idle. And the softirq performing nohz idle load balance
8611 * will be run before returning from the IPI.
8612 */
8613 smp_send_reschedule(ilb_cpu);
8614 return;
8615 }
8616
8617 void nohz_balance_exit_idle(unsigned int cpu)
8618 {
8619 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
8620 /*
8621 * Completely isolated CPUs don't ever set, so we must test.
8622 */
8623 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8624 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8625 atomic_dec(&nohz.nr_cpus);
8626 }
8627 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8628 }
8629 }
8630
8631 static inline void set_cpu_sd_state_busy(void)
8632 {
8633 struct sched_domain *sd;
8634 int cpu = smp_processor_id();
8635
8636 rcu_read_lock();
8637 sd = rcu_dereference(per_cpu(sd_llc, cpu));
8638
8639 if (!sd || !sd->nohz_idle)
8640 goto unlock;
8641 sd->nohz_idle = 0;
8642
8643 atomic_inc(&sd->shared->nr_busy_cpus);
8644 unlock:
8645 rcu_read_unlock();
8646 }
8647
8648 void set_cpu_sd_state_idle(void)
8649 {
8650 struct sched_domain *sd;
8651 int cpu = smp_processor_id();
8652
8653 rcu_read_lock();
8654 sd = rcu_dereference(per_cpu(sd_llc, cpu));
8655
8656 if (!sd || sd->nohz_idle)
8657 goto unlock;
8658 sd->nohz_idle = 1;
8659
8660 atomic_dec(&sd->shared->nr_busy_cpus);
8661 unlock:
8662 rcu_read_unlock();
8663 }
8664
8665 /*
8666 * This routine will record that the cpu is going idle with tick stopped.
8667 * This info will be used in performing idle load balancing in the future.
8668 */
8669 void nohz_balance_enter_idle(int cpu)
8670 {
8671 /*
8672 * If this cpu is going down, then nothing needs to be done.
8673 */
8674 if (!cpu_active(cpu))
8675 return;
8676
8677 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8678 return;
8679
8680 /*
8681 * If we're a completely isolated CPU, we don't play.
8682 */
8683 if (on_null_domain(cpu_rq(cpu)))
8684 return;
8685
8686 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8687 atomic_inc(&nohz.nr_cpus);
8688 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8689 }
8690 #endif
8691
8692 static DEFINE_SPINLOCK(balancing);
8693
8694 /*
8695 * Scale the max load_balance interval with the number of CPUs in the system.
8696 * This trades load-balance latency on larger machines for less cross talk.
8697 */
8698 void update_max_interval(void)
8699 {
8700 max_load_balance_interval = HZ*num_online_cpus()/10;
8701 }
8702
8703 /*
8704 * It checks each scheduling domain to see if it is due to be balanced,
8705 * and initiates a balancing operation if so.
8706 *
8707 * Balancing parameters are set up in init_sched_domains.
8708 */
8709 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8710 {
8711 int continue_balancing = 1;
8712 int cpu = rq->cpu;
8713 unsigned long interval;
8714 struct sched_domain *sd;
8715 /* Earliest time when we have to do rebalance again */
8716 unsigned long next_balance = jiffies + 60*HZ;
8717 int update_next_balance = 0;
8718 int need_serialize, need_decay = 0;
8719 u64 max_cost = 0;
8720
8721 update_blocked_averages(cpu);
8722
8723 rcu_read_lock();
8724 for_each_domain(cpu, sd) {
8725 /*
8726 * Decay the newidle max times here because this is a regular
8727 * visit to all the domains. Decay ~1% per second.
8728 */
8729 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8730 sd->max_newidle_lb_cost =
8731 (sd->max_newidle_lb_cost * 253) / 256;
8732 sd->next_decay_max_lb_cost = jiffies + HZ;
8733 need_decay = 1;
8734 }
8735 max_cost += sd->max_newidle_lb_cost;
8736
8737 if (!(sd->flags & SD_LOAD_BALANCE))
8738 continue;
8739
8740 /*
8741 * Stop the load balance at this level. There is another
8742 * CPU in our sched group which is doing load balancing more
8743 * actively.
8744 */
8745 if (!continue_balancing) {
8746 if (need_decay)
8747 continue;
8748 break;
8749 }
8750
8751 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8752
8753 need_serialize = sd->flags & SD_SERIALIZE;
8754 if (need_serialize) {
8755 if (!spin_trylock(&balancing))
8756 goto out;
8757 }
8758
8759 if (time_after_eq(jiffies, sd->last_balance + interval)) {
8760 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
8761 /*
8762 * The LBF_DST_PINNED logic could have changed
8763 * env->dst_cpu, so we can't know our idle
8764 * state even if we migrated tasks. Update it.
8765 */
8766 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
8767 }
8768 sd->last_balance = jiffies;
8769 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8770 }
8771 if (need_serialize)
8772 spin_unlock(&balancing);
8773 out:
8774 if (time_after(next_balance, sd->last_balance + interval)) {
8775 next_balance = sd->last_balance + interval;
8776 update_next_balance = 1;
8777 }
8778 }
8779 if (need_decay) {
8780 /*
8781 * Ensure the rq-wide value also decays but keep it at a
8782 * reasonable floor to avoid funnies with rq->avg_idle.
8783 */
8784 rq->max_idle_balance_cost =
8785 max((u64)sysctl_sched_migration_cost, max_cost);
8786 }
8787 rcu_read_unlock();
8788
8789 /*
8790 * next_balance will be updated only when there is a need.
8791 * When the cpu is attached to null domain for ex, it will not be
8792 * updated.
8793 */
8794 if (likely(update_next_balance)) {
8795 rq->next_balance = next_balance;
8796
8797 #ifdef CONFIG_NO_HZ_COMMON
8798 /*
8799 * If this CPU has been elected to perform the nohz idle
8800 * balance. Other idle CPUs have already rebalanced with
8801 * nohz_idle_balance() and nohz.next_balance has been
8802 * updated accordingly. This CPU is now running the idle load
8803 * balance for itself and we need to update the
8804 * nohz.next_balance accordingly.
8805 */
8806 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8807 nohz.next_balance = rq->next_balance;
8808 #endif
8809 }
8810 }
8811
8812 #ifdef CONFIG_NO_HZ_COMMON
8813 /*
8814 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
8815 * rebalancing for all the cpus for whom scheduler ticks are stopped.
8816 */
8817 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
8818 {
8819 int this_cpu = this_rq->cpu;
8820 struct rq *rq;
8821 int balance_cpu;
8822 /* Earliest time when we have to do rebalance again */
8823 unsigned long next_balance = jiffies + 60*HZ;
8824 int update_next_balance = 0;
8825
8826 if (idle != CPU_IDLE ||
8827 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8828 goto end;
8829
8830 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8831 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
8832 continue;
8833
8834 /*
8835 * If this cpu gets work to do, stop the load balancing
8836 * work being done for other cpus. Next load
8837 * balancing owner will pick it up.
8838 */
8839 if (need_resched())
8840 break;
8841
8842 rq = cpu_rq(balance_cpu);
8843
8844 /*
8845 * If time for next balance is due,
8846 * do the balance.
8847 */
8848 if (time_after_eq(jiffies, rq->next_balance)) {
8849 struct rq_flags rf;
8850
8851 rq_lock_irq(rq, &rf);
8852 update_rq_clock(rq);
8853 cpu_load_update_idle(rq);
8854 rq_unlock_irq(rq, &rf);
8855
8856 rebalance_domains(rq, CPU_IDLE);
8857 }
8858
8859 if (time_after(next_balance, rq->next_balance)) {
8860 next_balance = rq->next_balance;
8861 update_next_balance = 1;
8862 }
8863 }
8864
8865 /*
8866 * next_balance will be updated only when there is a need.
8867 * When the CPU is attached to null domain for ex, it will not be
8868 * updated.
8869 */
8870 if (likely(update_next_balance))
8871 nohz.next_balance = next_balance;
8872 end:
8873 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
8874 }
8875
8876 /*
8877 * Current heuristic for kicking the idle load balancer in the presence
8878 * of an idle cpu in the system.
8879 * - This rq has more than one task.
8880 * - This rq has at least one CFS task and the capacity of the CPU is
8881 * significantly reduced because of RT tasks or IRQs.
8882 * - At parent of LLC scheduler domain level, this cpu's scheduler group has
8883 * multiple busy cpu.
8884 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
8885 * domain span are idle.
8886 */
8887 static inline bool nohz_kick_needed(struct rq *rq)
8888 {
8889 unsigned long now = jiffies;
8890 struct sched_domain_shared *sds;
8891 struct sched_domain *sd;
8892 int nr_busy, i, cpu = rq->cpu;
8893 bool kick = false;
8894
8895 if (unlikely(rq->idle_balance))
8896 return false;
8897
8898 /*
8899 * We may be recently in ticked or tickless idle mode. At the first
8900 * busy tick after returning from idle, we will update the busy stats.
8901 */
8902 set_cpu_sd_state_busy();
8903 nohz_balance_exit_idle(cpu);
8904
8905 /*
8906 * None are in tickless mode and hence no need for NOHZ idle load
8907 * balancing.
8908 */
8909 if (likely(!atomic_read(&nohz.nr_cpus)))
8910 return false;
8911
8912 if (time_before(now, nohz.next_balance))
8913 return false;
8914
8915 if (rq->nr_running >= 2)
8916 return true;
8917
8918 rcu_read_lock();
8919 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
8920 if (sds) {
8921 /*
8922 * XXX: write a coherent comment on why we do this.
8923 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
8924 */
8925 nr_busy = atomic_read(&sds->nr_busy_cpus);
8926 if (nr_busy > 1) {
8927 kick = true;
8928 goto unlock;
8929 }
8930
8931 }
8932
8933 sd = rcu_dereference(rq->sd);
8934 if (sd) {
8935 if ((rq->cfs.h_nr_running >= 1) &&
8936 check_cpu_capacity(rq, sd)) {
8937 kick = true;
8938 goto unlock;
8939 }
8940 }
8941
8942 sd = rcu_dereference(per_cpu(sd_asym, cpu));
8943 if (sd) {
8944 for_each_cpu(i, sched_domain_span(sd)) {
8945 if (i == cpu ||
8946 !cpumask_test_cpu(i, nohz.idle_cpus_mask))
8947 continue;
8948
8949 if (sched_asym_prefer(i, cpu)) {
8950 kick = true;
8951 goto unlock;
8952 }
8953 }
8954 }
8955 unlock:
8956 rcu_read_unlock();
8957 return kick;
8958 }
8959 #else
8960 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
8961 #endif
8962
8963 /*
8964 * run_rebalance_domains is triggered when needed from the scheduler tick.
8965 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
8966 */
8967 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
8968 {
8969 struct rq *this_rq = this_rq();
8970 enum cpu_idle_type idle = this_rq->idle_balance ?
8971 CPU_IDLE : CPU_NOT_IDLE;
8972
8973 /*
8974 * If this cpu has a pending nohz_balance_kick, then do the
8975 * balancing on behalf of the other idle cpus whose ticks are
8976 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8977 * give the idle cpus a chance to load balance. Else we may
8978 * load balance only within the local sched_domain hierarchy
8979 * and abort nohz_idle_balance altogether if we pull some load.
8980 */
8981 nohz_idle_balance(this_rq, idle);
8982 rebalance_domains(this_rq, idle);
8983 }
8984
8985 /*
8986 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
8987 */
8988 void trigger_load_balance(struct rq *rq)
8989 {
8990 /* Don't need to rebalance while attached to NULL domain */
8991 if (unlikely(on_null_domain(rq)))
8992 return;
8993
8994 if (time_after_eq(jiffies, rq->next_balance))
8995 raise_softirq(SCHED_SOFTIRQ);
8996 #ifdef CONFIG_NO_HZ_COMMON
8997 if (nohz_kick_needed(rq))
8998 nohz_balancer_kick();
8999 #endif
9000 }
9001
9002 static void rq_online_fair(struct rq *rq)
9003 {
9004 update_sysctl();
9005
9006 update_runtime_enabled(rq);
9007 }
9008
9009 static void rq_offline_fair(struct rq *rq)
9010 {
9011 update_sysctl();
9012
9013 /* Ensure any throttled groups are reachable by pick_next_task */
9014 unthrottle_offline_cfs_rqs(rq);
9015 }
9016
9017 #endif /* CONFIG_SMP */
9018
9019 /*
9020 * scheduler tick hitting a task of our scheduling class:
9021 */
9022 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
9023 {
9024 struct cfs_rq *cfs_rq;
9025 struct sched_entity *se = &curr->se;
9026
9027 for_each_sched_entity(se) {
9028 cfs_rq = cfs_rq_of(se);
9029 entity_tick(cfs_rq, se, queued);
9030 }
9031
9032 if (static_branch_unlikely(&sched_numa_balancing))
9033 task_tick_numa(rq, curr);
9034 }
9035
9036 /*
9037 * called on fork with the child task as argument from the parent's context
9038 * - child not yet on the tasklist
9039 * - preemption disabled
9040 */
9041 static void task_fork_fair(struct task_struct *p)
9042 {
9043 struct cfs_rq *cfs_rq;
9044 struct sched_entity *se = &p->se, *curr;
9045 struct rq *rq = this_rq();
9046 struct rq_flags rf;
9047
9048 rq_lock(rq, &rf);
9049 update_rq_clock(rq);
9050
9051 cfs_rq = task_cfs_rq(current);
9052 curr = cfs_rq->curr;
9053 if (curr) {
9054 update_curr(cfs_rq);
9055 se->vruntime = curr->vruntime;
9056 }
9057 place_entity(cfs_rq, se, 1);
9058
9059 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
9060 /*
9061 * Upon rescheduling, sched_class::put_prev_task() will place
9062 * 'current' within the tree based on its new key value.
9063 */
9064 swap(curr->vruntime, se->vruntime);
9065 resched_curr(rq);
9066 }
9067
9068 se->vruntime -= cfs_rq->min_vruntime;
9069 rq_unlock(rq, &rf);
9070 }
9071
9072 /*
9073 * Priority of the task has changed. Check to see if we preempt
9074 * the current task.
9075 */
9076 static void
9077 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
9078 {
9079 if (!task_on_rq_queued(p))
9080 return;
9081
9082 /*
9083 * Reschedule if we are currently running on this runqueue and
9084 * our priority decreased, or if we are not currently running on
9085 * this runqueue and our priority is higher than the current's
9086 */
9087 if (rq->curr == p) {
9088 if (p->prio > oldprio)
9089 resched_curr(rq);
9090 } else
9091 check_preempt_curr(rq, p, 0);
9092 }
9093
9094 static inline bool vruntime_normalized(struct task_struct *p)
9095 {
9096 struct sched_entity *se = &p->se;
9097
9098 /*
9099 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
9100 * the dequeue_entity(.flags=0) will already have normalized the
9101 * vruntime.
9102 */
9103 if (p->on_rq)
9104 return true;
9105
9106 /*
9107 * When !on_rq, vruntime of the task has usually NOT been normalized.
9108 * But there are some cases where it has already been normalized:
9109 *
9110 * - A forked child which is waiting for being woken up by
9111 * wake_up_new_task().
9112 * - A task which has been woken up by try_to_wake_up() and
9113 * waiting for actually being woken up by sched_ttwu_pending().
9114 */
9115 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
9116 return true;
9117
9118 return false;
9119 }
9120
9121 #ifdef CONFIG_FAIR_GROUP_SCHED
9122 /*
9123 * Propagate the changes of the sched_entity across the tg tree to make it
9124 * visible to the root
9125 */
9126 static void propagate_entity_cfs_rq(struct sched_entity *se)
9127 {
9128 struct cfs_rq *cfs_rq;
9129
9130 /* Start to propagate at parent */
9131 se = se->parent;
9132
9133 for_each_sched_entity(se) {
9134 cfs_rq = cfs_rq_of(se);
9135
9136 if (cfs_rq_throttled(cfs_rq))
9137 break;
9138
9139 update_load_avg(se, UPDATE_TG);
9140 }
9141 }
9142 #else
9143 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
9144 #endif
9145
9146 static void detach_entity_cfs_rq(struct sched_entity *se)
9147 {
9148 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9149
9150 /* Catch up with the cfs_rq and remove our load when we leave */
9151 update_load_avg(se, 0);
9152 detach_entity_load_avg(cfs_rq, se);
9153 update_tg_load_avg(cfs_rq, false);
9154 propagate_entity_cfs_rq(se);
9155 }
9156
9157 static void attach_entity_cfs_rq(struct sched_entity *se)
9158 {
9159 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9160
9161 #ifdef CONFIG_FAIR_GROUP_SCHED
9162 /*
9163 * Since the real-depth could have been changed (only FAIR
9164 * class maintain depth value), reset depth properly.
9165 */
9166 se->depth = se->parent ? se->parent->depth + 1 : 0;
9167 #endif
9168
9169 /* Synchronize entity with its cfs_rq */
9170 update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
9171 attach_entity_load_avg(cfs_rq, se);
9172 update_tg_load_avg(cfs_rq, false);
9173 propagate_entity_cfs_rq(se);
9174 }
9175
9176 static void detach_task_cfs_rq(struct task_struct *p)
9177 {
9178 struct sched_entity *se = &p->se;
9179 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9180
9181 if (!vruntime_normalized(p)) {
9182 /*
9183 * Fix up our vruntime so that the current sleep doesn't
9184 * cause 'unlimited' sleep bonus.
9185 */
9186 place_entity(cfs_rq, se, 0);
9187 se->vruntime -= cfs_rq->min_vruntime;
9188 }
9189
9190 detach_entity_cfs_rq(se);
9191 }
9192
9193 static void attach_task_cfs_rq(struct task_struct *p)
9194 {
9195 struct sched_entity *se = &p->se;
9196 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9197
9198 attach_entity_cfs_rq(se);
9199
9200 if (!vruntime_normalized(p))
9201 se->vruntime += cfs_rq->min_vruntime;
9202 }
9203
9204 static void switched_from_fair(struct rq *rq, struct task_struct *p)
9205 {
9206 detach_task_cfs_rq(p);
9207 }
9208
9209 static void switched_to_fair(struct rq *rq, struct task_struct *p)
9210 {
9211 attach_task_cfs_rq(p);
9212
9213 if (task_on_rq_queued(p)) {
9214 /*
9215 * We were most likely switched from sched_rt, so
9216 * kick off the schedule if running, otherwise just see
9217 * if we can still preempt the current task.
9218 */
9219 if (rq->curr == p)
9220 resched_curr(rq);
9221 else
9222 check_preempt_curr(rq, p, 0);
9223 }
9224 }
9225
9226 /* Account for a task changing its policy or group.
9227 *
9228 * This routine is mostly called to set cfs_rq->curr field when a task
9229 * migrates between groups/classes.
9230 */
9231 static void set_curr_task_fair(struct rq *rq)
9232 {
9233 struct sched_entity *se = &rq->curr->se;
9234
9235 for_each_sched_entity(se) {
9236 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9237
9238 set_next_entity(cfs_rq, se);
9239 /* ensure bandwidth has been allocated on our new cfs_rq */
9240 account_cfs_rq_runtime(cfs_rq, 0);
9241 }
9242 }
9243
9244 void init_cfs_rq(struct cfs_rq *cfs_rq)
9245 {
9246 cfs_rq->tasks_timeline = RB_ROOT;
9247 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
9248 #ifndef CONFIG_64BIT
9249 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
9250 #endif
9251 #ifdef CONFIG_SMP
9252 #ifdef CONFIG_FAIR_GROUP_SCHED
9253 cfs_rq->propagate_avg = 0;
9254 #endif
9255 atomic_long_set(&cfs_rq->removed_load_avg, 0);
9256 atomic_long_set(&cfs_rq->removed_util_avg, 0);
9257 #endif
9258 }
9259
9260 #ifdef CONFIG_FAIR_GROUP_SCHED
9261 static void task_set_group_fair(struct task_struct *p)
9262 {
9263 struct sched_entity *se = &p->se;
9264
9265 set_task_rq(p, task_cpu(p));
9266 se->depth = se->parent ? se->parent->depth + 1 : 0;
9267 }
9268
9269 static void task_move_group_fair(struct task_struct *p)
9270 {
9271 detach_task_cfs_rq(p);
9272 set_task_rq(p, task_cpu(p));
9273
9274 #ifdef CONFIG_SMP
9275 /* Tell se's cfs_rq has been changed -- migrated */
9276 p->se.avg.last_update_time = 0;
9277 #endif
9278 attach_task_cfs_rq(p);
9279 }
9280
9281 static void task_change_group_fair(struct task_struct *p, int type)
9282 {
9283 switch (type) {
9284 case TASK_SET_GROUP:
9285 task_set_group_fair(p);
9286 break;
9287
9288 case TASK_MOVE_GROUP:
9289 task_move_group_fair(p);
9290 break;
9291 }
9292 }
9293
9294 void free_fair_sched_group(struct task_group *tg)
9295 {
9296 int i;
9297
9298 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
9299
9300 for_each_possible_cpu(i) {
9301 if (tg->cfs_rq)
9302 kfree(tg->cfs_rq[i]);
9303 if (tg->se)
9304 kfree(tg->se[i]);
9305 }
9306
9307 kfree(tg->cfs_rq);
9308 kfree(tg->se);
9309 }
9310
9311 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9312 {
9313 struct sched_entity *se;
9314 struct cfs_rq *cfs_rq;
9315 int i;
9316
9317 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
9318 if (!tg->cfs_rq)
9319 goto err;
9320 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
9321 if (!tg->se)
9322 goto err;
9323
9324 tg->shares = NICE_0_LOAD;
9325
9326 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
9327
9328 for_each_possible_cpu(i) {
9329 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9330 GFP_KERNEL, cpu_to_node(i));
9331 if (!cfs_rq)
9332 goto err;
9333
9334 se = kzalloc_node(sizeof(struct sched_entity),
9335 GFP_KERNEL, cpu_to_node(i));
9336 if (!se)
9337 goto err_free_rq;
9338
9339 init_cfs_rq(cfs_rq);
9340 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
9341 init_entity_runnable_average(se);
9342 }
9343
9344 return 1;
9345
9346 err_free_rq:
9347 kfree(cfs_rq);
9348 err:
9349 return 0;
9350 }
9351
9352 void online_fair_sched_group(struct task_group *tg)
9353 {
9354 struct sched_entity *se;
9355 struct rq *rq;
9356 int i;
9357
9358 for_each_possible_cpu(i) {
9359 rq = cpu_rq(i);
9360 se = tg->se[i];
9361
9362 raw_spin_lock_irq(&rq->lock);
9363 update_rq_clock(rq);
9364 attach_entity_cfs_rq(se);
9365 sync_throttle(tg, i);
9366 raw_spin_unlock_irq(&rq->lock);
9367 }
9368 }
9369
9370 void unregister_fair_sched_group(struct task_group *tg)
9371 {
9372 unsigned long flags;
9373 struct rq *rq;
9374 int cpu;
9375
9376 for_each_possible_cpu(cpu) {
9377 if (tg->se[cpu])
9378 remove_entity_load_avg(tg->se[cpu]);
9379
9380 /*
9381 * Only empty task groups can be destroyed; so we can speculatively
9382 * check on_list without danger of it being re-added.
9383 */
9384 if (!tg->cfs_rq[cpu]->on_list)
9385 continue;
9386
9387 rq = cpu_rq(cpu);
9388
9389 raw_spin_lock_irqsave(&rq->lock, flags);
9390 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
9391 raw_spin_unlock_irqrestore(&rq->lock, flags);
9392 }
9393 }
9394
9395 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
9396 struct sched_entity *se, int cpu,
9397 struct sched_entity *parent)
9398 {
9399 struct rq *rq = cpu_rq(cpu);
9400
9401 cfs_rq->tg = tg;
9402 cfs_rq->rq = rq;
9403 init_cfs_rq_runtime(cfs_rq);
9404
9405 tg->cfs_rq[cpu] = cfs_rq;
9406 tg->se[cpu] = se;
9407
9408 /* se could be NULL for root_task_group */
9409 if (!se)
9410 return;
9411
9412 if (!parent) {
9413 se->cfs_rq = &rq->cfs;
9414 se->depth = 0;
9415 } else {
9416 se->cfs_rq = parent->my_q;
9417 se->depth = parent->depth + 1;
9418 }
9419
9420 se->my_q = cfs_rq;
9421 /* guarantee group entities always have weight */
9422 update_load_set(&se->load, NICE_0_LOAD);
9423 se->parent = parent;
9424 }
9425
9426 static DEFINE_MUTEX(shares_mutex);
9427
9428 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
9429 {
9430 int i;
9431
9432 /*
9433 * We can't change the weight of the root cgroup.
9434 */
9435 if (!tg->se[0])
9436 return -EINVAL;
9437
9438 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
9439
9440 mutex_lock(&shares_mutex);
9441 if (tg->shares == shares)
9442 goto done;
9443
9444 tg->shares = shares;
9445 for_each_possible_cpu(i) {
9446 struct rq *rq = cpu_rq(i);
9447 struct sched_entity *se = tg->se[i];
9448 struct rq_flags rf;
9449
9450 /* Propagate contribution to hierarchy */
9451 rq_lock_irqsave(rq, &rf);
9452 update_rq_clock(rq);
9453 for_each_sched_entity(se) {
9454 update_load_avg(se, UPDATE_TG);
9455 update_cfs_shares(se);
9456 }
9457 rq_unlock_irqrestore(rq, &rf);
9458 }
9459
9460 done:
9461 mutex_unlock(&shares_mutex);
9462 return 0;
9463 }
9464 #else /* CONFIG_FAIR_GROUP_SCHED */
9465
9466 void free_fair_sched_group(struct task_group *tg) { }
9467
9468 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9469 {
9470 return 1;
9471 }
9472
9473 void online_fair_sched_group(struct task_group *tg) { }
9474
9475 void unregister_fair_sched_group(struct task_group *tg) { }
9476
9477 #endif /* CONFIG_FAIR_GROUP_SCHED */
9478
9479
9480 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
9481 {
9482 struct sched_entity *se = &task->se;
9483 unsigned int rr_interval = 0;
9484
9485 /*
9486 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
9487 * idle runqueue:
9488 */
9489 if (rq->cfs.load.weight)
9490 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
9491
9492 return rr_interval;
9493 }
9494
9495 /*
9496 * All the scheduling class methods:
9497 */
9498 const struct sched_class fair_sched_class = {
9499 .next = &idle_sched_class,
9500 .enqueue_task = enqueue_task_fair,
9501 .dequeue_task = dequeue_task_fair,
9502 .yield_task = yield_task_fair,
9503 .yield_to_task = yield_to_task_fair,
9504
9505 .check_preempt_curr = check_preempt_wakeup,
9506
9507 .pick_next_task = pick_next_task_fair,
9508 .put_prev_task = put_prev_task_fair,
9509
9510 #ifdef CONFIG_SMP
9511 .select_task_rq = select_task_rq_fair,
9512 .migrate_task_rq = migrate_task_rq_fair,
9513
9514 .rq_online = rq_online_fair,
9515 .rq_offline = rq_offline_fair,
9516
9517 .task_dead = task_dead_fair,
9518 .set_cpus_allowed = set_cpus_allowed_common,
9519 #endif
9520
9521 .set_curr_task = set_curr_task_fair,
9522 .task_tick = task_tick_fair,
9523 .task_fork = task_fork_fair,
9524
9525 .prio_changed = prio_changed_fair,
9526 .switched_from = switched_from_fair,
9527 .switched_to = switched_to_fair,
9528
9529 .get_rr_interval = get_rr_interval_fair,
9530
9531 .update_curr = update_curr_fair,
9532
9533 #ifdef CONFIG_FAIR_GROUP_SCHED
9534 .task_change_group = task_change_group_fair,
9535 #endif
9536 };
9537
9538 #ifdef CONFIG_SCHED_DEBUG
9539 void print_cfs_stats(struct seq_file *m, int cpu)
9540 {
9541 struct cfs_rq *cfs_rq;
9542
9543 rcu_read_lock();
9544 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
9545 print_cfs_rq(m, cpu, cfs_rq);
9546 rcu_read_unlock();
9547 }
9548
9549 #ifdef CONFIG_NUMA_BALANCING
9550 void show_numa_stats(struct task_struct *p, struct seq_file *m)
9551 {
9552 int node;
9553 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
9554
9555 for_each_online_node(node) {
9556 if (p->numa_faults) {
9557 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
9558 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
9559 }
9560 if (p->numa_group) {
9561 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
9562 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
9563 }
9564 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
9565 }
9566 }
9567 #endif /* CONFIG_NUMA_BALANCING */
9568 #endif /* CONFIG_SCHED_DEBUG */
9569
9570 __init void init_sched_fair_class(void)
9571 {
9572 #ifdef CONFIG_SMP
9573 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
9574
9575 #ifdef CONFIG_NO_HZ_COMMON
9576 nohz.next_balance = jiffies;
9577 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
9578 #endif
9579 #endif /* SMP */
9580
9581 }