kernel/sched/fair.c (mirror_ubuntu-jammy-kernel.git)
sched/numa: Find the preferred nid with complex NUMA topology
1 /*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 */
22
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/cpuidle.h>
27 #include <linux/slab.h>
28 #include <linux/profile.h>
29 #include <linux/interrupt.h>
30 #include <linux/mempolicy.h>
31 #include <linux/migrate.h>
32 #include <linux/task_work.h>
33
34 #include <trace/events/sched.h>
35
36 #include "sched.h"
37
38 /*
39 * Targeted preemption latency for CPU-bound tasks:
40 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41 *
42 * NOTE: this latency value is not the same as the concept of
43 * 'timeslice length' - timeslices in CFS are of variable length
44 * and have no persistent notion like in traditional, time-slice
45 * based scheduling concepts.
46 *
47 * (to see the precise effective timeslice length of your workload,
48 * run vmstat and monitor the context-switches (cs) field)
49 */
50 unsigned int sysctl_sched_latency = 6000000ULL;
51 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
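/*
 * Worked example (assuming the default LOG tunable scaling described
 * below): with 8 or more online CPUs the scaling factor is
 * 1 + ilog2(8) = 4, so the effective preemption latency becomes about
 * 4 * 6ms = 24ms; on a dual-core machine it would be 2 * 6ms = 12ms.
 */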
52
53 /*
54 * The initial- and re-scaling of tunables is configurable
55 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
56 *
57 * Options are:
58 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
59 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
60 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
61 */
62 enum sched_tunable_scaling sysctl_sched_tunable_scaling
63 = SCHED_TUNABLESCALING_LOG;
64
65 /*
66 * Minimal preemption granularity for CPU-bound tasks:
67 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
68 */
69 unsigned int sysctl_sched_min_granularity = 750000ULL;
70 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
71
72 /*
73 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
74 */
75 static unsigned int sched_nr_latency = 8;
76
77 /*
78 * After fork, child runs first. If set to 0 (default) then
79 * parent will (try to) run first.
80 */
81 unsigned int sysctl_sched_child_runs_first __read_mostly;
82
83 /*
84 * SCHED_OTHER wake-up granularity.
85 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
86 *
87 * This option delays the preemption effects of decoupled workloads
88 * and reduces their over-scheduling. Synchronous workloads will still
89 * have immediate wakeup/sleep latencies.
90 */
91 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
92 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
93
94 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
95
96 /*
97 * The exponential sliding window over which load is averaged for shares
98 * distribution.
99 * (default: 10msec)
100 */
101 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102
103 #ifdef CONFIG_CFS_BANDWIDTH
104 /*
105 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106 * each time a cfs_rq requests quota.
107 *
108 * Note: in the case that the slice exceeds the runtime remaining (either due
109 * to consumption or the quota being specified to be smaller than the slice)
110 * we will always only issue the remaining available time.
111 *
112 * default: 5 msec, units: microseconds
113 */
114 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
115 #endif
116
117 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
118 {
119 lw->weight += inc;
120 lw->inv_weight = 0;
121 }
122
123 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
124 {
125 lw->weight -= dec;
126 lw->inv_weight = 0;
127 }
128
129 static inline void update_load_set(struct load_weight *lw, unsigned long w)
130 {
131 lw->weight = w;
132 lw->inv_weight = 0;
133 }
134
135 /*
136 * Increase the granularity value when there are more CPUs,
137 * because with more CPUs the 'effective latency' as visible
138 * to users decreases. But the relationship is not linear,
139 * so pick a second-best guess by going with the log2 of the
140 * number of CPUs.
141 *
142 * This idea comes from the SD scheduler of Con Kolivas:
143 */
144 static int get_update_sysctl_factor(void)
145 {
146 unsigned int cpus = min_t(int, num_online_cpus(), 8);
147 unsigned int factor;
148
149 switch (sysctl_sched_tunable_scaling) {
150 case SCHED_TUNABLESCALING_NONE:
151 factor = 1;
152 break;
153 case SCHED_TUNABLESCALING_LINEAR:
154 factor = cpus;
155 break;
156 case SCHED_TUNABLESCALING_LOG:
157 default:
158 factor = 1 + ilog2(cpus);
159 break;
160 }
161
162 return factor;
163 }
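/*
 * Worked example for the factor above, assuming 4 online CPUs:
 *   SCHED_TUNABLESCALING_NONE   -> factor = 1
 *   SCHED_TUNABLESCALING_LOG    -> factor = 1 + ilog2(4) = 3
 *   SCHED_TUNABLESCALING_LINEAR -> factor = 4
 * Note that the CPU count is clamped to 8 above, so LOG scaling never
 * yields a factor larger than 1 + ilog2(8) = 4.
 */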
164
165 static void update_sysctl(void)
166 {
167 unsigned int factor = get_update_sysctl_factor();
168
169 #define SET_SYSCTL(name) \
170 (sysctl_##name = (factor) * normalized_sysctl_##name)
171 SET_SYSCTL(sched_min_granularity);
172 SET_SYSCTL(sched_latency);
173 SET_SYSCTL(sched_wakeup_granularity);
174 #undef SET_SYSCTL
175 }
176
177 void sched_init_granularity(void)
178 {
179 update_sysctl();
180 }
181
182 #define WMULT_CONST (~0U)
183 #define WMULT_SHIFT 32
184
185 static void __update_inv_weight(struct load_weight *lw)
186 {
187 unsigned long w;
188
189 if (likely(lw->inv_weight))
190 return;
191
192 w = scale_load_down(lw->weight);
193
194 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
195 lw->inv_weight = 1;
196 else if (unlikely(!w))
197 lw->inv_weight = WMULT_CONST;
198 else
199 lw->inv_weight = WMULT_CONST / w;
200 }
201
202 /*
203 * delta_exec * weight / lw.weight
204 * OR
205 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
206 *
207 * Either weight := NICE_0_LOAD and lw is an entry of prio_to_wmult[], in which case
208 * we're guaranteed shift stays positive because inv_weight is guaranteed to
209 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
210 *
211 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
212 * weight/lw.weight <= 1, and therefore our shift will also be positive.
213 */
214 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
215 {
216 u64 fact = scale_load_down(weight);
217 int shift = WMULT_SHIFT;
218
219 __update_inv_weight(lw);
220
221 if (unlikely(fact >> 32)) {
222 while (fact >> 32) {
223 fact >>= 1;
224 shift--;
225 }
226 }
227
228 /* hint to use a 32x32->64 mul */
229 fact = (u64)(u32)fact * lw->inv_weight;
230
231 while (fact >> 32) {
232 fact >>= 1;
233 shift--;
234 }
235
236 return mul_u64_u32_shr(delta_exec, fact, shift);
237 }
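/*
 * Worked example of the fixed-point arithmetic above, with illustrative
 * numbers: delta_exec = 1000000 (1ms), weight = NICE_0_LOAD (1024 after
 * scale_load_down) and lw->weight = 2048 (e.g. two nice-0 tasks on the
 * queue).  Then inv_weight = (2^32 - 1) / 2048 = 2097151, so
 * fact = 1024 * 2097151, just under 2^31, which still fits in 32 bits
 * and leaves shift = 32.  The result is
 * (1000000 * fact) >> 32 ~= 500000, i.e. delta_exec * 1024/2048.
 */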
238
239
240 const struct sched_class fair_sched_class;
241
242 /**************************************************************
243 * CFS operations on generic schedulable entities:
244 */
245
246 #ifdef CONFIG_FAIR_GROUP_SCHED
247
248 /* cpu runqueue to which this cfs_rq is attached */
249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
250 {
251 return cfs_rq->rq;
252 }
253
254 /* An entity is a task if it doesn't "own" a runqueue */
255 #define entity_is_task(se) (!se->my_q)
256
257 static inline struct task_struct *task_of(struct sched_entity *se)
258 {
259 #ifdef CONFIG_SCHED_DEBUG
260 WARN_ON_ONCE(!entity_is_task(se));
261 #endif
262 return container_of(se, struct task_struct, se);
263 }
264
265 /* Walk up scheduling entities hierarchy */
266 #define for_each_sched_entity(se) \
267 for (; se; se = se->parent)
268
269 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
270 {
271 return p->se.cfs_rq;
272 }
273
274 /* runqueue on which this entity is (to be) queued */
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
276 {
277 return se->cfs_rq;
278 }
279
280 /* runqueue "owned" by this group */
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
282 {
283 return grp->my_q;
284 }
285
286 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
287 int force_update);
288
289 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
290 {
291 if (!cfs_rq->on_list) {
292 /*
293 * Ensure we either appear before our parent (if already
294 * enqueued) or force our parent to appear after us when it is
295 * enqueued. The fact that we always enqueue bottom-up
296 * reduces this to two cases.
297 */
298 if (cfs_rq->tg->parent &&
299 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
300 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 } else {
303 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
304 &rq_of(cfs_rq)->leaf_cfs_rq_list);
305 }
306
307 cfs_rq->on_list = 1;
308 /* We should have no load, but we need to update last_decay. */
309 update_cfs_rq_blocked_load(cfs_rq, 0);
310 }
311 }
312
313 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
314 {
315 if (cfs_rq->on_list) {
316 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
317 cfs_rq->on_list = 0;
318 }
319 }
320
321 /* Iterate through all leaf cfs_rq's on a runqueue */
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
324
325 /* Do the two (enqueued) entities belong to the same group? */
326 static inline struct cfs_rq *
327 is_same_group(struct sched_entity *se, struct sched_entity *pse)
328 {
329 if (se->cfs_rq == pse->cfs_rq)
330 return se->cfs_rq;
331
332 return NULL;
333 }
334
335 static inline struct sched_entity *parent_entity(struct sched_entity *se)
336 {
337 return se->parent;
338 }
339
340 static void
341 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
342 {
343 int se_depth, pse_depth;
344
345 /*
346 * preemption test can be made between sibling entities who are in the
347 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
348 * both tasks until we find their ancestors who are siblings of common
349 * parent.
350 */
351
352 /* First walk up until both entities are at same depth */
353 se_depth = (*se)->depth;
354 pse_depth = (*pse)->depth;
355
356 while (se_depth > pse_depth) {
357 se_depth--;
358 *se = parent_entity(*se);
359 }
360
361 while (pse_depth > se_depth) {
362 pse_depth--;
363 *pse = parent_entity(*pse);
364 }
365
366 while (!is_same_group(*se, *pse)) {
367 *se = parent_entity(*se);
368 *pse = parent_entity(*pse);
369 }
370 }
371
372 #else /* !CONFIG_FAIR_GROUP_SCHED */
373
374 static inline struct task_struct *task_of(struct sched_entity *se)
375 {
376 return container_of(se, struct task_struct, se);
377 }
378
379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
380 {
381 return container_of(cfs_rq, struct rq, cfs);
382 }
383
384 #define entity_is_task(se) 1
385
386 #define for_each_sched_entity(se) \
387 for (; se; se = NULL)
388
389 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
390 {
391 return &task_rq(p)->cfs;
392 }
393
394 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
395 {
396 struct task_struct *p = task_of(se);
397 struct rq *rq = task_rq(p);
398
399 return &rq->cfs;
400 }
401
402 /* runqueue "owned" by this group */
403 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
404 {
405 return NULL;
406 }
407
408 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
409 {
410 }
411
412 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
413 {
414 }
415
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
418
419 static inline struct sched_entity *parent_entity(struct sched_entity *se)
420 {
421 return NULL;
422 }
423
424 static inline void
425 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
426 {
427 }
428
429 #endif /* CONFIG_FAIR_GROUP_SCHED */
430
431 static __always_inline
432 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
433
434 /**************************************************************
435 * Scheduling class tree data structure manipulation methods:
436 */
437
438 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
439 {
440 s64 delta = (s64)(vruntime - max_vruntime);
441 if (delta > 0)
442 max_vruntime = vruntime;
443
444 return max_vruntime;
445 }
446
447 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
448 {
449 s64 delta = (s64)(vruntime - min_vruntime);
450 if (delta < 0)
451 min_vruntime = vruntime;
452
453 return min_vruntime;
454 }
455
456 static inline int entity_before(struct sched_entity *a,
457 struct sched_entity *b)
458 {
459 return (s64)(a->vruntime - b->vruntime) < 0;
460 }
461
462 static void update_min_vruntime(struct cfs_rq *cfs_rq)
463 {
464 u64 vruntime = cfs_rq->min_vruntime;
465
466 if (cfs_rq->curr)
467 vruntime = cfs_rq->curr->vruntime;
468
469 if (cfs_rq->rb_leftmost) {
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
471 struct sched_entity,
472 run_node);
473
474 if (!cfs_rq->curr)
475 vruntime = se->vruntime;
476 else
477 vruntime = min_vruntime(vruntime, se->vruntime);
478 }
479
480 /* ensure we never gain time by being placed backwards. */
481 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
482 #ifndef CONFIG_64BIT
483 smp_wmb();
484 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
485 #endif
486 }
487
488 /*
489 * Enqueue an entity into the rb-tree:
490 */
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
492 {
493 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
494 struct rb_node *parent = NULL;
495 struct sched_entity *entry;
496 int leftmost = 1;
497
498 /*
499 * Find the right place in the rbtree:
500 */
501 while (*link) {
502 parent = *link;
503 entry = rb_entry(parent, struct sched_entity, run_node);
504 /*
505 * We don't care about collisions. Nodes with
506 * the same key stay together.
507 */
508 if (entity_before(se, entry)) {
509 link = &parent->rb_left;
510 } else {
511 link = &parent->rb_right;
512 leftmost = 0;
513 }
514 }
515
516 /*
517 * Maintain a cache of leftmost tree entries (it is frequently
518 * used):
519 */
520 if (leftmost)
521 cfs_rq->rb_leftmost = &se->run_node;
522
523 rb_link_node(&se->run_node, parent, link);
524 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
525 }
526
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
528 {
529 if (cfs_rq->rb_leftmost == &se->run_node) {
530 struct rb_node *next_node;
531
532 next_node = rb_next(&se->run_node);
533 cfs_rq->rb_leftmost = next_node;
534 }
535
536 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
537 }
538
539 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
540 {
541 struct rb_node *left = cfs_rq->rb_leftmost;
542
543 if (!left)
544 return NULL;
545
546 return rb_entry(left, struct sched_entity, run_node);
547 }
548
549 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
550 {
551 struct rb_node *next = rb_next(&se->run_node);
552
553 if (!next)
554 return NULL;
555
556 return rb_entry(next, struct sched_entity, run_node);
557 }
558
559 #ifdef CONFIG_SCHED_DEBUG
560 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
561 {
562 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
563
564 if (!last)
565 return NULL;
566
567 return rb_entry(last, struct sched_entity, run_node);
568 }
569
570 /**************************************************************
571 * Scheduling class statistics methods:
572 */
573
574 int sched_proc_update_handler(struct ctl_table *table, int write,
575 void __user *buffer, size_t *lenp,
576 loff_t *ppos)
577 {
578 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
579 int factor = get_update_sysctl_factor();
580
581 if (ret || !write)
582 return ret;
583
584 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
585 sysctl_sched_min_granularity);
586
587 #define WRT_SYSCTL(name) \
588 (normalized_sysctl_##name = sysctl_##name / (factor))
589 WRT_SYSCTL(sched_min_granularity);
590 WRT_SYSCTL(sched_latency);
591 WRT_SYSCTL(sched_wakeup_granularity);
592 #undef WRT_SYSCTL
593
594 return 0;
595 }
596 #endif
597
598 /*
599 * delta /= w
600 */
601 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
602 {
603 if (unlikely(se->load.weight != NICE_0_LOAD))
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
605
606 return delta;
607 }
608
609 /*
610 * The idea is to set a period in which each task runs once.
611 *
612 * When there are too many tasks (sched_nr_latency) we have to stretch
613 * this period because otherwise the slices get too small.
614 *
615 * p = (nr <= nl) ? l : l*nr/nl
616 */
617 static u64 __sched_period(unsigned long nr_running)
618 {
619 u64 period = sysctl_sched_latency;
620 unsigned long nr_latency = sched_nr_latency;
621
622 if (unlikely(nr_running > nr_latency)) {
623 period = sysctl_sched_min_granularity;
624 period *= nr_running;
625 }
626
627 return period;
628 }
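/*
 * Worked example with the default tunables: sched_nr_latency is
 * 6ms / 0.75ms = 8, so with up to 8 runnable tasks the period stays at
 * sysctl_sched_latency = 6ms; with 16 runnable tasks it is stretched to
 * 16 * 0.75ms = 12ms so that no slice drops below the minimum
 * granularity.
 */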
629
630 /*
631 * We calculate the wall-time slice from the period by taking a part
632 * proportional to the weight.
633 *
634 * s = p*P[w/rw]
635 */
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
637 {
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
639
640 for_each_sched_entity(se) {
641 struct load_weight *load;
642 struct load_weight lw;
643
644 cfs_rq = cfs_rq_of(se);
645 load = &cfs_rq->load;
646
647 if (unlikely(!se->on_rq)) {
648 lw = cfs_rq->load;
649
650 update_load_add(&lw, se->load.weight);
651 load = &lw;
652 }
653 slice = __calc_delta(slice, se->load.weight, load);
654 }
655 return slice;
656 }
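/*
 * Worked example with illustrative weights: with two runnable tasks the
 * period is 6ms (default).  If their load weights are 2048 and 1024,
 * the wall-time slices come out to 6ms * 2048/3072 = 4ms and
 * 6ms * 1024/3072 = 2ms respectively; equal-weight tasks would simply
 * split the period 3ms/3ms.
 */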
657
658 /*
659 * We calculate the vruntime slice of a to-be-inserted task.
660 *
661 * vs = s/w
662 */
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
664 {
665 return calc_delta_fair(sched_slice(cfs_rq, se), se);
666 }
667
668 #ifdef CONFIG_SMP
669 static int select_idle_sibling(struct task_struct *p, int cpu);
670 static unsigned long task_h_load(struct task_struct *p);
671
672 static inline void __update_task_entity_contrib(struct sched_entity *se);
673
674 /* Give a new task initial runnable-average values so it carries its full load weight from the start */
675 void init_task_runnable_average(struct task_struct *p)
676 {
677 u32 slice;
678
679 p->se.avg.decay_count = 0;
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
681 p->se.avg.runnable_avg_sum = slice;
682 p->se.avg.runnable_avg_period = slice;
683 __update_task_entity_contrib(&p->se);
684 }
685 #else
686 void init_task_runnable_average(struct task_struct *p)
687 {
688 }
689 #endif
690
691 /*
692 * Update the current task's runtime statistics.
693 */
694 static void update_curr(struct cfs_rq *cfs_rq)
695 {
696 struct sched_entity *curr = cfs_rq->curr;
697 u64 now = rq_clock_task(rq_of(cfs_rq));
698 u64 delta_exec;
699
700 if (unlikely(!curr))
701 return;
702
703 delta_exec = now - curr->exec_start;
704 if (unlikely((s64)delta_exec <= 0))
705 return;
706
707 curr->exec_start = now;
708
709 schedstat_set(curr->statistics.exec_max,
710 max(delta_exec, curr->statistics.exec_max));
711
712 curr->sum_exec_runtime += delta_exec;
713 schedstat_add(cfs_rq, exec_clock, delta_exec);
714
715 curr->vruntime += calc_delta_fair(delta_exec, curr);
716 update_min_vruntime(cfs_rq);
717
718 if (entity_is_task(curr)) {
719 struct task_struct *curtask = task_of(curr);
720
721 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
722 cpuacct_charge(curtask, delta_exec);
723 account_group_exec_runtime(curtask, delta_exec);
724 }
725
726 account_cfs_rq_runtime(cfs_rq, delta_exec);
727 }
728
729 static inline void
730 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
731 {
732 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
733 }
734
735 /*
736 * Task is being enqueued - update stats:
737 */
738 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
739 {
740 /*
741 * Are we enqueueing a waiting task? (for current tasks
742 * a dequeue/enqueue event is a NOP)
743 */
744 if (se != cfs_rq->curr)
745 update_stats_wait_start(cfs_rq, se);
746 }
747
748 static void
749 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
750 {
751 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
752 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
753 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
754 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
755 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
756 #ifdef CONFIG_SCHEDSTATS
757 if (entity_is_task(se)) {
758 trace_sched_stat_wait(task_of(se),
759 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
760 }
761 #endif
762 schedstat_set(se->statistics.wait_start, 0);
763 }
764
765 static inline void
766 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
767 {
768 /*
769 * Mark the end of the wait period if dequeueing a
770 * waiting task:
771 */
772 if (se != cfs_rq->curr)
773 update_stats_wait_end(cfs_rq, se);
774 }
775
776 /*
777 * We are picking a new current task - update its stats:
778 */
779 static inline void
780 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
781 {
782 /*
783 * We are starting a new run period:
784 */
785 se->exec_start = rq_clock_task(rq_of(cfs_rq));
786 }
787
788 /**************************************************
789 * Scheduling class queueing methods:
790 */
791
792 #ifdef CONFIG_NUMA_BALANCING
793 /*
794 * Approximate time to scan a full NUMA task in ms. The task scan period is
795 * calculated based on the task's virtual memory size and
796 * numa_balancing_scan_size.
797 */
798 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
799 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
800
801 /* Portion of address space to scan in MB */
802 unsigned int sysctl_numa_balancing_scan_size = 256;
803
804 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
805 unsigned int sysctl_numa_balancing_scan_delay = 1000;
806
807 static unsigned int task_nr_scan_windows(struct task_struct *p)
808 {
809 unsigned long rss = 0;
810 unsigned long nr_scan_pages;
811
812 /*
813 * Calculations based on RSS as non-present and empty pages are skipped
814 * by the PTE scanner and NUMA hinting faults should be trapped based
815 * on resident pages
816 */
817 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
818 rss = get_mm_rss(p->mm);
819 if (!rss)
820 rss = nr_scan_pages;
821
822 rss = round_up(rss, nr_scan_pages);
823 return rss / nr_scan_pages;
824 }
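/*
 * Worked example, assuming 4K pages and the default 256MB scan size:
 * nr_scan_pages = 256 << (20 - 12) = 65536 pages.  A task with a 1GB
 * RSS (262144 pages) therefore needs 262144 / 65536 = 4 scan windows
 * to cover its resident memory once.
 */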
825
826 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
827 #define MAX_SCAN_WINDOW 2560
828
829 static unsigned int task_scan_min(struct task_struct *p)
830 {
831 unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
832 unsigned int scan, floor;
833 unsigned int windows = 1;
834
835 if (scan_size < MAX_SCAN_WINDOW)
836 windows = MAX_SCAN_WINDOW / scan_size;
837 floor = 1000 / windows;
838
839 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
840 return max_t(unsigned int, floor, scan);
841 }
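/*
 * Worked example with the default tunables: windows = 2560 / 256 = 10,
 * so floor = 1000 / 10 = 100ms (never scan faster than MAX_SCAN_WINDOW
 * MB/sec).  A task whose RSS needs 4 scan windows for a full pass gets
 * scan = 1000ms / 4 = 250ms, so task_scan_min() returns
 * max(100, 250) = 250ms between scan windows.
 */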
842
843 static unsigned int task_scan_max(struct task_struct *p)
844 {
845 unsigned int smin = task_scan_min(p);
846 unsigned int smax;
847
848 /* Watch for smax ending up lower than smin due to floor calculations */
849 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
850 return max(smin, smax);
851 }
852
853 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
854 {
855 rq->nr_numa_running += (p->numa_preferred_nid != -1);
856 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
857 }
858
859 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
860 {
861 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
862 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
863 }
864
865 struct numa_group {
866 atomic_t refcount;
867
868 spinlock_t lock; /* nr_tasks, tasks */
869 int nr_tasks;
870 pid_t gid;
871 struct list_head task_list;
872
873 struct rcu_head rcu;
874 nodemask_t active_nodes;
875 unsigned long total_faults;
876 /*
877 * Faults_cpu is used to decide whether memory should move
878 * towards the CPU. As a consequence, these stats are weighted
879 * more by CPU use than by memory faults.
880 */
881 unsigned long *faults_cpu;
882 unsigned long faults[0];
883 };
884
885 /* Shared or private faults. */
886 #define NR_NUMA_HINT_FAULT_TYPES 2
887
888 /* Memory and CPU locality */
889 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
890
891 /* Averaged statistics, and temporary buffers. */
892 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
893
894 pid_t task_numa_group_id(struct task_struct *p)
895 {
896 return p->numa_group ? p->numa_group->gid : 0;
897 }
898
899 static inline int task_faults_idx(int nid, int priv)
900 {
901 return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
902 }
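/*
 * Worked example of the index layout: the per-task and per-group fault
 * arrays are laid out as [nid][priv] with NR_NUMA_HINT_FAULT_TYPES = 2
 * entries per node.  On a 4-node machine, node 2's shared-fault counter
 * sits at index 2 * 2 + 0 = 4 and its private-fault counter at index
 * 2 * 2 + 1 = 5; task_faults() below simply sums the two.
 */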
903
904 static inline unsigned long task_faults(struct task_struct *p, int nid)
905 {
906 if (!p->numa_faults_memory)
907 return 0;
908
909 return p->numa_faults_memory[task_faults_idx(nid, 0)] +
910 p->numa_faults_memory[task_faults_idx(nid, 1)];
911 }
912
913 static inline unsigned long group_faults(struct task_struct *p, int nid)
914 {
915 if (!p->numa_group)
916 return 0;
917
918 return p->numa_group->faults[task_faults_idx(nid, 0)] +
919 p->numa_group->faults[task_faults_idx(nid, 1)];
920 }
921
922 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
923 {
924 return group->faults_cpu[task_faults_idx(nid, 0)] +
925 group->faults_cpu[task_faults_idx(nid, 1)];
926 }
927
928 /* Handle placement on systems where not all nodes are directly connected. */
929 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
930 int maxdist, bool task)
931 {
932 unsigned long score = 0;
933 int node;
934
935 /*
936 * All nodes are directly connected, and the same distance
937 * from each other. No need for fancy placement algorithms.
938 */
939 if (sched_numa_topology_type == NUMA_DIRECT)
940 return 0;
941
942 /*
943 * This code is called for each node, introducing N^2 complexity,
944 * which should be ok given the number of nodes rarely exceeds 8.
945 */
946 for_each_online_node(node) {
947 unsigned long faults;
948 int dist = node_distance(nid, node);
949
950 /*
951 * The furthest away nodes in the system are not interesting
952 * for placement; nid was already counted.
953 */
954 if (dist == sched_max_numa_distance || node == nid)
955 continue;
956
957 /*
958 * On systems with a backplane NUMA topology, compare groups
959 * of nodes, and move tasks towards the group with the most
960 * memory accesses. When comparing two nodes at distance
961 * "hoplimit", only nodes closer by than "hoplimit" are part
962 * of each group. Skip other nodes.
963 */
964 if (sched_numa_topology_type == NUMA_BACKPLANE &&
965 dist > maxdist)
966 continue;
967
968 /* Add up the faults from nearby nodes. */
969 if (task)
970 faults = task_faults(p, node);
971 else
972 faults = group_faults(p, node);
973
974 /*
975 * On systems with a glueless mesh NUMA topology, there are
976 * no fixed "groups of nodes". Instead, nodes that are not
977 * directly connected bounce traffic through intermediate
978 * nodes; a numa_group can occupy any set of nodes.
979 * The further away a node is, the less the faults count.
980 * This seems to result in good task placement.
981 */
982 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
983 faults *= (sched_max_numa_distance - dist);
984 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
985 }
986
987 score += faults;
988 }
989
990 return score;
991 }
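/*
 * Worked example of the glueless-mesh scaling above, with illustrative
 * distances: if LOCAL_DISTANCE = 10 and sched_max_numa_distance = 40,
 * faults on a node at distance 20 from @nid count at
 * (40 - 20) / (40 - 10) = 2/3 weight, and faults on a node at distance
 * 30 count at only 1/3, so nearby nodes contribute far more to the
 * score than far-away ones.
 */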
992
993 /*
994 * These return the fraction of accesses done by a particular task, or
995 * task group, on a particular numa node. The group weight is given a
996 * larger multiplier, in order to group tasks together that are almost
997 * evenly spread out between numa nodes.
998 */
999 static inline unsigned long task_weight(struct task_struct *p, int nid,
1000 int dist)
1001 {
1002 unsigned long faults, total_faults;
1003
1004 if (!p->numa_faults_memory)
1005 return 0;
1006
1007 total_faults = p->total_numa_faults;
1008
1009 if (!total_faults)
1010 return 0;
1011
1012 faults = task_faults(p, nid);
1013 faults += score_nearby_nodes(p, nid, dist, true);
1014
1015 return 1000 * faults / total_faults;
1016 }
1017
1018 static inline unsigned long group_weight(struct task_struct *p, int nid,
1019 int dist)
1020 {
1021 unsigned long faults, total_faults;
1022
1023 if (!p->numa_group)
1024 return 0;
1025
1026 total_faults = p->numa_group->total_faults;
1027
1028 if (!total_faults)
1029 return 0;
1030
1031 faults = group_faults(p, nid);
1032 faults += score_nearby_nodes(p, nid, dist, false);
1033
1034 return 1000 * faults / total_faults;
1035 }
1036
1037 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1038 int src_nid, int dst_cpu)
1039 {
1040 struct numa_group *ng = p->numa_group;
1041 int dst_nid = cpu_to_node(dst_cpu);
1042 int last_cpupid, this_cpupid;
1043
1044 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1045
1046 /*
1047 * Multi-stage node selection is used in conjunction with a periodic
1048 * migration fault to build a temporal task<->page relation. By using
1049 * a two-stage filter we remove short/unlikely relations.
1050 *
1051 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1052 * a task's usage of a particular page (n_p) per total usage of this
1053 * page (n_t) (in a given time-span) to a probability.
1054 *
1055 * Our periodic faults will sample this probability and getting the
1056 * same result twice in a row, given these samples are fully
1057 * independent, is then given by P(n)^2, provided our sample period
1058 * is sufficiently short compared to the usage pattern.
1059 *
1060 * This quadratic squishes small probabilities, making it less likely we
1061 * act on an unlikely task<->page relation.
1062 */
1063 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1064 if (!cpupid_pid_unset(last_cpupid) &&
1065 cpupid_to_nid(last_cpupid) != dst_nid)
1066 return false;
1067
1068 /* Always allow migrate on private faults */
1069 if (cpupid_match_pid(p, last_cpupid))
1070 return true;
1071
1072 /* A shared fault, but p->numa_group has not been set up yet. */
1073 if (!ng)
1074 return true;
1075
1076 /*
1077 * Do not migrate if the destination is not a node that
1078 * is actively used by this numa group.
1079 */
1080 if (!node_isset(dst_nid, ng->active_nodes))
1081 return false;
1082
1083 /*
1084 * Source is a node that is not actively used by this
1085 * numa group, while the destination is. Migrate.
1086 */
1087 if (!node_isset(src_nid, ng->active_nodes))
1088 return true;
1089
1090 /*
1091 * Both source and destination are nodes in active
1092 * use by this numa group. Maximize memory bandwidth
1093 * by migrating from more heavily used groups, to less
1094 * heavily used ones, spreading the load around.
1095 * Use a 1/4 hysteresis to avoid spurious page movement.
1096 */
1097 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1098 }
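/*
 * Worked example of the two-stage filter above: if a task accounts for
 * half of the accesses to a page (P = 0.5), the chance that two
 * consecutive hinting faults both attribute the page to it is
 * 0.5^2 = 0.25, while a task doing 90% of the accesses passes with
 * probability 0.9^2 = 0.81; weak task<->page relations are squashed
 * much harder than strong ones.
 */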
1099
1100 static unsigned long weighted_cpuload(const int cpu);
1101 static unsigned long source_load(int cpu, int type);
1102 static unsigned long target_load(int cpu, int type);
1103 static unsigned long capacity_of(int cpu);
1104 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1105
1106 /* Cached statistics for all CPUs within a node */
1107 struct numa_stats {
1108 unsigned long nr_running;
1109 unsigned long load;
1110
1111 /* Total compute capacity of CPUs on a node */
1112 unsigned long compute_capacity;
1113
1114 /* Approximate capacity in terms of runnable tasks on a node */
1115 unsigned long task_capacity;
1116 int has_free_capacity;
1117 };
1118
1119 /*
1120 * XXX borrowed from update_sg_lb_stats
1121 */
1122 static void update_numa_stats(struct numa_stats *ns, int nid)
1123 {
1124 int smt, cpu, cpus = 0;
1125 unsigned long capacity;
1126
1127 memset(ns, 0, sizeof(*ns));
1128 for_each_cpu(cpu, cpumask_of_node(nid)) {
1129 struct rq *rq = cpu_rq(cpu);
1130
1131 ns->nr_running += rq->nr_running;
1132 ns->load += weighted_cpuload(cpu);
1133 ns->compute_capacity += capacity_of(cpu);
1134
1135 cpus++;
1136 }
1137
1138 /*
1139 * If we raced with hotplug and there are no CPUs left in our mask
1140 * the @ns structure is NULL'ed and task_numa_compare() will
1141 * not find this node attractive.
1142 *
1143 * We'll either bail at !has_free_capacity, or we'll detect a huge
1144 * imbalance and bail there.
1145 */
1146 if (!cpus)
1147 return;
1148
1149 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1150 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1151 capacity = cpus / smt; /* cores */
1152
1153 ns->task_capacity = min_t(unsigned, capacity,
1154 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1155 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1156 }
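/*
 * Worked example for the capacity estimate above, assuming a node with
 * 8 logical CPUs that are 2-way SMT siblings contributing roughly 589
 * capacity each (a typical SMT value): compute_capacity ~= 4712, so
 * smt = DIV_ROUND_UP(1024 * 8, 4712) = 2 and capacity = 8 / 2 = 4
 * cores.  task_capacity = min(4, DIV_ROUND_CLOSEST(4712, 1024)) = 4,
 * and the node is considered to have free capacity while fewer than 4
 * tasks are running on it.
 */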
1157
1158 struct task_numa_env {
1159 struct task_struct *p;
1160
1161 int src_cpu, src_nid;
1162 int dst_cpu, dst_nid;
1163
1164 struct numa_stats src_stats, dst_stats;
1165
1166 int imbalance_pct;
1167 int dist;
1168
1169 struct task_struct *best_task;
1170 long best_imp;
1171 int best_cpu;
1172 };
1173
1174 static void task_numa_assign(struct task_numa_env *env,
1175 struct task_struct *p, long imp)
1176 {
1177 if (env->best_task)
1178 put_task_struct(env->best_task);
1179 if (p)
1180 get_task_struct(p);
1181
1182 env->best_task = p;
1183 env->best_imp = imp;
1184 env->best_cpu = env->dst_cpu;
1185 }
1186
1187 static bool load_too_imbalanced(long src_load, long dst_load,
1188 struct task_numa_env *env)
1189 {
1190 long imb, old_imb;
1191 long orig_src_load, orig_dst_load;
1192 long src_capacity, dst_capacity;
1193
1194 /*
1195 * The load is corrected for the CPU capacity available on each node.
1196 *
1197 * src_load dst_load
1198 * ------------ vs ---------
1199 * src_capacity dst_capacity
1200 */
1201 src_capacity = env->src_stats.compute_capacity;
1202 dst_capacity = env->dst_stats.compute_capacity;
1203
1204 /* We care about the slope of the imbalance, not the direction. */
1205 if (dst_load < src_load)
1206 swap(dst_load, src_load);
1207
1208 /* Is the difference below the threshold? */
1209 imb = dst_load * src_capacity * 100 -
1210 src_load * dst_capacity * env->imbalance_pct;
1211 if (imb <= 0)
1212 return false;
1213
1214 /*
1215 * The imbalance is above the allowed threshold.
1216 * Compare it with the old imbalance.
1217 */
1218 orig_src_load = env->src_stats.load;
1219 orig_dst_load = env->dst_stats.load;
1220
1221 if (orig_dst_load < orig_src_load)
1222 swap(orig_dst_load, orig_src_load);
1223
1224 old_imb = orig_dst_load * src_capacity * 100 -
1225 orig_src_load * dst_capacity * env->imbalance_pct;
1226
1227 /* Would this change make things worse? */
1228 return (imb > old_imb);
1229 }
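/*
 * Worked example, assuming equal compute capacity on both nodes and the
 * default imbalance_pct of 112: a move that leaves src_load = 1000 and
 * dst_load = 1200 gives imb = 1200 * 100 - 1000 * 112 = 8000 > 0, so
 * the allowed slack is exceeded.  But if the loads before the move were
 * 900 and 1300, old_imb = 1300 * 100 - 900 * 112 = 29200, and since
 * 8000 < 29200 the move still reduces the imbalance and is allowed.
 */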
1230
1231 /*
1232 * This checks whether the overall compute and NUMA accesses of the system
1233 * would be improved if the source task were migrated to the target dst_cpu,
1234 * taking into account that it might be best to exchange it with the task
1235 * currently running on dst_cpu.
1236 */
1237 static void task_numa_compare(struct task_numa_env *env,
1238 long taskimp, long groupimp)
1239 {
1240 struct rq *src_rq = cpu_rq(env->src_cpu);
1241 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1242 struct task_struct *cur;
1243 long src_load, dst_load;
1244 long load;
1245 long imp = env->p->numa_group ? groupimp : taskimp;
1246 long moveimp = imp;
1247 int dist = env->dist;
1248
1249 rcu_read_lock();
1250
1251 raw_spin_lock_irq(&dst_rq->lock);
1252 cur = dst_rq->curr;
1253 /*
1254 * No need to move the exiting task, and this ensures that ->curr
1255 * wasn't reaped and thus get_task_struct() in task_numa_assign()
1256 * is safe under RCU read lock.
1257 * Note that rcu_read_lock() itself can't protect from the final
1258 * put_task_struct() after the last schedule().
1259 */
1260 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1261 cur = NULL;
1262 raw_spin_unlock_irq(&dst_rq->lock);
1263
1264 /*
1265 * "imp" is the fault differential for the source task between the
1266 * source and destination node. Calculate the total differential for
1267 * the source task and potential destination task. The more negative
1268 * the value is, the more remote accesses would be expected to be
1269 * incurred if the tasks were swapped.
1270 */
1271 if (cur) {
1272 /* Skip this swap candidate if cannot move to the source cpu */
1273 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1274 goto unlock;
1275
1276 /*
1277 * If dst and source tasks are in the same NUMA group, or not
1278 * in any group then look only at task weights.
1279 */
1280 if (cur->numa_group == env->p->numa_group) {
1281 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1282 task_weight(cur, env->dst_nid, dist);
1283 /*
1284 * Add some hysteresis to prevent swapping the
1285 * tasks within a group over tiny differences.
1286 */
1287 if (cur->numa_group)
1288 imp -= imp/16;
1289 } else {
1290 /*
1291 * Compare the group weights. If a task is all by
1292 * itself (not part of a group), use the task weight
1293 * instead.
1294 */
1295 if (cur->numa_group)
1296 imp += group_weight(cur, env->src_nid, dist) -
1297 group_weight(cur, env->dst_nid, dist);
1298 else
1299 imp += task_weight(cur, env->src_nid, dist) -
1300 task_weight(cur, env->dst_nid, dist);
1301 }
1302 }
1303
1304 if (imp <= env->best_imp && moveimp <= env->best_imp)
1305 goto unlock;
1306
1307 if (!cur) {
1308 /* Is there capacity at our destination? */
1309 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1310 !env->dst_stats.has_free_capacity)
1311 goto unlock;
1312
1313 goto balance;
1314 }
1315
1316 /* Balance doesn't matter much if we're running a task per cpu */
1317 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1318 dst_rq->nr_running == 1)
1319 goto assign;
1320
1321 /*
1322 * In the overloaded case, try and keep the load balanced.
1323 */
1324 balance:
1325 load = task_h_load(env->p);
1326 dst_load = env->dst_stats.load + load;
1327 src_load = env->src_stats.load - load;
1328
1329 if (moveimp > imp && moveimp > env->best_imp) {
1330 /*
1331 * If the improvement from just moving env->p in this direction is
1332 * better than swapping tasks around, check if a move is
1333 * possible. Store a slightly smaller score than moveimp,
1334 * so an actually idle CPU will win.
1335 */
1336 if (!load_too_imbalanced(src_load, dst_load, env)) {
1337 imp = moveimp - 1;
1338 cur = NULL;
1339 goto assign;
1340 }
1341 }
1342
1343 if (imp <= env->best_imp)
1344 goto unlock;
1345
1346 if (cur) {
1347 load = task_h_load(cur);
1348 dst_load -= load;
1349 src_load += load;
1350 }
1351
1352 if (load_too_imbalanced(src_load, dst_load, env))
1353 goto unlock;
1354
1355 /*
1356 * One idle CPU per node is evaluated for a task numa move.
1357 * Call select_idle_sibling to maybe find a better one.
1358 */
1359 if (!cur)
1360 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1361
1362 assign:
1363 task_numa_assign(env, cur, imp);
1364 unlock:
1365 rcu_read_unlock();
1366 }
1367
1368 static void task_numa_find_cpu(struct task_numa_env *env,
1369 long taskimp, long groupimp)
1370 {
1371 int cpu;
1372
1373 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1374 /* Skip this CPU if the source task cannot migrate */
1375 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1376 continue;
1377
1378 env->dst_cpu = cpu;
1379 task_numa_compare(env, taskimp, groupimp);
1380 }
1381 }
1382
1383 static int task_numa_migrate(struct task_struct *p)
1384 {
1385 struct task_numa_env env = {
1386 .p = p,
1387
1388 .src_cpu = task_cpu(p),
1389 .src_nid = task_node(p),
1390
1391 .imbalance_pct = 112,
1392
1393 .best_task = NULL,
1394 .best_imp = 0,
1395 .best_cpu = -1
1396 };
1397 struct sched_domain *sd;
1398 unsigned long taskweight, groupweight;
1399 int nid, ret, dist;
1400 long taskimp, groupimp;
1401
1402 /*
1403 * Pick the lowest SD_NUMA domain, as that would have the smallest
1404 * imbalance and would be the first to start moving tasks about.
1405 *
1406 * And we want to avoid any moving of tasks about, as that would create
1407 * random movement of tasks -- counter to the NUMA conditions we're trying
1408 * to satisfy here.
1409 */
1410 rcu_read_lock();
1411 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1412 if (sd)
1413 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1414 rcu_read_unlock();
1415
1416 /*
1417 * Cpusets can break the scheduler domain tree into smaller
1418 * balance domains, some of which do not cross NUMA boundaries.
1419 * Tasks that are "trapped" in such domains cannot be migrated
1420 * elsewhere, so there is no point in (re)trying.
1421 */
1422 if (unlikely(!sd)) {
1423 p->numa_preferred_nid = task_node(p);
1424 return -EINVAL;
1425 }
1426
1427 env.dst_nid = p->numa_preferred_nid;
1428 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1429 taskweight = task_weight(p, env.src_nid, dist);
1430 groupweight = group_weight(p, env.src_nid, dist);
1431 update_numa_stats(&env.src_stats, env.src_nid);
1432 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1433 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1434 update_numa_stats(&env.dst_stats, env.dst_nid);
1435
1436 /* Try to find a spot on the preferred nid. */
1437 task_numa_find_cpu(&env, taskimp, groupimp);
1438
1439 /* No space available on the preferred nid. Look elsewhere. */
1440 if (env.best_cpu == -1) {
1441 for_each_online_node(nid) {
1442 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1443 continue;
1444
1445 dist = node_distance(env.src_nid, env.dst_nid);
1446 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1447 dist != env.dist) {
1448 taskweight = task_weight(p, env.src_nid, dist);
1449 groupweight = group_weight(p, env.src_nid, dist);
1450 }
1451
1452 /* Only consider nodes where both task and groups benefit */
1453 taskimp = task_weight(p, nid, dist) - taskweight;
1454 groupimp = group_weight(p, nid, dist) - groupweight;
1455 if (taskimp < 0 && groupimp < 0)
1456 continue;
1457
1458 env.dist = dist;
1459 env.dst_nid = nid;
1460 update_numa_stats(&env.dst_stats, env.dst_nid);
1461 task_numa_find_cpu(&env, taskimp, groupimp);
1462 }
1463 }
1464
1465 /*
1466 * If the task is part of a workload that spans multiple NUMA nodes,
1467 * and is migrating into one of the workload's active nodes, remember
1468 * this node as the task's preferred numa node, so the workload can
1469 * settle down.
1470 * A task that migrated to a second choice node will be better off
1471 * trying for a better one later. Do not set the preferred node here.
1472 */
1473 if (p->numa_group) {
1474 if (env.best_cpu == -1)
1475 nid = env.src_nid;
1476 else
1477 nid = env.dst_nid;
1478
1479 if (node_isset(nid, p->numa_group->active_nodes))
1480 sched_setnuma(p, env.dst_nid);
1481 }
1482
1483 /* No better CPU than the current one was found. */
1484 if (env.best_cpu == -1)
1485 return -EAGAIN;
1486
1487 /*
1488 * Reset the scan period if the task is being rescheduled on an
1489 * alternative node to recheck if the task is now properly placed.
1490 */
1491 p->numa_scan_period = task_scan_min(p);
1492
1493 if (env.best_task == NULL) {
1494 ret = migrate_task_to(p, env.best_cpu);
1495 if (ret != 0)
1496 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1497 return ret;
1498 }
1499
1500 ret = migrate_swap(p, env.best_task);
1501 if (ret != 0)
1502 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1503 put_task_struct(env.best_task);
1504 return ret;
1505 }
1506
1507 /* Attempt to migrate a task to a CPU on the preferred node. */
1508 static void numa_migrate_preferred(struct task_struct *p)
1509 {
1510 unsigned long interval = HZ;
1511
1512 /* This task has no NUMA fault statistics yet */
1513 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
1514 return;
1515
1516 /* Periodically retry migrating the task to the preferred node */
1517 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1518 p->numa_migrate_retry = jiffies + interval;
1519
1520 /* Success if task is already running on preferred CPU */
1521 if (task_node(p) == p->numa_preferred_nid)
1522 return;
1523
1524 /* Otherwise, try migrate to a CPU on the preferred node */
1525 task_numa_migrate(p);
1526 }
1527
1528 /*
1529 * Find the nodes on which the workload is actively running. We do this by
1530 * tracking the nodes from which NUMA hinting faults are triggered. This can
1531 * be different from the set of nodes where the workload's memory is currently
1532 * located.
1533 *
1534 * The bitmask is used to make smarter decisions on when to do NUMA page
1535 * migrations. To prevent flip-flopping and excessive page migrations, nodes
1536 * are added when they cause over 6/16 of the maximum number of faults, but
1537 * only removed when they drop below 3/16.
1538 */
1539 static void update_numa_active_node_mask(struct numa_group *numa_group)
1540 {
1541 unsigned long faults, max_faults = 0;
1542 int nid;
1543
1544 for_each_online_node(nid) {
1545 faults = group_faults_cpu(numa_group, nid);
1546 if (faults > max_faults)
1547 max_faults = faults;
1548 }
1549
1550 for_each_online_node(nid) {
1551 faults = group_faults_cpu(numa_group, nid);
1552 if (!node_isset(nid, numa_group->active_nodes)) {
1553 if (faults > max_faults * 6 / 16)
1554 node_set(nid, numa_group->active_nodes);
1555 } else if (faults < max_faults * 3 / 16)
1556 node_clear(nid, numa_group->active_nodes);
1557 }
1558 }
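/*
 * Worked example of the hysteresis above, with illustrative numbers: if
 * the busiest node shows max_faults = 1600 hinting faults, another node
 * is added to the active set once it exceeds 1600 * 6/16 = 600 faults,
 * but is only removed again if it drops below 1600 * 3/16 = 300, which
 * keeps nodes from flapping in and out of the mask.
 */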
1559
1560 /*
1561 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1562 * increments. The more local the fault statistics are, the higher the scan
1563 * period will be for the next scan window. If local/(local+remote) ratio is
1564 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1565 * the scan period will decrease. Aim for 70% local accesses.
1566 */
1567 #define NUMA_PERIOD_SLOTS 10
1568 #define NUMA_PERIOD_THRESHOLD 7
1569
1570 /*
1571 * Increase the scan period (slow down scanning) if the majority of
1572 * our memory is already on our local node, or if the majority of
1573 * the page accesses are shared with other processes.
1574 * Otherwise, decrease the scan period.
1575 */
1576 static void update_task_scan_period(struct task_struct *p,
1577 unsigned long shared, unsigned long private)
1578 {
1579 unsigned int period_slot;
1580 int ratio;
1581 int diff;
1582
1583 unsigned long remote = p->numa_faults_locality[0];
1584 unsigned long local = p->numa_faults_locality[1];
1585
1586 /*
1587 * If there were no recorded hinting faults then either the task is
1588 * completely idle or all activity is in areas that are not of interest
1589 * to automatic NUMA balancing. Scan slower.
1590 */
1591 if (local + shared == 0) {
1592 p->numa_scan_period = min(p->numa_scan_period_max,
1593 p->numa_scan_period << 1);
1594
1595 p->mm->numa_next_scan = jiffies +
1596 msecs_to_jiffies(p->numa_scan_period);
1597
1598 return;
1599 }
1600
1601 /*
1602 * Prepare to scale scan period relative to the current period.
1603 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1604 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1605 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1606 */
1607 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1608 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1609 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1610 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1611 if (!slot)
1612 slot = 1;
1613 diff = slot * period_slot;
1614 } else {
1615 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1616
1617 /*
1618 * Scale scan rate increases based on sharing. There is an
1619 * inverse relationship between the degree of sharing and
1620 * the adjustment made to the scanning period. Broadly
1621 * speaking the intent is that there is little point
1622 * scanning faster if shared accesses dominate as it may
1623 * simply bounce migrations uselessly
1624 */
1625 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1626 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1627 }
1628
1629 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1630 task_scan_min(p), task_scan_max(p));
1631 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1632 }
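/*
 * Worked example with a 1000ms scan period (period_slot = 100ms): if 9
 * out of 10 recent faults were local, ratio = 9 >= NUMA_PERIOD_THRESHOLD,
 * so diff = (9 - 7) * 100ms = +200ms and scanning slows down.  If only
 * 3 out of 10 were local, diff starts at -(7 - 3) * 100ms = -400ms and
 * is then scaled by the private fault fraction; with 9 private vs 1
 * shared fault that leaves roughly -360ms, so the task scans faster.
 */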
1633
1634 /*
1635 * Get the fraction of time the task has been running since the last
1636 * NUMA placement cycle. The scheduler keeps similar statistics, but
1637 * decays those on a 32ms period, which is orders of magnitude off
1638 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1639 * stats only if the task is so new there are no NUMA statistics yet.
1640 */
1641 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1642 {
1643 u64 runtime, delta, now;
1644 /* Use the start of this time slice to avoid calculations. */
1645 now = p->se.exec_start;
1646 runtime = p->se.sum_exec_runtime;
1647
1648 if (p->last_task_numa_placement) {
1649 delta = runtime - p->last_sum_exec_runtime;
1650 *period = now - p->last_task_numa_placement;
1651 } else {
1652 delta = p->se.avg.runnable_avg_sum;
1653 *period = p->se.avg.runnable_avg_period;
1654 }
1655
1656 p->last_sum_exec_runtime = runtime;
1657 p->last_task_numa_placement = now;
1658
1659 return delta;
1660 }
1661
1662 /*
1663 * Determine the preferred nid for a task in a numa_group. This needs to
1664 * be done in a way that produces consistent results with group_weight,
1665 * otherwise workloads might not converge.
1666 */
1667 static int preferred_group_nid(struct task_struct *p, int nid)
1668 {
1669 nodemask_t nodes;
1670 int dist;
1671
1672 /* Direct connections between all NUMA nodes. */
1673 if (sched_numa_topology_type == NUMA_DIRECT)
1674 return nid;
1675
1676 /*
1677 * On a system with glueless mesh NUMA topology, group_weight
1678 * scores nodes according to the number of NUMA hinting faults on
1679 * both the node itself, and on nearby nodes.
1680 */
1681 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1682 unsigned long score, max_score = 0;
1683 int node, max_node = nid;
1684
1685 dist = sched_max_numa_distance;
1686
1687 for_each_online_node(node) {
1688 score = group_weight(p, node, dist);
1689 if (score > max_score) {
1690 max_score = score;
1691 max_node = node;
1692 }
1693 }
1694 return max_node;
1695 }
1696
1697 /*
1698 * Finding the preferred nid in a system with NUMA backplane
1699 * interconnect topology is more involved. The goal is to locate
1700 * tasks from numa_groups near each other in the system, and
1701 * untangle workloads from different sides of the system. This requires
1702 * searching down the hierarchy of node groups, recursively searching
1703 * inside the highest scoring group of nodes. The nodemask tricks
1704 * keep the complexity of the search down.
1705 */
1706 nodes = node_online_map;
1707 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1708 unsigned long max_faults = 0;
1709 nodemask_t max_group;
1710 int a, b;
1711
1712 /* Are there nodes at this distance from each other? */
1713 if (!find_numa_distance(dist))
1714 continue;
1715
1716 for_each_node_mask(a, nodes) {
1717 unsigned long faults = 0;
1718 nodemask_t this_group;
1719 nodes_clear(this_group);
1720
1721 /* Sum group's NUMA faults; includes a==b case. */
1722 for_each_node_mask(b, nodes) {
1723 if (node_distance(a, b) < dist) {
1724 faults += group_faults(p, b);
1725 node_set(b, this_group);
1726 node_clear(b, nodes);
1727 }
1728 }
1729
1730 /* Remember the top group. */
1731 if (faults > max_faults) {
1732 max_faults = faults;
1733 max_group = this_group;
1734 /*
1735 * subtle: at the smallest distance there is
1736 * just one node left in each "group", the
1737 * winner is the preferred nid.
1738 */
1739 nid = a;
1740 }
1741 }
1742 /* Next round, evaluate the nodes within max_group. */
1743 nodes = max_group;
1744 }
1745 return nid;
1746 }
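/*
 * Worked example of the backplane search above, assuming an 8-node
 * machine with sched_max_numa_distance = 40: the first pass (dist = 40)
 * splits the online nodes into groups whose members are all closer than
 * 40 to each other, e.g. two 4-node halves, and keeps the half with the
 * most group faults.  The next qualifying distance partitions that half
 * further, and so on, until only single-node "groups" remain and the
 * winner of the last round is returned as the preferred nid.
 */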
1747
1748 static void task_numa_placement(struct task_struct *p)
1749 {
1750 int seq, nid, max_nid = -1, max_group_nid = -1;
1751 unsigned long max_faults = 0, max_group_faults = 0;
1752 unsigned long fault_types[2] = { 0, 0 };
1753 unsigned long total_faults;
1754 u64 runtime, period;
1755 spinlock_t *group_lock = NULL;
1756
1757 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1758 if (p->numa_scan_seq == seq)
1759 return;
1760 p->numa_scan_seq = seq;
1761 p->numa_scan_period_max = task_scan_max(p);
1762
1763 total_faults = p->numa_faults_locality[0] +
1764 p->numa_faults_locality[1];
1765 runtime = numa_get_avg_runtime(p, &period);
1766
1767 /* If the task is part of a group prevent parallel updates to group stats */
1768 if (p->numa_group) {
1769 group_lock = &p->numa_group->lock;
1770 spin_lock_irq(group_lock);
1771 }
1772
1773 /* Find the node with the highest number of faults */
1774 for_each_online_node(nid) {
1775 unsigned long faults = 0, group_faults = 0;
1776 int priv, i;
1777
1778 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1779 long diff, f_diff, f_weight;
1780
1781 i = task_faults_idx(nid, priv);
1782
1783 /* Decay existing window, copy faults since last scan */
1784 diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
1785 fault_types[priv] += p->numa_faults_buffer_memory[i];
1786 p->numa_faults_buffer_memory[i] = 0;
1787
1788 /*
1789 * Normalize the faults_from, so all tasks in a group
1790 * count according to CPU use, instead of by the raw
1791 * number of faults. Tasks with little runtime have
1792 * little over-all impact on throughput, and thus their
1793 * faults are less important.
1794 */
1795 f_weight = div64_u64(runtime << 16, period + 1);
1796 f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
1797 (total_faults + 1);
1798 f_diff = f_weight - p->numa_faults_cpu[i] / 2;
1799 p->numa_faults_buffer_cpu[i] = 0;
1800
1801 p->numa_faults_memory[i] += diff;
1802 p->numa_faults_cpu[i] += f_diff;
1803 faults += p->numa_faults_memory[i];
1804 p->total_numa_faults += diff;
1805 if (p->numa_group) {
1806 /* safe because we can only change our own group */
1807 p->numa_group->faults[i] += diff;
1808 p->numa_group->faults_cpu[i] += f_diff;
1809 p->numa_group->total_faults += diff;
1810 group_faults += p->numa_group->faults[i];
1811 }
1812 }
1813
1814 if (faults > max_faults) {
1815 max_faults = faults;
1816 max_nid = nid;
1817 }
1818
1819 if (group_faults > max_group_faults) {
1820 max_group_faults = group_faults;
1821 max_group_nid = nid;
1822 }
1823 }
1824
1825 update_task_scan_period(p, fault_types[0], fault_types[1]);
1826
1827 if (p->numa_group) {
1828 update_numa_active_node_mask(p->numa_group);
1829 spin_unlock_irq(group_lock);
1830 max_nid = preferred_group_nid(p, max_group_nid);
1831 }
1832
1833 if (max_faults) {
1834 /* Set the new preferred node */
1835 if (max_nid != p->numa_preferred_nid)
1836 sched_setnuma(p, max_nid);
1837
1838 if (task_node(p) != p->numa_preferred_nid)
1839 numa_migrate_preferred(p);
1840 }
1841 }
1842
1843 static inline int get_numa_group(struct numa_group *grp)
1844 {
1845 return atomic_inc_not_zero(&grp->refcount);
1846 }
1847
1848 static inline void put_numa_group(struct numa_group *grp)
1849 {
1850 if (atomic_dec_and_test(&grp->refcount))
1851 kfree_rcu(grp, rcu);
1852 }
1853
1854 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1855 int *priv)
1856 {
1857 struct numa_group *grp, *my_grp;
1858 struct task_struct *tsk;
1859 bool join = false;
1860 int cpu = cpupid_to_cpu(cpupid);
1861 int i;
1862
1863 if (unlikely(!p->numa_group)) {
1864 unsigned int size = sizeof(struct numa_group) +
1865 4*nr_node_ids*sizeof(unsigned long);
1866
1867 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1868 if (!grp)
1869 return;
1870
1871 atomic_set(&grp->refcount, 1);
1872 spin_lock_init(&grp->lock);
1873 INIT_LIST_HEAD(&grp->task_list);
1874 grp->gid = p->pid;
1875 /* Second half of the array tracks nids where faults happen */
1876 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1877 nr_node_ids;
1878
1879 node_set(task_node(current), grp->active_nodes);
1880
1881 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1882 grp->faults[i] = p->numa_faults_memory[i];
1883
1884 grp->total_faults = p->total_numa_faults;
1885
1886 list_add(&p->numa_entry, &grp->task_list);
1887 grp->nr_tasks++;
1888 rcu_assign_pointer(p->numa_group, grp);
1889 }
1890
1891 rcu_read_lock();
1892 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1893
1894 if (!cpupid_match_pid(tsk, cpupid))
1895 goto no_join;
1896
1897 grp = rcu_dereference(tsk->numa_group);
1898 if (!grp)
1899 goto no_join;
1900
1901 my_grp = p->numa_group;
1902 if (grp == my_grp)
1903 goto no_join;
1904
1905 /*
1906 * Only join the other group if it's bigger; if we're the bigger group,
1907 * the other task will join us.
1908 */
1909 if (my_grp->nr_tasks > grp->nr_tasks)
1910 goto no_join;
1911
1912 /*
1913 * Tie-break on the grp address.
1914 */
1915 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1916 goto no_join;
1917
1918 /* Always join threads in the same process. */
1919 if (tsk->mm == current->mm)
1920 join = true;
1921
1922 /* Simple filter to avoid false positives due to PID collisions */
1923 if (flags & TNF_SHARED)
1924 join = true;
1925
1926 /* Update priv based on whether false sharing was detected */
1927 *priv = !join;
1928
1929 if (join && !get_numa_group(grp))
1930 goto no_join;
1931
1932 rcu_read_unlock();
1933
1934 if (!join)
1935 return;
1936
1937 BUG_ON(irqs_disabled());
1938 double_lock_irq(&my_grp->lock, &grp->lock);
1939
1940 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
1941 my_grp->faults[i] -= p->numa_faults_memory[i];
1942 grp->faults[i] += p->numa_faults_memory[i];
1943 }
1944 my_grp->total_faults -= p->total_numa_faults;
1945 grp->total_faults += p->total_numa_faults;
1946
1947 list_move(&p->numa_entry, &grp->task_list);
1948 my_grp->nr_tasks--;
1949 grp->nr_tasks++;
1950
1951 spin_unlock(&my_grp->lock);
1952 spin_unlock_irq(&grp->lock);
1953
1954 rcu_assign_pointer(p->numa_group, grp);
1955
1956 put_numa_group(my_grp);
1957 return;
1958
1959 no_join:
1960 rcu_read_unlock();
1961 return;
1962 }
1963
1964 void task_numa_free(struct task_struct *p)
1965 {
1966 struct numa_group *grp = p->numa_group;
1967 void *numa_faults = p->numa_faults_memory;
1968 unsigned long flags;
1969 int i;
1970
1971 if (grp) {
1972 spin_lock_irqsave(&grp->lock, flags);
1973 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1974 grp->faults[i] -= p->numa_faults_memory[i];
1975 grp->total_faults -= p->total_numa_faults;
1976
1977 list_del(&p->numa_entry);
1978 grp->nr_tasks--;
1979 spin_unlock_irqrestore(&grp->lock, flags);
1980 RCU_INIT_POINTER(p->numa_group, NULL);
1981 put_numa_group(grp);
1982 }
1983
1984 p->numa_faults_memory = NULL;
1985 p->numa_faults_buffer_memory = NULL;
1986 p->numa_faults_cpu = NULL;
1987 p->numa_faults_buffer_cpu = NULL;
1988 kfree(numa_faults);
1989 }
1990
1991 /*
1992 * Got a PROT_NONE fault for a page on @node.
1993 */
1994 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
1995 {
1996 struct task_struct *p = current;
1997 bool migrated = flags & TNF_MIGRATED;
1998 int cpu_node = task_node(current);
1999 int local = !!(flags & TNF_FAULT_LOCAL);
2000 int priv;
2001
2002 if (!numabalancing_enabled)
2003 return;
2004
2005 /* for example, ksmd faulting in a user's mm */
2006 if (!p->mm)
2007 return;
2008
2009 /* Allocate buffer to track faults on a per-node basis */
2010 if (unlikely(!p->numa_faults_memory)) {
2011 int size = sizeof(*p->numa_faults_memory) *
2012 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2013
2014 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2015 if (!p->numa_faults_memory)
2016 return;
2017
2018 BUG_ON(p->numa_faults_buffer_memory);
2019 /*
2020 * The averaged statistics, shared & private, memory & cpu,
2021 * occupy the first half of the array. The second half of the
2022 * array is for current counters, which are averaged into the
2023 * first set by task_numa_placement.
2024 */
2025 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
2026 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
2027 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
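/*
 * A minimal sketch of the resulting layout, assuming a machine with
 * nr_node_ids == 2 and the two hinting fault types (private/shared)
 * packed per node by task_faults_idx() (indices illustrative only):
 *
 *   [ 0.. 3] numa_faults_memory        (decayed memory-fault averages)
 *   [ 4.. 7] numa_faults_cpu           (decayed cpu-fault averages)
 *   [ 8..11] numa_faults_buffer_memory (memory faults since the last scan)
 *   [12..15] numa_faults_buffer_cpu    (cpu faults since the last scan)
 */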
2028 p->total_numa_faults = 0;
2029 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2030 }
2031
2032 /*
2033 * First accesses are treated as private, otherwise consider accesses
2034 * to be private if the accessing pid has not changed
2035 */
2036 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2037 priv = 1;
2038 } else {
2039 priv = cpupid_match_pid(p, last_cpupid);
2040 if (!priv && !(flags & TNF_NO_GROUP))
2041 task_numa_group(p, last_cpupid, flags, &priv);
2042 }
2043
2044 /*
2045 * If a workload spans multiple NUMA nodes, a shared fault that
2046 * occurs wholly within the set of nodes that the workload is
2047 * actively using should be counted as local. This allows the
2048 * scan rate to slow down when a workload has settled down.
2049 */
2050 if (!priv && !local && p->numa_group &&
2051 node_isset(cpu_node, p->numa_group->active_nodes) &&
2052 node_isset(mem_node, p->numa_group->active_nodes))
2053 local = 1;
2054
2055 task_numa_placement(p);
2056
2057 /*
2058 * Retry task to preferred node migration periodically, in case it
2059 * previously failed, or the scheduler moved us.
2060 */
2061 if (time_after(jiffies, p->numa_migrate_retry))
2062 numa_migrate_preferred(p);
2063
2064 if (migrated)
2065 p->numa_pages_migrated += pages;
2066
2067 p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
2068 p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
2069 p->numa_faults_locality[local] += pages;
2070 }
2071
2072 static void reset_ptenuma_scan(struct task_struct *p)
2073 {
2074 ACCESS_ONCE(p->mm->numa_scan_seq)++;
2075 p->mm->numa_scan_offset = 0;
2076 }
2077
2078 /*
2079 * The expensive part of numa migration is done from task_work context.
2080 * Triggered from task_tick_numa().
2081 */
2082 void task_numa_work(struct callback_head *work)
2083 {
2084 unsigned long migrate, next_scan, now = jiffies;
2085 struct task_struct *p = current;
2086 struct mm_struct *mm = p->mm;
2087 struct vm_area_struct *vma;
2088 unsigned long start, end;
2089 unsigned long nr_pte_updates = 0;
2090 long pages;
2091
2092 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2093
2094 work->next = work; /* protect against double add */
2095 /*
2096 * Who cares about NUMA placement when they're dying.
2097 *
2098 * NOTE: make sure not to dereference p->mm before this check,
2099 * exit_task_work() happens _after_ exit_mm() so we could be called
2100 * without p->mm even though we still had it when we enqueued this
2101 * work.
2102 */
2103 if (p->flags & PF_EXITING)
2104 return;
2105
2106 if (!mm->numa_next_scan) {
2107 mm->numa_next_scan = now +
2108 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2109 }
2110
2111 /*
2112 * Enforce maximal scan/migration frequency.
2113 */
2114 migrate = mm->numa_next_scan;
2115 if (time_before(now, migrate))
2116 return;
2117
2118 if (p->numa_scan_period == 0) {
2119 p->numa_scan_period_max = task_scan_max(p);
2120 p->numa_scan_period = task_scan_min(p);
2121 }
2122
2123 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2124 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2125 return;
2126
2127 /*
2128 * Delay this task enough that another task of this mm will likely win
2129 * the next time around.
2130 */
2131 p->node_stamp += 2 * TICK_NSEC;
2132
2133 start = mm->numa_scan_offset;
2134 pages = sysctl_numa_balancing_scan_size;
2135 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
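/*
 * For example (assuming the common defaults), a scan size of 256MB with
 * 4KiB pages (PAGE_SHIFT == 12) gives 256 << 8 == 65536 pages per scan
 * window; larger base pages shrink the shift accordingly.
 */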
2136 if (!pages)
2137 return;
2138
2139 down_read(&mm->mmap_sem);
2140 vma = find_vma(mm, start);
2141 if (!vma) {
2142 reset_ptenuma_scan(p);
2143 start = 0;
2144 vma = mm->mmap;
2145 }
2146 for (; vma; vma = vma->vm_next) {
2147 if (!vma_migratable(vma) || !vma_policy_mof(vma))
2148 continue;
2149
2150 /*
2151 * Shared library pages mapped by multiple processes are not
2152 * migrated as it is expected they are cache replicated. Avoid
2153 * hinting faults in read-only file-backed mappings or the vdso
2154 * as migrating the pages will be of marginal benefit.
2155 */
2156 if (!vma->vm_mm ||
2157 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2158 continue;
2159
2160 /*
2161 * Skip inaccessible VMAs to avoid any confusion between
2162 * PROT_NONE and NUMA hinting ptes
2163 */
2164 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2165 continue;
2166
2167 do {
2168 start = max(start, vma->vm_start);
2169 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2170 end = min(end, vma->vm_end);
2171 nr_pte_updates += change_prot_numa(vma, start, end);
2172
2173 /*
2174 * Scan sysctl_numa_balancing_scan_size but ensure that
2175 * at least one PTE is updated so that unused virtual
2176 * address space is quickly skipped.
2177 */
2178 if (nr_pte_updates)
2179 pages -= (end - start) >> PAGE_SHIFT;
2180
2181 start = end;
2182 if (pages <= 0)
2183 goto out;
2184
2185 cond_resched();
2186 } while (end != vma->vm_end);
2187 }
2188
2189 out:
2190 /*
2191 * It is possible to reach the end of the VMA list but the last few
2192 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2193 * would find the !migratable VMA on the next scan but not reset the
2194 * scanner to the start so check it now.
2195 */
2196 if (vma)
2197 mm->numa_scan_offset = start;
2198 else
2199 reset_ptenuma_scan(p);
2200 up_read(&mm->mmap_sem);
2201 }
2202
2203 /*
2204 * Drive the periodic memory faults.
2205 */
2206 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2207 {
2208 struct callback_head *work = &curr->numa_work;
2209 u64 period, now;
2210
2211 /*
2212 * We don't care about NUMA placement if we don't have memory.
2213 */
2214 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2215 return;
2216
2217 /*
2218 * Using runtime rather than walltime has the dual advantage that
2219 * we (mostly) drive the selection from busy threads and that the
2220 * task needs to have done some actual work before we bother with
2221 * NUMA placement.
2222 */
2223 now = curr->se.sum_exec_runtime;
2224 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2225
2226 if (now - curr->node_stamp > period) {
2227 if (!curr->node_stamp)
2228 curr->numa_scan_period = task_scan_min(curr);
2229 curr->node_stamp += period;
2230
2231 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2232 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2233 task_work_add(curr, work, true);
2234 }
2235 }
2236 }
2237 #else
2238 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2239 {
2240 }
2241
2242 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2243 {
2244 }
2245
2246 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2247 {
2248 }
2249 #endif /* CONFIG_NUMA_BALANCING */
2250
2251 static void
2252 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2253 {
2254 update_load_add(&cfs_rq->load, se->load.weight);
2255 if (!parent_entity(se))
2256 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2257 #ifdef CONFIG_SMP
2258 if (entity_is_task(se)) {
2259 struct rq *rq = rq_of(cfs_rq);
2260
2261 account_numa_enqueue(rq, task_of(se));
2262 list_add(&se->group_node, &rq->cfs_tasks);
2263 }
2264 #endif
2265 cfs_rq->nr_running++;
2266 }
2267
2268 static void
2269 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2270 {
2271 update_load_sub(&cfs_rq->load, se->load.weight);
2272 if (!parent_entity(se))
2273 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2274 if (entity_is_task(se)) {
2275 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2276 list_del_init(&se->group_node);
2277 }
2278 cfs_rq->nr_running--;
2279 }
2280
2281 #ifdef CONFIG_FAIR_GROUP_SCHED
2282 # ifdef CONFIG_SMP
2283 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2284 {
2285 long tg_weight;
2286
2287 /*
2288 * Use this CPU's actual weight instead of the last load_contribution
2289 * to gain a more accurate current total weight. See
2290 * update_cfs_rq_load_contribution().
2291 */
2292 tg_weight = atomic_long_read(&tg->load_avg);
2293 tg_weight -= cfs_rq->tg_load_contrib;
2294 tg_weight += cfs_rq->load.weight;
2295
2296 return tg_weight;
2297 }
2298
2299 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2300 {
2301 long tg_weight, load, shares;
2302
2303 tg_weight = calc_tg_weight(tg, cfs_rq);
2304 load = cfs_rq->load.weight;
2305
2306 shares = (tg->shares * load);
2307 if (tg_weight)
2308 shares /= tg_weight;
2309
2310 if (shares < MIN_SHARES)
2311 shares = MIN_SHARES;
2312 if (shares > tg->shares)
2313 shares = tg->shares;
2314
2315 return shares;
2316 }
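/*
 * A small example with made-up numbers: for a group with tg->shares = 2048,
 * whose cfs_rq on this CPU carries load.weight = 1024 out of a total group
 * weight of 4096 across all CPUs, the entity is reweighted to
 * 2048 * 1024 / 4096 = 512, then clamped to [MIN_SHARES, tg->shares].
 */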
2317 # else /* CONFIG_SMP */
2318 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2319 {
2320 return tg->shares;
2321 }
2322 # endif /* CONFIG_SMP */
2323 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2324 unsigned long weight)
2325 {
2326 if (se->on_rq) {
2327 /* commit outstanding execution time */
2328 if (cfs_rq->curr == se)
2329 update_curr(cfs_rq);
2330 account_entity_dequeue(cfs_rq, se);
2331 }
2332
2333 update_load_set(&se->load, weight);
2334
2335 if (se->on_rq)
2336 account_entity_enqueue(cfs_rq, se);
2337 }
2338
2339 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2340
2341 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2342 {
2343 struct task_group *tg;
2344 struct sched_entity *se;
2345 long shares;
2346
2347 tg = cfs_rq->tg;
2348 se = tg->se[cpu_of(rq_of(cfs_rq))];
2349 if (!se || throttled_hierarchy(cfs_rq))
2350 return;
2351 #ifndef CONFIG_SMP
2352 if (likely(se->load.weight == tg->shares))
2353 return;
2354 #endif
2355 shares = calc_cfs_shares(cfs_rq, tg);
2356
2357 reweight_entity(cfs_rq_of(se), se, shares);
2358 }
2359 #else /* CONFIG_FAIR_GROUP_SCHED */
2360 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2361 {
2362 }
2363 #endif /* CONFIG_FAIR_GROUP_SCHED */
2364
2365 #ifdef CONFIG_SMP
2366 /*
2367 * We choose a half-life close to 1 scheduling period.
2368 * Note: The tables below are dependent on this value.
2369 */
2370 #define LOAD_AVG_PERIOD 32
2371 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2372 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
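/*
 * For reference, these constants follow from the half-life choice above
 * (approximate arithmetic, for illustration only): y is defined by
 * y^LOAD_AVG_PERIOD = 0.5, i.e. y = 2^(-1/32) ~= 0.97857, and a permanently
 * runnable entity saturates at roughly 1024 / (1 - y) ~= 47.8k, which is
 * where the LOAD_AVG_MAX ceiling of 47742 comes from (the exact constant
 * reflects the rounded-down table entries below).
 */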
2373
2374 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2375 static const u32 runnable_avg_yN_inv[] = {
2376 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2377 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2378 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2379 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2380 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2381 0x85aac367, 0x82cd8698,
2382 };
2383
2384 /*
2385 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2386 * over-estimates when re-combining.
2387 */
2388 static const u32 runnable_avg_yN_sum[] = {
2389 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2390 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2391 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2392 };
2393
2394 /*
2395 * Approximate:
2396 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2397 */
2398 static __always_inline u64 decay_load(u64 val, u64 n)
2399 {
2400 unsigned int local_n;
2401
2402 if (!n)
2403 return val;
2404 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2405 return 0;
2406
2407 /* after bounds checking we can collapse to 32-bit */
2408 local_n = n;
2409
2410 /*
2411 * As y^PERIOD = 1/2, we can combine
2412 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2413 * With a look-up table which covers y^n (n<PERIOD)
2414 *
2415 * To achieve constant time decay_load.
2416 */
2417 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2418 val >>= local_n / LOAD_AVG_PERIOD;
2419 local_n %= LOAD_AVG_PERIOD;
2420 }
2421
2422 val *= runnable_avg_yN_inv[local_n];
2423 /* We don't use SRR here since we always want to round down. */
2424 return val >> 32;
2425 }
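/*
 * Two quick sanity checks of the arithmetic above (illustrative only):
 *
 *   decay_load(1024, 1)  == (1024 * 0xfa83b2da) >> 32 == 1002
 *   decay_load(1024, 32) -> val >>= 1 (one full period), local_n == 0,
 *                           then (512 * 0xffffffff) >> 32 == 511
 *
 * i.e. one full period of decay roughly halves a contribution, always
 * rounding down.
 */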
2426
2427 /*
2428 * For updates fully spanning n periods, the contribution to runnable
2429 * average will be: \Sum 1024*y^n
2430 *
2431 * We can compute this reasonably efficiently by combining:
2432 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2433 */
2434 static u32 __compute_runnable_contrib(u64 n)
2435 {
2436 u32 contrib = 0;
2437
2438 if (likely(n <= LOAD_AVG_PERIOD))
2439 return runnable_avg_yN_sum[n];
2440 else if (unlikely(n >= LOAD_AVG_MAX_N))
2441 return LOAD_AVG_MAX;
2442
2443 /* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
2444 do {
2445 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2446 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2447
2448 n -= LOAD_AVG_PERIOD;
2449 } while (n > LOAD_AVG_PERIOD);
2450
2451 contrib = decay_load(contrib, n);
2452 return contrib + runnable_avg_yN_sum[n];
2453 }
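/*
 * A worked example (approximate, for illustration): for n = 40 periods the
 * loop runs once, leaving contrib = runnable_avg_yN_sum[32] = 23371 and
 * n = 8; the tail is then decay_load(23371, 8) ~= 19652, plus
 * runnable_avg_yN_sum[8] = 7437, for a total of roughly 27089.  As n grows
 * the result converges towards LOAD_AVG_MAX.
 */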
2454
2455 /*
2456 * We can represent the historical contribution to runnable average as the
2457 * coefficients of a geometric series. To do this we sub-divide our runnable
2458 * history into segments of approximately 1ms (1024us); label the segment that
2459 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2460 *
2461 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2462 * p0 p1 p2
2463 * (now) (~1ms ago) (~2ms ago)
2464 *
2465 * Let u_i denote the fraction of p_i that the entity was runnable.
2466 *
2467 * We then designate the fractions u_i as our co-efficients, yielding the
2468 * following representation of historical load:
2469 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2470 *
2471 * We choose y based on the width of a reasonable scheduling period, fixing:
2472 * y^32 = 0.5
2473 *
2474 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2475 * approximately half as much as the contribution to load within the last ms
2476 * (u_0).
2477 *
2478 * When a period "rolls over" and we have new u_0`, multiplying the previous
2479 * sum again by y is sufficient to update:
2480 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2481 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2482 */
2483 static __always_inline int __update_entity_runnable_avg(u64 now,
2484 struct sched_avg *sa,
2485 int runnable)
2486 {
2487 u64 delta, periods;
2488 u32 runnable_contrib;
2489 int delta_w, decayed = 0;
2490
2491 delta = now - sa->last_runnable_update;
2492 /*
2493 * This should only happen when time goes backwards, which it
2494 * unfortunately does during sched clock init when we swap over to TSC.
2495 */
2496 if ((s64)delta < 0) {
2497 sa->last_runnable_update = now;
2498 return 0;
2499 }
2500
2501 /*
2502 * Use 1024ns as the unit of measurement since it's a reasonable
2503 * approximation of 1us and fast to compute.
2504 */
2505 delta >>= 10;
2506 if (!delta)
2507 return 0;
2508 sa->last_runnable_update = now;
2509
2510 /* delta_w is the amount already accumulated against our next period */
2511 delta_w = sa->runnable_avg_period % 1024;
2512 if (delta + delta_w >= 1024) {
2513 /* period roll-over */
2514 decayed = 1;
2515
2516 /*
2517 * Now that we know we're crossing a period boundary, figure
2518 * out how much from delta we need to complete the current
2519 * period and accrue it.
2520 */
2521 delta_w = 1024 - delta_w;
2522 if (runnable)
2523 sa->runnable_avg_sum += delta_w;
2524 sa->runnable_avg_period += delta_w;
2525
2526 delta -= delta_w;
2527
2528 /* Figure out how many additional periods this update spans */
2529 periods = delta / 1024;
2530 delta %= 1024;
2531
2532 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2533 periods + 1);
2534 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2535 periods + 1);
2536
2537 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2538 runnable_contrib = __compute_runnable_contrib(periods);
2539 if (runnable)
2540 sa->runnable_avg_sum += runnable_contrib;
2541 sa->runnable_avg_period += runnable_contrib;
2542 }
2543
2544 /* Remainder of delta accrued against u_0` */
2545 if (runnable)
2546 sa->runnable_avg_sum += delta;
2547 sa->runnable_avg_period += delta;
2548
2549 return decayed;
2550 }
2551
2552 /* Synchronize an entity's decay with its parenting cfs_rq. */
2553 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2554 {
2555 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2556 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2557
2558 decays -= se->avg.decay_count;
2559 if (!decays)
2560 return 0;
2561
2562 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2563 se->avg.decay_count = 0;
2564
2565 return decays;
2566 }
2567
2568 #ifdef CONFIG_FAIR_GROUP_SCHED
2569 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2570 int force_update)
2571 {
2572 struct task_group *tg = cfs_rq->tg;
2573 long tg_contrib;
2574
2575 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2576 tg_contrib -= cfs_rq->tg_load_contrib;
2577
2578 if (!tg_contrib)
2579 return;
2580
2581 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2582 atomic_long_add(tg_contrib, &tg->load_avg);
2583 cfs_rq->tg_load_contrib += tg_contrib;
2584 }
2585 }
2586
2587 /*
2588 * Aggregate cfs_rq runnable averages into an equivalent task_group
2589 * representation for computing load contributions.
2590 */
2591 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2592 struct cfs_rq *cfs_rq)
2593 {
2594 struct task_group *tg = cfs_rq->tg;
2595 long contrib;
2596
2597 /* The fraction of a cpu used by this cfs_rq */
2598 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2599 sa->runnable_avg_period + 1);
2600 contrib -= cfs_rq->tg_runnable_contrib;
2601
2602 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2603 atomic_add(contrib, &tg->runnable_avg);
2604 cfs_rq->tg_runnable_contrib += contrib;
2605 }
2606 }
2607
2608 static inline void __update_group_entity_contrib(struct sched_entity *se)
2609 {
2610 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2611 struct task_group *tg = cfs_rq->tg;
2612 int runnable_avg;
2613
2614 u64 contrib;
2615
2616 contrib = cfs_rq->tg_load_contrib * tg->shares;
2617 se->avg.load_avg_contrib = div_u64(contrib,
2618 atomic_long_read(&tg->load_avg) + 1);
2619
2620 /*
2621 * For group entities we need to compute a correction term in the case
2622 * that they are consuming <1 cpu so that we would contribute the same
2623 * load as a task of equal weight.
2624 *
2625 * Explicitly co-ordinating this measurement would be expensive, but
2626 * fortunately the sum of each cpu's contribution forms a usable
2627 * lower-bound on the true value.
2628 *
2629 * Consider the aggregate of 2 contributions. Either they are disjoint
2630 * (and the sum represents the true value) or they overlap and we are
2631 * understating by the aggregate of their overlap.
2632 *
2633 * Extending this to N cpus, for a given overlap, the maximum amount we
2634 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2635 * cpus that overlap for this interval and w_i is the interval width.
2636 *
2637 * On a small machine, the first term is well-bounded, which bounds the
2638 * total error since w_i is a subset of the period. Whereas on a
2639 * larger machine, while this first term can be larger, if w_i is of
2640 * consequential size it is guaranteed to see n_i*w_i quickly converge to
2641 * our upper bound of 1-cpu.
2642 */
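	/*
	 * Numerically (illustrative, taking NICE_0_LOAD as 1024): if the
	 * whole group is only about a quarter of a CPU busy, i.e.
	 * tg->runnable_avg ~= 256, a raw contribution of 1024 is scaled
	 * down to (1024 * 256) >> 10 = 256 below.
	 */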
2643 runnable_avg = atomic_read(&tg->runnable_avg);
2644 if (runnable_avg < NICE_0_LOAD) {
2645 se->avg.load_avg_contrib *= runnable_avg;
2646 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2647 }
2648 }
2649
2650 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2651 {
2652 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2653 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2654 }
2655 #else /* CONFIG_FAIR_GROUP_SCHED */
2656 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2657 int force_update) {}
2658 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2659 struct cfs_rq *cfs_rq) {}
2660 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2661 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2662 #endif /* CONFIG_FAIR_GROUP_SCHED */
2663
2664 static inline void __update_task_entity_contrib(struct sched_entity *se)
2665 {
2666 u32 contrib;
2667
2668 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2669 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2670 contrib /= (se->avg.runnable_avg_period + 1);
2671 se->avg.load_avg_contrib = scale_load(contrib);
2672 }
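/*
 * In round numbers (illustrative only): a nice-0 task with weight 1024 that
 * has been runnable ~100% of the time has runnable_avg_sum close to
 * runnable_avg_period, so its contrib works out to ~1024; at ~50% runnable
 * it contributes ~512.
 */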
2673
2674 /* Compute the current contribution to load_avg by se, return any delta */
2675 static long __update_entity_load_avg_contrib(struct sched_entity *se)
2676 {
2677 long old_contrib = se->avg.load_avg_contrib;
2678
2679 if (entity_is_task(se)) {
2680 __update_task_entity_contrib(se);
2681 } else {
2682 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2683 __update_group_entity_contrib(se);
2684 }
2685
2686 return se->avg.load_avg_contrib - old_contrib;
2687 }
2688
2689 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2690 long load_contrib)
2691 {
2692 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2693 cfs_rq->blocked_load_avg -= load_contrib;
2694 else
2695 cfs_rq->blocked_load_avg = 0;
2696 }
2697
2698 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2699
2700 /* Update a sched_entity's runnable average */
2701 static inline void update_entity_load_avg(struct sched_entity *se,
2702 int update_cfs_rq)
2703 {
2704 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2705 long contrib_delta;
2706 u64 now;
2707
2708 /*
2709 * For a group entity we need to use their owned cfs_rq_clock_task() in
2710 * case they are the parent of a throttled hierarchy.
2711 */
2712 if (entity_is_task(se))
2713 now = cfs_rq_clock_task(cfs_rq);
2714 else
2715 now = cfs_rq_clock_task(group_cfs_rq(se));
2716
2717 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2718 return;
2719
2720 contrib_delta = __update_entity_load_avg_contrib(se);
2721
2722 if (!update_cfs_rq)
2723 return;
2724
2725 if (se->on_rq)
2726 cfs_rq->runnable_load_avg += contrib_delta;
2727 else
2728 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2729 }
2730
2731 /*
2732 * Decay the load contributed by all blocked children and account this so that
2733 * their contribution may be appropriately discounted when they wake up.
2734 */
2735 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2736 {
2737 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2738 u64 decays;
2739
2740 decays = now - cfs_rq->last_decay;
2741 if (!decays && !force_update)
2742 return;
2743
2744 if (atomic_long_read(&cfs_rq->removed_load)) {
2745 unsigned long removed_load;
2746 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2747 subtract_blocked_load_contrib(cfs_rq, removed_load);
2748 }
2749
2750 if (decays) {
2751 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2752 decays);
2753 atomic64_add(decays, &cfs_rq->decay_counter);
2754 cfs_rq->last_decay = now;
2755 }
2756
2757 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2758 }
2759
2760 /* Add the load generated by se into cfs_rq's child load-average */
2761 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2762 struct sched_entity *se,
2763 int wakeup)
2764 {
2765 /*
2766 * We track migrations using entity decay_count <= 0, on a wake-up
2767 * migration we use a negative decay count to track the remote decays
2768 * accumulated while sleeping.
2769 *
2770 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2771 * are seen by enqueue_entity_load_avg() as a migration with an already
2772 * constructed load_avg_contrib.
2773 */
2774 if (unlikely(se->avg.decay_count <= 0)) {
2775 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2776 if (se->avg.decay_count) {
2777 /*
2778 * In a wake-up migration we have to approximate the
2779 * time sleeping. This is because we can't synchronize
2780 * clock_task between the two cpus, and it is not
2781 * guaranteed to be read-safe. Instead, we can
2782 * approximate this using our carried decays, which are
2783 * explicitly atomically readable.
2784 */
2785 se->avg.last_runnable_update -= (-se->avg.decay_count)
2786 << 20;
2787 update_entity_load_avg(se, 0);
2788 /* Indicate that we're now synchronized and on-rq */
2789 se->avg.decay_count = 0;
2790 }
2791 wakeup = 0;
2792 } else {
2793 __synchronize_entity_decay(se);
2794 }
2795
2796 /* migrated tasks did not contribute to our blocked load */
2797 if (wakeup) {
2798 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2799 update_entity_load_avg(se, 0);
2800 }
2801
2802 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2803 /* we force update consideration on load-balancer moves */
2804 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2805 }
2806
2807 /*
2808 * Remove se's load from this cfs_rq child load-average, if the entity is
2809 * transitioning to a blocked state we track its projected decay using
2810 * blocked_load_avg.
2811 */
2812 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2813 struct sched_entity *se,
2814 int sleep)
2815 {
2816 update_entity_load_avg(se, 1);
2817 /* we force update consideration on load-balancer moves */
2818 update_cfs_rq_blocked_load(cfs_rq, !sleep);
2819
2820 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2821 if (sleep) {
2822 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2823 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2824 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2825 }
2826
2827 /*
2828 * Update the rq's load with the elapsed running time before entering
2829 * idle. If the last scheduled task is not a CFS task, idle_enter will
2830 * be the only way to update the runnable statistic.
2831 */
2832 void idle_enter_fair(struct rq *this_rq)
2833 {
2834 update_rq_runnable_avg(this_rq, 1);
2835 }
2836
2837 /*
2838 * Update the rq's load with the elapsed idle time before a task is
2839 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2840 * be the only way to update the runnable statistic.
2841 */
2842 void idle_exit_fair(struct rq *this_rq)
2843 {
2844 update_rq_runnable_avg(this_rq, 0);
2845 }
2846
2847 static int idle_balance(struct rq *this_rq);
2848
2849 #else /* CONFIG_SMP */
2850
2851 static inline void update_entity_load_avg(struct sched_entity *se,
2852 int update_cfs_rq) {}
2853 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2854 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2855 struct sched_entity *se,
2856 int wakeup) {}
2857 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2858 struct sched_entity *se,
2859 int sleep) {}
2860 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2861 int force_update) {}
2862
2863 static inline int idle_balance(struct rq *rq)
2864 {
2865 return 0;
2866 }
2867
2868 #endif /* CONFIG_SMP */
2869
2870 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2871 {
2872 #ifdef CONFIG_SCHEDSTATS
2873 struct task_struct *tsk = NULL;
2874
2875 if (entity_is_task(se))
2876 tsk = task_of(se);
2877
2878 if (se->statistics.sleep_start) {
2879 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2880
2881 if ((s64)delta < 0)
2882 delta = 0;
2883
2884 if (unlikely(delta > se->statistics.sleep_max))
2885 se->statistics.sleep_max = delta;
2886
2887 se->statistics.sleep_start = 0;
2888 se->statistics.sum_sleep_runtime += delta;
2889
2890 if (tsk) {
2891 account_scheduler_latency(tsk, delta >> 10, 1);
2892 trace_sched_stat_sleep(tsk, delta);
2893 }
2894 }
2895 if (se->statistics.block_start) {
2896 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2897
2898 if ((s64)delta < 0)
2899 delta = 0;
2900
2901 if (unlikely(delta > se->statistics.block_max))
2902 se->statistics.block_max = delta;
2903
2904 se->statistics.block_start = 0;
2905 se->statistics.sum_sleep_runtime += delta;
2906
2907 if (tsk) {
2908 if (tsk->in_iowait) {
2909 se->statistics.iowait_sum += delta;
2910 se->statistics.iowait_count++;
2911 trace_sched_stat_iowait(tsk, delta);
2912 }
2913
2914 trace_sched_stat_blocked(tsk, delta);
2915
2916 /*
2917 * Blocking time is in units of nanosecs, so shift by
2918 * 20 to get a milliseconds-range estimation of the
2919 * amount of time that the task spent sleeping:
2920 */
2921 if (unlikely(prof_on == SLEEP_PROFILING)) {
2922 profile_hits(SLEEP_PROFILING,
2923 (void *)get_wchan(tsk),
2924 delta >> 20);
2925 }
2926 account_scheduler_latency(tsk, delta >> 10, 0);
2927 }
2928 }
2929 #endif
2930 }
2931
2932 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2933 {
2934 #ifdef CONFIG_SCHED_DEBUG
2935 s64 d = se->vruntime - cfs_rq->min_vruntime;
2936
2937 if (d < 0)
2938 d = -d;
2939
2940 if (d > 3*sysctl_sched_latency)
2941 schedstat_inc(cfs_rq, nr_spread_over);
2942 #endif
2943 }
2944
2945 static void
2946 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2947 {
2948 u64 vruntime = cfs_rq->min_vruntime;
2949
2950 /*
2951 * The 'current' period is already promised to the current tasks,
2952 * however the extra weight of the new task will slow them down a
2953 * little, place the new task so that it fits in the slot that
2954 * stays open at the end.
2955 */
2956 if (initial && sched_feat(START_DEBIT))
2957 vruntime += sched_vslice(cfs_rq, se);
2958
2959 /* sleeps up to a single latency don't count. */
2960 if (!initial) {
2961 unsigned long thresh = sysctl_sched_latency;
2962
2963 /*
2964 * Halve their sleep time's effect, to allow
2965 * for a gentler effect of sleepers:
2966 */
2967 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2968 thresh >>= 1;
2969
2970 vruntime -= thresh;
2971 }
2972
2973 /* ensure we never gain time by being placed backwards. */
2974 se->vruntime = max_vruntime(se->vruntime, vruntime);
2975 }
2976
2977 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2978
2979 static void
2980 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2981 {
2982 /*
2983 * Update the normalized vruntime before updating min_vruntime
2984 * through calling update_curr().
2985 */
2986 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
2987 se->vruntime += cfs_rq->min_vruntime;
2988
2989 /*
2990 * Update run-time statistics of the 'current'.
2991 */
2992 update_curr(cfs_rq);
2993 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
2994 account_entity_enqueue(cfs_rq, se);
2995 update_cfs_shares(cfs_rq);
2996
2997 if (flags & ENQUEUE_WAKEUP) {
2998 place_entity(cfs_rq, se, 0);
2999 enqueue_sleeper(cfs_rq, se);
3000 }
3001
3002 update_stats_enqueue(cfs_rq, se);
3003 check_spread(cfs_rq, se);
3004 if (se != cfs_rq->curr)
3005 __enqueue_entity(cfs_rq, se);
3006 se->on_rq = 1;
3007
3008 if (cfs_rq->nr_running == 1) {
3009 list_add_leaf_cfs_rq(cfs_rq);
3010 check_enqueue_throttle(cfs_rq);
3011 }
3012 }
3013
3014 static void __clear_buddies_last(struct sched_entity *se)
3015 {
3016 for_each_sched_entity(se) {
3017 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3018 if (cfs_rq->last != se)
3019 break;
3020
3021 cfs_rq->last = NULL;
3022 }
3023 }
3024
3025 static void __clear_buddies_next(struct sched_entity *se)
3026 {
3027 for_each_sched_entity(se) {
3028 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3029 if (cfs_rq->next != se)
3030 break;
3031
3032 cfs_rq->next = NULL;
3033 }
3034 }
3035
3036 static void __clear_buddies_skip(struct sched_entity *se)
3037 {
3038 for_each_sched_entity(se) {
3039 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3040 if (cfs_rq->skip != se)
3041 break;
3042
3043 cfs_rq->skip = NULL;
3044 }
3045 }
3046
3047 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3048 {
3049 if (cfs_rq->last == se)
3050 __clear_buddies_last(se);
3051
3052 if (cfs_rq->next == se)
3053 __clear_buddies_next(se);
3054
3055 if (cfs_rq->skip == se)
3056 __clear_buddies_skip(se);
3057 }
3058
3059 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3060
3061 static void
3062 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3063 {
3064 /*
3065 * Update run-time statistics of the 'current'.
3066 */
3067 update_curr(cfs_rq);
3068 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
3069
3070 update_stats_dequeue(cfs_rq, se);
3071 if (flags & DEQUEUE_SLEEP) {
3072 #ifdef CONFIG_SCHEDSTATS
3073 if (entity_is_task(se)) {
3074 struct task_struct *tsk = task_of(se);
3075
3076 if (tsk->state & TASK_INTERRUPTIBLE)
3077 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
3078 if (tsk->state & TASK_UNINTERRUPTIBLE)
3079 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
3080 }
3081 #endif
3082 }
3083
3084 clear_buddies(cfs_rq, se);
3085
3086 if (se != cfs_rq->curr)
3087 __dequeue_entity(cfs_rq, se);
3088 se->on_rq = 0;
3089 account_entity_dequeue(cfs_rq, se);
3090
3091 /*
3092 * Normalize the entity after updating the min_vruntime because the
3093 * update can refer to the ->curr item and we need to reflect this
3094 * movement in our normalized position.
3095 */
3096 if (!(flags & DEQUEUE_SLEEP))
3097 se->vruntime -= cfs_rq->min_vruntime;
3098
3099 /* return excess runtime on last dequeue */
3100 return_cfs_rq_runtime(cfs_rq);
3101
3102 update_min_vruntime(cfs_rq);
3103 update_cfs_shares(cfs_rq);
3104 }
3105
3106 /*
3107 * Preempt the current task with a newly woken task if needed:
3108 */
3109 static void
3110 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3111 {
3112 unsigned long ideal_runtime, delta_exec;
3113 struct sched_entity *se;
3114 s64 delta;
3115
3116 ideal_runtime = sched_slice(cfs_rq, curr);
3117 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3118 if (delta_exec > ideal_runtime) {
3119 resched_curr(rq_of(cfs_rq));
3120 /*
3121 * The current task ran long enough, ensure it doesn't get
3122 * re-elected due to buddy favours.
3123 */
3124 clear_buddies(cfs_rq, curr);
3125 return;
3126 }
3127
3128 /*
3129 * Ensure that a task that missed wakeup preemption by a
3130 * narrow margin doesn't have to wait for a full slice.
3131 * This also mitigates buddy induced latencies under load.
3132 */
3133 if (delta_exec < sysctl_sched_min_granularity)
3134 return;
3135
3136 se = __pick_first_entity(cfs_rq);
3137 delta = curr->vruntime - se->vruntime;
3138
3139 if (delta < 0)
3140 return;
3141
3142 if (delta > ideal_runtime)
3143 resched_curr(rq_of(cfs_rq));
3144 }
3145
3146 static void
3147 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3148 {
3149 /* 'current' is not kept within the tree. */
3150 if (se->on_rq) {
3151 /*
3152 * Any task has to be enqueued before it gets to execute on
3153 * a CPU. So account for the time it spent waiting on the
3154 * runqueue.
3155 */
3156 update_stats_wait_end(cfs_rq, se);
3157 __dequeue_entity(cfs_rq, se);
3158 }
3159
3160 update_stats_curr_start(cfs_rq, se);
3161 cfs_rq->curr = se;
3162 #ifdef CONFIG_SCHEDSTATS
3163 /*
3164 * Track our maximum slice length, if the CPU's load is at
3165 * least twice that of our own weight (i.e. don't track it
3166 * when there are only lesser-weight tasks around):
3167 */
3168 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3169 se->statistics.slice_max = max(se->statistics.slice_max,
3170 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3171 }
3172 #endif
3173 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3174 }
3175
3176 static int
3177 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3178
3179 /*
3180 * Pick the next process, keeping these things in mind, in this order:
3181 * 1) keep things fair between processes/task groups
3182 * 2) pick the "next" process, since someone really wants that to run
3183 * 3) pick the "last" process, for cache locality
3184 * 4) do not run the "skip" process, if something else is available
3185 */
3186 static struct sched_entity *
3187 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3188 {
3189 struct sched_entity *left = __pick_first_entity(cfs_rq);
3190 struct sched_entity *se;
3191
3192 /*
3193 * If curr is set we have to see if it's left of the leftmost entity
3194 * still in the tree, provided there was anything in the tree at all.
3195 */
3196 if (!left || (curr && entity_before(curr, left)))
3197 left = curr;
3198
3199 se = left; /* ideally we run the leftmost entity */
3200
3201 /*
3202 * Avoid running the skip buddy, if running something else can
3203 * be done without getting too unfair.
3204 */
3205 if (cfs_rq->skip == se) {
3206 struct sched_entity *second;
3207
3208 if (se == curr) {
3209 second = __pick_first_entity(cfs_rq);
3210 } else {
3211 second = __pick_next_entity(se);
3212 if (!second || (curr && entity_before(curr, second)))
3213 second = curr;
3214 }
3215
3216 if (second && wakeup_preempt_entity(second, left) < 1)
3217 se = second;
3218 }
3219
3220 /*
3221 * Prefer last buddy, try to return the CPU to a preempted task.
3222 */
3223 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3224 se = cfs_rq->last;
3225
3226 /*
3227 * Someone really wants this to run. If it's not unfair, run it.
3228 */
3229 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3230 se = cfs_rq->next;
3231
3232 clear_buddies(cfs_rq, se);
3233
3234 return se;
3235 }
3236
3237 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3238
3239 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3240 {
3241 /*
3242 * If still on the runqueue then deactivate_task()
3243 * was not called and update_curr() has to be done:
3244 */
3245 if (prev->on_rq)
3246 update_curr(cfs_rq);
3247
3248 /* throttle cfs_rqs exceeding runtime */
3249 check_cfs_rq_runtime(cfs_rq);
3250
3251 check_spread(cfs_rq, prev);
3252 if (prev->on_rq) {
3253 update_stats_wait_start(cfs_rq, prev);
3254 /* Put 'current' back into the tree. */
3255 __enqueue_entity(cfs_rq, prev);
3256 /* in !on_rq case, update occurred at dequeue */
3257 update_entity_load_avg(prev, 1);
3258 }
3259 cfs_rq->curr = NULL;
3260 }
3261
3262 static void
3263 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3264 {
3265 /*
3266 * Update run-time statistics of the 'current'.
3267 */
3268 update_curr(cfs_rq);
3269
3270 /*
3271 * Ensure that runnable average is periodically updated.
3272 */
3273 update_entity_load_avg(curr, 1);
3274 update_cfs_rq_blocked_load(cfs_rq, 1);
3275 update_cfs_shares(cfs_rq);
3276
3277 #ifdef CONFIG_SCHED_HRTICK
3278 /*
3279 * queued ticks are scheduled to match the slice, so don't bother
3280 * validating it and just reschedule.
3281 */
3282 if (queued) {
3283 resched_curr(rq_of(cfs_rq));
3284 return;
3285 }
3286 /*
3287 * don't let the period tick interfere with the hrtick preemption
3288 */
3289 if (!sched_feat(DOUBLE_TICK) &&
3290 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3291 return;
3292 #endif
3293
3294 if (cfs_rq->nr_running > 1)
3295 check_preempt_tick(cfs_rq, curr);
3296 }
3297
3298
3299 /**************************************************
3300 * CFS bandwidth control machinery
3301 */
3302
3303 #ifdef CONFIG_CFS_BANDWIDTH
3304
3305 #ifdef HAVE_JUMP_LABEL
3306 static struct static_key __cfs_bandwidth_used;
3307
3308 static inline bool cfs_bandwidth_used(void)
3309 {
3310 return static_key_false(&__cfs_bandwidth_used);
3311 }
3312
3313 void cfs_bandwidth_usage_inc(void)
3314 {
3315 static_key_slow_inc(&__cfs_bandwidth_used);
3316 }
3317
3318 void cfs_bandwidth_usage_dec(void)
3319 {
3320 static_key_slow_dec(&__cfs_bandwidth_used);
3321 }
3322 #else /* HAVE_JUMP_LABEL */
3323 static bool cfs_bandwidth_used(void)
3324 {
3325 return true;
3326 }
3327
3328 void cfs_bandwidth_usage_inc(void) {}
3329 void cfs_bandwidth_usage_dec(void) {}
3330 #endif /* HAVE_JUMP_LABEL */
3331
3332 /*
3333 * default period for cfs group bandwidth.
3334 * default: 0.1s, units: nanoseconds
3335 */
3336 static inline u64 default_cfs_period(void)
3337 {
3338 return 100000000ULL;
3339 }
3340
3341 static inline u64 sched_cfs_bandwidth_slice(void)
3342 {
3343 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3344 }
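/*
 * Putting the two together with example numbers (defaults may be tuned):
 * a group configured with quota = 50ms against the default 100ms period is
 * capped at half a CPU's worth of runtime per period, and each per-cpu
 * cfs_rq draws that runtime from the global pool in slices of
 * sysctl_sched_cfs_bandwidth_slice (5ms unless changed).
 */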
3345
3346 /*
3347 * Replenish runtime according to assigned quota and update expiration time.
3348 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3349 * additional synchronization around rq->lock.
3350 *
3351 * requires cfs_b->lock
3352 */
3353 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3354 {
3355 u64 now;
3356
3357 if (cfs_b->quota == RUNTIME_INF)
3358 return;
3359
3360 now = sched_clock_cpu(smp_processor_id());
3361 cfs_b->runtime = cfs_b->quota;
3362 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3363 }
3364
3365 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3366 {
3367 return &tg->cfs_bandwidth;
3368 }
3369
3370 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3371 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3372 {
3373 if (unlikely(cfs_rq->throttle_count))
3374 return cfs_rq->throttled_clock_task;
3375
3376 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3377 }
3378
3379 /* returns 0 on failure to allocate runtime */
3380 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3381 {
3382 struct task_group *tg = cfs_rq->tg;
3383 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3384 u64 amount = 0, min_amount, expires;
3385
3386 /* note: this is a positive sum as runtime_remaining <= 0 */
3387 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3388
3389 raw_spin_lock(&cfs_b->lock);
3390 if (cfs_b->quota == RUNTIME_INF)
3391 amount = min_amount;
3392 else {
3393 /*
3394 * If the bandwidth pool has become inactive, then at least one
3395 * period must have elapsed since the last consumption.
3396 * Refresh the global state and ensure bandwidth timer becomes
3397 * active.
3398 */
3399 if (!cfs_b->timer_active) {
3400 __refill_cfs_bandwidth_runtime(cfs_b);
3401 __start_cfs_bandwidth(cfs_b, false);
3402 }
3403
3404 if (cfs_b->runtime > 0) {
3405 amount = min(cfs_b->runtime, min_amount);
3406 cfs_b->runtime -= amount;
3407 cfs_b->idle = 0;
3408 }
3409 }
3410 expires = cfs_b->runtime_expires;
3411 raw_spin_unlock(&cfs_b->lock);
3412
3413 cfs_rq->runtime_remaining += amount;
3414 /*
3415 * we may have advanced our local expiration to account for allowed
3416 * spread between our sched_clock and the one on which runtime was
3417 * issued.
3418 */
3419 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3420 cfs_rq->runtime_expires = expires;
3421
3422 return cfs_rq->runtime_remaining > 0;
3423 }
3424
3425 /*
3426 * Note: This depends on the synchronization provided by sched_clock and the
3427 * fact that rq->clock snapshots this value.
3428 */
3429 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3430 {
3431 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3432
3433 /* if the deadline is ahead of our clock, nothing to do */
3434 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3435 return;
3436
3437 if (cfs_rq->runtime_remaining < 0)
3438 return;
3439
3440 /*
3441 * If the local deadline has passed we have to consider the
3442 * possibility that our sched_clock is 'fast' and the global deadline
3443 * has not truly expired.
3444 *
3445 * Fortunately we can determine whether this is the case by checking
3446 * whether the global deadline has advanced. It is valid to compare
3447 * cfs_b->runtime_expires without any locks since we only care about
3448 * exact equality, so a partial write will still work.
3449 */
3450
3451 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3452 /* extend local deadline, drift is bounded above by 2 ticks */
3453 cfs_rq->runtime_expires += TICK_NSEC;
3454 } else {
3455 /* global deadline is ahead, expiration has passed */
3456 cfs_rq->runtime_remaining = 0;
3457 }
3458 }
3459
3460 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3461 {
3462 /* dock delta_exec before expiring quota (as it could span periods) */
3463 cfs_rq->runtime_remaining -= delta_exec;
3464 expire_cfs_rq_runtime(cfs_rq);
3465
3466 if (likely(cfs_rq->runtime_remaining > 0))
3467 return;
3468
3469 /*
3470 * if we're unable to extend our runtime we resched so that the active
3471 * hierarchy can be throttled
3472 */
3473 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3474 resched_curr(rq_of(cfs_rq));
3475 }
3476
3477 static __always_inline
3478 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3479 {
3480 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3481 return;
3482
3483 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3484 }
3485
3486 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3487 {
3488 return cfs_bandwidth_used() && cfs_rq->throttled;
3489 }
3490
3491 /* check whether cfs_rq, or any parent, is throttled */
3492 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3493 {
3494 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3495 }
3496
3497 /*
3498 * Ensure that neither of the group entities corresponding to src_cpu or
3499 * dest_cpu are members of a throttled hierarchy when performing group
3500 * load-balance operations.
3501 */
3502 static inline int throttled_lb_pair(struct task_group *tg,
3503 int src_cpu, int dest_cpu)
3504 {
3505 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3506
3507 src_cfs_rq = tg->cfs_rq[src_cpu];
3508 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3509
3510 return throttled_hierarchy(src_cfs_rq) ||
3511 throttled_hierarchy(dest_cfs_rq);
3512 }
3513
3514 /* updated child weight may affect parent so we have to do this bottom up */
3515 static int tg_unthrottle_up(struct task_group *tg, void *data)
3516 {
3517 struct rq *rq = data;
3518 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3519
3520 cfs_rq->throttle_count--;
3521 #ifdef CONFIG_SMP
3522 if (!cfs_rq->throttle_count) {
3523 /* adjust cfs_rq_clock_task() */
3524 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3525 cfs_rq->throttled_clock_task;
3526 }
3527 #endif
3528
3529 return 0;
3530 }
3531
3532 static int tg_throttle_down(struct task_group *tg, void *data)
3533 {
3534 struct rq *rq = data;
3535 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3536
3537 /* group is entering throttled state, stop time */
3538 if (!cfs_rq->throttle_count)
3539 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3540 cfs_rq->throttle_count++;
3541
3542 return 0;
3543 }
3544
3545 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3546 {
3547 struct rq *rq = rq_of(cfs_rq);
3548 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3549 struct sched_entity *se;
3550 long task_delta, dequeue = 1;
3551
3552 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3553
3554 /* freeze hierarchy runnable averages while throttled */
3555 rcu_read_lock();
3556 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3557 rcu_read_unlock();
3558
3559 task_delta = cfs_rq->h_nr_running;
3560 for_each_sched_entity(se) {
3561 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3562 /* throttled entity or throttle-on-deactivate */
3563 if (!se->on_rq)
3564 break;
3565
3566 if (dequeue)
3567 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3568 qcfs_rq->h_nr_running -= task_delta;
3569
3570 if (qcfs_rq->load.weight)
3571 dequeue = 0;
3572 }
3573
3574 if (!se)
3575 sub_nr_running(rq, task_delta);
3576
3577 cfs_rq->throttled = 1;
3578 cfs_rq->throttled_clock = rq_clock(rq);
3579 raw_spin_lock(&cfs_b->lock);
3580 /*
3581 * Add to the _head_ of the list, so that an already-started
3582 * distribute_cfs_runtime will not see us
3583 */
3584 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3585 if (!cfs_b->timer_active)
3586 __start_cfs_bandwidth(cfs_b, false);
3587 raw_spin_unlock(&cfs_b->lock);
3588 }
3589
3590 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3591 {
3592 struct rq *rq = rq_of(cfs_rq);
3593 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3594 struct sched_entity *se;
3595 int enqueue = 1;
3596 long task_delta;
3597
3598 se = cfs_rq->tg->se[cpu_of(rq)];
3599
3600 cfs_rq->throttled = 0;
3601
3602 update_rq_clock(rq);
3603
3604 raw_spin_lock(&cfs_b->lock);
3605 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3606 list_del_rcu(&cfs_rq->throttled_list);
3607 raw_spin_unlock(&cfs_b->lock);
3608
3609 /* update hierarchical throttle state */
3610 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3611
3612 if (!cfs_rq->load.weight)
3613 return;
3614
3615 task_delta = cfs_rq->h_nr_running;
3616 for_each_sched_entity(se) {
3617 if (se->on_rq)
3618 enqueue = 0;
3619
3620 cfs_rq = cfs_rq_of(se);
3621 if (enqueue)
3622 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3623 cfs_rq->h_nr_running += task_delta;
3624
3625 if (cfs_rq_throttled(cfs_rq))
3626 break;
3627 }
3628
3629 if (!se)
3630 add_nr_running(rq, task_delta);
3631
3632 /* determine whether we need to wake up potentially idle cpu */
3633 if (rq->curr == rq->idle && rq->cfs.nr_running)
3634 resched_curr(rq);
3635 }
3636
3637 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3638 u64 remaining, u64 expires)
3639 {
3640 struct cfs_rq *cfs_rq;
3641 u64 runtime;
3642 u64 starting_runtime = remaining;
3643
3644 rcu_read_lock();
3645 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3646 throttled_list) {
3647 struct rq *rq = rq_of(cfs_rq);
3648
3649 raw_spin_lock(&rq->lock);
3650 if (!cfs_rq_throttled(cfs_rq))
3651 goto next;
3652
3653 runtime = -cfs_rq->runtime_remaining + 1;
3654 if (runtime > remaining)
3655 runtime = remaining;
3656 remaining -= runtime;
3657
3658 cfs_rq->runtime_remaining += runtime;
3659 cfs_rq->runtime_expires = expires;
3660
3661 /* we check whether we're throttled above */
3662 if (cfs_rq->runtime_remaining > 0)
3663 unthrottle_cfs_rq(cfs_rq);
3664
3665 next:
3666 raw_spin_unlock(&rq->lock);
3667
3668 if (!remaining)
3669 break;
3670 }
3671 rcu_read_unlock();
3672
3673 return starting_runtime - remaining;
3674 }
3675
3676 /*
3677 * Responsible for refilling a task_group's bandwidth and unthrottling its
3678 * cfs_rqs as appropriate. If there has been no activity within the last
3679 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3680 * used to track this state.
3681 */
3682 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3683 {
3684 u64 runtime, runtime_expires;
3685 int throttled;
3686
3687 /* no need to continue the timer with no bandwidth constraint */
3688 if (cfs_b->quota == RUNTIME_INF)
3689 goto out_deactivate;
3690
3691 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3692 cfs_b->nr_periods += overrun;
3693
3694 /*
3695 * idle depends on !throttled (for the case of a large deficit), and if
3696 * we're going inactive then everything else can be deferred
3697 */
3698 if (cfs_b->idle && !throttled)
3699 goto out_deactivate;
3700
3701 /*
3702 * if we have relooped after returning idle once, we need to update our
3703 * status as actually running, so that other cpus doing
3704 * __start_cfs_bandwidth will stop trying to cancel us.
3705 */
3706 cfs_b->timer_active = 1;
3707
3708 __refill_cfs_bandwidth_runtime(cfs_b);
3709
3710 if (!throttled) {
3711 /* mark as potentially idle for the upcoming period */
3712 cfs_b->idle = 1;
3713 return 0;
3714 }
3715
3716 /* account preceding periods in which throttling occurred */
3717 cfs_b->nr_throttled += overrun;
3718
3719 runtime_expires = cfs_b->runtime_expires;
3720
3721 /*
3722 * This check is repeated as we are holding onto the new bandwidth while
3723 * we unthrottle. This can potentially race with an unthrottled group
3724 * trying to acquire new bandwidth from the global pool. This can result
3725 * in us over-using our runtime if it is all used during this loop, but
3726 * only by limited amounts in that extreme case.
3727 */
3728 while (throttled && cfs_b->runtime > 0) {
3729 runtime = cfs_b->runtime;
3730 raw_spin_unlock(&cfs_b->lock);
3731 /* we can't nest cfs_b->lock while distributing bandwidth */
3732 runtime = distribute_cfs_runtime(cfs_b, runtime,
3733 runtime_expires);
3734 raw_spin_lock(&cfs_b->lock);
3735
3736 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3737
3738 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3739 }
3740
3741 /*
3742 * While we are ensured activity in the period following an
3743 * unthrottle, this also covers the case in which the new bandwidth is
3744 * insufficient to cover the existing bandwidth deficit. (Forcing the
3745 * timer to remain active while there are any throttled entities.)
3746 */
3747 cfs_b->idle = 0;
3748
3749 return 0;
3750
3751 out_deactivate:
3752 cfs_b->timer_active = 0;
3753 return 1;
3754 }
3755
3756 /* a cfs_rq won't donate quota below this amount */
3757 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3758 /* minimum remaining period time to redistribute slack quota */
3759 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3760 /* how long we wait to gather additional slack before distributing */
3761 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3762
3763 /*
3764 * Are we near the end of the current quota period?
3765 *
3766 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3767 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3768 * migrate_hrtimers, base is never cleared, so we are fine.
3769 */
3770 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3771 {
3772 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3773 u64 remaining;
3774
3775 /* if the call-back is running a quota refresh is already occurring */
3776 if (hrtimer_callback_running(refresh_timer))
3777 return 1;
3778
3779 /* is a quota refresh about to occur? */
3780 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3781 if (remaining < min_expire)
3782 return 1;
3783
3784 return 0;
3785 }
3786
3787 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3788 {
3789 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3790
3791 /* if there's a quota refresh soon don't bother with slack */
3792 if (runtime_refresh_within(cfs_b, min_left))
3793 return;
3794
3795 start_bandwidth_timer(&cfs_b->slack_timer,
3796 ns_to_ktime(cfs_bandwidth_slack_period));
3797 }
3798
3799 /* we know any runtime found here is valid as update_curr() precedes return */
3800 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3801 {
3802 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3803 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3804
3805 if (slack_runtime <= 0)
3806 return;
3807
3808 raw_spin_lock(&cfs_b->lock);
3809 if (cfs_b->quota != RUNTIME_INF &&
3810 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3811 cfs_b->runtime += slack_runtime;
3812
3813 /* we are under rq->lock, defer unthrottling using a timer */
3814 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3815 !list_empty(&cfs_b->throttled_cfs_rq))
3816 start_cfs_slack_bandwidth(cfs_b);
3817 }
3818 raw_spin_unlock(&cfs_b->lock);
3819
3820 /* even if it's not valid for return we don't want to try again */
3821 cfs_rq->runtime_remaining -= slack_runtime;
3822 }
3823
3824 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3825 {
3826 if (!cfs_bandwidth_used())
3827 return;
3828
3829 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3830 return;
3831
3832 __return_cfs_rq_runtime(cfs_rq);
3833 }
3834
3835 /*
3836 * This is done with a timer (instead of inline with bandwidth return) since
3837 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3838 */
3839 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3840 {
3841 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3842 u64 expires;
3843
3844 /* confirm we're still not at a refresh boundary */
3845 raw_spin_lock(&cfs_b->lock);
3846 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3847 raw_spin_unlock(&cfs_b->lock);
3848 return;
3849 }
3850
3851 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
3852 runtime = cfs_b->runtime;
3853
3854 expires = cfs_b->runtime_expires;
3855 raw_spin_unlock(&cfs_b->lock);
3856
3857 if (!runtime)
3858 return;
3859
3860 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3861
3862 raw_spin_lock(&cfs_b->lock);
3863 if (expires == cfs_b->runtime_expires)
3864 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3865 raw_spin_unlock(&cfs_b->lock);
3866 }
3867
3868 /*
3869 * When a group wakes up we want to make sure that its quota is not already
3870 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3871 	 * runtime, as update_curr() throttling cannot trigger until it is on-rq.
3872 */
3873 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3874 {
3875 if (!cfs_bandwidth_used())
3876 return;
3877
3878 /* an active group must be handled by the update_curr()->put() path */
3879 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3880 return;
3881
3882 /* ensure the group is not already throttled */
3883 if (cfs_rq_throttled(cfs_rq))
3884 return;
3885
3886 /* update runtime allocation */
3887 account_cfs_rq_runtime(cfs_rq, 0);
3888 if (cfs_rq->runtime_remaining <= 0)
3889 throttle_cfs_rq(cfs_rq);
3890 }
3891
3892 /* conditionally throttle active cfs_rq's from put_prev_entity() */
3893 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3894 {
3895 if (!cfs_bandwidth_used())
3896 return false;
3897
3898 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3899 return false;
3900
3901 /*
3902 * it's possible for a throttled entity to be forced into a running
3903 * state (e.g. set_curr_task), in this case we're finished.
3904 */
3905 if (cfs_rq_throttled(cfs_rq))
3906 return true;
3907
3908 throttle_cfs_rq(cfs_rq);
3909 return true;
3910 }
3911
3912 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3913 {
3914 struct cfs_bandwidth *cfs_b =
3915 container_of(timer, struct cfs_bandwidth, slack_timer);
3916 do_sched_cfs_slack_timer(cfs_b);
3917
3918 return HRTIMER_NORESTART;
3919 }
3920
3921 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3922 {
3923 struct cfs_bandwidth *cfs_b =
3924 container_of(timer, struct cfs_bandwidth, period_timer);
3925 ktime_t now;
3926 int overrun;
3927 int idle = 0;
3928
3929 raw_spin_lock(&cfs_b->lock);
3930 for (;;) {
3931 now = hrtimer_cb_get_time(timer);
3932 overrun = hrtimer_forward(timer, now, cfs_b->period);
3933
3934 if (!overrun)
3935 break;
3936
3937 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3938 }
3939 raw_spin_unlock(&cfs_b->lock);
3940
3941 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3942 }
3943
3944 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3945 {
3946 raw_spin_lock_init(&cfs_b->lock);
3947 cfs_b->runtime = 0;
3948 cfs_b->quota = RUNTIME_INF;
3949 cfs_b->period = ns_to_ktime(default_cfs_period());
3950
3951 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3952 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3953 cfs_b->period_timer.function = sched_cfs_period_timer;
3954 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3955 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3956 }
3957
3958 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3959 {
3960 cfs_rq->runtime_enabled = 0;
3961 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3962 }
3963
3964 /* requires cfs_b->lock, may release to reprogram timer */
3965 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
3966 {
3967 /*
3968 * The timer may be active because we're trying to set a new bandwidth
3969 * period or because we're racing with the tear-down path
3970 * (timer_active==0 becomes visible before the hrtimer call-back
3971 * terminates). In either case we ensure that it's re-programmed
3972 */
3973 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3974 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3975 /* bounce the lock to allow do_sched_cfs_period_timer to run */
3976 raw_spin_unlock(&cfs_b->lock);
3977 cpu_relax();
3978 raw_spin_lock(&cfs_b->lock);
3979 /* if someone else restarted the timer then we're done */
3980 if (!force && cfs_b->timer_active)
3981 return;
3982 }
3983
3984 cfs_b->timer_active = 1;
3985 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3986 }
3987
3988 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3989 {
3990 hrtimer_cancel(&cfs_b->period_timer);
3991 hrtimer_cancel(&cfs_b->slack_timer);
3992 }
3993
3994 static void __maybe_unused update_runtime_enabled(struct rq *rq)
3995 {
3996 struct cfs_rq *cfs_rq;
3997
3998 for_each_leaf_cfs_rq(rq, cfs_rq) {
3999 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4000
4001 raw_spin_lock(&cfs_b->lock);
4002 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4003 raw_spin_unlock(&cfs_b->lock);
4004 }
4005 }
4006
4007 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4008 {
4009 struct cfs_rq *cfs_rq;
4010
4011 for_each_leaf_cfs_rq(rq, cfs_rq) {
4012 if (!cfs_rq->runtime_enabled)
4013 continue;
4014
4015 /*
4016 * clock_task is not advancing so we just need to make sure
4017 * there's some valid quota amount
4018 */
4019 cfs_rq->runtime_remaining = 1;
4020 /*
4021 * Offline rq is schedulable till cpu is completely disabled
4022 * in take_cpu_down(), so we prevent new cfs throttling here.
4023 */
4024 cfs_rq->runtime_enabled = 0;
4025
4026 if (cfs_rq_throttled(cfs_rq))
4027 unthrottle_cfs_rq(cfs_rq);
4028 }
4029 }
4030
4031 #else /* CONFIG_CFS_BANDWIDTH */
4032 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4033 {
4034 return rq_clock_task(rq_of(cfs_rq));
4035 }
4036
4037 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4038 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4039 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4040 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4041
4042 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4043 {
4044 return 0;
4045 }
4046
4047 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4048 {
4049 return 0;
4050 }
4051
4052 static inline int throttled_lb_pair(struct task_group *tg,
4053 int src_cpu, int dest_cpu)
4054 {
4055 return 0;
4056 }
4057
4058 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4059
4060 #ifdef CONFIG_FAIR_GROUP_SCHED
4061 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4062 #endif
4063
4064 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4065 {
4066 return NULL;
4067 }
4068 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4069 static inline void update_runtime_enabled(struct rq *rq) {}
4070 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4071
4072 #endif /* CONFIG_CFS_BANDWIDTH */
4073
4074 /**************************************************
4075 * CFS operations on tasks:
4076 */
4077
4078 #ifdef CONFIG_SCHED_HRTICK
4079 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4080 {
4081 struct sched_entity *se = &p->se;
4082 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4083
4084 WARN_ON(task_rq(p) != rq);
4085
4086 if (cfs_rq->nr_running > 1) {
4087 u64 slice = sched_slice(cfs_rq, se);
4088 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4089 s64 delta = slice - ran;
4090
4091 if (delta < 0) {
4092 if (rq->curr == p)
4093 resched_curr(rq);
4094 return;
4095 }
4096 hrtick_start(rq, delta);
4097 }
4098 }
4099
4100 /*
4101 * called from enqueue/dequeue and updates the hrtick when the
4102 * current task is from our class and nr_running is low enough
4103 * to matter.
4104 */
4105 static void hrtick_update(struct rq *rq)
4106 {
4107 struct task_struct *curr = rq->curr;
4108
4109 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4110 return;
4111
4112 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4113 hrtick_start_fair(rq, curr);
4114 }
4115 #else /* !CONFIG_SCHED_HRTICK */
4116 static inline void
4117 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4118 {
4119 }
4120
4121 static inline void hrtick_update(struct rq *rq)
4122 {
4123 }
4124 #endif
4125
4126 /*
4127 * The enqueue_task method is called before nr_running is
4128 * increased. Here we update the fair scheduling stats and
4129 * then put the task into the rbtree:
4130 */
4131 static void
4132 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4133 {
4134 struct cfs_rq *cfs_rq;
4135 struct sched_entity *se = &p->se;
4136
4137 for_each_sched_entity(se) {
4138 if (se->on_rq)
4139 break;
4140 cfs_rq = cfs_rq_of(se);
4141 enqueue_entity(cfs_rq, se, flags);
4142
4143 /*
4144 * end evaluation on encountering a throttled cfs_rq
4145 *
4146 * note: in the case of encountering a throttled cfs_rq we will
4147 * post the final h_nr_running increment below.
4148 */
4149 if (cfs_rq_throttled(cfs_rq))
4150 break;
4151 cfs_rq->h_nr_running++;
4152
4153 flags = ENQUEUE_WAKEUP;
4154 }
4155
4156 for_each_sched_entity(se) {
4157 cfs_rq = cfs_rq_of(se);
4158 cfs_rq->h_nr_running++;
4159
4160 if (cfs_rq_throttled(cfs_rq))
4161 break;
4162
4163 update_cfs_shares(cfs_rq);
4164 update_entity_load_avg(se, 1);
4165 }
4166
4167 if (!se) {
4168 update_rq_runnable_avg(rq, rq->nr_running);
4169 add_nr_running(rq, 1);
4170 }
4171 hrtick_update(rq);
4172 }
4173
4174 static void set_next_buddy(struct sched_entity *se);
4175
4176 /*
4177 * The dequeue_task method is called before nr_running is
4178 * decreased. We remove the task from the rbtree and
4179 * update the fair scheduling stats:
4180 */
4181 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4182 {
4183 struct cfs_rq *cfs_rq;
4184 struct sched_entity *se = &p->se;
4185 int task_sleep = flags & DEQUEUE_SLEEP;
4186
4187 for_each_sched_entity(se) {
4188 cfs_rq = cfs_rq_of(se);
4189 dequeue_entity(cfs_rq, se, flags);
4190
4191 /*
4192 * end evaluation on encountering a throttled cfs_rq
4193 *
4194 * note: in the case of encountering a throttled cfs_rq we will
4195 * post the final h_nr_running decrement below.
4196 */
4197 if (cfs_rq_throttled(cfs_rq))
4198 break;
4199 cfs_rq->h_nr_running--;
4200
4201 /* Don't dequeue parent if it has other entities besides us */
4202 if (cfs_rq->load.weight) {
4203 /*
4204 * Bias pick_next to pick a task from this cfs_rq, as
4205 * p is sleeping when it is within its sched_slice.
4206 */
4207 if (task_sleep && parent_entity(se))
4208 set_next_buddy(parent_entity(se));
4209
4210 /* avoid re-evaluating load for this entity */
4211 se = parent_entity(se);
4212 break;
4213 }
4214 flags |= DEQUEUE_SLEEP;
4215 }
4216
4217 for_each_sched_entity(se) {
4218 cfs_rq = cfs_rq_of(se);
4219 cfs_rq->h_nr_running--;
4220
4221 if (cfs_rq_throttled(cfs_rq))
4222 break;
4223
4224 update_cfs_shares(cfs_rq);
4225 update_entity_load_avg(se, 1);
4226 }
4227
4228 if (!se) {
4229 sub_nr_running(rq, 1);
4230 update_rq_runnable_avg(rq, 1);
4231 }
4232 hrtick_update(rq);
4233 }
4234
4235 #ifdef CONFIG_SMP
4236 /* Used instead of source_load when we know the type == 0 */
4237 static unsigned long weighted_cpuload(const int cpu)
4238 {
4239 return cpu_rq(cpu)->cfs.runnable_load_avg;
4240 }
4241
4242 /*
4243 * Return a low guess at the load of a migration-source cpu weighted
4244 * according to the scheduling class and "nice" value.
4245 *
4246 * We want to under-estimate the load of migration sources, to
4247 * balance conservatively.
4248 */
4249 static unsigned long source_load(int cpu, int type)
4250 {
4251 struct rq *rq = cpu_rq(cpu);
4252 unsigned long total = weighted_cpuload(cpu);
4253
4254 if (type == 0 || !sched_feat(LB_BIAS))
4255 return total;
4256
4257 return min(rq->cpu_load[type-1], total);
4258 }
4259
4260 /*
4261 * Return a high guess at the load of a migration-target cpu weighted
4262 * according to the scheduling class and "nice" value.
4263 */
4264 static unsigned long target_load(int cpu, int type)
4265 {
4266 struct rq *rq = cpu_rq(cpu);
4267 unsigned long total = weighted_cpuload(cpu);
4268
4269 if (type == 0 || !sched_feat(LB_BIAS))
4270 return total;
4271
4272 return max(rq->cpu_load[type-1], total);
4273 }
4274
4275 static unsigned long capacity_of(int cpu)
4276 {
4277 return cpu_rq(cpu)->cpu_capacity;
4278 }
4279
4280 static unsigned long cpu_avg_load_per_task(int cpu)
4281 {
4282 struct rq *rq = cpu_rq(cpu);
4283 unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
4284 unsigned long load_avg = rq->cfs.runnable_load_avg;
4285
4286 if (nr_running)
4287 return load_avg / nr_running;
4288
4289 return 0;
4290 }
4291
4292 static void record_wakee(struct task_struct *p)
4293 {
4294 /*
4295 	 * Rough decay (wiping) for cost saving; don't worry about
4296 	 * the boundary, a really active task won't care about
4297 	 * the loss.
4298 */
4299 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
4300 current->wakee_flips >>= 1;
4301 current->wakee_flip_decay_ts = jiffies;
4302 }
4303
4304 if (current->last_wakee != p) {
4305 current->last_wakee = p;
4306 current->wakee_flips++;
4307 }
4308 }
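
A small userspace sketch of the wakee_flips bookkeeping above (not kernel code; struct waker, the toy wakeup trace and HZ = 250 are invented for illustration): flips are roughly halved once per second and bumped whenever the waker targets a different task than last time.

#include <stdio.h>

struct waker { unsigned int flips; unsigned long decay_ts; int last_wakee; };

static void note_wakeup(struct waker *w, unsigned long now, unsigned long hz,
			int wakee)
{
	if (now > w->decay_ts + hz) {		/* rough once-a-second decay */
		w->flips >>= 1;
		w->decay_ts = now;
	}
	if (w->last_wakee != wakee) {		/* switched to a new wakee   */
		w->last_wakee = wakee;
		w->flips++;
	}
}

int main(void)
{
	struct waker w = { 0, 0, -1 };
	int wakees[] = { 1, 2, 1, 2, 3, 3, 1 };

	for (int i = 0; i < 7; i++) {
		note_wakeup(&w, (unsigned long)i * 50, 250, wakees[i]);
		printf("jiffies=%d wakee=%d flips=%u\n", i * 50, wakees[i], w.flips);
	}
	return 0;
}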
4309
4310 static void task_waking_fair(struct task_struct *p)
4311 {
4312 struct sched_entity *se = &p->se;
4313 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4314 u64 min_vruntime;
4315
4316 #ifndef CONFIG_64BIT
4317 u64 min_vruntime_copy;
4318
4319 do {
4320 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4321 smp_rmb();
4322 min_vruntime = cfs_rq->min_vruntime;
4323 } while (min_vruntime != min_vruntime_copy);
4324 #else
4325 min_vruntime = cfs_rq->min_vruntime;
4326 #endif
4327
4328 se->vruntime -= min_vruntime;
4329 record_wakee(p);
4330 }
4331
4332 #ifdef CONFIG_FAIR_GROUP_SCHED
4333 /*
4334 * effective_load() calculates the load change as seen from the root_task_group
4335 *
4336 * Adding load to a group doesn't make a group heavier, but can cause movement
4337 * of group shares between cpus. Assuming the shares were perfectly aligned one
4338 * can calculate the shift in shares.
4339 *
4340 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4341 * on this @cpu and results in a total addition (subtraction) of @wg to the
4342 * total group weight.
4343 *
4344 * Given a runqueue weight distribution (rw_i) we can compute a shares
4345 * distribution (s_i) using:
4346 *
4347 * s_i = rw_i / \Sum rw_j (1)
4348 *
4349 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4350 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4351 * shares distribution (s_i):
4352 *
4353 * rw_i = { 2, 4, 1, 0 }
4354 * s_i = { 2/7, 4/7, 1/7, 0 }
4355 *
4356 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4357 	 * task used to run on and the CPU the waker is running on), so we need to
4358 * compute the effect of waking a task on either CPU and, in case of a sync
4359 * wakeup, compute the effect of the current task going to sleep.
4360 *
4361 * So for a change of @wl to the local @cpu with an overall group weight change
4362 	 * of @wg we can compute the new shares distribution (s'_i) using:
4363 *
4364 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4365 *
4366 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4367 * differences in waking a task to CPU 0. The additional task changes the
4368 * weight and shares distributions like:
4369 *
4370 * rw'_i = { 3, 4, 1, 0 }
4371 * s'_i = { 3/8, 4/8, 1/8, 0 }
4372 *
4373 * We can then compute the difference in effective weight by using:
4374 *
4375 * dw_i = S * (s'_i - s_i) (3)
4376 *
4377 * Where 'S' is the group weight as seen by its parent.
4378 *
4379 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4380 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4381 * 4/7) times the weight of the group.
4382 */
4383 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4384 {
4385 struct sched_entity *se = tg->se[cpu];
4386
4387 if (!tg->parent) /* the trivial, non-cgroup case */
4388 return wl;
4389
4390 for_each_sched_entity(se) {
4391 long w, W;
4392
4393 tg = se->my_q->tg;
4394
4395 /*
4396 * W = @wg + \Sum rw_j
4397 */
4398 W = wg + calc_tg_weight(tg, se->my_q);
4399
4400 /*
4401 * w = rw_i + @wl
4402 */
4403 w = se->my_q->load.weight + wl;
4404
4405 /*
4406 * wl = S * s'_i; see (2)
4407 */
4408 if (W > 0 && w < W)
4409 wl = (w * tg->shares) / W;
4410 else
4411 wl = tg->shares;
4412
4413 /*
4414 * Per the above, wl is the new se->load.weight value; since
4415 * those are clipped to [MIN_SHARES, ...) do so now. See
4416 * calc_cfs_shares().
4417 */
4418 if (wl < MIN_SHARES)
4419 wl = MIN_SHARES;
4420
4421 /*
4422 * wl = dw_i = S * (s'_i - s_i); see (3)
4423 */
4424 wl -= se->load.weight;
4425
4426 /*
4427 * Recursively apply this logic to all parent groups to compute
4428 * the final effective load change on the root group. Since
4429 * only the @tg group gets extra weight, all parent groups can
4430 * only redistribute existing shares. @wl is the shift in shares
4431 * resulting from this level per the above.
4432 */
4433 wg = 0;
4434 }
4435
4436 return wl;
4437 }
4438 #else
4439
4440 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4441 {
4442 return wl;
4443 }
4444
4445 #endif
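
A standalone numeric check of the worked example in the comment above effective_load() (the group weight S = 1024 is an arbitrary illustrative value): with rw = {2, 4, 1, 0} and one extra task woken on CPU 0, the effective load change comes out to S * 5/56 on CPU 0 and S * -4/56 on CPU 1.

#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };	/* per-cpu runqueue weights (tasks)   */
	double S = 1024;		/* group weight as seen by its parent */
	double sum = rw[0] + rw[1] + rw[2] + rw[3];
	double wl = 1;			/* one extra task woken on CPU 0      */

	double s0  = rw[0] / sum;		/* old share of CPU 0: 2/7    */
	double s0n = (rw[0] + wl) / (sum + wl);	/* new share of CPU 0: 3/8    */
	double s1  = rw[1] / sum;		/* old share of CPU 1: 4/7    */
	double s1n = rw[1] / (sum + wl);	/* new share of CPU 1: 4/8    */

	printf("dw_0 = %.1f (S *  5/56)\n", S * (s0n - s0));
	printf("dw_1 = %.1f (S * -4/56)\n", S * (s1n - s1));
	return 0;
}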
4446
4447 static int wake_wide(struct task_struct *p)
4448 {
4449 int factor = this_cpu_read(sd_llc_size);
4450
4451 /*
4453 	 * wakee_flips is the switching frequency: a high value could mean
4454 	 * many wakees or rapid switching. Using the LLC size as the factor
4455 	 * automatically adjusts how loose the check is, so a bigger node leads to more pull.
4455 */
4456 if (p->wakee_flips > factor) {
4457 /*
4459 		 * The wakee is somewhat hot and needs a certain amount of cpu
4460 		 * resources, so if the waker is far hotter, prefer to leave
4460 * it alone.
4461 */
4462 if (current->wakee_flips > (factor * p->wakee_flips))
4463 return 1;
4464 }
4465
4466 return 0;
4467 }
4468
4469 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4470 {
4471 s64 this_load, load;
4472 s64 this_eff_load, prev_eff_load;
4473 int idx, this_cpu, prev_cpu;
4474 struct task_group *tg;
4475 unsigned long weight;
4476 int balanced;
4477
4478 /*
4479 * If we wake multiple tasks be careful to not bounce
4480 * ourselves around too much.
4481 */
4482 if (wake_wide(p))
4483 return 0;
4484
4485 idx = sd->wake_idx;
4486 this_cpu = smp_processor_id();
4487 prev_cpu = task_cpu(p);
4488 load = source_load(prev_cpu, idx);
4489 this_load = target_load(this_cpu, idx);
4490
4491 /*
4492 * If sync wakeup then subtract the (maximum possible)
4493 * effect of the currently running task from the load
4494 * of the current CPU:
4495 */
4496 if (sync) {
4497 tg = task_group(current);
4498 weight = current->se.load.weight;
4499
4500 this_load += effective_load(tg, this_cpu, -weight, -weight);
4501 load += effective_load(tg, prev_cpu, 0, -weight);
4502 }
4503
4504 tg = task_group(p);
4505 weight = p->se.load.weight;
4506
4507 /*
4508 * In low-load situations, where prev_cpu is idle and this_cpu is idle
4509 * due to the sync cause above having dropped this_load to 0, we'll
4510 * always have an imbalance, but there's really nothing you can do
4511 * about that, so that's good too.
4512 *
4513 * Otherwise check if either cpus are near enough in load to allow this
4514 * task to be woken on this_cpu.
4515 */
4516 this_eff_load = 100;
4517 this_eff_load *= capacity_of(prev_cpu);
4518
4519 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4520 prev_eff_load *= capacity_of(this_cpu);
4521
4522 if (this_load > 0) {
4523 this_eff_load *= this_load +
4524 effective_load(tg, this_cpu, weight, weight);
4525
4526 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4527 }
4528
4529 balanced = this_eff_load <= prev_eff_load;
4530
4531 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4532
4533 if (!balanced)
4534 return 0;
4535
4536 schedstat_inc(sd, ttwu_move_affine);
4537 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4538
4539 return 1;
4540 }
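
A toy reading of the this_eff_load / prev_eff_load comparison above, ignoring the effective_load() group adjustments and assuming invented loads, equal capacities and imbalance_pct = 125: because prev_eff_load carries half of the domain's imbalance margin, the affine wakeup can still win even though this_cpu is somewhat more loaded than prev_cpu.

#include <stdio.h>

int main(void)
{
	long imbalance_pct = 125;		/* typical wake-affine domain */
	long this_load = 750, prev_load = 700;	/* invented effective loads   */
	long this_cap = 1024, prev_cap = 1024;	/* equal cpu capacities       */

	long this_eff = 100 * prev_cap * this_load;
	long prev_eff = (100 + (imbalance_pct - 100) / 2) * this_cap * prev_load;

	printf("affine wakeup %s (this_eff=%ld prev_eff=%ld)\n",
	       this_eff <= prev_eff ? "allowed" : "rejected",
	       this_eff, prev_eff);
	return 0;
}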
4541
4542 /*
4543 * find_idlest_group finds and returns the least busy CPU group within the
4544 * domain.
4545 */
4546 static struct sched_group *
4547 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4548 int this_cpu, int sd_flag)
4549 {
4550 struct sched_group *idlest = NULL, *group = sd->groups;
4551 unsigned long min_load = ULONG_MAX, this_load = 0;
4552 int load_idx = sd->forkexec_idx;
4553 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4554
4555 if (sd_flag & SD_BALANCE_WAKE)
4556 load_idx = sd->wake_idx;
4557
4558 do {
4559 unsigned long load, avg_load;
4560 int local_group;
4561 int i;
4562
4563 /* Skip over this group if it has no CPUs allowed */
4564 if (!cpumask_intersects(sched_group_cpus(group),
4565 tsk_cpus_allowed(p)))
4566 continue;
4567
4568 local_group = cpumask_test_cpu(this_cpu,
4569 sched_group_cpus(group));
4570
4571 /* Tally up the load of all CPUs in the group */
4572 avg_load = 0;
4573
4574 for_each_cpu(i, sched_group_cpus(group)) {
4575 /* Bias balancing toward cpus of our domain */
4576 if (local_group)
4577 load = source_load(i, load_idx);
4578 else
4579 load = target_load(i, load_idx);
4580
4581 avg_load += load;
4582 }
4583
4584 /* Adjust by relative CPU capacity of the group */
4585 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
4586
4587 if (local_group) {
4588 this_load = avg_load;
4589 } else if (avg_load < min_load) {
4590 min_load = avg_load;
4591 idlest = group;
4592 }
4593 } while (group = group->next, group != sd->groups);
4594
4595 if (!idlest || 100*this_load < imbalance*min_load)
4596 return NULL;
4597 return idlest;
4598 }
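
A numeric illustration of the selection rule above, with invented group loads and capacities: group load is normalized by capacity via SCHED_CAPACITY_SCALE, and a remote group is only picked when the local group is heavier by the imbalance_pct-derived margin (here 112%).

#include <stdio.h>

int main(void)
{
	unsigned long scale = 1024;				/* SCHED_CAPACITY_SCALE */
	unsigned long imbalance = 100 + (125 - 100) / 2;	/* 112                  */

	unsigned long this_load = 1800UL * scale / 2048;	/* local group, 2 cpus  */
	unsigned long min_load  = 700UL * scale / 1024;		/* idlest remote group  */

	printf("this_load=%lu min_load=%lu -> remote group chosen: %s\n",
	       this_load, min_load,
	       100 * this_load >= imbalance * min_load ? "yes" : "no");
	return 0;
}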
4599
4600 /*
4601 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4602 */
4603 static int
4604 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4605 {
4606 unsigned long load, min_load = ULONG_MAX;
4607 unsigned int min_exit_latency = UINT_MAX;
4608 u64 latest_idle_timestamp = 0;
4609 int least_loaded_cpu = this_cpu;
4610 int shallowest_idle_cpu = -1;
4611 int i;
4612
4613 /* Traverse only the allowed CPUs */
4614 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4615 if (idle_cpu(i)) {
4616 struct rq *rq = cpu_rq(i);
4617 struct cpuidle_state *idle = idle_get_state(rq);
4618 if (idle && idle->exit_latency < min_exit_latency) {
4619 /*
4620 * We give priority to a CPU whose idle state
4621 * has the smallest exit latency irrespective
4622 * of any idle timestamp.
4623 */
4624 min_exit_latency = idle->exit_latency;
4625 latest_idle_timestamp = rq->idle_stamp;
4626 shallowest_idle_cpu = i;
4627 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
4628 rq->idle_stamp > latest_idle_timestamp) {
4629 /*
4630 * If equal or no active idle state, then
4631 * the most recently idled CPU might have
4632 * a warmer cache.
4633 */
4634 latest_idle_timestamp = rq->idle_stamp;
4635 shallowest_idle_cpu = i;
4636 }
4637 } else {
4638 load = weighted_cpuload(i);
4639 if (load < min_load || (load == min_load && i == this_cpu)) {
4640 min_load = load;
4641 least_loaded_cpu = i;
4642 }
4643 }
4644 }
4645
4646 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
4647 }
4648
4649 /*
4650 * Try and locate an idle CPU in the sched_domain.
4651 */
4652 static int select_idle_sibling(struct task_struct *p, int target)
4653 {
4654 struct sched_domain *sd;
4655 struct sched_group *sg;
4656 int i = task_cpu(p);
4657
4658 if (idle_cpu(target))
4659 return target;
4660
4661 /*
4663 	 * If the previous cpu is cache affine and idle, don't be stupid.
4663 */
4664 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4665 return i;
4666
4667 /*
4669 	 * Otherwise, iterate the domains and find an eligible idle cpu.
4669 */
4670 sd = rcu_dereference(per_cpu(sd_llc, target));
4671 for_each_lower_domain(sd) {
4672 sg = sd->groups;
4673 do {
4674 if (!cpumask_intersects(sched_group_cpus(sg),
4675 tsk_cpus_allowed(p)))
4676 goto next;
4677
4678 for_each_cpu(i, sched_group_cpus(sg)) {
4679 if (i == target || !idle_cpu(i))
4680 goto next;
4681 }
4682
4683 target = cpumask_first_and(sched_group_cpus(sg),
4684 tsk_cpus_allowed(p));
4685 goto done;
4686 next:
4687 sg = sg->next;
4688 } while (sg != sd->groups);
4689 }
4690 done:
4691 return target;
4692 }
4693
4694 /*
4695 * select_task_rq_fair: Select target runqueue for the waking task in domains
4696 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4697 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
4698 *
4699 * Balances load by selecting the idlest cpu in the idlest group, or under
4700 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
4701 *
4702 * Returns the target cpu number.
4703 *
4704 * preempt must be disabled.
4705 */
4706 static int
4707 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4708 {
4709 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4710 int cpu = smp_processor_id();
4711 int new_cpu = cpu;
4712 int want_affine = 0;
4713 int sync = wake_flags & WF_SYNC;
4714
4715 if (p->nr_cpus_allowed == 1)
4716 return prev_cpu;
4717
4718 if (sd_flag & SD_BALANCE_WAKE)
4719 want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
4720
4721 rcu_read_lock();
4722 for_each_domain(cpu, tmp) {
4723 if (!(tmp->flags & SD_LOAD_BALANCE))
4724 continue;
4725
4726 /*
4727 * If both cpu and prev_cpu are part of this domain,
4728 * cpu is a valid SD_WAKE_AFFINE target.
4729 */
4730 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4731 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4732 affine_sd = tmp;
4733 break;
4734 }
4735
4736 if (tmp->flags & sd_flag)
4737 sd = tmp;
4738 }
4739
4740 if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4741 prev_cpu = cpu;
4742
4743 if (sd_flag & SD_BALANCE_WAKE) {
4744 new_cpu = select_idle_sibling(p, prev_cpu);
4745 goto unlock;
4746 }
4747
4748 while (sd) {
4749 struct sched_group *group;
4750 int weight;
4751
4752 if (!(sd->flags & sd_flag)) {
4753 sd = sd->child;
4754 continue;
4755 }
4756
4757 group = find_idlest_group(sd, p, cpu, sd_flag);
4758 if (!group) {
4759 sd = sd->child;
4760 continue;
4761 }
4762
4763 new_cpu = find_idlest_cpu(group, p, cpu);
4764 if (new_cpu == -1 || new_cpu == cpu) {
4765 /* Now try balancing at a lower domain level of cpu */
4766 sd = sd->child;
4767 continue;
4768 }
4769
4770 /* Now try balancing at a lower domain level of new_cpu */
4771 cpu = new_cpu;
4772 weight = sd->span_weight;
4773 sd = NULL;
4774 for_each_domain(cpu, tmp) {
4775 if (weight <= tmp->span_weight)
4776 break;
4777 if (tmp->flags & sd_flag)
4778 sd = tmp;
4779 }
4780 /* while loop will break here if sd == NULL */
4781 }
4782 unlock:
4783 rcu_read_unlock();
4784
4785 return new_cpu;
4786 }
4787
4788 /*
4789 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4790 * cfs_rq_of(p) references at time of call are still valid and identify the
4791 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4792 * other assumptions, including the state of rq->lock, should be made.
4793 */
4794 static void
4795 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4796 {
4797 struct sched_entity *se = &p->se;
4798 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4799
4800 /*
4801 * Load tracking: accumulate removed load so that it can be processed
4802 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4803 * to blocked load iff they have a positive decay-count. It can never
4804 * be negative here since on-rq tasks have decay-count == 0.
4805 */
4806 if (se->avg.decay_count) {
4807 se->avg.decay_count = -__synchronize_entity_decay(se);
4808 atomic_long_add(se->avg.load_avg_contrib,
4809 &cfs_rq->removed_load);
4810 }
4811
4812 /* We have migrated, no longer consider this task hot */
4813 se->exec_start = 0;
4814 }
4815 #endif /* CONFIG_SMP */
4816
4817 static unsigned long
4818 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4819 {
4820 unsigned long gran = sysctl_sched_wakeup_granularity;
4821
4822 /*
4823 	 * Since it's curr that is running now, convert the gran from
4824 	 * real-time to virtual-time in its units.
4825 *
4826 * By using 'se' instead of 'curr' we penalize light tasks, so
4827 * they get preempted easier. That is, if 'se' < 'curr' then
4828 * the resulting gran will be larger, therefore penalizing the
4829 	 * lighter; if OTOH 'se' > 'curr' then the resulting gran will
4830 * be smaller, again penalizing the lighter task.
4831 *
4832 * This is especially important for buddies when the leftmost
4833 * task is higher priority than the buddy.
4834 */
4835 return calc_delta_fair(gran, se);
4836 }
4837
4838 /*
4839 * Should 'se' preempt 'curr'.
4840 *
4841 * |s1
4842 * |s2
4843 * |s3
4844 * g
4845 * |<--->|c
4846 *
4847 * w(c, s1) = -1
4848 * w(c, s2) = 0
4849 * w(c, s3) = 1
4850 *
4851 */
4852 static int
4853 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4854 {
4855 s64 gran, vdiff = curr->vruntime - se->vruntime;
4856
4857 if (vdiff <= 0)
4858 return -1;
4859
4860 gran = wakeup_gran(curr, se);
4861 if (vdiff > gran)
4862 return 1;
4863
4864 return 0;
4865 }
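
The diagram above corresponds to a three-way return value. A standalone sketch with made-up vruntimes and a 1ms granularity reproduces the s1/s2/s3 cases:

#include <stdio.h>

static int preempt_decision(long long curr_vruntime, long long se_vruntime,
			    long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;	/* wakee is not ahead of curr: don't preempt   */
	if (vdiff > gran)
		return 1;	/* ahead by more than one granularity: preempt */
	return 0;		/* inside the granularity window               */
}

int main(void)
{
	long long gran = 1000000;	/* ~1ms wakeup granularity, made up */

	printf("s1=%d s2=%d s3=%d\n",
	       preempt_decision(500000, 900000, gran),		/* -1 */
	       preempt_decision(900000, 500000, gran),		/*  0 */
	       preempt_decision(2500000, 500000, gran));	/*  1 */
	return 0;
}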
4866
4867 static void set_last_buddy(struct sched_entity *se)
4868 {
4869 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4870 return;
4871
4872 for_each_sched_entity(se)
4873 cfs_rq_of(se)->last = se;
4874 }
4875
4876 static void set_next_buddy(struct sched_entity *se)
4877 {
4878 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4879 return;
4880
4881 for_each_sched_entity(se)
4882 cfs_rq_of(se)->next = se;
4883 }
4884
4885 static void set_skip_buddy(struct sched_entity *se)
4886 {
4887 for_each_sched_entity(se)
4888 cfs_rq_of(se)->skip = se;
4889 }
4890
4891 /*
4892 * Preempt the current task with a newly woken task if needed:
4893 */
4894 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
4895 {
4896 struct task_struct *curr = rq->curr;
4897 struct sched_entity *se = &curr->se, *pse = &p->se;
4898 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4899 int scale = cfs_rq->nr_running >= sched_nr_latency;
4900 int next_buddy_marked = 0;
4901
4902 if (unlikely(se == pse))
4903 return;
4904
4905 /*
4906 * This is possible from callers such as attach_tasks(), in which we
4907 	 * unconditionally check_preempt_curr() after an enqueue (which may have
4908 	 * led to a throttle). This both saves work and prevents false
4909 * next-buddy nomination below.
4910 */
4911 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4912 return;
4913
4914 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
4915 set_next_buddy(pse);
4916 next_buddy_marked = 1;
4917 }
4918
4919 /*
4920 	 * We can come here with TIF_NEED_RESCHED already set from the new
4921 	 * task wakeup path.
4922 *
4923 * Note: this also catches the edge-case of curr being in a throttled
4924 * group (e.g. via set_curr_task), since update_curr() (in the
4925 * enqueue of curr) will have resulted in resched being set. This
4926 * prevents us from potentially nominating it as a false LAST_BUDDY
4927 * below.
4928 */
4929 if (test_tsk_need_resched(curr))
4930 return;
4931
4932 /* Idle tasks are by definition preempted by non-idle tasks. */
4933 if (unlikely(curr->policy == SCHED_IDLE) &&
4934 likely(p->policy != SCHED_IDLE))
4935 goto preempt;
4936
4937 /*
4938 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4939 * is driven by the tick):
4940 */
4941 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
4942 return;
4943
4944 find_matching_se(&se, &pse);
4945 update_curr(cfs_rq_of(se));
4946 BUG_ON(!pse);
4947 if (wakeup_preempt_entity(se, pse) == 1) {
4948 /*
4949 * Bias pick_next to pick the sched entity that is
4950 * triggering this preemption.
4951 */
4952 if (!next_buddy_marked)
4953 set_next_buddy(pse);
4954 goto preempt;
4955 }
4956
4957 return;
4958
4959 preempt:
4960 resched_curr(rq);
4961 /*
4962 * Only set the backward buddy when the current task is still
4963 * on the rq. This can happen when a wakeup gets interleaved
4964 * with schedule on the ->pre_schedule() or idle_balance()
4965 	 * point, either of which can drop the rq lock.
4966 *
4967 * Also, during early boot the idle thread is in the fair class,
4968 	 * for obvious reasons it's a bad idea to schedule back to it.
4969 */
4970 if (unlikely(!se->on_rq || curr == rq->idle))
4971 return;
4972
4973 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4974 set_last_buddy(se);
4975 }
4976
4977 static struct task_struct *
4978 pick_next_task_fair(struct rq *rq, struct task_struct *prev)
4979 {
4980 struct cfs_rq *cfs_rq = &rq->cfs;
4981 struct sched_entity *se;
4982 struct task_struct *p;
4983 int new_tasks;
4984
4985 again:
4986 #ifdef CONFIG_FAIR_GROUP_SCHED
4987 if (!cfs_rq->nr_running)
4988 goto idle;
4989
4990 if (prev->sched_class != &fair_sched_class)
4991 goto simple;
4992
4993 /*
4994 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
4995 * likely that a next task is from the same cgroup as the current.
4996 *
4997 * Therefore attempt to avoid putting and setting the entire cgroup
4998 * hierarchy, only change the part that actually changes.
4999 */
5000
5001 do {
5002 struct sched_entity *curr = cfs_rq->curr;
5003
5004 /*
5005 * Since we got here without doing put_prev_entity() we also
5006 * have to consider cfs_rq->curr. If it is still a runnable
5007 * entity, update_curr() will update its vruntime, otherwise
5008 * forget we've ever seen it.
5009 */
5010 if (curr && curr->on_rq)
5011 update_curr(cfs_rq);
5012 else
5013 curr = NULL;
5014
5015 /*
5016 * This call to check_cfs_rq_runtime() will do the throttle and
5017 * dequeue its entity in the parent(s). Therefore the 'simple'
5018 * nr_running test will indeed be correct.
5019 */
5020 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5021 goto simple;
5022
5023 se = pick_next_entity(cfs_rq, curr);
5024 cfs_rq = group_cfs_rq(se);
5025 } while (cfs_rq);
5026
5027 p = task_of(se);
5028
5029 /*
5030 	 * Since we haven't yet done put_prev_entity(), if the selected task
5031 	 * is a different task than we started out with, try to touch the
5032 	 * least number of cfs_rqs.
5033 */
5034 if (prev != p) {
5035 struct sched_entity *pse = &prev->se;
5036
5037 while (!(cfs_rq = is_same_group(se, pse))) {
5038 int se_depth = se->depth;
5039 int pse_depth = pse->depth;
5040
5041 if (se_depth <= pse_depth) {
5042 put_prev_entity(cfs_rq_of(pse), pse);
5043 pse = parent_entity(pse);
5044 }
5045 if (se_depth >= pse_depth) {
5046 set_next_entity(cfs_rq_of(se), se);
5047 se = parent_entity(se);
5048 }
5049 }
5050
5051 put_prev_entity(cfs_rq, pse);
5052 set_next_entity(cfs_rq, se);
5053 }
5054
5055 if (hrtick_enabled(rq))
5056 hrtick_start_fair(rq, p);
5057
5058 return p;
5059 simple:
5060 cfs_rq = &rq->cfs;
5061 #endif
5062
5063 if (!cfs_rq->nr_running)
5064 goto idle;
5065
5066 put_prev_task(rq, prev);
5067
5068 do {
5069 se = pick_next_entity(cfs_rq, NULL);
5070 set_next_entity(cfs_rq, se);
5071 cfs_rq = group_cfs_rq(se);
5072 } while (cfs_rq);
5073
5074 p = task_of(se);
5075
5076 if (hrtick_enabled(rq))
5077 hrtick_start_fair(rq, p);
5078
5079 return p;
5080
5081 idle:
5082 new_tasks = idle_balance(rq);
5083 /*
5084 * Because idle_balance() releases (and re-acquires) rq->lock, it is
5085 * possible for any higher priority task to appear. In that case we
5086 * must re-start the pick_next_entity() loop.
5087 */
5088 if (new_tasks < 0)
5089 return RETRY_TASK;
5090
5091 if (new_tasks > 0)
5092 goto again;
5093
5094 return NULL;
5095 }
5096
5097 /*
5098 * Account for a descheduled task:
5099 */
5100 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
5101 {
5102 struct sched_entity *se = &prev->se;
5103 struct cfs_rq *cfs_rq;
5104
5105 for_each_sched_entity(se) {
5106 cfs_rq = cfs_rq_of(se);
5107 put_prev_entity(cfs_rq, se);
5108 }
5109 }
5110
5111 /*
5112 * sched_yield() is very simple
5113 *
5114 * The magic of dealing with the ->skip buddy is in pick_next_entity.
5115 */
5116 static void yield_task_fair(struct rq *rq)
5117 {
5118 struct task_struct *curr = rq->curr;
5119 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5120 struct sched_entity *se = &curr->se;
5121
5122 /*
5123 * Are we the only task in the tree?
5124 */
5125 if (unlikely(rq->nr_running == 1))
5126 return;
5127
5128 clear_buddies(cfs_rq, se);
5129
5130 if (curr->policy != SCHED_BATCH) {
5131 update_rq_clock(rq);
5132 /*
5133 * Update run-time statistics of the 'current'.
5134 */
5135 update_curr(cfs_rq);
5136 /*
5137 * Tell update_rq_clock() that we've just updated,
5138 * so we don't do microscopic update in schedule()
5139 * and double the fastpath cost.
5140 */
5141 rq->skip_clock_update = 1;
5142 }
5143
5144 set_skip_buddy(se);
5145 }
5146
5147 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5148 {
5149 struct sched_entity *se = &p->se;
5150
5151 /* throttled hierarchies are not runnable */
5152 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
5153 return false;
5154
5155 /* Tell the scheduler that we'd really like pse to run next. */
5156 set_next_buddy(se);
5157
5158 yield_task_fair(rq);
5159
5160 return true;
5161 }
5162
5163 #ifdef CONFIG_SMP
5164 /**************************************************
5165 * Fair scheduling class load-balancing methods.
5166 *
5167 * BASICS
5168 *
5169 * The purpose of load-balancing is to achieve the same basic fairness the
5170 * per-cpu scheduler provides, namely provide a proportional amount of compute
5171 * time to each task. This is expressed in the following equation:
5172 *
5173 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
5174 *
5175 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
5176 * W_i,0 is defined as:
5177 *
5178 * W_i,0 = \Sum_j w_i,j (2)
5179 *
5180 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
5181 * is derived from the nice value as per prio_to_weight[].
5182 *
5183 * The weight average is an exponential decay average of the instantaneous
5184 * weight:
5185 *
5186 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
5187 *
5188 * C_i is the compute capacity of cpu i, typically it is the
5189 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
5190 * can also include other factors [XXX].
5191 *
5192 * To achieve this balance we define a measure of imbalance which follows
5193 * directly from (1):
5194 *
5195 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
5196 *
5197 	 * We then move tasks around to minimize the imbalance. In the continuous
5198 * function space it is obvious this converges, in the discrete case we get
5199 * a few fun cases generally called infeasible weight scenarios.
5200 *
5201 * [XXX expand on:
5202 * - infeasible weights;
5203 * - local vs global optima in the discrete case. ]
5204 *
5205 *
5206 * SCHED DOMAINS
5207 *
5208 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5209 * for all i,j solution, we create a tree of cpus that follows the hardware
5210 * topology where each level pairs two lower groups (or better). This results
5211 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5212 * tree to only the first of the previous level and we decrease the frequency
5213 * of load-balance at each level inv. proportional to the number of cpus in
5214 * the groups.
5215 *
5216 * This yields:
5217 *
5218 * log_2 n 1 n
5219 * \Sum { --- * --- * 2^i } = O(n) (5)
5220 * i = 0 2^i 2^i
5221 * `- size of each group
5222 * | | `- number of cpus doing load-balance
5223 * | `- freq
5224 * `- sum over all levels
5225 *
5226 * Coupled with a limit on how many tasks we can migrate every balance pass,
5227 * this makes (5) the runtime complexity of the balancer.
5228 *
5229 * An important property here is that each CPU is still (indirectly) connected
5230 * to every other cpu in at most O(log n) steps:
5231 *
5232 * The adjacency matrix of the resulting graph is given by:
5233 *
5234 * log_2 n
5235 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5236 * k = 0
5237 *
5238 * And you'll find that:
5239 *
5240 * A^(log_2 n)_i,j != 0 for all i,j (7)
5241 *
5242 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5243 * The task movement gives a factor of O(m), giving a convergence complexity
5244 * of:
5245 *
5246 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5247 *
5248 *
5249 * WORK CONSERVING
5250 *
5251 * In order to avoid CPUs going idle while there's still work to do, new idle
5252 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5253 * tree itself instead of relying on other CPUs to bring it work.
5254 *
5255 * This adds some complexity to both (5) and (8) but it reduces the total idle
5256 * time.
5257 *
5258 * [XXX more?]
5259 *
5260 *
5261 * CGROUPS
5262 *
5263 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5264 *
5265 * s_k,i
5266 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5267 * S_k
5268 *
5269 * Where
5270 *
5271 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5272 *
5273 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5274 *
5275 	 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
5276 * property.
5277 *
5278 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5279 * rewrite all of this once again.]
5280 */
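
A tiny numeric check of the imbalance definition (4) above, assuming invented weight averages and capacities for two cpus, with avg(W/C) taken over both: cpu 0 sits 0.5 above the average and cpu 1 sits 0.5 below it, so imb_0,1 = 1.0.

#include <stdio.h>

int main(void)
{
	double W[2] = { 2048, 1024 };	/* weight averages W_i (invented) */
	double C[2] = { 1024, 1024 };	/* compute capacities C_i         */
	double avg = (W[0] / C[0] + W[1] / C[1]) / 2.0;

	double hi = W[0] / C[0] > avg ? W[0] / C[0] : avg;
	double lo = W[1] / C[1] < avg ? W[1] / C[1] : avg;

	printf("avg(W/C)=%.2f imb_0,1=%.2f\n", avg, hi - lo);
	return 0;
}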
5281
5282 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5283
5284 enum fbq_type { regular, remote, all };
5285
5286 #define LBF_ALL_PINNED 0x01
5287 #define LBF_NEED_BREAK 0x02
5288 #define LBF_DST_PINNED 0x04
5289 #define LBF_SOME_PINNED 0x08
5290
5291 struct lb_env {
5292 struct sched_domain *sd;
5293
5294 struct rq *src_rq;
5295 int src_cpu;
5296
5297 int dst_cpu;
5298 struct rq *dst_rq;
5299
5300 struct cpumask *dst_grpmask;
5301 int new_dst_cpu;
5302 enum cpu_idle_type idle;
5303 long imbalance;
5304 /* The set of CPUs under consideration for load-balancing */
5305 struct cpumask *cpus;
5306
5307 unsigned int flags;
5308
5309 unsigned int loop;
5310 unsigned int loop_break;
5311 unsigned int loop_max;
5312
5313 enum fbq_type fbq_type;
5314 struct list_head tasks;
5315 };
5316
5317 /*
5318 * Is this task likely cache-hot:
5319 */
5320 static int task_hot(struct task_struct *p, struct lb_env *env)
5321 {
5322 s64 delta;
5323
5324 lockdep_assert_held(&env->src_rq->lock);
5325
5326 if (p->sched_class != &fair_sched_class)
5327 return 0;
5328
5329 if (unlikely(p->policy == SCHED_IDLE))
5330 return 0;
5331
5332 /*
5333 * Buddy candidates are cache hot:
5334 */
5335 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
5336 (&p->se == cfs_rq_of(&p->se)->next ||
5337 &p->se == cfs_rq_of(&p->se)->last))
5338 return 1;
5339
5340 if (sysctl_sched_migration_cost == -1)
5341 return 1;
5342 if (sysctl_sched_migration_cost == 0)
5343 return 0;
5344
5345 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
5346
5347 return delta < (s64)sysctl_sched_migration_cost;
5348 }
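
A standalone restatement of the final test above, with invented timestamps: under the default sysctl_sched_migration_cost of 0.5ms, a task that last ran within half a millisecond of the source runqueue clock is treated as cache hot.

#include <stdio.h>

int main(void)
{
	long long migration_cost = 500000;	/* 0.5ms, in ns           */
	long long rq_clock = 10000000;		/* invented rq task clock */
	long long exec_start = 9800000;		/* task last ran here     */
	long long delta = rq_clock - exec_start;

	printf("delta=%lldns -> %s\n", delta,
	       delta < migration_cost ? "cache hot" : "cache cold");
	return 0;
}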
5349
5350 #ifdef CONFIG_NUMA_BALANCING
5351 /* Returns true if the destination node has incurred more faults */
5352 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5353 {
5354 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5355 int src_nid, dst_nid;
5356
5357 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
5358 !(env->sd->flags & SD_NUMA)) {
5359 return false;
5360 }
5361
5362 src_nid = cpu_to_node(env->src_cpu);
5363 dst_nid = cpu_to_node(env->dst_cpu);
5364
5365 if (src_nid == dst_nid)
5366 return false;
5367
5368 if (numa_group) {
5369 /* Task is already in the group's interleave set. */
5370 if (node_isset(src_nid, numa_group->active_nodes))
5371 return false;
5372
5373 /* Task is moving into the group's interleave set. */
5374 if (node_isset(dst_nid, numa_group->active_nodes))
5375 return true;
5376
5377 return group_faults(p, dst_nid) > group_faults(p, src_nid);
5378 }
5379
5380 /* Encourage migration to the preferred node. */
5381 if (dst_nid == p->numa_preferred_nid)
5382 return true;
5383
5384 return task_faults(p, dst_nid) > task_faults(p, src_nid);
5385 }
5386
5387
5388 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5389 {
5390 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5391 int src_nid, dst_nid;
5392
5393 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5394 return false;
5395
5396 if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
5397 return false;
5398
5399 src_nid = cpu_to_node(env->src_cpu);
5400 dst_nid = cpu_to_node(env->dst_cpu);
5401
5402 if (src_nid == dst_nid)
5403 return false;
5404
5405 if (numa_group) {
5406 /* Task is moving within/into the group's interleave set. */
5407 if (node_isset(dst_nid, numa_group->active_nodes))
5408 return false;
5409
5410 /* Task is moving out of the group's interleave set. */
5411 if (node_isset(src_nid, numa_group->active_nodes))
5412 return true;
5413
5414 return group_faults(p, dst_nid) < group_faults(p, src_nid);
5415 }
5416
5417 /* Migrating away from the preferred node is always bad. */
5418 if (src_nid == p->numa_preferred_nid)
5419 return true;
5420
5421 return task_faults(p, dst_nid) < task_faults(p, src_nid);
5422 }
5423
5424 #else
5425 static inline bool migrate_improves_locality(struct task_struct *p,
5426 struct lb_env *env)
5427 {
5428 return false;
5429 }
5430
5431 static inline bool migrate_degrades_locality(struct task_struct *p,
5432 struct lb_env *env)
5433 {
5434 return false;
5435 }
5436 #endif
5437
5438 /*
5439 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5440 */
5441 static
5442 int can_migrate_task(struct task_struct *p, struct lb_env *env)
5443 {
5444 int tsk_cache_hot = 0;
5445
5446 lockdep_assert_held(&env->src_rq->lock);
5447
5448 /*
5449 * We do not migrate tasks that are:
5450 * 1) throttled_lb_pair, or
5451 * 2) cannot be migrated to this CPU due to cpus_allowed, or
5452 * 3) running (obviously), or
5453 * 4) are cache-hot on their current CPU.
5454 */
5455 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5456 return 0;
5457
5458 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5459 int cpu;
5460
5461 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5462
5463 env->flags |= LBF_SOME_PINNED;
5464
5465 /*
5466 * Remember if this task can be migrated to any other cpu in
5467 * our sched_group. We may want to revisit it if we couldn't
5468 * meet load balance goals by pulling other tasks on src_cpu.
5469 *
5470 * Also avoid computing new_dst_cpu if we have already computed
5471 * one in current iteration.
5472 */
5473 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5474 return 0;
5475
5476 		/* Prevent re-selecting dst_cpu via env's cpus */
5477 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5478 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5479 env->flags |= LBF_DST_PINNED;
5480 env->new_dst_cpu = cpu;
5481 break;
5482 }
5483 }
5484
5485 return 0;
5486 }
5487
5488 	/* Record that we found at least one task that could run on dst_cpu */
5489 env->flags &= ~LBF_ALL_PINNED;
5490
5491 if (task_running(env->src_rq, p)) {
5492 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5493 return 0;
5494 }
5495
5496 /*
5497 * Aggressive migration if:
5498 	 * 1) the destination numa node is preferred, or
5499 * 2) task is cache cold, or
5500 * 3) too many balance attempts have failed.
5501 */
5502 tsk_cache_hot = task_hot(p, env);
5503 if (!tsk_cache_hot)
5504 tsk_cache_hot = migrate_degrades_locality(p, env);
5505
5506 if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
5507 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5508 if (tsk_cache_hot) {
5509 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5510 schedstat_inc(p, se.statistics.nr_forced_migrations);
5511 }
5512 return 1;
5513 }
5514
5515 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5516 return 0;
5517 }
5518
5519 /*
5520 * detach_task() -- detach the task for the migration specified in env
5521 */
5522 static void detach_task(struct task_struct *p, struct lb_env *env)
5523 {
5524 lockdep_assert_held(&env->src_rq->lock);
5525
5526 deactivate_task(env->src_rq, p, 0);
5527 p->on_rq = TASK_ON_RQ_MIGRATING;
5528 set_task_cpu(p, env->dst_cpu);
5529 }
5530
5531 /*
5532 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
5533 * part of active balancing operations within "domain".
5534 *
5535 * Returns a task if successful and NULL otherwise.
5536 */
5537 static struct task_struct *detach_one_task(struct lb_env *env)
5538 {
5539 struct task_struct *p, *n;
5540
5541 lockdep_assert_held(&env->src_rq->lock);
5542
5543 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5544 if (!can_migrate_task(p, env))
5545 continue;
5546
5547 detach_task(p, env);
5548
5549 /*
5550 * Right now, this is only the second place where
5551 * lb_gained[env->idle] is updated (other is detach_tasks)
5552 * so we can safely collect stats here rather than
5553 * inside detach_tasks().
5554 */
5555 schedstat_inc(env->sd, lb_gained[env->idle]);
5556 return p;
5557 }
5558 return NULL;
5559 }
5560
5561 static const unsigned int sched_nr_migrate_break = 32;
5562
5563 /*
5564 * detach_tasks() -- tries to detach up to imbalance weighted load from
5565 * busiest_rq, as part of a balancing operation within domain "sd".
5566 *
5567 * Returns number of detached tasks if successful and 0 otherwise.
5568 */
5569 static int detach_tasks(struct lb_env *env)
5570 {
5571 struct list_head *tasks = &env->src_rq->cfs_tasks;
5572 struct task_struct *p;
5573 unsigned long load;
5574 int detached = 0;
5575
5576 lockdep_assert_held(&env->src_rq->lock);
5577
5578 if (env->imbalance <= 0)
5579 return 0;
5580
5581 while (!list_empty(tasks)) {
5582 p = list_first_entry(tasks, struct task_struct, se.group_node);
5583
5584 env->loop++;
5585 /* We've more or less seen every task there is, call it quits */
5586 if (env->loop > env->loop_max)
5587 break;
5588
5589 /* take a breather every nr_migrate tasks */
5590 if (env->loop > env->loop_break) {
5591 env->loop_break += sched_nr_migrate_break;
5592 env->flags |= LBF_NEED_BREAK;
5593 break;
5594 }
5595
5596 if (!can_migrate_task(p, env))
5597 goto next;
5598
5599 load = task_h_load(p);
5600
5601 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5602 goto next;
5603
5604 if ((load / 2) > env->imbalance)
5605 goto next;
5606
5607 detach_task(p, env);
5608 list_add(&p->se.group_node, &env->tasks);
5609
5610 detached++;
5611 env->imbalance -= load;
5612
5613 #ifdef CONFIG_PREEMPT
5614 /*
5615 * NEWIDLE balancing is a source of latency, so preemptible
5616 * kernels will stop after the first task is detached to minimize
5617 * the critical section.
5618 */
5619 if (env->idle == CPU_NEWLY_IDLE)
5620 break;
5621 #endif
5622
5623 /*
5624 * We only want to steal up to the prescribed amount of
5625 * weighted load.
5626 */
5627 if (env->imbalance <= 0)
5628 break;
5629
5630 continue;
5631 next:
5632 list_move_tail(&p->se.group_node, tasks);
5633 }
5634
5635 /*
5636 * Right now, this is one of only two places we collect this stat
5637 * so we can safely collect detach_one_task() stats here rather
5638 * than inside detach_one_task().
5639 */
5640 schedstat_add(env->sd, lb_gained[env->idle], detached);
5641
5642 return detached;
5643 }
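
A userspace sketch of the greedy selection above, assuming an invented imbalance target and task loads: very small tasks are skipped (the LB_MIN-style filter), as are tasks whose halved load already exceeds what is left of the imbalance, and pulling stops once the imbalance is covered.

#include <stdio.h>

int main(void)
{
	long imbalance = 1200;			/* weighted load still to move */
	long load[5] = { 900, 100, 3000, 700, 10 };
	int detached = 0;

	for (int i = 0; i < 5 && imbalance > 0; i++) {
		if (load[i] < 16)		/* LB_MIN-style tiny-task filter */
			continue;
		if (load[i] / 2 > imbalance)	/* too big for what is left      */
			continue;
		imbalance -= load[i];
		detached++;
		printf("detach load=%ld, remaining imbalance=%ld\n",
		       load[i], imbalance);
	}
	printf("detached %d task(s)\n", detached);
	return 0;
}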
5644
5645 /*
5646 * attach_task() -- attach the task detached by detach_task() to its new rq.
5647 */
5648 static void attach_task(struct rq *rq, struct task_struct *p)
5649 {
5650 lockdep_assert_held(&rq->lock);
5651
5652 BUG_ON(task_rq(p) != rq);
5653 p->on_rq = TASK_ON_RQ_QUEUED;
5654 activate_task(rq, p, 0);
5655 check_preempt_curr(rq, p, 0);
5656 }
5657
5658 /*
5659 * attach_one_task() -- attaches the task returned from detach_one_task() to
5660 * its new rq.
5661 */
5662 static void attach_one_task(struct rq *rq, struct task_struct *p)
5663 {
5664 raw_spin_lock(&rq->lock);
5665 attach_task(rq, p);
5666 raw_spin_unlock(&rq->lock);
5667 }
5668
5669 /*
5670 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
5671 * new rq.
5672 */
5673 static void attach_tasks(struct lb_env *env)
5674 {
5675 struct list_head *tasks = &env->tasks;
5676 struct task_struct *p;
5677
5678 raw_spin_lock(&env->dst_rq->lock);
5679
5680 while (!list_empty(tasks)) {
5681 p = list_first_entry(tasks, struct task_struct, se.group_node);
5682 list_del_init(&p->se.group_node);
5683
5684 attach_task(env->dst_rq, p);
5685 }
5686
5687 raw_spin_unlock(&env->dst_rq->lock);
5688 }
5689
5690 #ifdef CONFIG_FAIR_GROUP_SCHED
5691 /*
5692 * update tg->load_weight by folding this cpu's load_avg
5693 */
5694 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5695 {
5696 struct sched_entity *se = tg->se[cpu];
5697 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5698
5699 /* throttled entities do not contribute to load */
5700 if (throttled_hierarchy(cfs_rq))
5701 return;
5702
5703 update_cfs_rq_blocked_load(cfs_rq, 1);
5704
5705 if (se) {
5706 update_entity_load_avg(se, 1);
5707 /*
5708 * We pivot on our runnable average having decayed to zero for
5709 * list removal. This generally implies that all our children
5710 * have also been removed (modulo rounding error or bandwidth
5711 * control); however, such cases are rare and we can fix these
5712 * at enqueue.
5713 *
5714 * TODO: fix up out-of-order children on enqueue.
5715 */
5716 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5717 list_del_leaf_cfs_rq(cfs_rq);
5718 } else {
5719 struct rq *rq = rq_of(cfs_rq);
5720 update_rq_runnable_avg(rq, rq->nr_running);
5721 }
5722 }
5723
5724 static void update_blocked_averages(int cpu)
5725 {
5726 struct rq *rq = cpu_rq(cpu);
5727 struct cfs_rq *cfs_rq;
5728 unsigned long flags;
5729
5730 raw_spin_lock_irqsave(&rq->lock, flags);
5731 update_rq_clock(rq);
5732 /*
5733 * Iterates the task_group tree in a bottom up fashion, see
5734 * list_add_leaf_cfs_rq() for details.
5735 */
5736 for_each_leaf_cfs_rq(rq, cfs_rq) {
5737 /*
5738 * Note: We may want to consider periodically releasing
5739 * rq->lock around these updates so that creating many task
5740 * groups does not result in continually extending hold time.
5741 */
5742 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5743 }
5744
5745 raw_spin_unlock_irqrestore(&rq->lock, flags);
5746 }
5747
5748 /*
5749 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5750 * This needs to be done in a top-down fashion because the load of a child
5751 * group is a fraction of its parent's load.
5752 */
5753 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5754 {
5755 struct rq *rq = rq_of(cfs_rq);
5756 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5757 unsigned long now = jiffies;
5758 unsigned long load;
5759
5760 if (cfs_rq->last_h_load_update == now)
5761 return;
5762
5763 cfs_rq->h_load_next = NULL;
5764 for_each_sched_entity(se) {
5765 cfs_rq = cfs_rq_of(se);
5766 cfs_rq->h_load_next = se;
5767 if (cfs_rq->last_h_load_update == now)
5768 break;
5769 }
5770
5771 if (!se) {
5772 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5773 cfs_rq->last_h_load_update = now;
5774 }
5775
5776 while ((se = cfs_rq->h_load_next) != NULL) {
5777 load = cfs_rq->h_load;
5778 load = div64_ul(load * se->avg.load_avg_contrib,
5779 cfs_rq->runnable_load_avg + 1);
5780 cfs_rq = group_cfs_rq(se);
5781 cfs_rq->h_load = load;
5782 cfs_rq->last_h_load_update = now;
5783 }
5784 }
5785
5786 static unsigned long task_h_load(struct task_struct *p)
5787 {
5788 struct cfs_rq *cfs_rq = task_cfs_rq(p);
5789
5790 update_cfs_rq_h_load(cfs_rq);
5791 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5792 cfs_rq->runnable_load_avg + 1);
5793 }
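/*
 * A small worked example of the h_load propagation above (illustrative
 * numbers only, not taken from a real trace): suppose the root cfs_rq has
 * runnable_load_avg = 2048, the group entity on it contributes
 * load_avg_contrib = 1024, and a task in that group contributes
 * load_avg_contrib = 512 out of the group's runnable_load_avg = 1024. Then:
 *
 *   root h_load  = 2048
 *   group h_load = 2048 * 1024 / (2048 + 1) ~= 1023
 *   task_h_load  = 512 * 1023 / (1024 + 1)  ~= 511
 *
 * i.e. the task's hierarchical load is its own contribution scaled by the
 * fraction of each ancestor's load that its branch represents.
 */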
5794 #else
5795 static inline void update_blocked_averages(int cpu)
5796 {
5797 }
5798
5799 static unsigned long task_h_load(struct task_struct *p)
5800 {
5801 return p->se.avg.load_avg_contrib;
5802 }
5803 #endif
5804
5805 /********** Helpers for find_busiest_group ************************/
5806
5807 enum group_type {
5808 group_other = 0,
5809 group_imbalanced,
5810 group_overloaded,
5811 };
5812
5813 /*
5814 * sg_lb_stats - stats of a sched_group required for load_balancing
5815 */
5816 struct sg_lb_stats {
5817 unsigned long avg_load; /*Avg load across the CPUs of the group */
5818 unsigned long group_load; /* Total load over the CPUs of the group */
5819 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5820 unsigned long load_per_task;
5821 unsigned long group_capacity;
5822 unsigned int sum_nr_running; /* Nr tasks running in the group */
5823 unsigned int group_capacity_factor;
5824 unsigned int idle_cpus;
5825 unsigned int group_weight;
5826 enum group_type group_type;
5827 int group_has_free_capacity;
5828 #ifdef CONFIG_NUMA_BALANCING
5829 unsigned int nr_numa_running;
5830 unsigned int nr_preferred_running;
5831 #endif
5832 };
5833
5834 /*
5835 * sd_lb_stats - Structure to store the statistics of a sched_domain
5836 * during load balancing.
5837 */
5838 struct sd_lb_stats {
5839 struct sched_group *busiest; /* Busiest group in this sd */
5840 struct sched_group *local; /* Local group in this sd */
5841 unsigned long total_load; /* Total load of all groups in sd */
5842 unsigned long total_capacity; /* Total capacity of all groups in sd */
5843 unsigned long avg_load; /* Average load across all groups in sd */
5844
5845 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
5846 struct sg_lb_stats local_stat; /* Statistics of the local group */
5847 };
5848
5849 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5850 {
5851 /*
5852 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5853 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5854 * We must however clear busiest_stat::avg_load because
5855 * update_sd_pick_busiest() reads this before assignment.
5856 */
5857 *sds = (struct sd_lb_stats){
5858 .busiest = NULL,
5859 .local = NULL,
5860 .total_load = 0UL,
5861 .total_capacity = 0UL,
5862 .busiest_stat = {
5863 .avg_load = 0UL,
5864 .sum_nr_running = 0,
5865 .group_type = group_other,
5866 },
5867 };
5868 }
5869
5870 /**
5871 * get_sd_load_idx - Obtain the load index for a given sched domain.
5872 * @sd: The sched_domain whose load_idx is to be obtained.
5873 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
5874 *
5875 * Return: The load index.
5876 */
5877 static inline int get_sd_load_idx(struct sched_domain *sd,
5878 enum cpu_idle_type idle)
5879 {
5880 int load_idx;
5881
5882 switch (idle) {
5883 case CPU_NOT_IDLE:
5884 load_idx = sd->busy_idx;
5885 break;
5886
5887 case CPU_NEWLY_IDLE:
5888 load_idx = sd->newidle_idx;
5889 break;
5890 default:
5891 load_idx = sd->idle_idx;
5892 break;
5893 }
5894
5895 return load_idx;
5896 }
5897
5898 static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
5899 {
5900 return SCHED_CAPACITY_SCALE;
5901 }
5902
5903 unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
5904 {
5905 return default_scale_capacity(sd, cpu);
5906 }
5907
5908 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
5909 {
5910 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
5911 return sd->smt_gain / sd->span_weight;
5912
5913 return SCHED_CAPACITY_SCALE;
5914 }
5915
5916 unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
5917 {
5918 return default_scale_cpu_capacity(sd, cpu);
5919 }
5920
5921 static unsigned long scale_rt_capacity(int cpu)
5922 {
5923 struct rq *rq = cpu_rq(cpu);
5924 u64 total, available, age_stamp, avg;
5925 s64 delta;
5926
5927 /*
5928 * Since we're reading these variables without serialization, make sure
5929 * we read them once before doing sanity checks on them.
5930 */
5931 age_stamp = ACCESS_ONCE(rq->age_stamp);
5932 avg = ACCESS_ONCE(rq->rt_avg);
5933
5934 delta = rq_clock(rq) - age_stamp;
5935 if (unlikely(delta < 0))
5936 delta = 0;
5937
5938 total = sched_avg_period() + delta;
5939
5940 if (unlikely(total < avg)) {
5941 /* Ensures that capacity won't end up being negative */
5942 available = 0;
5943 } else {
5944 available = total - avg;
5945 }
5946
5947 if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
5948 total = SCHED_CAPACITY_SCALE;
5949
5950 total >>= SCHED_CAPACITY_SHIFT;
5951
5952 return div_u64(available, total);
5953 }
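/*
 * Rough numbers for the above (a sketch, ignoring the wrap/clamp cases):
 * since the function returns available / (total >> SCHED_CAPACITY_SHIFT),
 * it is effectively SCHED_CAPACITY_SCALE * available / total. So if the
 * time tracked in rq->rt_avg accounted for a quarter of the averaging
 * period, available/total = 3/4 and the result is about 3 * 1024 / 4 = 768,
 * i.e. roughly 75% of the nominal capacity is left for CFS.
 */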
5954
5955 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
5956 {
5957 unsigned long capacity = SCHED_CAPACITY_SCALE;
5958 struct sched_group *sdg = sd->groups;
5959
5960 if (sched_feat(ARCH_CAPACITY))
5961 capacity *= arch_scale_cpu_capacity(sd, cpu);
5962 else
5963 capacity *= default_scale_cpu_capacity(sd, cpu);
5964
5965 capacity >>= SCHED_CAPACITY_SHIFT;
5966
5967 sdg->sgc->capacity_orig = capacity;
5968
5969 if (sched_feat(ARCH_CAPACITY))
5970 capacity *= arch_scale_freq_capacity(sd, cpu);
5971 else
5972 capacity *= default_scale_capacity(sd, cpu);
5973
5974 capacity >>= SCHED_CAPACITY_SHIFT;
5975
5976 capacity *= scale_rt_capacity(cpu);
5977 capacity >>= SCHED_CAPACITY_SHIFT;
5978
5979 if (!capacity)
5980 capacity = 1;
5981
5982 cpu_rq(cpu)->cpu_capacity = capacity;
5983 sdg->sgc->capacity = capacity;
5984 }
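/*
 * Putting the three scale factors above together with made-up but plausible
 * numbers: an SMT sibling may report arch_scale_cpu_capacity() ~= 589
 * (e.g. a smt_gain of 1178 shared by two threads), the frequency scale
 * factor defaults to SCHED_CAPACITY_SCALE (no change), and
 * scale_rt_capacity() might return 768 as in the sketch above. The
 * resulting capacity is then roughly 1024 * 589/1024 * 768/1024 ~= 441
 * out of 1024.
 */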
5985
5986 void update_group_capacity(struct sched_domain *sd, int cpu)
5987 {
5988 struct sched_domain *child = sd->child;
5989 struct sched_group *group, *sdg = sd->groups;
5990 unsigned long capacity, capacity_orig;
5991 unsigned long interval;
5992
5993 interval = msecs_to_jiffies(sd->balance_interval);
5994 interval = clamp(interval, 1UL, max_load_balance_interval);
5995 sdg->sgc->next_update = jiffies + interval;
5996
5997 if (!child) {
5998 update_cpu_capacity(sd, cpu);
5999 return;
6000 }
6001
6002 capacity_orig = capacity = 0;
6003
6004 if (child->flags & SD_OVERLAP) {
6005 /*
6006 * SD_OVERLAP domains cannot assume that child groups
6007 * span the current group.
6008 */
6009
6010 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6011 struct sched_group_capacity *sgc;
6012 struct rq *rq = cpu_rq(cpu);
6013
6014 /*
6015 * build_sched_domains() -> init_sched_groups_capacity()
6016 * gets here before we've attached the domains to the
6017 * runqueues.
6018 *
6019 * Use capacity_of(), which is set irrespective of domains
6020 * in update_cpu_capacity().
6021 *
6022 * This prevents capacity/capacity_orig from being 0 and
6023 * causing divide-by-zero issues on boot.
6024 *
6025 * Runtime updates will correct capacity_orig.
6026 */
6027 if (unlikely(!rq->sd)) {
6028 capacity_orig += capacity_of(cpu);
6029 capacity += capacity_of(cpu);
6030 continue;
6031 }
6032
6033 sgc = rq->sd->groups->sgc;
6034 capacity_orig += sgc->capacity_orig;
6035 capacity += sgc->capacity;
6036 }
6037 } else {
6038 /*
6039 * !SD_OVERLAP domains can assume that child groups
6040 * span the current group.
6041 */
6042
6043 group = child->groups;
6044 do {
6045 capacity_orig += group->sgc->capacity_orig;
6046 capacity += group->sgc->capacity;
6047 group = group->next;
6048 } while (group != child->groups);
6049 }
6050
6051 sdg->sgc->capacity_orig = capacity_orig;
6052 sdg->sgc->capacity = capacity;
6053 }
6054
6055 /*
6056 * Try and fix up capacity for tiny siblings, this is needed when
6057 * things like SD_ASYM_PACKING need f_b_g to select another sibling
6058 * which on its own isn't powerful enough.
6059 *
6060 * See update_sd_pick_busiest() and check_asym_packing().
6061 */
6062 static inline int
6063 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
6064 {
6065 /*
6066 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
6067 */
6068 if (!(sd->flags & SD_SHARE_CPUCAPACITY))
6069 return 0;
6070
6071 /*
6072 * If ~90% of the cpu_capacity is still there, we're good.
6073 */
6074 if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
6075 return 1;
6076
6077 return 0;
6078 }
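/*
 * The "* 32 > ... * 29" test above works out to capacity > ~90.6% of
 * capacity_orig. For example (illustrative numbers), with capacity_orig
 * = 589 for an SMT sibling, the group still counts as usable as long as
 * its current capacity exceeds 589 * 29 / 32 ~= 534.
 */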
6079
6080 /*
6081 * Group imbalance indicates (and tries to solve) the problem where balancing
6082 * groups is inadequate due to tsk_cpus_allowed() constraints.
6083 *
6084 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6085 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6086 * Something like:
6087 *
6088 * { 0 1 2 3 } { 4 5 6 7 }
6089 * * * * *
6090 *
6091 * If we were to balance group-wise we'd place two tasks in the first group and
6092 * two tasks in the second group. Clearly this is undesired as it will overload
6093 * cpu 3 and leave one of the cpus in the second group unused.
6094 *
6095 * The current solution to this issue is detecting the skew in the first group
6096 * by noticing the lower domain failed to reach balance and had difficulty
6097 * moving tasks due to affinity constraints.
6098 *
6099 * When this is detected, the group becomes a candidate for busiest; see
6100 * update_sd_pick_busiest(). And calculate_imbalance() and
6101 * find_busiest_group() avoid some of the usual balance conditions to allow it
6102 * to create an effective group imbalance.
6103 *
6104 * This is a somewhat tricky proposition since the next run might not find the
6105 * group imbalance and decide the groups need to be balanced again. A most
6106 * subtle and fragile situation.
6107 */
6108
6109 static inline int sg_imbalanced(struct sched_group *group)
6110 {
6111 return group->sgc->imbalance;
6112 }
6113
6114 /*
6115 * Compute the group capacity factor.
6116 *
6117 * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
6118 * first dividing out the smt factor and computing the actual number of cores
6119 * and limit unit capacity with that.
6120 */
6121 static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
6122 {
6123 unsigned int capacity_factor, smt, cpus;
6124 unsigned int capacity, capacity_orig;
6125
6126 capacity = group->sgc->capacity;
6127 capacity_orig = group->sgc->capacity_orig;
6128 cpus = group->group_weight;
6129
6130 /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
6131 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
6132 capacity_factor = cpus / smt; /* cores */
6133
6134 capacity_factor = min_t(unsigned,
6135 capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
6136 if (!capacity_factor)
6137 capacity_factor = fix_small_capacity(env->sd, group);
6138
6139 return capacity_factor;
6140 }
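/*
 * Example of the phantom-core problem this avoids (hypothetical numbers,
 * assuming SCHED_CAPACITY_SCALE = 1024 and ~589 capacity_orig per SMT
 * sibling): a group of 8 threads / 4 two-way SMT cores has
 * capacity_orig ~= 8 * 589 = 4712. Naively rounding 4712 / 1024 would give
 * 5 "cores". Instead, smt = DIV_ROUND_UP(1024 * 8, 4712) = 2 and
 * capacity_factor = 8 / 2 = 4, matching the real core count; the min()
 * against capacity/1024 then only lowers this further under RT pressure.
 */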
6141
6142 static enum group_type
6143 group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
6144 {
6145 if (sgs->sum_nr_running > sgs->group_capacity_factor)
6146 return group_overloaded;
6147
6148 if (sg_imbalanced(group))
6149 return group_imbalanced;
6150
6151 return group_other;
6152 }
6153
6154 /**
6155 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
6156 * @env: The load balancing environment.
6157 * @group: sched_group whose statistics are to be updated.
6158 * @load_idx: Load index of sched_domain of this_cpu for load calc.
6159 * @local_group: Does group contain this_cpu.
6160 * @sgs: variable to hold the statistics for this group.
6161 * @overload: Indicate more than one runnable task for any CPU.
6162 */
6163 static inline void update_sg_lb_stats(struct lb_env *env,
6164 struct sched_group *group, int load_idx,
6165 int local_group, struct sg_lb_stats *sgs,
6166 bool *overload)
6167 {
6168 unsigned long load;
6169 int i;
6170
6171 memset(sgs, 0, sizeof(*sgs));
6172
6173 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6174 struct rq *rq = cpu_rq(i);
6175
6176 /* Bias balancing toward cpus of our domain */
6177 if (local_group)
6178 load = target_load(i, load_idx);
6179 else
6180 load = source_load(i, load_idx);
6181
6182 sgs->group_load += load;
6183 sgs->sum_nr_running += rq->cfs.h_nr_running;
6184
6185 if (rq->nr_running > 1)
6186 *overload = true;
6187
6188 #ifdef CONFIG_NUMA_BALANCING
6189 sgs->nr_numa_running += rq->nr_numa_running;
6190 sgs->nr_preferred_running += rq->nr_preferred_running;
6191 #endif
6192 sgs->sum_weighted_load += weighted_cpuload(i);
6193 if (idle_cpu(i))
6194 sgs->idle_cpus++;
6195 }
6196
6197 /* Adjust by relative CPU capacity of the group */
6198 sgs->group_capacity = group->sgc->capacity;
6199 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6200
6201 if (sgs->sum_nr_running)
6202 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6203
6204 sgs->group_weight = group->group_weight;
6205 sgs->group_capacity_factor = sg_capacity_factor(env, group);
6206 sgs->group_type = group_classify(group, sgs);
6207
6208 if (sgs->group_capacity_factor > sgs->sum_nr_running)
6209 sgs->group_has_free_capacity = 1;
6210 }
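/*
 * The avg_load computed above is the group load normalised to a
 * SCHED_CAPACITY_SCALE-sized CPU. For instance (illustrative numbers), a
 * group with group_load = 3072 and group_capacity = 2048 ends up with
 * avg_load = 3072 * 1024 / 2048 = 1536, directly comparable with groups of
 * a different size or capacity.
 */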
6211
6212 /**
6213 * update_sd_pick_busiest - return 1 on busiest group
6214 * @env: The load balancing environment.
6215 * @sds: sched_domain statistics
6216 * @sg: sched_group candidate to be checked for being the busiest
6217 * @sgs: sched_group statistics
6218 *
6219 * Determine if @sg is a busier group than the previously selected
6220 * busiest group.
6221 *
6222 * Return: %true if @sg is a busier group than the previously selected
6223 * busiest group. %false otherwise.
6224 */
6225 static bool update_sd_pick_busiest(struct lb_env *env,
6226 struct sd_lb_stats *sds,
6227 struct sched_group *sg,
6228 struct sg_lb_stats *sgs)
6229 {
6230 struct sg_lb_stats *busiest = &sds->busiest_stat;
6231
6232 if (sgs->group_type > busiest->group_type)
6233 return true;
6234
6235 if (sgs->group_type < busiest->group_type)
6236 return false;
6237
6238 if (sgs->avg_load <= busiest->avg_load)
6239 return false;
6240
6241 /* This is the busiest node in its class. */
6242 if (!(env->sd->flags & SD_ASYM_PACKING))
6243 return true;
6244
6245 /*
6246 * ASYM_PACKING needs to move all the work to the lowest
6247 * numbered CPUs in the group, therefore mark all groups
6248 * higher than ourself as busy.
6249 */
6250 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
6251 if (!sds->busiest)
6252 return true;
6253
6254 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6255 return true;
6256 }
6257
6258 return false;
6259 }
6260
6261 #ifdef CONFIG_NUMA_BALANCING
6262 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6263 {
6264 if (sgs->sum_nr_running > sgs->nr_numa_running)
6265 return regular;
6266 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6267 return remote;
6268 return all;
6269 }
6270
6271 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6272 {
6273 if (rq->nr_running > rq->nr_numa_running)
6274 return regular;
6275 if (rq->nr_running > rq->nr_preferred_running)
6276 return remote;
6277 return all;
6278 }
6279 #else
6280 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6281 {
6282 return all;
6283 }
6284
6285 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6286 {
6287 return regular;
6288 }
6289 #endif /* CONFIG_NUMA_BALANCING */
6290
6291 /**
6292 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
6293 * @env: The load balancing environment.
6294 * @sds: variable to hold the statistics for this sched_domain.
6295 */
6296 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
6297 {
6298 struct sched_domain *child = env->sd->child;
6299 struct sched_group *sg = env->sd->groups;
6300 struct sg_lb_stats tmp_sgs;
6301 int load_idx, prefer_sibling = 0;
6302 bool overload = false;
6303
6304 if (child && child->flags & SD_PREFER_SIBLING)
6305 prefer_sibling = 1;
6306
6307 load_idx = get_sd_load_idx(env->sd, env->idle);
6308
6309 do {
6310 struct sg_lb_stats *sgs = &tmp_sgs;
6311 int local_group;
6312
6313 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
6314 if (local_group) {
6315 sds->local = sg;
6316 sgs = &sds->local_stat;
6317
6318 if (env->idle != CPU_NEWLY_IDLE ||
6319 time_after_eq(jiffies, sg->sgc->next_update))
6320 update_group_capacity(env->sd, env->dst_cpu);
6321 }
6322
6323 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6324 &overload);
6325
6326 if (local_group)
6327 goto next_group;
6328
6329 /*
6330 * In case the child domain prefers tasks go to siblings
6331 * first, lower the sg capacity factor to one so that we'll try
6332 * and move all the excess tasks away. We lower the capacity
6333 * of a group only if the local group has the capacity to fit
6334 * these excess tasks, i.e. nr_running < group_capacity_factor. The
6335 * extra check prevents the case where you always pull from the
6336 * heaviest group when it is already under-utilized (possible
6337 * when a single large-weight task outweighs the rest of the tasks on the system).
6338 */
6339 if (prefer_sibling && sds->local &&
6340 sds->local_stat.group_has_free_capacity)
6341 sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
6342
6343 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
6344 sds->busiest = sg;
6345 sds->busiest_stat = *sgs;
6346 }
6347
6348 next_group:
6349 /* Now, start updating sd_lb_stats */
6350 sds->total_load += sgs->group_load;
6351 sds->total_capacity += sgs->group_capacity;
6352
6353 sg = sg->next;
6354 } while (sg != env->sd->groups);
6355
6356 if (env->sd->flags & SD_NUMA)
6357 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
6358
6359 if (!env->sd->parent) {
6360 /* update overload indicator if we are at root domain */
6361 if (env->dst_rq->rd->overload != overload)
6362 env->dst_rq->rd->overload = overload;
6363 }
6364
6365 }
6366
6367 /**
6368 * check_asym_packing - Check to see if the group is packed into the
6369 * sched domain.
6370 *
6371 * This is primarily intended to be used at the sibling level. Some
6372 * cores like POWER7 prefer to use lower numbered SMT threads. In the
6373 * case of POWER7, it can move to lower SMT modes only when higher
6374 * threads are idle. When in lower SMT modes, the threads will
6375 * perform better since they share less core resources. Hence when we
6376 * have idle threads, we want them to be the higher ones.
6377 *
6378 * This packing function is run on idle threads. It checks to see if
6379 * the busiest CPU in this domain (core in the P7 case) has a higher
6380 * CPU number than the packing function is being run on. Here we are
6381 * assuming a lower CPU number is equivalent to a lower SMT thread
6382 * number.
6383 *
6384 * Return: 1 when packing is required and a task should be moved to
6385 * this CPU. The amount of the imbalance is returned in *imbalance.
6386 *
6387 * @env: The load balancing environment.
6388 * @sds: Statistics of the sched_domain which is to be packed
6389 */
6390 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6391 {
6392 int busiest_cpu;
6393
6394 if (!(env->sd->flags & SD_ASYM_PACKING))
6395 return 0;
6396
6397 if (!sds->busiest)
6398 return 0;
6399
6400 busiest_cpu = group_first_cpu(sds->busiest);
6401 if (env->dst_cpu > busiest_cpu)
6402 return 0;
6403
6404 env->imbalance = DIV_ROUND_CLOSEST(
6405 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
6406 SCHED_CAPACITY_SCALE);
6407
6408 return 1;
6409 }
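/*
 * For instance (hypothetical values), with SD_ASYM_PACKING set, a busiest
 * group whose first CPU is 2 and a destination CPU of 0: since 0 is not
 * greater than 2 the check passes, and with avg_load = 512 and
 * group_capacity = 1024 the imbalance becomes
 * DIV_ROUND_CLOSEST(512 * 1024, 1024) = 512.
 */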
6410
6411 /**
6412 * fix_small_imbalance - Calculate the minor imbalance that exists
6413 * amongst the groups of a sched_domain, during
6414 * load balancing.
6415 * @env: The load balancing environment.
6416 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
6417 */
6418 static inline
6419 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6420 {
6421 unsigned long tmp, capa_now = 0, capa_move = 0;
6422 unsigned int imbn = 2;
6423 unsigned long scaled_busy_load_per_task;
6424 struct sg_lb_stats *local, *busiest;
6425
6426 local = &sds->local_stat;
6427 busiest = &sds->busiest_stat;
6428
6429 if (!local->sum_nr_running)
6430 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6431 else if (busiest->load_per_task > local->load_per_task)
6432 imbn = 1;
6433
6434 scaled_busy_load_per_task =
6435 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6436 busiest->group_capacity;
6437
6438 if (busiest->avg_load + scaled_busy_load_per_task >=
6439 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6440 env->imbalance = busiest->load_per_task;
6441 return;
6442 }
6443
6444 /*
6445 * OK, we don't have enough imbalance to justify moving tasks,
6446 * however we may be able to increase total CPU capacity used by
6447 * moving them.
6448 */
6449
6450 capa_now += busiest->group_capacity *
6451 min(busiest->load_per_task, busiest->avg_load);
6452 capa_now += local->group_capacity *
6453 min(local->load_per_task, local->avg_load);
6454 capa_now /= SCHED_CAPACITY_SCALE;
6455
6456 /* Amount of load we'd subtract */
6457 if (busiest->avg_load > scaled_busy_load_per_task) {
6458 capa_move += busiest->group_capacity *
6459 min(busiest->load_per_task,
6460 busiest->avg_load - scaled_busy_load_per_task);
6461 }
6462
6463 /* Amount of load we'd add */
6464 if (busiest->avg_load * busiest->group_capacity <
6465 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
6466 tmp = (busiest->avg_load * busiest->group_capacity) /
6467 local->group_capacity;
6468 } else {
6469 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6470 local->group_capacity;
6471 }
6472 capa_move += local->group_capacity *
6473 min(local->load_per_task, local->avg_load + tmp);
6474 capa_move /= SCHED_CAPACITY_SCALE;
6475
6476 /* Move if we gain throughput */
6477 if (capa_move > capa_now)
6478 env->imbalance = busiest->load_per_task;
6479 }
6480
6481 /**
6482 * calculate_imbalance - Calculate the amount of imbalance present within the
6483 * groups of a given sched_domain during load balance.
6484 * @env: load balance environment
6485 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
6486 */
6487 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6488 {
6489 unsigned long max_pull, load_above_capacity = ~0UL;
6490 struct sg_lb_stats *local, *busiest;
6491
6492 local = &sds->local_stat;
6493 busiest = &sds->busiest_stat;
6494
6495 if (busiest->group_type == group_imbalanced) {
6496 /*
6497 * In the group_imb case we cannot rely on group-wide averages
6498 * to ensure cpu-load equilibrium, look at wider averages. XXX
6499 */
6500 busiest->load_per_task =
6501 min(busiest->load_per_task, sds->avg_load);
6502 }
6503
6504 /*
6505 * In the presence of smp nice balancing, certain scenarios can have
6506 * max load less than avg load (as we skip the groups at or below
6507 * its cpu_capacity while calculating max_load).
6508 */
6509 if (busiest->avg_load <= sds->avg_load ||
6510 local->avg_load >= sds->avg_load) {
6511 env->imbalance = 0;
6512 return fix_small_imbalance(env, sds);
6513 }
6514
6515 /*
6516 * If there aren't any idle cpus, avoid creating some.
6517 */
6518 if (busiest->group_type == group_overloaded &&
6519 local->group_type == group_overloaded) {
6520 load_above_capacity =
6521 (busiest->sum_nr_running - busiest->group_capacity_factor);
6522
6523 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
6524 load_above_capacity /= busiest->group_capacity;
6525 }
6526
6527 /*
6528 * We're trying to get all the cpus to the average_load, so we don't
6529 * want to push ourselves above the average load, nor do we wish to
6530 * reduce the max loaded cpu below the average load. At the same time,
6531 * we also don't want to reduce the group load below the group capacity
6532 * (so that we can implement power-savings policies etc). Thus we look
6533 * for the minimum possible imbalance.
6534 */
6535 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6536
6537 /* How much load to actually move to equalise the imbalance */
6538 env->imbalance = min(
6539 max_pull * busiest->group_capacity,
6540 (sds->avg_load - local->avg_load) * local->group_capacity
6541 ) / SCHED_CAPACITY_SCALE;
6542
6543 /*
6544 * If *imbalance is less than the average load per runnable task,
6545 * there is no guarantee that any task will be moved, so we'll have
6546 * a think about bumping its value to force at least one task to be
6547 * moved.
6548 */
6549 if (env->imbalance < busiest->load_per_task)
6550 return fix_small_imbalance(env, sds);
6551 }
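/*
 * A simplified example of the final computation above (made-up numbers,
 * with load_above_capacity left at its ~0UL default because neither group
 * is overloaded): with busiest->avg_load = 1200, local->avg_load = 800,
 * sds->avg_load = 1000 and both group capacities at 1024,
 * max_pull = 1200 - 1000 = 200 and the imbalance becomes
 * min(200 * 1024, (1000 - 800) * 1024) / 1024 = 200 units of weighted load.
 */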
6552
6553 /******* find_busiest_group() helpers end here *********************/
6554
6555 /**
6556 * find_busiest_group - Returns the busiest group within the sched_domain
6557 * if there is an imbalance. If there isn't an imbalance, and
6558 * the user has opted for power-savings, it returns a group whose
6559 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6560 * such a group exists.
6561 *
6562 * Also calculates the amount of weighted load which should be moved
6563 * to restore balance.
6564 *
6565 * @env: The load balancing environment.
6566 *
6567 * Return: - The busiest group if imbalance exists.
6568 * - If no imbalance and user has opted for power-savings balance,
6569 * return the least loaded group whose CPUs can be
6570 * put to idle by rebalancing its tasks onto our group.
6571 */
6572 static struct sched_group *find_busiest_group(struct lb_env *env)
6573 {
6574 struct sg_lb_stats *local, *busiest;
6575 struct sd_lb_stats sds;
6576
6577 init_sd_lb_stats(&sds);
6578
6579 /*
6580 * Compute the various statistics relevant for load balancing at
6581 * this level.
6582 */
6583 update_sd_lb_stats(env, &sds);
6584 local = &sds.local_stat;
6585 busiest = &sds.busiest_stat;
6586
6587 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6588 check_asym_packing(env, &sds))
6589 return sds.busiest;
6590
6591 /* There is no busy sibling group to pull tasks from */
6592 if (!sds.busiest || busiest->sum_nr_running == 0)
6593 goto out_balanced;
6594
6595 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6596 / sds.total_capacity;
6597
6598 /*
6599 * If the busiest group is imbalanced the below checks don't
6600 * work because they assume all things are equal, which typically
6601 * isn't true due to cpus_allowed constraints and the like.
6602 */
6603 if (busiest->group_type == group_imbalanced)
6604 goto force_balance;
6605
6606 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6607 if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
6608 !busiest->group_has_free_capacity)
6609 goto force_balance;
6610
6611 /*
6612 * If the local group is busier than the selected busiest group
6613 * don't try and pull any tasks.
6614 */
6615 if (local->avg_load >= busiest->avg_load)
6616 goto out_balanced;
6617
6618 /*
6619 * Don't pull any tasks if this group is already above the domain
6620 * average load.
6621 */
6622 if (local->avg_load >= sds.avg_load)
6623 goto out_balanced;
6624
6625 if (env->idle == CPU_IDLE) {
6626 /*
6627 * This cpu is idle. If the busiest group is not overloaded
6628 * and there is no imbalance between this and busiest group
6629 * wrt idle cpus, it is balanced. The imbalance becomes
6630 * significant if the diff is greater than 1; otherwise we
6631 * might end up just moving the imbalance to another group.
6632 */
6633 if ((busiest->group_type != group_overloaded) &&
6634 (local->idle_cpus <= (busiest->idle_cpus + 1)))
6635 goto out_balanced;
6636 } else {
6637 /*
6638 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6639 * imbalance_pct to be conservative.
6640 */
6641 if (100 * busiest->avg_load <=
6642 env->sd->imbalance_pct * local->avg_load)
6643 goto out_balanced;
6644 }
6645
6646 force_balance:
6647 /* Looks like there is an imbalance. Compute it */
6648 calculate_imbalance(env, &sds);
6649 return sds.busiest;
6650
6651 out_balanced:
6652 env->imbalance = 0;
6653 return NULL;
6654 }
6655
6656 /*
6657 * find_busiest_queue - find the busiest runqueue among the cpus in group.
6658 */
6659 static struct rq *find_busiest_queue(struct lb_env *env,
6660 struct sched_group *group)
6661 {
6662 struct rq *busiest = NULL, *rq;
6663 unsigned long busiest_load = 0, busiest_capacity = 1;
6664 int i;
6665
6666 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6667 unsigned long capacity, capacity_factor, wl;
6668 enum fbq_type rt;
6669
6670 rq = cpu_rq(i);
6671 rt = fbq_classify_rq(rq);
6672
6673 /*
6674 * We classify groups/runqueues into three groups:
6675 * - regular: there are !numa tasks
6676 * - remote: there are numa tasks that run on the 'wrong' node
6677 * - all: there is no distinction
6678 *
6679 * In order to avoid migrating ideally placed numa tasks,
6680 * ignore those when there are better options.
6681 *
6682 * If we ignore the actual busiest queue to migrate another
6683 * task, the next balance pass can still reduce the busiest
6684 * queue by moving tasks around inside the node.
6685 *
6686 * If we cannot move enough load due to this classification
6687 * the next pass will adjust the group classification and
6688 * allow migration of more tasks.
6689 *
6690 * Both cases only affect the total convergence complexity.
6691 */
6692 if (rt > env->fbq_type)
6693 continue;
6694
6695 capacity = capacity_of(i);
6696 capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
6697 if (!capacity_factor)
6698 capacity_factor = fix_small_capacity(env->sd, group);
6699
6700 wl = weighted_cpuload(i);
6701
6702 /*
6703 * When comparing with imbalance, use weighted_cpuload()
6704 * which is not scaled with the cpu capacity.
6705 */
6706 if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
6707 continue;
6708
6709 /*
6710 * For the load comparisons with the other cpu's, consider
6711 * the weighted_cpuload() scaled with the cpu capacity, so
6712 * that the load can be moved away from the cpu that is
6713 * potentially running at a lower capacity.
6714 *
6715 * Thus we're looking for max(wl_i / capacity_i), crosswise
6716 * multiplication to rid ourselves of the division works out
6717 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
6718 * our previous maximum.
6719 */
6720 if (wl * busiest_capacity > busiest_load * capacity) {
6721 busiest_load = wl;
6722 busiest_capacity = capacity;
6723 busiest = rq;
6724 }
6725 }
6726
6727 return busiest;
6728 }
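/*
 * To illustrate the cross-multiplication above with made-up numbers:
 * comparing CPU i with wl_i = 800 and capacity_i = 512 against the current
 * maximum j with wl_j = 1000 and capacity_j = 1024, we test
 * 800 * 1024 > 1000 * 512, i.e. 819200 > 512000, so the half-capacity CPU
 * with the lower raw load is still selected as busier, exactly as
 * wl_i / capacity_i = 1.56 > wl_j / capacity_j = 0.98 would suggest.
 */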
6729
6730 /*
6731 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6732 * so long as it is large enough.
6733 */
6734 #define MAX_PINNED_INTERVAL 512
6735
6736 /* Working cpumask for load_balance and load_balance_newidle. */
6737 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6738
6739 static int need_active_balance(struct lb_env *env)
6740 {
6741 struct sched_domain *sd = env->sd;
6742
6743 if (env->idle == CPU_NEWLY_IDLE) {
6744
6745 /*
6746 * ASYM_PACKING needs to force migrate tasks from busy but
6747 * higher numbered CPUs in order to pack all tasks in the
6748 * lowest numbered CPUs.
6749 */
6750 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6751 return 1;
6752 }
6753
6754 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6755 }
6756
6757 static int active_load_balance_cpu_stop(void *data);
6758
6759 static int should_we_balance(struct lb_env *env)
6760 {
6761 struct sched_group *sg = env->sd->groups;
6762 struct cpumask *sg_cpus, *sg_mask;
6763 int cpu, balance_cpu = -1;
6764
6765 /*
6766 * In the newly idle case, we will allow all the cpus
6767 * to do the newly idle load balance.
6768 */
6769 if (env->idle == CPU_NEWLY_IDLE)
6770 return 1;
6771
6772 sg_cpus = sched_group_cpus(sg);
6773 sg_mask = sched_group_mask(sg);
6774 /* Try to find first idle cpu */
6775 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6776 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6777 continue;
6778
6779 balance_cpu = cpu;
6780 break;
6781 }
6782
6783 if (balance_cpu == -1)
6784 balance_cpu = group_balance_cpu(sg);
6785
6786 /*
6787 * First idle cpu or the first cpu (busiest) in this sched group
6788 * is eligible for doing load balancing at this and higher domains.
6789 */
6790 return balance_cpu == env->dst_cpu;
6791 }
6792
6793 /*
6794 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6795 * tasks if there is an imbalance.
6796 */
6797 static int load_balance(int this_cpu, struct rq *this_rq,
6798 struct sched_domain *sd, enum cpu_idle_type idle,
6799 int *continue_balancing)
6800 {
6801 int ld_moved, cur_ld_moved, active_balance = 0;
6802 struct sched_domain *sd_parent = sd->parent;
6803 struct sched_group *group;
6804 struct rq *busiest;
6805 unsigned long flags;
6806 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
6807
6808 struct lb_env env = {
6809 .sd = sd,
6810 .dst_cpu = this_cpu,
6811 .dst_rq = this_rq,
6812 .dst_grpmask = sched_group_cpus(sd->groups),
6813 .idle = idle,
6814 .loop_break = sched_nr_migrate_break,
6815 .cpus = cpus,
6816 .fbq_type = all,
6817 .tasks = LIST_HEAD_INIT(env.tasks),
6818 };
6819
6820 /*
6821 * For NEWLY_IDLE load_balancing, we don't need to consider
6822 * other cpus in our group
6823 */
6824 if (idle == CPU_NEWLY_IDLE)
6825 env.dst_grpmask = NULL;
6826
6827 cpumask_copy(cpus, cpu_active_mask);
6828
6829 schedstat_inc(sd, lb_count[idle]);
6830
6831 redo:
6832 if (!should_we_balance(&env)) {
6833 *continue_balancing = 0;
6834 goto out_balanced;
6835 }
6836
6837 group = find_busiest_group(&env);
6838 if (!group) {
6839 schedstat_inc(sd, lb_nobusyg[idle]);
6840 goto out_balanced;
6841 }
6842
6843 busiest = find_busiest_queue(&env, group);
6844 if (!busiest) {
6845 schedstat_inc(sd, lb_nobusyq[idle]);
6846 goto out_balanced;
6847 }
6848
6849 BUG_ON(busiest == env.dst_rq);
6850
6851 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6852
6853 ld_moved = 0;
6854 if (busiest->nr_running > 1) {
6855 /*
6856 * Attempt to move tasks. If find_busiest_group has found
6857 * an imbalance but busiest->nr_running <= 1, the group is
6858 * still unbalanced. ld_moved simply stays zero, so it is
6859 * correctly treated as an imbalance.
6860 */
6861 env.flags |= LBF_ALL_PINNED;
6862 env.src_cpu = busiest->cpu;
6863 env.src_rq = busiest;
6864 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
6865
6866 more_balance:
6867 raw_spin_lock_irqsave(&busiest->lock, flags);
6868
6869 /*
6870 * cur_ld_moved - load moved in current iteration
6871 * ld_moved - cumulative load moved across iterations
6872 */
6873 cur_ld_moved = detach_tasks(&env);
6874
6875 /*
6876 * We've detached some tasks from busiest_rq. Every
6877 * task is marked TASK_ON_RQ_MIGRATING, so we can safely
6878 * unlock busiest->lock and be sure that nobody can
6879 * manipulate the tasks in parallel.
6880 * See task_rq_lock() family for the details.
6881 */
6882
6883 raw_spin_unlock(&busiest->lock);
6884
6885 if (cur_ld_moved) {
6886 attach_tasks(&env);
6887 ld_moved += cur_ld_moved;
6888 }
6889
6890 local_irq_restore(flags);
6891
6892 if (env.flags & LBF_NEED_BREAK) {
6893 env.flags &= ~LBF_NEED_BREAK;
6894 goto more_balance;
6895 }
6896
6897 /*
6898 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6899 * us and move them to an alternate dst_cpu in our sched_group
6900 * where they can run. The upper limit on how many times we
6901 * iterate on same src_cpu is dependent on number of cpus in our
6902 * sched_group.
6903 *
6904 * This changes load balance semantics a bit on who can move
6905 * load to a given_cpu. In addition to the given_cpu itself
6906 * (or an ilb_cpu acting on its behalf where given_cpu is
6907 * nohz-idle), we now have balance_cpu in a position to move
6908 * load to given_cpu. In rare situations, this may cause
6909 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6910 * _independently_ and at _same_ time to move some load to
6911 * given_cpu) causing excess load to be moved to given_cpu.
6912 * This however should not happen so much in practice and
6913 * moreover subsequent load balance cycles should correct the
6914 * excess load moved.
6915 */
6916 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
6917
6918 /* Prevent re-selecting dst_cpu via env's cpus */
6919 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6920
6921 env.dst_rq = cpu_rq(env.new_dst_cpu);
6922 env.dst_cpu = env.new_dst_cpu;
6923 env.flags &= ~LBF_DST_PINNED;
6924 env.loop = 0;
6925 env.loop_break = sched_nr_migrate_break;
6926
6927 /*
6928 * Go back to "more_balance" rather than "redo" since we
6929 * need to continue with same src_cpu.
6930 */
6931 goto more_balance;
6932 }
6933
6934 /*
6935 * We failed to reach balance because of affinity.
6936 */
6937 if (sd_parent) {
6938 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
6939
6940 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
6941 *group_imbalance = 1;
6942 }
6943
6944 /* All tasks on this runqueue were pinned by CPU affinity */
6945 if (unlikely(env.flags & LBF_ALL_PINNED)) {
6946 cpumask_clear_cpu(cpu_of(busiest), cpus);
6947 if (!cpumask_empty(cpus)) {
6948 env.loop = 0;
6949 env.loop_break = sched_nr_migrate_break;
6950 goto redo;
6951 }
6952 goto out_all_pinned;
6953 }
6954 }
6955
6956 if (!ld_moved) {
6957 schedstat_inc(sd, lb_failed[idle]);
6958 /*
6959 * Increment the failure counter only on periodic balance.
6960 * We do not want newidle balance, which can be very
6961 * frequent, pollute the failure counter causing
6962 * excessive cache_hot migrations and active balances.
6963 */
6964 if (idle != CPU_NEWLY_IDLE)
6965 sd->nr_balance_failed++;
6966
6967 if (need_active_balance(&env)) {
6968 raw_spin_lock_irqsave(&busiest->lock, flags);
6969
6970 /* don't kick the active_load_balance_cpu_stop,
6971 * if the curr task on busiest cpu can't be
6972 * moved to this_cpu
6973 */
6974 if (!cpumask_test_cpu(this_cpu,
6975 tsk_cpus_allowed(busiest->curr))) {
6976 raw_spin_unlock_irqrestore(&busiest->lock,
6977 flags);
6978 env.flags |= LBF_ALL_PINNED;
6979 goto out_one_pinned;
6980 }
6981
6982 /*
6983 * ->active_balance synchronizes accesses to
6984 * ->active_balance_work. Once set, it's cleared
6985 * only after active load balance is finished.
6986 */
6987 if (!busiest->active_balance) {
6988 busiest->active_balance = 1;
6989 busiest->push_cpu = this_cpu;
6990 active_balance = 1;
6991 }
6992 raw_spin_unlock_irqrestore(&busiest->lock, flags);
6993
6994 if (active_balance) {
6995 stop_one_cpu_nowait(cpu_of(busiest),
6996 active_load_balance_cpu_stop, busiest,
6997 &busiest->active_balance_work);
6998 }
6999
7000 /*
7001 * We've kicked active balancing, reset the failure
7002 * counter.
7003 */
7004 sd->nr_balance_failed = sd->cache_nice_tries+1;
7005 }
7006 } else
7007 sd->nr_balance_failed = 0;
7008
7009 if (likely(!active_balance)) {
7010 /* We were unbalanced, so reset the balancing interval */
7011 sd->balance_interval = sd->min_interval;
7012 } else {
7013 /*
7014 * If we've begun active balancing, start to back off. This
7015 * case may not be covered by the all_pinned logic if there
7016 * is only 1 task on the busy runqueue (because we don't call
7017 * detach_tasks).
7018 */
7019 if (sd->balance_interval < sd->max_interval)
7020 sd->balance_interval *= 2;
7021 }
7022
7023 goto out;
7024
7025 out_balanced:
7026 /*
7027 * We reach balance although we may have faced some affinity
7028 * constraints. Clear the imbalance flag if it was set.
7029 */
7030 if (sd_parent) {
7031 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7032
7033 if (*group_imbalance)
7034 *group_imbalance = 0;
7035 }
7036
7037 out_all_pinned:
7038 /*
7039 * We reach balance because all tasks are pinned at this level so
7040 * we can't migrate them. Leave the imbalance flag set so the parent level
7041 * can try to migrate them.
7042 */
7043 schedstat_inc(sd, lb_balanced[idle]);
7044
7045 sd->nr_balance_failed = 0;
7046
7047 out_one_pinned:
7048 /* tune up the balancing interval */
7049 if (((env.flags & LBF_ALL_PINNED) &&
7050 sd->balance_interval < MAX_PINNED_INTERVAL) ||
7051 (sd->balance_interval < sd->max_interval))
7052 sd->balance_interval *= 2;
7053
7054 ld_moved = 0;
7055 out:
7056 return ld_moved;
7057 }
7058
7059 static inline unsigned long
7060 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7061 {
7062 unsigned long interval = sd->balance_interval;
7063
7064 if (cpu_busy)
7065 interval *= sd->busy_factor;
7066
7067 /* scale ms to jiffies */
7068 interval = msecs_to_jiffies(interval);
7069 interval = clamp(interval, 1UL, max_load_balance_interval);
7070
7071 return interval;
7072 }
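/*
 * As a rough example (assuming a default-ish balance_interval of 64 ms,
 * busy_factor = 32 and HZ = 250): a busy CPU gets 64 * 32 = 2048 ms, which
 * msecs_to_jiffies() turns into 512 jiffies, before being clamped to
 * [1, max_load_balance_interval]. An idle CPU keeps the unscaled 64 ms,
 * i.e. 16 jiffies.
 */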
7073
7074 static inline void
7075 update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7076 {
7077 unsigned long interval, next;
7078
7079 interval = get_sd_balance_interval(sd, cpu_busy);
7080 next = sd->last_balance + interval;
7081
7082 if (time_after(*next_balance, next))
7083 *next_balance = next;
7084 }
7085
7086 /*
7087 * idle_balance is called by schedule() if this_cpu is about to become
7088 * idle. Attempts to pull tasks from other CPUs.
7089 */
7090 static int idle_balance(struct rq *this_rq)
7091 {
7092 unsigned long next_balance = jiffies + HZ;
7093 int this_cpu = this_rq->cpu;
7094 struct sched_domain *sd;
7095 int pulled_task = 0;
7096 u64 curr_cost = 0;
7097
7098 idle_enter_fair(this_rq);
7099
7100 /*
7101 * We must set idle_stamp _before_ calling idle_balance(), such that we
7102 * measure the duration of idle_balance() as idle time.
7103 */
7104 this_rq->idle_stamp = rq_clock(this_rq);
7105
7106 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7107 !this_rq->rd->overload) {
7108 rcu_read_lock();
7109 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7110 if (sd)
7111 update_next_balance(sd, 0, &next_balance);
7112 rcu_read_unlock();
7113
7114 goto out;
7115 }
7116
7117 /*
7118 * Drop the rq->lock, but keep IRQ/preempt disabled.
7119 */
7120 raw_spin_unlock(&this_rq->lock);
7121
7122 update_blocked_averages(this_cpu);
7123 rcu_read_lock();
7124 for_each_domain(this_cpu, sd) {
7125 int continue_balancing = 1;
7126 u64 t0, domain_cost;
7127
7128 if (!(sd->flags & SD_LOAD_BALANCE))
7129 continue;
7130
7131 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7132 update_next_balance(sd, 0, &next_balance);
7133 break;
7134 }
7135
7136 if (sd->flags & SD_BALANCE_NEWIDLE) {
7137 t0 = sched_clock_cpu(this_cpu);
7138
7139 pulled_task = load_balance(this_cpu, this_rq,
7140 sd, CPU_NEWLY_IDLE,
7141 &continue_balancing);
7142
7143 domain_cost = sched_clock_cpu(this_cpu) - t0;
7144 if (domain_cost > sd->max_newidle_lb_cost)
7145 sd->max_newidle_lb_cost = domain_cost;
7146
7147 curr_cost += domain_cost;
7148 }
7149
7150 update_next_balance(sd, 0, &next_balance);
7151
7152 /*
7153 * Stop searching for tasks to pull if there are
7154 * now runnable tasks on this rq.
7155 */
7156 if (pulled_task || this_rq->nr_running > 0)
7157 break;
7158 }
7159 rcu_read_unlock();
7160
7161 raw_spin_lock(&this_rq->lock);
7162
7163 if (curr_cost > this_rq->max_idle_balance_cost)
7164 this_rq->max_idle_balance_cost = curr_cost;
7165
7166 /*
7167 * While browsing the domains, we released the rq lock, a task could
7168 * have been enqueued in the meantime. Since we're not going idle,
7169 * pretend we pulled a task.
7170 */
7171 if (this_rq->cfs.h_nr_running && !pulled_task)
7172 pulled_task = 1;
7173
7174 out:
7175 /* Move the next balance forward */
7176 if (time_after(this_rq->next_balance, next_balance))
7177 this_rq->next_balance = next_balance;
7178
7179 /* Is there a task of a high priority class? */
7180 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7181 pulled_task = -1;
7182
7183 if (pulled_task) {
7184 idle_exit_fair(this_rq);
7185 this_rq->idle_stamp = 0;
7186 }
7187
7188 return pulled_task;
7189 }
7190
7191 /*
7192 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
7193 * running tasks off the busiest CPU onto idle CPUs. It requires at
7194 * least 1 task to be running on each physical CPU where possible, and
7195 * avoids physical / logical imbalances.
7196 */
7197 static int active_load_balance_cpu_stop(void *data)
7198 {
7199 struct rq *busiest_rq = data;
7200 int busiest_cpu = cpu_of(busiest_rq);
7201 int target_cpu = busiest_rq->push_cpu;
7202 struct rq *target_rq = cpu_rq(target_cpu);
7203 struct sched_domain *sd;
7204 struct task_struct *p = NULL;
7205
7206 raw_spin_lock_irq(&busiest_rq->lock);
7207
7208 /* make sure the requested cpu hasn't gone down in the meantime */
7209 if (unlikely(busiest_cpu != smp_processor_id() ||
7210 !busiest_rq->active_balance))
7211 goto out_unlock;
7212
7213 /* Is there any task to move? */
7214 if (busiest_rq->nr_running <= 1)
7215 goto out_unlock;
7216
7217 /*
7218 * This condition is "impossible", if it occurs
7219 * we need to fix it. Originally reported by
7220 * Bjorn Helgaas on a 128-cpu setup.
7221 */
7222 BUG_ON(busiest_rq == target_rq);
7223
7224 /* Search for an sd spanning us and the target CPU. */
7225 rcu_read_lock();
7226 for_each_domain(target_cpu, sd) {
7227 if ((sd->flags & SD_LOAD_BALANCE) &&
7228 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7229 break;
7230 }
7231
7232 if (likely(sd)) {
7233 struct lb_env env = {
7234 .sd = sd,
7235 .dst_cpu = target_cpu,
7236 .dst_rq = target_rq,
7237 .src_cpu = busiest_rq->cpu,
7238 .src_rq = busiest_rq,
7239 .idle = CPU_IDLE,
7240 };
7241
7242 schedstat_inc(sd, alb_count);
7243
7244 p = detach_one_task(&env);
7245 if (p)
7246 schedstat_inc(sd, alb_pushed);
7247 else
7248 schedstat_inc(sd, alb_failed);
7249 }
7250 rcu_read_unlock();
7251 out_unlock:
7252 busiest_rq->active_balance = 0;
7253 raw_spin_unlock(&busiest_rq->lock);
7254
7255 if (p)
7256 attach_one_task(target_rq, p);
7257
7258 local_irq_enable();
7259
7260 return 0;
7261 }
7262
7263 static inline int on_null_domain(struct rq *rq)
7264 {
7265 return unlikely(!rcu_dereference_sched(rq->sd));
7266 }
7267
7268 #ifdef CONFIG_NO_HZ_COMMON
7269 /*
7270 * idle load balancing details
7271 * - When one of the busy CPUs notices that idle rebalancing may be
7272 * needed, it will kick the idle load balancer, which then does idle
7273 * load balancing for all the idle CPUs.
7274 */
7275 static struct {
7276 cpumask_var_t idle_cpus_mask;
7277 atomic_t nr_cpus;
7278 unsigned long next_balance; /* in jiffy units */
7279 } nohz ____cacheline_aligned;
7280
7281 static inline int find_new_ilb(void)
7282 {
7283 int ilb = cpumask_first(nohz.idle_cpus_mask);
7284
7285 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7286 return ilb;
7287
7288 return nr_cpu_ids;
7289 }
7290
7291 /*
7292 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
7293 * nohz_load_balancer CPU (if there is one); otherwise we fall back to any idle
7294 * CPU (if there is one).
7295 */
7296 static void nohz_balancer_kick(void)
7297 {
7298 int ilb_cpu;
7299
7300 nohz.next_balance++;
7301
7302 ilb_cpu = find_new_ilb();
7303
7304 if (ilb_cpu >= nr_cpu_ids)
7305 return;
7306
7307 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
7308 return;
7309 /*
7310 * Use smp_send_reschedule() instead of resched_cpu().
7311 * This way we generate a sched IPI on the target cpu which
7312 * is idle. And the softirq performing nohz idle load balance
7313 * will be run before returning from the IPI.
7314 */
7315 smp_send_reschedule(ilb_cpu);
7316 return;
7317 }
7318
7319 static inline void nohz_balance_exit_idle(int cpu)
7320 {
7321 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
7322 /*
7323 * Completely isolated CPUs don't ever set this, so we must test.
7324 */
7325 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7326 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7327 atomic_dec(&nohz.nr_cpus);
7328 }
7329 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7330 }
7331 }
7332
7333 static inline void set_cpu_sd_state_busy(void)
7334 {
7335 struct sched_domain *sd;
7336 int cpu = smp_processor_id();
7337
7338 rcu_read_lock();
7339 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7340
7341 if (!sd || !sd->nohz_idle)
7342 goto unlock;
7343 sd->nohz_idle = 0;
7344
7345 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
7346 unlock:
7347 rcu_read_unlock();
7348 }
7349
7350 void set_cpu_sd_state_idle(void)
7351 {
7352 struct sched_domain *sd;
7353 int cpu = smp_processor_id();
7354
7355 rcu_read_lock();
7356 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7357
7358 if (!sd || sd->nohz_idle)
7359 goto unlock;
7360 sd->nohz_idle = 1;
7361
7362 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
7363 unlock:
7364 rcu_read_unlock();
7365 }
7366
7367 /*
7368 * This routine will record that the cpu is going idle with tick stopped.
7369 * This info will be used in performing idle load balancing in the future.
7370 */
7371 void nohz_balance_enter_idle(int cpu)
7372 {
7373 /*
7374 * If this cpu is going down, then nothing needs to be done.
7375 */
7376 if (!cpu_active(cpu))
7377 return;
7378
7379 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7380 return;
7381
7382 /*
7383 * If we're a completely isolated CPU, we don't play.
7384 */
7385 if (on_null_domain(cpu_rq(cpu)))
7386 return;
7387
7388 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7389 atomic_inc(&nohz.nr_cpus);
7390 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7391 }
7392
7393 static int sched_ilb_notifier(struct notifier_block *nfb,
7394 unsigned long action, void *hcpu)
7395 {
7396 switch (action & ~CPU_TASKS_FROZEN) {
7397 case CPU_DYING:
7398 nohz_balance_exit_idle(smp_processor_id());
7399 return NOTIFY_OK;
7400 default:
7401 return NOTIFY_DONE;
7402 }
7403 }
7404 #endif
7405
7406 static DEFINE_SPINLOCK(balancing);
7407
7408 /*
7409 * Scale the max load_balance interval with the number of CPUs in the system.
7410 * This trades load-balance latency on larger machines for less cross talk.
7411 */
7412 void update_max_interval(void)
7413 {
7414 max_load_balance_interval = HZ*num_online_cpus()/10;
7415 }
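/*
 * For example, with HZ = 1000 and 8 online CPUs the cap works out to
 * 1000 * 8 / 10 = 800 jiffies (800 ms); on a 64-CPU box it grows to
 * 6400 jiffies, trading slower reaction for less cross-CPU traffic.
 */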
7416
7417 /*
7418 * It checks each scheduling domain to see if it is due to be balanced,
7419 * and initiates a balancing operation if so.
7420 *
7421 * Balancing parameters are set up in init_sched_domains.
7422 */
7423 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
7424 {
7425 int continue_balancing = 1;
7426 int cpu = rq->cpu;
7427 unsigned long interval;
7428 struct sched_domain *sd;
7429 /* Earliest time when we have to do rebalance again */
7430 unsigned long next_balance = jiffies + 60*HZ;
7431 int update_next_balance = 0;
7432 int need_serialize, need_decay = 0;
7433 u64 max_cost = 0;
7434
7435 update_blocked_averages(cpu);
7436
7437 rcu_read_lock();
7438 for_each_domain(cpu, sd) {
7439 /*
7440 * Decay the newidle max times here because this is a regular
7441 * visit to all the domains. Decay ~1% per second.
7442 */
7443 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7444 sd->max_newidle_lb_cost =
7445 (sd->max_newidle_lb_cost * 253) / 256;
7446 sd->next_decay_max_lb_cost = jiffies + HZ;
7447 need_decay = 1;
7448 }
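		/*
		 * 253/256 ~= 0.9883, so each step trims roughly 1.2% and,
		 * applied once per HZ, gives the ~1%/sec decay noted above.
		 */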
7449 max_cost += sd->max_newidle_lb_cost;
7450
7451 if (!(sd->flags & SD_LOAD_BALANCE))
7452 continue;
7453
7454 /*
7455 * Stop the load balance at this level. There is another
7456 * CPU in our sched group which is doing load balancing more
7457 * actively.
7458 */
7459 if (!continue_balancing) {
7460 if (need_decay)
7461 continue;
7462 break;
7463 }
7464
7465 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7466
7467 need_serialize = sd->flags & SD_SERIALIZE;
7468 if (need_serialize) {
7469 if (!spin_trylock(&balancing))
7470 goto out;
7471 }
7472
7473 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7474 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7475 /*
7476 * The LBF_DST_PINNED logic could have changed
7477 * env->dst_cpu, so we can't know our idle
7478 * state even if we migrated tasks. Update it.
7479 */
7480 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7481 }
7482 sd->last_balance = jiffies;
7483 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7484 }
7485 if (need_serialize)
7486 spin_unlock(&balancing);
7487 out:
7488 if (time_after(next_balance, sd->last_balance + interval)) {
7489 next_balance = sd->last_balance + interval;
7490 update_next_balance = 1;
7491 }
7492 }
7493 if (need_decay) {
7494 /*
7495 * Ensure the rq-wide value also decays but keep it at a
7496 * reasonable floor to avoid funnies with rq->avg_idle.
7497 */
7498 rq->max_idle_balance_cost =
7499 max((u64)sysctl_sched_migration_cost, max_cost);
7500 }
7501 rcu_read_unlock();
7502
7503 /*
7504 * next_balance will be updated only when there is a need.
7505 * When the cpu is attached to null domain for ex, it will not be
7506 * updated.
7507 */
7508 if (likely(update_next_balance))
7509 rq->next_balance = next_balance;
7510 }
7511
7512 #ifdef CONFIG_NO_HZ_COMMON
7513 /*
7514 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
7515 * rebalancing for all the cpus for whom scheduler ticks are stopped.
7516 */
7517 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7518 {
7519 int this_cpu = this_rq->cpu;
7520 struct rq *rq;
7521 int balance_cpu;
7522
7523 if (idle != CPU_IDLE ||
7524 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7525 goto end;
7526
7527 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7528 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7529 continue;
7530
7531 /*
7532 * If this cpu gets work to do, stop the load balancing
7533 * work being done for other cpus. Next load
7534 * balancing owner will pick it up.
7535 */
7536 if (need_resched())
7537 break;
7538
7539 rq = cpu_rq(balance_cpu);
7540
7541 /*
7542 * If time for next balance is due,
7543 * do the balance.
7544 */
7545 if (time_after_eq(jiffies, rq->next_balance)) {
7546 raw_spin_lock_irq(&rq->lock);
7547 update_rq_clock(rq);
7548 update_idle_cpu_load(rq);
7549 raw_spin_unlock_irq(&rq->lock);
7550 rebalance_domains(rq, CPU_IDLE);
7551 }
7552
7553 if (time_after(this_rq->next_balance, rq->next_balance))
7554 this_rq->next_balance = rq->next_balance;
7555 }
7556 nohz.next_balance = this_rq->next_balance;
7557 end:
7558 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7559 }
7560
7561 /*
7562 * Current heuristic for kicking the idle load balancer in the presence
7563 * of an idle cpu in the system.
7564 * - This rq has more than one task.
7565 * - At any scheduler domain level, this cpu's scheduler group has multiple
7566 * busy cpu's exceeding the group's capacity.
7567 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
7568 * domain span are idle.
7569 */
7570 static inline int nohz_kick_needed(struct rq *rq)
7571 {
7572 unsigned long now = jiffies;
7573 struct sched_domain *sd;
7574 struct sched_group_capacity *sgc;
7575 int nr_busy, cpu = rq->cpu;
7576
7577 if (unlikely(rq->idle_balance))
7578 return 0;
7579
7580 /*
7581 * We may have recently been in ticked or tickless idle mode. At the
7582 * first busy tick after returning from idle, update the busy stats.
7583 */
7584 set_cpu_sd_state_busy();
7585 nohz_balance_exit_idle(cpu);
7586
7587 /*
7588 * No cpus are in tickless mode, hence there is no need for NOHZ
7589 * idle load balancing.
7590 */
7591 if (likely(!atomic_read(&nohz.nr_cpus)))
7592 return 0;
7593
7594 if (time_before(now, nohz.next_balance))
7595 return 0;
7596
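/* More than one runnable task here: an idle cpu can pull the extra work. */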
7597 if (rq->nr_running >= 2)
7598 goto need_kick;
7599
7600 rcu_read_lock();
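/*
* sd_busy's sched group counts its busy cpus (nr_busy_cpus, updated by
* set_cpu_sd_state_busy()/idle()); more than one busy cpu in our group
* warrants a kick.
*/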
7601 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7602
7603 if (sd) {
7604 sgc = sd->groups->sgc;
7605 nr_busy = atomic_read(&sgc->nr_busy_cpus);
7606
7607 if (nr_busy > 1)
7608 goto need_kick_unlock;
7609 }
7610
7611 sd = rcu_dereference(per_cpu(sd_asym, cpu));
7612
7613 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7614 sched_domain_span(sd)) < cpu))
7615 goto need_kick_unlock;
7616
7617 rcu_read_unlock();
7618 return 0;
7619
7620 need_kick_unlock:
7621 rcu_read_unlock();
7622 need_kick:
7623 return 1;
7624 }
7625 #else
7626 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
7627 #endif
7628
7629 /*
7630 * run_rebalance_domains is triggered when needed from the scheduler tick.
7631 * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
7632 */
7633 static void run_rebalance_domains(struct softirq_action *h)
7634 {
7635 struct rq *this_rq = this_rq();
7636 enum cpu_idle_type idle = this_rq->idle_balance ?
7637 CPU_IDLE : CPU_NOT_IDLE;
7638
7639 rebalance_domains(this_rq, idle);
7640
7641 /*
7642 * If this cpu has a pending nohz_balance_kick, then do the
7643 * balancing on behalf of the other idle cpus whose ticks are
7644 * stopped.
7645 */
7646 nohz_idle_balance(this_rq, idle);
7647 }
7648
7649 /*
7650 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
7651 */
7652 void trigger_load_balance(struct rq *rq)
7653 {
7654 /* Don't need to rebalance while attached to NULL domain */
7655 if (unlikely(on_null_domain(rq)))
7656 return;
7657
7658 if (time_after_eq(jiffies, rq->next_balance))
7659 raise_softirq(SCHED_SOFTIRQ);
7660 #ifdef CONFIG_NO_HZ_COMMON
7661 if (nohz_kick_needed(rq))
7662 nohz_balancer_kick();
7663 #endif
7664 }
7665
7666 static void rq_online_fair(struct rq *rq)
7667 {
7668 update_sysctl();
7669
7670 update_runtime_enabled(rq);
7671 }
7672
7673 static void rq_offline_fair(struct rq *rq)
7674 {
7675 update_sysctl();
7676
7677 /* Ensure any throttled groups are reachable by pick_next_task */
7678 unthrottle_offline_cfs_rqs(rq);
7679 }
7680
7681 #endif /* CONFIG_SMP */
7682
7683 /*
7684 * scheduler tick hitting a task of our scheduling class:
7685 */
7686 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7687 {
7688 struct cfs_rq *cfs_rq;
7689 struct sched_entity *se = &curr->se;
7690
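/*
* Tick this task's entity and, with group scheduling, each of its
* ancestor group entities.
*/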
7691 for_each_sched_entity(se) {
7692 cfs_rq = cfs_rq_of(se);
7693 entity_tick(cfs_rq, se, queued);
7694 }
7695
7696 if (numabalancing_enabled)
7697 task_tick_numa(rq, curr);
7698
7699 update_rq_runnable_avg(rq, 1);
7700 }
7701
7702 /*
7703 * called on fork with the child task as argument from the parent's context
7704 * - child not yet on the tasklist
7705 * - preemption disabled
7706 */
7707 static void task_fork_fair(struct task_struct *p)
7708 {
7709 struct cfs_rq *cfs_rq;
7710 struct sched_entity *se = &p->se, *curr;
7711 int this_cpu = smp_processor_id();
7712 struct rq *rq = this_rq();
7713 unsigned long flags;
7714
7715 raw_spin_lock_irqsave(&rq->lock, flags);
7716
7717 update_rq_clock(rq);
7718
7719 cfs_rq = task_cfs_rq(current);
7720 curr = cfs_rq->curr;
7721
7722 /*
7723 * Not only the cpu but also the task_group of the parent might have
7724 * changed after parent->se.{parent,cfs_rq} were copied to
7725 * child->se.{parent,cfs_rq}. So call __set_task_cpu() to make the
7726 * child's pointers point to valid ones.
7727 */
7728 rcu_read_lock();
7729 __set_task_cpu(p, this_cpu);
7730 rcu_read_unlock();
7731
7732 update_curr(cfs_rq);
7733
7734 if (curr)
7735 se->vruntime = curr->vruntime;
7736 place_entity(cfs_rq, se, 1);
7737
7738 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
7739 /*
7740 * Upon rescheduling, sched_class::put_prev_task() will place
7741 * 'current' within the tree based on its new key value.
7742 */
7743 swap(curr->vruntime, se->vruntime);
7744 resched_curr(rq);
7745 }
7746
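/*
* The child may be woken on a different cpu; make its vruntime
* relative to this cfs_rq so enqueue can re-base it on the
* destination runqueue's min_vruntime.
*/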
7747 se->vruntime -= cfs_rq->min_vruntime;
7748
7749 raw_spin_unlock_irqrestore(&rq->lock, flags);
7750 }
7751
7752 /*
7753 * Priority of the task has changed. Check to see if we preempt
7754 * the current task.
7755 */
7756 static void
7757 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7758 {
7759 if (!task_on_rq_queued(p))
7760 return;
7761
7762 /*
7763 * Reschedule if we are currently running on this runqueue and
7764 * our priority decreased, or if we are not currently running on
7765 * this runqueue and our priority is higher than the current task's.
7766 */
7767 if (rq->curr == p) {
7768 if (p->prio > oldprio)
7769 resched_curr(rq);
7770 } else
7771 check_preempt_curr(rq, p, 0);
7772 }
7773
7774 static void switched_from_fair(struct rq *rq, struct task_struct *p)
7775 {
7776 struct sched_entity *se = &p->se;
7777 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7778
7779 /*
7780 * Ensure the task's vruntime is normalized, so that when it's
7781 * switched back to the fair class the enqueue_entity(.flags=0) will
7782 * do the right thing.
7783 *
7784 * If it's queued, then the dequeue_entity(.flags=0) will already
7785 * have normalized the vruntime, if it's !queued, then only when
7786 * the task is sleeping will it still have non-normalized vruntime.
7787 */
7788 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
7789 /*
7790 * Fix up our vruntime so that the current sleep doesn't
7791 * cause 'unlimited' sleep bonus.
7792 */
7793 place_entity(cfs_rq, se, 0);
7794 se->vruntime -= cfs_rq->min_vruntime;
7795 }
7796
7797 #ifdef CONFIG_SMP
7798 /*
7799 * Remove our load from contribution when we leave sched_fair
7800 * and ensure we don't carry in an old decay_count if we
7801 * switch back.
7802 */
7803 if (se->avg.decay_count) {
7804 __synchronize_entity_decay(se);
7805 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
7806 }
7807 #endif
7808 }
7809
7810 /*
7811 * We switched to the sched_fair class.
7812 */
7813 static void switched_to_fair(struct rq *rq, struct task_struct *p)
7814 {
7815 #ifdef CONFIG_FAIR_GROUP_SCHED
7816 struct sched_entity *se = &p->se;
7817 /*
7818 * Since the real depth could have changed (only the FAIR
7819 * class maintains a depth value), reset the depth properly.
7820 */
7821 se->depth = se->parent ? se->parent->depth + 1 : 0;
7822 #endif
7823 if (!task_on_rq_queued(p))
7824 return;
7825
7826 /*
7827 * We were most likely switched from sched_rt, so
7828 * reschedule if we are running, otherwise just see
7829 * if we can still preempt the current task.
7830 */
7831 if (rq->curr == p)
7832 resched_curr(rq);
7833 else
7834 check_preempt_curr(rq, p, 0);
7835 }
7836
7837 /* Account for a task changing its policy or group.
7838 *
7839 * This routine is mostly called to set cfs_rq->curr field when a task
7840 * migrates between groups/classes.
7841 */
7842 static void set_curr_task_fair(struct rq *rq)
7843 {
7844 struct sched_entity *se = &rq->curr->se;
7845
7846 for_each_sched_entity(se) {
7847 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7848
7849 set_next_entity(cfs_rq, se);
7850 /* ensure bandwidth has been allocated on our new cfs_rq */
7851 account_cfs_rq_runtime(cfs_rq, 0);
7852 }
7853 }
7854
7855 void init_cfs_rq(struct cfs_rq *cfs_rq)
7856 {
7857 cfs_rq->tasks_timeline = RB_ROOT;
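/*
* Start min_vruntime near the top of the u64 range, presumably so that
* wraparound is exercised early and comparisons stay wrap-safe.
*/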
7858 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7859 #ifndef CONFIG_64BIT
7860 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7861 #endif
7862 #ifdef CONFIG_SMP
7863 atomic64_set(&cfs_rq->decay_counter, 1);
7864 atomic_long_set(&cfs_rq->removed_load, 0);
7865 #endif
7866 }
7867
7868 #ifdef CONFIG_FAIR_GROUP_SCHED
7869 static void task_move_group_fair(struct task_struct *p, int queued)
7870 {
7871 struct sched_entity *se = &p->se;
7872 struct cfs_rq *cfs_rq;
7873
7874 /*
7875 * If the task was not on the rq at the time of this cgroup movement
7876 * it must have been asleep; sleeping tasks keep their ->vruntime
7877 * absolute on their old rq until wakeup (needed for the fair sleeper
7878 * bonus in place_entity()).
7879 *
7880 * If it was on the rq, we've just 'preempted' it, which does convert
7881 * ->vruntime to a relative base.
7882 *
7883 * Make sure both cases convert their relative position when migrating
7884 * to another cgroup's rq. This does somewhat interfere with the
7885 * fair sleeper stuff for the first placement, but who cares.
7886 */
7887 /*
7888 * When !queued, vruntime of the task has usually NOT been normalized.
7889 * But there are some cases where it has already been normalized:
7890 *
7891 * - Moving a forked child which is waiting for being woken up by
7892 * wake_up_new_task().
7893 * - Moving a task which has been woken up by try_to_wake_up() and
7894 * waiting for actually being woken up by sched_ttwu_pending().
7895 *
7896 * To prevent boost or penalty in the new cfs_rq caused by delta
7897 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7898 */
7899 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
7900 queued = 1;
7901
7902 if (!queued)
7903 se->vruntime -= cfs_rq_of(se)->min_vruntime;
7904 set_task_rq(p, task_cpu(p));
7905 se->depth = se->parent ? se->parent->depth + 1 : 0;
7906 if (!queued) {
7907 cfs_rq = cfs_rq_of(se);
7908 se->vruntime += cfs_rq->min_vruntime;
7909 #ifdef CONFIG_SMP
7910 /*
7911 * migrate_task_rq_fair() will have removed our previous
7912 * contribution, but we must synchronize for ongoing future
7913 * decay.
7914 */
7915 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7916 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
7917 #endif
7918 }
7919 }
7920
7921 void free_fair_sched_group(struct task_group *tg)
7922 {
7923 int i;
7924
7925 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7926
7927 for_each_possible_cpu(i) {
7928 if (tg->cfs_rq)
7929 kfree(tg->cfs_rq[i]);
7930 if (tg->se)
7931 kfree(tg->se[i]);
7932 }
7933
7934 kfree(tg->cfs_rq);
7935 kfree(tg->se);
7936 }
7937
7938 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7939 {
7940 struct cfs_rq *cfs_rq;
7941 struct sched_entity *se;
7942 int i;
7943
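/*
* These are arrays of per-cpu pointers, hence the sizeof() of the
* pointer variables below rather than of the structures themselves.
*/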
7944 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7945 if (!tg->cfs_rq)
7946 goto err;
7947 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7948 if (!tg->se)
7949 goto err;
7950
7951 tg->shares = NICE_0_LOAD;
7952
7953 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7954
7955 for_each_possible_cpu(i) {
7956 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7957 GFP_KERNEL, cpu_to_node(i));
7958 if (!cfs_rq)
7959 goto err;
7960
7961 se = kzalloc_node(sizeof(struct sched_entity),
7962 GFP_KERNEL, cpu_to_node(i));
7963 if (!se)
7964 goto err_free_rq;
7965
7966 init_cfs_rq(cfs_rq);
7967 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7968 }
7969
7970 return 1;
7971
7972 err_free_rq:
7973 kfree(cfs_rq);
7974 err:
7975 return 0;
7976 }
7977
7978 void unregister_fair_sched_group(struct task_group *tg, int cpu)
7979 {
7980 struct rq *rq = cpu_rq(cpu);
7981 unsigned long flags;
7982
7983 /*
7984 * Only empty task groups can be destroyed; so we can speculatively
7985 * check on_list without danger of it being re-added.
7986 */
7987 if (!tg->cfs_rq[cpu]->on_list)
7988 return;
7989
7990 raw_spin_lock_irqsave(&rq->lock, flags);
7991 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7992 raw_spin_unlock_irqrestore(&rq->lock, flags);
7993 }
7994
7995 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7996 struct sched_entity *se, int cpu,
7997 struct sched_entity *parent)
7998 {
7999 struct rq *rq = cpu_rq(cpu);
8000
8001 cfs_rq->tg = tg;
8002 cfs_rq->rq = rq;
8003 init_cfs_rq_runtime(cfs_rq);
8004
8005 tg->cfs_rq[cpu] = cfs_rq;
8006 tg->se[cpu] = se;
8007
8008 /* se could be NULL for root_task_group */
8009 if (!se)
8010 return;
8011
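/*
* A top-level group entity queues on the rq's root cfs_rq; a nested
* one queues on its parent group's own runqueue (my_q).
*/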
8012 if (!parent) {
8013 se->cfs_rq = &rq->cfs;
8014 se->depth = 0;
8015 } else {
8016 se->cfs_rq = parent->my_q;
8017 se->depth = parent->depth + 1;
8018 }
8019
8020 se->my_q = cfs_rq;
8021 /* guarantee group entities always have weight */
8022 update_load_set(&se->load, NICE_0_LOAD);
8023 se->parent = parent;
8024 }
8025
8026 static DEFINE_MUTEX(shares_mutex);
8027
8028 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8029 {
8030 int i;
8031 unsigned long flags;
8032
8033 /*
8034 * We can't change the weight of the root cgroup.
8035 */
8036 if (!tg->se[0])
8037 return -EINVAL;
8038
8039 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8040
8041 mutex_lock(&shares_mutex);
8042 if (tg->shares == shares)
8043 goto done;
8044
8045 tg->shares = shares;
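/*
* The new weight must be visible on every cpu: re-evaluate the shares
* of this group's entity, and of its ancestors, on each runqueue.
*/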
8046 for_each_possible_cpu(i) {
8047 struct rq *rq = cpu_rq(i);
8048 struct sched_entity *se;
8049
8050 se = tg->se[i];
8051 /* Propagate contribution to hierarchy */
8052 raw_spin_lock_irqsave(&rq->lock, flags);
8053
8054 /* Possible calls to update_curr() need rq clock */
8055 update_rq_clock(rq);
8056 for_each_sched_entity(se)
8057 update_cfs_shares(group_cfs_rq(se));
8058 raw_spin_unlock_irqrestore(&rq->lock, flags);
8059 }
8060
8061 done:
8062 mutex_unlock(&shares_mutex);
8063 return 0;
8064 }
8065 #else /* CONFIG_FAIR_GROUP_SCHED */
8066
8067 void free_fair_sched_group(struct task_group *tg) { }
8068
8069 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8070 {
8071 return 1;
8072 }
8073
8074 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
8075
8076 #endif /* CONFIG_FAIR_GROUP_SCHED */
8077
8078
8079 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8080 {
8081 struct sched_entity *se = &task->se;
8082 unsigned int rr_interval = 0;
8083
8084 /*
8085 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
8086 * idle runqueue:
8087 */
8088 if (rq->cfs.load.weight)
8089 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
8090
8091 return rr_interval;
8092 }
8093
8094 /*
8095 * All the scheduling class methods:
8096 */
8097 const struct sched_class fair_sched_class = {
8098 .next = &idle_sched_class,
8099 .enqueue_task = enqueue_task_fair,
8100 .dequeue_task = dequeue_task_fair,
8101 .yield_task = yield_task_fair,
8102 .yield_to_task = yield_to_task_fair,
8103
8104 .check_preempt_curr = check_preempt_wakeup,
8105
8106 .pick_next_task = pick_next_task_fair,
8107 .put_prev_task = put_prev_task_fair,
8108
8109 #ifdef CONFIG_SMP
8110 .select_task_rq = select_task_rq_fair,
8111 .migrate_task_rq = migrate_task_rq_fair,
8112
8113 .rq_online = rq_online_fair,
8114 .rq_offline = rq_offline_fair,
8115
8116 .task_waking = task_waking_fair,
8117 #endif
8118
8119 .set_curr_task = set_curr_task_fair,
8120 .task_tick = task_tick_fair,
8121 .task_fork = task_fork_fair,
8122
8123 .prio_changed = prio_changed_fair,
8124 .switched_from = switched_from_fair,
8125 .switched_to = switched_to_fair,
8126
8127 .get_rr_interval = get_rr_interval_fair,
8128
8129 #ifdef CONFIG_FAIR_GROUP_SCHED
8130 .task_move_group = task_move_group_fair,
8131 #endif
8132 };
8133
8134 #ifdef CONFIG_SCHED_DEBUG
8135 void print_cfs_stats(struct seq_file *m, int cpu)
8136 {
8137 struct cfs_rq *cfs_rq;
8138
8139 rcu_read_lock();
8140 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8141 print_cfs_rq(m, cpu, cfs_rq);
8142 rcu_read_unlock();
8143 }
8144 #endif
8145
8146 __init void init_sched_fair_class(void)
8147 {
8148 #ifdef CONFIG_SMP
8149 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8150
8151 #ifdef CONFIG_NO_HZ_COMMON
8152 nohz.next_balance = jiffies;
8153 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8154 cpu_notifier(sched_ilb_notifier, 0);
8155 #endif
8156 #endif /* CONFIG_SMP */
8157
8158 }