]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - kernel/sched/fair.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[mirror_ubuntu-focal-kernel.git] / kernel / sched / fair.c
1 /*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 */
22
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/cpuidle.h>
27 #include <linux/slab.h>
28 #include <linux/profile.h>
29 #include <linux/interrupt.h>
30 #include <linux/mempolicy.h>
31 #include <linux/migrate.h>
32 #include <linux/task_work.h>
33
34 #include <trace/events/sched.h>
35
36 #include "sched.h"
37
38 /*
39 * Targeted preemption latency for CPU-bound tasks:
40 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41 *
42 * NOTE: this latency value is not the same as the concept of
43 * 'timeslice length' - timeslices in CFS are of variable length
44 * and have no persistent notion like in traditional, time-slice
45 * based scheduling concepts.
46 *
47 * (to see the precise effective timeslice length of your workload,
48 * run vmstat and monitor the context-switches (cs) field)
49 */
50 unsigned int sysctl_sched_latency = 6000000ULL;
51 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
52
53 /*
54 * The initial- and re-scaling of tunables is configurable
55 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
56 *
57 * Options are:
58 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
59 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
60 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
61 */
62 enum sched_tunable_scaling sysctl_sched_tunable_scaling
63 = SCHED_TUNABLESCALING_LOG;
64
65 /*
66 * Minimal preemption granularity for CPU-bound tasks:
67 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
68 */
69 unsigned int sysctl_sched_min_granularity = 750000ULL;
70 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
71
72 /*
73 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
74 */
75 static unsigned int sched_nr_latency = 8;
76
77 /*
78 * After fork, child runs first. If set to 0 (default) then
79 * parent will (try to) run first.
80 */
81 unsigned int sysctl_sched_child_runs_first __read_mostly;
82
83 /*
84 * SCHED_OTHER wake-up granularity.
85 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
86 *
87 * This option delays the preemption effects of decoupled workloads
88 * and reduces their over-scheduling. Synchronous workloads will still
89 * have immediate wakeup/sleep latencies.
90 */
91 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
92 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
93
94 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
95
96 /*
97 * The exponential sliding window over which load is averaged for shares
98 * distribution.
99 * (default: 10msec)
100 */
101 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102
103 #ifdef CONFIG_CFS_BANDWIDTH
104 /*
105 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106 * each time a cfs_rq requests quota.
107 *
108 * Note: in the case that the slice exceeds the runtime remaining (either due
109 * to consumption or the quota being specified to be smaller than the slice)
110 * we will always only issue the remaining available time.
111 *
112 * default: 5 msec, units: microseconds
113 */
114 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
115 #endif
116
117 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
118 {
119 lw->weight += inc;
120 lw->inv_weight = 0;
121 }
122
123 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
124 {
125 lw->weight -= dec;
126 lw->inv_weight = 0;
127 }
128
129 static inline void update_load_set(struct load_weight *lw, unsigned long w)
130 {
131 lw->weight = w;
132 lw->inv_weight = 0;
133 }
134
135 /*
136 * Increase the granularity value when there are more CPUs,
137 * because with more CPUs the 'effective latency' as visible
138 * to users decreases. But the relationship is not linear,
139 * so pick a second-best guess by going with the log2 of the
140 * number of CPUs.
141 *
142 * This idea comes from the SD scheduler of Con Kolivas:
143 */
144 static int get_update_sysctl_factor(void)
145 {
146 unsigned int cpus = min_t(int, num_online_cpus(), 8);
147 unsigned int factor;
148
149 switch (sysctl_sched_tunable_scaling) {
150 case SCHED_TUNABLESCALING_NONE:
151 factor = 1;
152 break;
153 case SCHED_TUNABLESCALING_LINEAR:
154 factor = cpus;
155 break;
156 case SCHED_TUNABLESCALING_LOG:
157 default:
158 factor = 1 + ilog2(cpus);
159 break;
160 }
161
162 return factor;
163 }
164
165 static void update_sysctl(void)
166 {
167 unsigned int factor = get_update_sysctl_factor();
168
169 #define SET_SYSCTL(name) \
170 (sysctl_##name = (factor) * normalized_sysctl_##name)
171 SET_SYSCTL(sched_min_granularity);
172 SET_SYSCTL(sched_latency);
173 SET_SYSCTL(sched_wakeup_granularity);
174 #undef SET_SYSCTL
175 }
176
177 void sched_init_granularity(void)
178 {
179 update_sysctl();
180 }
181
182 #define WMULT_CONST (~0U)
183 #define WMULT_SHIFT 32
184
185 static void __update_inv_weight(struct load_weight *lw)
186 {
187 unsigned long w;
188
189 if (likely(lw->inv_weight))
190 return;
191
192 w = scale_load_down(lw->weight);
193
194 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
195 lw->inv_weight = 1;
196 else if (unlikely(!w))
197 lw->inv_weight = WMULT_CONST;
198 else
199 lw->inv_weight = WMULT_CONST / w;
200 }
201
202 /*
203 * delta_exec * weight / lw.weight
204 * OR
205 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
206 *
207 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
208 * we're guaranteed shift stays positive because inv_weight is guaranteed to
209 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
210 *
211 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
212 * weight/lw.weight <= 1, and therefore our shift will also be positive.
213 */
214 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
215 {
216 u64 fact = scale_load_down(weight);
217 int shift = WMULT_SHIFT;
218
219 __update_inv_weight(lw);
220
221 if (unlikely(fact >> 32)) {
222 while (fact >> 32) {
223 fact >>= 1;
224 shift--;
225 }
226 }
227
228 /* hint to use a 32x32->64 mul */
229 fact = (u64)(u32)fact * lw->inv_weight;
230
231 while (fact >> 32) {
232 fact >>= 1;
233 shift--;
234 }
235
236 return mul_u64_u32_shr(delta_exec, fact, shift);
237 }
238
239
240 const struct sched_class fair_sched_class;
241
242 /**************************************************************
243 * CFS operations on generic schedulable entities:
244 */
245
246 #ifdef CONFIG_FAIR_GROUP_SCHED
247
248 /* cpu runqueue to which this cfs_rq is attached */
249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
250 {
251 return cfs_rq->rq;
252 }
253
254 /* An entity is a task if it doesn't "own" a runqueue */
255 #define entity_is_task(se) (!se->my_q)
256
257 static inline struct task_struct *task_of(struct sched_entity *se)
258 {
259 #ifdef CONFIG_SCHED_DEBUG
260 WARN_ON_ONCE(!entity_is_task(se));
261 #endif
262 return container_of(se, struct task_struct, se);
263 }
264
265 /* Walk up scheduling entities hierarchy */
266 #define for_each_sched_entity(se) \
267 for (; se; se = se->parent)
268
269 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
270 {
271 return p->se.cfs_rq;
272 }
273
274 /* runqueue on which this entity is (to be) queued */
275 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
276 {
277 return se->cfs_rq;
278 }
279
280 /* runqueue "owned" by this group */
281 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
282 {
283 return grp->my_q;
284 }
285
286 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
287 int force_update);
288
289 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
290 {
291 if (!cfs_rq->on_list) {
292 /*
293 * Ensure we either appear before our parent (if already
294 * enqueued) or force our parent to appear after us when it is
295 * enqueued. The fact that we always enqueue bottom-up
296 * reduces this to two cases.
297 */
298 if (cfs_rq->tg->parent &&
299 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
300 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 } else {
303 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
304 &rq_of(cfs_rq)->leaf_cfs_rq_list);
305 }
306
307 cfs_rq->on_list = 1;
308 /* We should have no load, but we need to update last_decay. */
309 update_cfs_rq_blocked_load(cfs_rq, 0);
310 }
311 }
312
313 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
314 {
315 if (cfs_rq->on_list) {
316 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
317 cfs_rq->on_list = 0;
318 }
319 }
320
321 /* Iterate thr' all leaf cfs_rq's on a runqueue */
322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
324
325 /* Do the two (enqueued) entities belong to the same group ? */
326 static inline struct cfs_rq *
327 is_same_group(struct sched_entity *se, struct sched_entity *pse)
328 {
329 if (se->cfs_rq == pse->cfs_rq)
330 return se->cfs_rq;
331
332 return NULL;
333 }
334
335 static inline struct sched_entity *parent_entity(struct sched_entity *se)
336 {
337 return se->parent;
338 }
339
340 static void
341 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
342 {
343 int se_depth, pse_depth;
344
345 /*
346 * preemption test can be made between sibling entities who are in the
347 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
348 * both tasks until we find their ancestors who are siblings of common
349 * parent.
350 */
351
352 /* First walk up until both entities are at same depth */
353 se_depth = (*se)->depth;
354 pse_depth = (*pse)->depth;
355
356 while (se_depth > pse_depth) {
357 se_depth--;
358 *se = parent_entity(*se);
359 }
360
361 while (pse_depth > se_depth) {
362 pse_depth--;
363 *pse = parent_entity(*pse);
364 }
365
366 while (!is_same_group(*se, *pse)) {
367 *se = parent_entity(*se);
368 *pse = parent_entity(*pse);
369 }
370 }
371
372 #else /* !CONFIG_FAIR_GROUP_SCHED */
373
374 static inline struct task_struct *task_of(struct sched_entity *se)
375 {
376 return container_of(se, struct task_struct, se);
377 }
378
379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
380 {
381 return container_of(cfs_rq, struct rq, cfs);
382 }
383
384 #define entity_is_task(se) 1
385
386 #define for_each_sched_entity(se) \
387 for (; se; se = NULL)
388
389 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
390 {
391 return &task_rq(p)->cfs;
392 }
393
394 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
395 {
396 struct task_struct *p = task_of(se);
397 struct rq *rq = task_rq(p);
398
399 return &rq->cfs;
400 }
401
402 /* runqueue "owned" by this group */
403 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
404 {
405 return NULL;
406 }
407
408 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
409 {
410 }
411
412 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
413 {
414 }
415
416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
418
419 static inline struct sched_entity *parent_entity(struct sched_entity *se)
420 {
421 return NULL;
422 }
423
424 static inline void
425 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
426 {
427 }
428
429 #endif /* CONFIG_FAIR_GROUP_SCHED */
430
431 static __always_inline
432 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
433
434 /**************************************************************
435 * Scheduling class tree data structure manipulation methods:
436 */
437
438 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
439 {
440 s64 delta = (s64)(vruntime - max_vruntime);
441 if (delta > 0)
442 max_vruntime = vruntime;
443
444 return max_vruntime;
445 }
446
447 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
448 {
449 s64 delta = (s64)(vruntime - min_vruntime);
450 if (delta < 0)
451 min_vruntime = vruntime;
452
453 return min_vruntime;
454 }
455
456 static inline int entity_before(struct sched_entity *a,
457 struct sched_entity *b)
458 {
459 return (s64)(a->vruntime - b->vruntime) < 0;
460 }
461
462 static void update_min_vruntime(struct cfs_rq *cfs_rq)
463 {
464 u64 vruntime = cfs_rq->min_vruntime;
465
466 if (cfs_rq->curr)
467 vruntime = cfs_rq->curr->vruntime;
468
469 if (cfs_rq->rb_leftmost) {
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
471 struct sched_entity,
472 run_node);
473
474 if (!cfs_rq->curr)
475 vruntime = se->vruntime;
476 else
477 vruntime = min_vruntime(vruntime, se->vruntime);
478 }
479
480 /* ensure we never gain time by being placed backwards. */
481 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
482 #ifndef CONFIG_64BIT
483 smp_wmb();
484 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
485 #endif
486 }
487
488 /*
489 * Enqueue an entity into the rb-tree:
490 */
491 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
492 {
493 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
494 struct rb_node *parent = NULL;
495 struct sched_entity *entry;
496 int leftmost = 1;
497
498 /*
499 * Find the right place in the rbtree:
500 */
501 while (*link) {
502 parent = *link;
503 entry = rb_entry(parent, struct sched_entity, run_node);
504 /*
505 * We dont care about collisions. Nodes with
506 * the same key stay together.
507 */
508 if (entity_before(se, entry)) {
509 link = &parent->rb_left;
510 } else {
511 link = &parent->rb_right;
512 leftmost = 0;
513 }
514 }
515
516 /*
517 * Maintain a cache of leftmost tree entries (it is frequently
518 * used):
519 */
520 if (leftmost)
521 cfs_rq->rb_leftmost = &se->run_node;
522
523 rb_link_node(&se->run_node, parent, link);
524 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
525 }
526
527 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
528 {
529 if (cfs_rq->rb_leftmost == &se->run_node) {
530 struct rb_node *next_node;
531
532 next_node = rb_next(&se->run_node);
533 cfs_rq->rb_leftmost = next_node;
534 }
535
536 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
537 }
538
539 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
540 {
541 struct rb_node *left = cfs_rq->rb_leftmost;
542
543 if (!left)
544 return NULL;
545
546 return rb_entry(left, struct sched_entity, run_node);
547 }
548
549 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
550 {
551 struct rb_node *next = rb_next(&se->run_node);
552
553 if (!next)
554 return NULL;
555
556 return rb_entry(next, struct sched_entity, run_node);
557 }
558
559 #ifdef CONFIG_SCHED_DEBUG
560 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
561 {
562 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
563
564 if (!last)
565 return NULL;
566
567 return rb_entry(last, struct sched_entity, run_node);
568 }
569
570 /**************************************************************
571 * Scheduling class statistics methods:
572 */
573
574 int sched_proc_update_handler(struct ctl_table *table, int write,
575 void __user *buffer, size_t *lenp,
576 loff_t *ppos)
577 {
578 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
579 int factor = get_update_sysctl_factor();
580
581 if (ret || !write)
582 return ret;
583
584 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
585 sysctl_sched_min_granularity);
586
587 #define WRT_SYSCTL(name) \
588 (normalized_sysctl_##name = sysctl_##name / (factor))
589 WRT_SYSCTL(sched_min_granularity);
590 WRT_SYSCTL(sched_latency);
591 WRT_SYSCTL(sched_wakeup_granularity);
592 #undef WRT_SYSCTL
593
594 return 0;
595 }
596 #endif
597
598 /*
599 * delta /= w
600 */
601 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
602 {
603 if (unlikely(se->load.weight != NICE_0_LOAD))
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
605
606 return delta;
607 }
608
609 /*
610 * The idea is to set a period in which each task runs once.
611 *
612 * When there are too many tasks (sched_nr_latency) we have to stretch
613 * this period because otherwise the slices get too small.
614 *
615 * p = (nr <= nl) ? l : l*nr/nl
616 */
617 static u64 __sched_period(unsigned long nr_running)
618 {
619 u64 period = sysctl_sched_latency;
620 unsigned long nr_latency = sched_nr_latency;
621
622 if (unlikely(nr_running > nr_latency)) {
623 period = sysctl_sched_min_granularity;
624 period *= nr_running;
625 }
626
627 return period;
628 }
629
630 /*
631 * We calculate the wall-time slice from the period by taking a part
632 * proportional to the weight.
633 *
634 * s = p*P[w/rw]
635 */
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
637 {
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
639
640 for_each_sched_entity(se) {
641 struct load_weight *load;
642 struct load_weight lw;
643
644 cfs_rq = cfs_rq_of(se);
645 load = &cfs_rq->load;
646
647 if (unlikely(!se->on_rq)) {
648 lw = cfs_rq->load;
649
650 update_load_add(&lw, se->load.weight);
651 load = &lw;
652 }
653 slice = __calc_delta(slice, se->load.weight, load);
654 }
655 return slice;
656 }
657
658 /*
659 * We calculate the vruntime slice of a to-be-inserted task.
660 *
661 * vs = s/w
662 */
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
664 {
665 return calc_delta_fair(sched_slice(cfs_rq, se), se);
666 }
667
668 #ifdef CONFIG_SMP
669 static int select_idle_sibling(struct task_struct *p, int cpu);
670 static unsigned long task_h_load(struct task_struct *p);
671
672 static inline void __update_task_entity_contrib(struct sched_entity *se);
673 static inline void __update_task_entity_utilization(struct sched_entity *se);
674
675 /* Give new task start runnable values to heavy its load in infant time */
676 void init_task_runnable_average(struct task_struct *p)
677 {
678 u32 slice;
679
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
681 p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
682 p->se.avg.avg_period = slice;
683 __update_task_entity_contrib(&p->se);
684 __update_task_entity_utilization(&p->se);
685 }
686 #else
687 void init_task_runnable_average(struct task_struct *p)
688 {
689 }
690 #endif
691
692 /*
693 * Update the current task's runtime statistics.
694 */
695 static void update_curr(struct cfs_rq *cfs_rq)
696 {
697 struct sched_entity *curr = cfs_rq->curr;
698 u64 now = rq_clock_task(rq_of(cfs_rq));
699 u64 delta_exec;
700
701 if (unlikely(!curr))
702 return;
703
704 delta_exec = now - curr->exec_start;
705 if (unlikely((s64)delta_exec <= 0))
706 return;
707
708 curr->exec_start = now;
709
710 schedstat_set(curr->statistics.exec_max,
711 max(delta_exec, curr->statistics.exec_max));
712
713 curr->sum_exec_runtime += delta_exec;
714 schedstat_add(cfs_rq, exec_clock, delta_exec);
715
716 curr->vruntime += calc_delta_fair(delta_exec, curr);
717 update_min_vruntime(cfs_rq);
718
719 if (entity_is_task(curr)) {
720 struct task_struct *curtask = task_of(curr);
721
722 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
723 cpuacct_charge(curtask, delta_exec);
724 account_group_exec_runtime(curtask, delta_exec);
725 }
726
727 account_cfs_rq_runtime(cfs_rq, delta_exec);
728 }
729
730 static void update_curr_fair(struct rq *rq)
731 {
732 update_curr(cfs_rq_of(&rq->curr->se));
733 }
734
735 static inline void
736 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
737 {
738 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
739 }
740
741 /*
742 * Task is being enqueued - update stats:
743 */
744 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
745 {
746 /*
747 * Are we enqueueing a waiting task? (for current tasks
748 * a dequeue/enqueue event is a NOP)
749 */
750 if (se != cfs_rq->curr)
751 update_stats_wait_start(cfs_rq, se);
752 }
753
754 static void
755 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
756 {
757 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
758 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
759 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
760 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
761 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
762 #ifdef CONFIG_SCHEDSTATS
763 if (entity_is_task(se)) {
764 trace_sched_stat_wait(task_of(se),
765 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
766 }
767 #endif
768 schedstat_set(se->statistics.wait_start, 0);
769 }
770
771 static inline void
772 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
773 {
774 /*
775 * Mark the end of the wait period if dequeueing a
776 * waiting task:
777 */
778 if (se != cfs_rq->curr)
779 update_stats_wait_end(cfs_rq, se);
780 }
781
782 /*
783 * We are picking a new current task - update its stats:
784 */
785 static inline void
786 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
787 {
788 /*
789 * We are starting a new run period:
790 */
791 se->exec_start = rq_clock_task(rq_of(cfs_rq));
792 }
793
794 /**************************************************
795 * Scheduling class queueing methods:
796 */
797
798 #ifdef CONFIG_NUMA_BALANCING
799 /*
800 * Approximate time to scan a full NUMA task in ms. The task scan period is
801 * calculated based on the tasks virtual memory size and
802 * numa_balancing_scan_size.
803 */
804 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
805 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
806
807 /* Portion of address space to scan in MB */
808 unsigned int sysctl_numa_balancing_scan_size = 256;
809
810 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
811 unsigned int sysctl_numa_balancing_scan_delay = 1000;
812
813 static unsigned int task_nr_scan_windows(struct task_struct *p)
814 {
815 unsigned long rss = 0;
816 unsigned long nr_scan_pages;
817
818 /*
819 * Calculations based on RSS as non-present and empty pages are skipped
820 * by the PTE scanner and NUMA hinting faults should be trapped based
821 * on resident pages
822 */
823 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
824 rss = get_mm_rss(p->mm);
825 if (!rss)
826 rss = nr_scan_pages;
827
828 rss = round_up(rss, nr_scan_pages);
829 return rss / nr_scan_pages;
830 }
831
832 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
833 #define MAX_SCAN_WINDOW 2560
834
835 static unsigned int task_scan_min(struct task_struct *p)
836 {
837 unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
838 unsigned int scan, floor;
839 unsigned int windows = 1;
840
841 if (scan_size < MAX_SCAN_WINDOW)
842 windows = MAX_SCAN_WINDOW / scan_size;
843 floor = 1000 / windows;
844
845 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
846 return max_t(unsigned int, floor, scan);
847 }
848
849 static unsigned int task_scan_max(struct task_struct *p)
850 {
851 unsigned int smin = task_scan_min(p);
852 unsigned int smax;
853
854 /* Watch for min being lower than max due to floor calculations */
855 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
856 return max(smin, smax);
857 }
858
859 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
860 {
861 rq->nr_numa_running += (p->numa_preferred_nid != -1);
862 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
863 }
864
865 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
866 {
867 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
868 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
869 }
870
871 struct numa_group {
872 atomic_t refcount;
873
874 spinlock_t lock; /* nr_tasks, tasks */
875 int nr_tasks;
876 pid_t gid;
877
878 struct rcu_head rcu;
879 nodemask_t active_nodes;
880 unsigned long total_faults;
881 /*
882 * Faults_cpu is used to decide whether memory should move
883 * towards the CPU. As a consequence, these stats are weighted
884 * more by CPU use than by memory faults.
885 */
886 unsigned long *faults_cpu;
887 unsigned long faults[0];
888 };
889
890 /* Shared or private faults. */
891 #define NR_NUMA_HINT_FAULT_TYPES 2
892
893 /* Memory and CPU locality */
894 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
895
896 /* Averaged statistics, and temporary buffers. */
897 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
898
899 pid_t task_numa_group_id(struct task_struct *p)
900 {
901 return p->numa_group ? p->numa_group->gid : 0;
902 }
903
904 /*
905 * The averaged statistics, shared & private, memory & cpu,
906 * occupy the first half of the array. The second half of the
907 * array is for current counters, which are averaged into the
908 * first set by task_numa_placement.
909 */
910 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
911 {
912 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
913 }
914
915 static inline unsigned long task_faults(struct task_struct *p, int nid)
916 {
917 if (!p->numa_faults)
918 return 0;
919
920 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
921 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
922 }
923
924 static inline unsigned long group_faults(struct task_struct *p, int nid)
925 {
926 if (!p->numa_group)
927 return 0;
928
929 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
930 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
931 }
932
933 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
934 {
935 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
936 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
937 }
938
939 /* Handle placement on systems where not all nodes are directly connected. */
940 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
941 int maxdist, bool task)
942 {
943 unsigned long score = 0;
944 int node;
945
946 /*
947 * All nodes are directly connected, and the same distance
948 * from each other. No need for fancy placement algorithms.
949 */
950 if (sched_numa_topology_type == NUMA_DIRECT)
951 return 0;
952
953 /*
954 * This code is called for each node, introducing N^2 complexity,
955 * which should be ok given the number of nodes rarely exceeds 8.
956 */
957 for_each_online_node(node) {
958 unsigned long faults;
959 int dist = node_distance(nid, node);
960
961 /*
962 * The furthest away nodes in the system are not interesting
963 * for placement; nid was already counted.
964 */
965 if (dist == sched_max_numa_distance || node == nid)
966 continue;
967
968 /*
969 * On systems with a backplane NUMA topology, compare groups
970 * of nodes, and move tasks towards the group with the most
971 * memory accesses. When comparing two nodes at distance
972 * "hoplimit", only nodes closer by than "hoplimit" are part
973 * of each group. Skip other nodes.
974 */
975 if (sched_numa_topology_type == NUMA_BACKPLANE &&
976 dist > maxdist)
977 continue;
978
979 /* Add up the faults from nearby nodes. */
980 if (task)
981 faults = task_faults(p, node);
982 else
983 faults = group_faults(p, node);
984
985 /*
986 * On systems with a glueless mesh NUMA topology, there are
987 * no fixed "groups of nodes". Instead, nodes that are not
988 * directly connected bounce traffic through intermediate
989 * nodes; a numa_group can occupy any set of nodes.
990 * The further away a node is, the less the faults count.
991 * This seems to result in good task placement.
992 */
993 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
994 faults *= (sched_max_numa_distance - dist);
995 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
996 }
997
998 score += faults;
999 }
1000
1001 return score;
1002 }
1003
1004 /*
1005 * These return the fraction of accesses done by a particular task, or
1006 * task group, on a particular numa node. The group weight is given a
1007 * larger multiplier, in order to group tasks together that are almost
1008 * evenly spread out between numa nodes.
1009 */
1010 static inline unsigned long task_weight(struct task_struct *p, int nid,
1011 int dist)
1012 {
1013 unsigned long faults, total_faults;
1014
1015 if (!p->numa_faults)
1016 return 0;
1017
1018 total_faults = p->total_numa_faults;
1019
1020 if (!total_faults)
1021 return 0;
1022
1023 faults = task_faults(p, nid);
1024 faults += score_nearby_nodes(p, nid, dist, true);
1025
1026 return 1000 * faults / total_faults;
1027 }
1028
1029 static inline unsigned long group_weight(struct task_struct *p, int nid,
1030 int dist)
1031 {
1032 unsigned long faults, total_faults;
1033
1034 if (!p->numa_group)
1035 return 0;
1036
1037 total_faults = p->numa_group->total_faults;
1038
1039 if (!total_faults)
1040 return 0;
1041
1042 faults = group_faults(p, nid);
1043 faults += score_nearby_nodes(p, nid, dist, false);
1044
1045 return 1000 * faults / total_faults;
1046 }
1047
1048 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1049 int src_nid, int dst_cpu)
1050 {
1051 struct numa_group *ng = p->numa_group;
1052 int dst_nid = cpu_to_node(dst_cpu);
1053 int last_cpupid, this_cpupid;
1054
1055 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1056
1057 /*
1058 * Multi-stage node selection is used in conjunction with a periodic
1059 * migration fault to build a temporal task<->page relation. By using
1060 * a two-stage filter we remove short/unlikely relations.
1061 *
1062 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1063 * a task's usage of a particular page (n_p) per total usage of this
1064 * page (n_t) (in a given time-span) to a probability.
1065 *
1066 * Our periodic faults will sample this probability and getting the
1067 * same result twice in a row, given these samples are fully
1068 * independent, is then given by P(n)^2, provided our sample period
1069 * is sufficiently short compared to the usage pattern.
1070 *
1071 * This quadric squishes small probabilities, making it less likely we
1072 * act on an unlikely task<->page relation.
1073 */
1074 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1075 if (!cpupid_pid_unset(last_cpupid) &&
1076 cpupid_to_nid(last_cpupid) != dst_nid)
1077 return false;
1078
1079 /* Always allow migrate on private faults */
1080 if (cpupid_match_pid(p, last_cpupid))
1081 return true;
1082
1083 /* A shared fault, but p->numa_group has not been set up yet. */
1084 if (!ng)
1085 return true;
1086
1087 /*
1088 * Do not migrate if the destination is not a node that
1089 * is actively used by this numa group.
1090 */
1091 if (!node_isset(dst_nid, ng->active_nodes))
1092 return false;
1093
1094 /*
1095 * Source is a node that is not actively used by this
1096 * numa group, while the destination is. Migrate.
1097 */
1098 if (!node_isset(src_nid, ng->active_nodes))
1099 return true;
1100
1101 /*
1102 * Both source and destination are nodes in active
1103 * use by this numa group. Maximize memory bandwidth
1104 * by migrating from more heavily used groups, to less
1105 * heavily used ones, spreading the load around.
1106 * Use a 1/4 hysteresis to avoid spurious page movement.
1107 */
1108 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1109 }
1110
1111 static unsigned long weighted_cpuload(const int cpu);
1112 static unsigned long source_load(int cpu, int type);
1113 static unsigned long target_load(int cpu, int type);
1114 static unsigned long capacity_of(int cpu);
1115 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1116
1117 /* Cached statistics for all CPUs within a node */
1118 struct numa_stats {
1119 unsigned long nr_running;
1120 unsigned long load;
1121
1122 /* Total compute capacity of CPUs on a node */
1123 unsigned long compute_capacity;
1124
1125 /* Approximate capacity in terms of runnable tasks on a node */
1126 unsigned long task_capacity;
1127 int has_free_capacity;
1128 };
1129
1130 /*
1131 * XXX borrowed from update_sg_lb_stats
1132 */
1133 static void update_numa_stats(struct numa_stats *ns, int nid)
1134 {
1135 int smt, cpu, cpus = 0;
1136 unsigned long capacity;
1137
1138 memset(ns, 0, sizeof(*ns));
1139 for_each_cpu(cpu, cpumask_of_node(nid)) {
1140 struct rq *rq = cpu_rq(cpu);
1141
1142 ns->nr_running += rq->nr_running;
1143 ns->load += weighted_cpuload(cpu);
1144 ns->compute_capacity += capacity_of(cpu);
1145
1146 cpus++;
1147 }
1148
1149 /*
1150 * If we raced with hotplug and there are no CPUs left in our mask
1151 * the @ns structure is NULL'ed and task_numa_compare() will
1152 * not find this node attractive.
1153 *
1154 * We'll either bail at !has_free_capacity, or we'll detect a huge
1155 * imbalance and bail there.
1156 */
1157 if (!cpus)
1158 return;
1159
1160 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1161 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1162 capacity = cpus / smt; /* cores */
1163
1164 ns->task_capacity = min_t(unsigned, capacity,
1165 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1166 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1167 }
1168
1169 struct task_numa_env {
1170 struct task_struct *p;
1171
1172 int src_cpu, src_nid;
1173 int dst_cpu, dst_nid;
1174
1175 struct numa_stats src_stats, dst_stats;
1176
1177 int imbalance_pct;
1178 int dist;
1179
1180 struct task_struct *best_task;
1181 long best_imp;
1182 int best_cpu;
1183 };
1184
1185 static void task_numa_assign(struct task_numa_env *env,
1186 struct task_struct *p, long imp)
1187 {
1188 if (env->best_task)
1189 put_task_struct(env->best_task);
1190 if (p)
1191 get_task_struct(p);
1192
1193 env->best_task = p;
1194 env->best_imp = imp;
1195 env->best_cpu = env->dst_cpu;
1196 }
1197
1198 static bool load_too_imbalanced(long src_load, long dst_load,
1199 struct task_numa_env *env)
1200 {
1201 long src_capacity, dst_capacity;
1202 long orig_src_load;
1203 long load_a, load_b;
1204 long moved_load;
1205 long imb;
1206
1207 /*
1208 * The load is corrected for the CPU capacity available on each node.
1209 *
1210 * src_load dst_load
1211 * ------------ vs ---------
1212 * src_capacity dst_capacity
1213 */
1214 src_capacity = env->src_stats.compute_capacity;
1215 dst_capacity = env->dst_stats.compute_capacity;
1216
1217 /* We care about the slope of the imbalance, not the direction. */
1218 load_a = dst_load;
1219 load_b = src_load;
1220 if (load_a < load_b)
1221 swap(load_a, load_b);
1222
1223 /* Is the difference below the threshold? */
1224 imb = load_a * src_capacity * 100 -
1225 load_b * dst_capacity * env->imbalance_pct;
1226 if (imb <= 0)
1227 return false;
1228
1229 /*
1230 * The imbalance is above the allowed threshold.
1231 * Allow a move that brings us closer to a balanced situation,
1232 * without moving things past the point of balance.
1233 */
1234 orig_src_load = env->src_stats.load;
1235
1236 /*
1237 * In a task swap, there will be one load moving from src to dst,
1238 * and another moving back. This is the net sum of both moves.
1239 * A simple task move will always have a positive value.
1240 * Allow the move if it brings the system closer to a balanced
1241 * situation, without crossing over the balance point.
1242 */
1243 moved_load = orig_src_load - src_load;
1244
1245 if (moved_load > 0)
1246 /* Moving src -> dst. Did we overshoot balance? */
1247 return src_load * dst_capacity < dst_load * src_capacity;
1248 else
1249 /* Moving dst -> src. Did we overshoot balance? */
1250 return dst_load * src_capacity < src_load * dst_capacity;
1251 }
1252
1253 /*
1254 * This checks if the overall compute and NUMA accesses of the system would
1255 * be improved if the source tasks was migrated to the target dst_cpu taking
1256 * into account that it might be best if task running on the dst_cpu should
1257 * be exchanged with the source task
1258 */
1259 static void task_numa_compare(struct task_numa_env *env,
1260 long taskimp, long groupimp)
1261 {
1262 struct rq *src_rq = cpu_rq(env->src_cpu);
1263 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1264 struct task_struct *cur;
1265 long src_load, dst_load;
1266 long load;
1267 long imp = env->p->numa_group ? groupimp : taskimp;
1268 long moveimp = imp;
1269 int dist = env->dist;
1270
1271 rcu_read_lock();
1272
1273 raw_spin_lock_irq(&dst_rq->lock);
1274 cur = dst_rq->curr;
1275 /*
1276 * No need to move the exiting task, and this ensures that ->curr
1277 * wasn't reaped and thus get_task_struct() in task_numa_assign()
1278 * is safe under RCU read lock.
1279 * Note that rcu_read_lock() itself can't protect from the final
1280 * put_task_struct() after the last schedule().
1281 */
1282 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1283 cur = NULL;
1284 raw_spin_unlock_irq(&dst_rq->lock);
1285
1286 /*
1287 * Because we have preemption enabled we can get migrated around and
1288 * end try selecting ourselves (current == env->p) as a swap candidate.
1289 */
1290 if (cur == env->p)
1291 goto unlock;
1292
1293 /*
1294 * "imp" is the fault differential for the source task between the
1295 * source and destination node. Calculate the total differential for
1296 * the source task and potential destination task. The more negative
1297 * the value is, the more rmeote accesses that would be expected to
1298 * be incurred if the tasks were swapped.
1299 */
1300 if (cur) {
1301 /* Skip this swap candidate if cannot move to the source cpu */
1302 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1303 goto unlock;
1304
1305 /*
1306 * If dst and source tasks are in the same NUMA group, or not
1307 * in any group then look only at task weights.
1308 */
1309 if (cur->numa_group == env->p->numa_group) {
1310 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1311 task_weight(cur, env->dst_nid, dist);
1312 /*
1313 * Add some hysteresis to prevent swapping the
1314 * tasks within a group over tiny differences.
1315 */
1316 if (cur->numa_group)
1317 imp -= imp/16;
1318 } else {
1319 /*
1320 * Compare the group weights. If a task is all by
1321 * itself (not part of a group), use the task weight
1322 * instead.
1323 */
1324 if (cur->numa_group)
1325 imp += group_weight(cur, env->src_nid, dist) -
1326 group_weight(cur, env->dst_nid, dist);
1327 else
1328 imp += task_weight(cur, env->src_nid, dist) -
1329 task_weight(cur, env->dst_nid, dist);
1330 }
1331 }
1332
1333 if (imp <= env->best_imp && moveimp <= env->best_imp)
1334 goto unlock;
1335
1336 if (!cur) {
1337 /* Is there capacity at our destination? */
1338 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1339 !env->dst_stats.has_free_capacity)
1340 goto unlock;
1341
1342 goto balance;
1343 }
1344
1345 /* Balance doesn't matter much if we're running a task per cpu */
1346 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1347 dst_rq->nr_running == 1)
1348 goto assign;
1349
1350 /*
1351 * In the overloaded case, try and keep the load balanced.
1352 */
1353 balance:
1354 load = task_h_load(env->p);
1355 dst_load = env->dst_stats.load + load;
1356 src_load = env->src_stats.load - load;
1357
1358 if (moveimp > imp && moveimp > env->best_imp) {
1359 /*
1360 * If the improvement from just moving env->p direction is
1361 * better than swapping tasks around, check if a move is
1362 * possible. Store a slightly smaller score than moveimp,
1363 * so an actually idle CPU will win.
1364 */
1365 if (!load_too_imbalanced(src_load, dst_load, env)) {
1366 imp = moveimp - 1;
1367 cur = NULL;
1368 goto assign;
1369 }
1370 }
1371
1372 if (imp <= env->best_imp)
1373 goto unlock;
1374
1375 if (cur) {
1376 load = task_h_load(cur);
1377 dst_load -= load;
1378 src_load += load;
1379 }
1380
1381 if (load_too_imbalanced(src_load, dst_load, env))
1382 goto unlock;
1383
1384 /*
1385 * One idle CPU per node is evaluated for a task numa move.
1386 * Call select_idle_sibling to maybe find a better one.
1387 */
1388 if (!cur)
1389 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1390
1391 assign:
1392 task_numa_assign(env, cur, imp);
1393 unlock:
1394 rcu_read_unlock();
1395 }
1396
1397 static void task_numa_find_cpu(struct task_numa_env *env,
1398 long taskimp, long groupimp)
1399 {
1400 int cpu;
1401
1402 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1403 /* Skip this CPU if the source task cannot migrate */
1404 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1405 continue;
1406
1407 env->dst_cpu = cpu;
1408 task_numa_compare(env, taskimp, groupimp);
1409 }
1410 }
1411
1412 static int task_numa_migrate(struct task_struct *p)
1413 {
1414 struct task_numa_env env = {
1415 .p = p,
1416
1417 .src_cpu = task_cpu(p),
1418 .src_nid = task_node(p),
1419
1420 .imbalance_pct = 112,
1421
1422 .best_task = NULL,
1423 .best_imp = 0,
1424 .best_cpu = -1
1425 };
1426 struct sched_domain *sd;
1427 unsigned long taskweight, groupweight;
1428 int nid, ret, dist;
1429 long taskimp, groupimp;
1430
1431 /*
1432 * Pick the lowest SD_NUMA domain, as that would have the smallest
1433 * imbalance and would be the first to start moving tasks about.
1434 *
1435 * And we want to avoid any moving of tasks about, as that would create
1436 * random movement of tasks -- counter the numa conditions we're trying
1437 * to satisfy here.
1438 */
1439 rcu_read_lock();
1440 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1441 if (sd)
1442 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1443 rcu_read_unlock();
1444
1445 /*
1446 * Cpusets can break the scheduler domain tree into smaller
1447 * balance domains, some of which do not cross NUMA boundaries.
1448 * Tasks that are "trapped" in such domains cannot be migrated
1449 * elsewhere, so there is no point in (re)trying.
1450 */
1451 if (unlikely(!sd)) {
1452 p->numa_preferred_nid = task_node(p);
1453 return -EINVAL;
1454 }
1455
1456 env.dst_nid = p->numa_preferred_nid;
1457 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1458 taskweight = task_weight(p, env.src_nid, dist);
1459 groupweight = group_weight(p, env.src_nid, dist);
1460 update_numa_stats(&env.src_stats, env.src_nid);
1461 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1462 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1463 update_numa_stats(&env.dst_stats, env.dst_nid);
1464
1465 /* Try to find a spot on the preferred nid. */
1466 task_numa_find_cpu(&env, taskimp, groupimp);
1467
1468 /*
1469 * Look at other nodes in these cases:
1470 * - there is no space available on the preferred_nid
1471 * - the task is part of a numa_group that is interleaved across
1472 * multiple NUMA nodes; in order to better consolidate the group,
1473 * we need to check other locations.
1474 */
1475 if (env.best_cpu == -1 || (p->numa_group &&
1476 nodes_weight(p->numa_group->active_nodes) > 1)) {
1477 for_each_online_node(nid) {
1478 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1479 continue;
1480
1481 dist = node_distance(env.src_nid, env.dst_nid);
1482 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1483 dist != env.dist) {
1484 taskweight = task_weight(p, env.src_nid, dist);
1485 groupweight = group_weight(p, env.src_nid, dist);
1486 }
1487
1488 /* Only consider nodes where both task and groups benefit */
1489 taskimp = task_weight(p, nid, dist) - taskweight;
1490 groupimp = group_weight(p, nid, dist) - groupweight;
1491 if (taskimp < 0 && groupimp < 0)
1492 continue;
1493
1494 env.dist = dist;
1495 env.dst_nid = nid;
1496 update_numa_stats(&env.dst_stats, env.dst_nid);
1497 task_numa_find_cpu(&env, taskimp, groupimp);
1498 }
1499 }
1500
1501 /*
1502 * If the task is part of a workload that spans multiple NUMA nodes,
1503 * and is migrating into one of the workload's active nodes, remember
1504 * this node as the task's preferred numa node, so the workload can
1505 * settle down.
1506 * A task that migrated to a second choice node will be better off
1507 * trying for a better one later. Do not set the preferred node here.
1508 */
1509 if (p->numa_group) {
1510 if (env.best_cpu == -1)
1511 nid = env.src_nid;
1512 else
1513 nid = env.dst_nid;
1514
1515 if (node_isset(nid, p->numa_group->active_nodes))
1516 sched_setnuma(p, env.dst_nid);
1517 }
1518
1519 /* No better CPU than the current one was found. */
1520 if (env.best_cpu == -1)
1521 return -EAGAIN;
1522
1523 /*
1524 * Reset the scan period if the task is being rescheduled on an
1525 * alternative node to recheck if the tasks is now properly placed.
1526 */
1527 p->numa_scan_period = task_scan_min(p);
1528
1529 if (env.best_task == NULL) {
1530 ret = migrate_task_to(p, env.best_cpu);
1531 if (ret != 0)
1532 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1533 return ret;
1534 }
1535
1536 ret = migrate_swap(p, env.best_task);
1537 if (ret != 0)
1538 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1539 put_task_struct(env.best_task);
1540 return ret;
1541 }
1542
1543 /* Attempt to migrate a task to a CPU on the preferred node. */
1544 static void numa_migrate_preferred(struct task_struct *p)
1545 {
1546 unsigned long interval = HZ;
1547
1548 /* This task has no NUMA fault statistics yet */
1549 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1550 return;
1551
1552 /* Periodically retry migrating the task to the preferred node */
1553 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1554 p->numa_migrate_retry = jiffies + interval;
1555
1556 /* Success if task is already running on preferred CPU */
1557 if (task_node(p) == p->numa_preferred_nid)
1558 return;
1559
1560 /* Otherwise, try migrate to a CPU on the preferred node */
1561 task_numa_migrate(p);
1562 }
1563
1564 /*
1565 * Find the nodes on which the workload is actively running. We do this by
1566 * tracking the nodes from which NUMA hinting faults are triggered. This can
1567 * be different from the set of nodes where the workload's memory is currently
1568 * located.
1569 *
1570 * The bitmask is used to make smarter decisions on when to do NUMA page
1571 * migrations, To prevent flip-flopping, and excessive page migrations, nodes
1572 * are added when they cause over 6/16 of the maximum number of faults, but
1573 * only removed when they drop below 3/16.
1574 */
1575 static void update_numa_active_node_mask(struct numa_group *numa_group)
1576 {
1577 unsigned long faults, max_faults = 0;
1578 int nid;
1579
1580 for_each_online_node(nid) {
1581 faults = group_faults_cpu(numa_group, nid);
1582 if (faults > max_faults)
1583 max_faults = faults;
1584 }
1585
1586 for_each_online_node(nid) {
1587 faults = group_faults_cpu(numa_group, nid);
1588 if (!node_isset(nid, numa_group->active_nodes)) {
1589 if (faults > max_faults * 6 / 16)
1590 node_set(nid, numa_group->active_nodes);
1591 } else if (faults < max_faults * 3 / 16)
1592 node_clear(nid, numa_group->active_nodes);
1593 }
1594 }
1595
1596 /*
1597 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1598 * increments. The more local the fault statistics are, the higher the scan
1599 * period will be for the next scan window. If local/(local+remote) ratio is
1600 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1601 * the scan period will decrease. Aim for 70% local accesses.
1602 */
1603 #define NUMA_PERIOD_SLOTS 10
1604 #define NUMA_PERIOD_THRESHOLD 7
1605
1606 /*
1607 * Increase the scan period (slow down scanning) if the majority of
1608 * our memory is already on our local node, or if the majority of
1609 * the page accesses are shared with other processes.
1610 * Otherwise, decrease the scan period.
1611 */
1612 static void update_task_scan_period(struct task_struct *p,
1613 unsigned long shared, unsigned long private)
1614 {
1615 unsigned int period_slot;
1616 int ratio;
1617 int diff;
1618
1619 unsigned long remote = p->numa_faults_locality[0];
1620 unsigned long local = p->numa_faults_locality[1];
1621
1622 /*
1623 * If there were no record hinting faults then either the task is
1624 * completely idle or all activity is areas that are not of interest
1625 * to automatic numa balancing. Related to that, if there were failed
1626 * migration then it implies we are migrating too quickly or the local
1627 * node is overloaded. In either case, scan slower
1628 */
1629 if (local + shared == 0 || p->numa_faults_locality[2]) {
1630 p->numa_scan_period = min(p->numa_scan_period_max,
1631 p->numa_scan_period << 1);
1632
1633 p->mm->numa_next_scan = jiffies +
1634 msecs_to_jiffies(p->numa_scan_period);
1635
1636 return;
1637 }
1638
1639 /*
1640 * Prepare to scale scan period relative to the current period.
1641 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1642 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1643 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1644 */
1645 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1646 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1647 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1648 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1649 if (!slot)
1650 slot = 1;
1651 diff = slot * period_slot;
1652 } else {
1653 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1654
1655 /*
1656 * Scale scan rate increases based on sharing. There is an
1657 * inverse relationship between the degree of sharing and
1658 * the adjustment made to the scanning period. Broadly
1659 * speaking the intent is that there is little point
1660 * scanning faster if shared accesses dominate as it may
1661 * simply bounce migrations uselessly
1662 */
1663 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1664 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1665 }
1666
1667 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1668 task_scan_min(p), task_scan_max(p));
1669 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1670 }
1671
1672 /*
1673 * Get the fraction of time the task has been running since the last
1674 * NUMA placement cycle. The scheduler keeps similar statistics, but
1675 * decays those on a 32ms period, which is orders of magnitude off
1676 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1677 * stats only if the task is so new there are no NUMA statistics yet.
1678 */
1679 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1680 {
1681 u64 runtime, delta, now;
1682 /* Use the start of this time slice to avoid calculations. */
1683 now = p->se.exec_start;
1684 runtime = p->se.sum_exec_runtime;
1685
1686 if (p->last_task_numa_placement) {
1687 delta = runtime - p->last_sum_exec_runtime;
1688 *period = now - p->last_task_numa_placement;
1689 } else {
1690 delta = p->se.avg.runnable_avg_sum;
1691 *period = p->se.avg.avg_period;
1692 }
1693
1694 p->last_sum_exec_runtime = runtime;
1695 p->last_task_numa_placement = now;
1696
1697 return delta;
1698 }
1699
1700 /*
1701 * Determine the preferred nid for a task in a numa_group. This needs to
1702 * be done in a way that produces consistent results with group_weight,
1703 * otherwise workloads might not converge.
1704 */
1705 static int preferred_group_nid(struct task_struct *p, int nid)
1706 {
1707 nodemask_t nodes;
1708 int dist;
1709
1710 /* Direct connections between all NUMA nodes. */
1711 if (sched_numa_topology_type == NUMA_DIRECT)
1712 return nid;
1713
1714 /*
1715 * On a system with glueless mesh NUMA topology, group_weight
1716 * scores nodes according to the number of NUMA hinting faults on
1717 * both the node itself, and on nearby nodes.
1718 */
1719 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1720 unsigned long score, max_score = 0;
1721 int node, max_node = nid;
1722
1723 dist = sched_max_numa_distance;
1724
1725 for_each_online_node(node) {
1726 score = group_weight(p, node, dist);
1727 if (score > max_score) {
1728 max_score = score;
1729 max_node = node;
1730 }
1731 }
1732 return max_node;
1733 }
1734
1735 /*
1736 * Finding the preferred nid in a system with NUMA backplane
1737 * interconnect topology is more involved. The goal is to locate
1738 * tasks from numa_groups near each other in the system, and
1739 * untangle workloads from different sides of the system. This requires
1740 * searching down the hierarchy of node groups, recursively searching
1741 * inside the highest scoring group of nodes. The nodemask tricks
1742 * keep the complexity of the search down.
1743 */
1744 nodes = node_online_map;
1745 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1746 unsigned long max_faults = 0;
1747 nodemask_t max_group = NODE_MASK_NONE;
1748 int a, b;
1749
1750 /* Are there nodes at this distance from each other? */
1751 if (!find_numa_distance(dist))
1752 continue;
1753
1754 for_each_node_mask(a, nodes) {
1755 unsigned long faults = 0;
1756 nodemask_t this_group;
1757 nodes_clear(this_group);
1758
1759 /* Sum group's NUMA faults; includes a==b case. */
1760 for_each_node_mask(b, nodes) {
1761 if (node_distance(a, b) < dist) {
1762 faults += group_faults(p, b);
1763 node_set(b, this_group);
1764 node_clear(b, nodes);
1765 }
1766 }
1767
1768 /* Remember the top group. */
1769 if (faults > max_faults) {
1770 max_faults = faults;
1771 max_group = this_group;
1772 /*
1773 * subtle: at the smallest distance there is
1774 * just one node left in each "group", the
1775 * winner is the preferred nid.
1776 */
1777 nid = a;
1778 }
1779 }
1780 /* Next round, evaluate the nodes within max_group. */
1781 if (!max_faults)
1782 break;
1783 nodes = max_group;
1784 }
1785 return nid;
1786 }
1787
1788 static void task_numa_placement(struct task_struct *p)
1789 {
1790 int seq, nid, max_nid = -1, max_group_nid = -1;
1791 unsigned long max_faults = 0, max_group_faults = 0;
1792 unsigned long fault_types[2] = { 0, 0 };
1793 unsigned long total_faults;
1794 u64 runtime, period;
1795 spinlock_t *group_lock = NULL;
1796
1797 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1798 if (p->numa_scan_seq == seq)
1799 return;
1800 p->numa_scan_seq = seq;
1801 p->numa_scan_period_max = task_scan_max(p);
1802
1803 total_faults = p->numa_faults_locality[0] +
1804 p->numa_faults_locality[1];
1805 runtime = numa_get_avg_runtime(p, &period);
1806
1807 /* If the task is part of a group prevent parallel updates to group stats */
1808 if (p->numa_group) {
1809 group_lock = &p->numa_group->lock;
1810 spin_lock_irq(group_lock);
1811 }
1812
1813 /* Find the node with the highest number of faults */
1814 for_each_online_node(nid) {
1815 /* Keep track of the offsets in numa_faults array */
1816 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
1817 unsigned long faults = 0, group_faults = 0;
1818 int priv;
1819
1820 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1821 long diff, f_diff, f_weight;
1822
1823 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1824 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1825 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1826 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
1827
1828 /* Decay existing window, copy faults since last scan */
1829 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1830 fault_types[priv] += p->numa_faults[membuf_idx];
1831 p->numa_faults[membuf_idx] = 0;
1832
1833 /*
1834 * Normalize the faults_from, so all tasks in a group
1835 * count according to CPU use, instead of by the raw
1836 * number of faults. Tasks with little runtime have
1837 * little over-all impact on throughput, and thus their
1838 * faults are less important.
1839 */
1840 f_weight = div64_u64(runtime << 16, period + 1);
1841 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
1842 (total_faults + 1);
1843 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1844 p->numa_faults[cpubuf_idx] = 0;
1845
1846 p->numa_faults[mem_idx] += diff;
1847 p->numa_faults[cpu_idx] += f_diff;
1848 faults += p->numa_faults[mem_idx];
1849 p->total_numa_faults += diff;
1850 if (p->numa_group) {
1851 /*
1852 * safe because we can only change our own group
1853 *
1854 * mem_idx represents the offset for a given
1855 * nid and priv in a specific region because it
1856 * is at the beginning of the numa_faults array.
1857 */
1858 p->numa_group->faults[mem_idx] += diff;
1859 p->numa_group->faults_cpu[mem_idx] += f_diff;
1860 p->numa_group->total_faults += diff;
1861 group_faults += p->numa_group->faults[mem_idx];
1862 }
1863 }
1864
1865 if (faults > max_faults) {
1866 max_faults = faults;
1867 max_nid = nid;
1868 }
1869
1870 if (group_faults > max_group_faults) {
1871 max_group_faults = group_faults;
1872 max_group_nid = nid;
1873 }
1874 }
1875
1876 update_task_scan_period(p, fault_types[0], fault_types[1]);
1877
1878 if (p->numa_group) {
1879 update_numa_active_node_mask(p->numa_group);
1880 spin_unlock_irq(group_lock);
1881 max_nid = preferred_group_nid(p, max_group_nid);
1882 }
1883
1884 if (max_faults) {
1885 /* Set the new preferred node */
1886 if (max_nid != p->numa_preferred_nid)
1887 sched_setnuma(p, max_nid);
1888
1889 if (task_node(p) != p->numa_preferred_nid)
1890 numa_migrate_preferred(p);
1891 }
1892 }
1893
1894 static inline int get_numa_group(struct numa_group *grp)
1895 {
1896 return atomic_inc_not_zero(&grp->refcount);
1897 }
1898
1899 static inline void put_numa_group(struct numa_group *grp)
1900 {
1901 if (atomic_dec_and_test(&grp->refcount))
1902 kfree_rcu(grp, rcu);
1903 }
1904
1905 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1906 int *priv)
1907 {
1908 struct numa_group *grp, *my_grp;
1909 struct task_struct *tsk;
1910 bool join = false;
1911 int cpu = cpupid_to_cpu(cpupid);
1912 int i;
1913
1914 if (unlikely(!p->numa_group)) {
1915 unsigned int size = sizeof(struct numa_group) +
1916 4*nr_node_ids*sizeof(unsigned long);
1917
1918 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1919 if (!grp)
1920 return;
1921
1922 atomic_set(&grp->refcount, 1);
1923 spin_lock_init(&grp->lock);
1924 grp->gid = p->pid;
1925 /* Second half of the array tracks nids where faults happen */
1926 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1927 nr_node_ids;
1928
1929 node_set(task_node(current), grp->active_nodes);
1930
1931 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1932 grp->faults[i] = p->numa_faults[i];
1933
1934 grp->total_faults = p->total_numa_faults;
1935
1936 grp->nr_tasks++;
1937 rcu_assign_pointer(p->numa_group, grp);
1938 }
1939
1940 rcu_read_lock();
1941 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1942
1943 if (!cpupid_match_pid(tsk, cpupid))
1944 goto no_join;
1945
1946 grp = rcu_dereference(tsk->numa_group);
1947 if (!grp)
1948 goto no_join;
1949
1950 my_grp = p->numa_group;
1951 if (grp == my_grp)
1952 goto no_join;
1953
1954 /*
1955 * Only join the other group if its bigger; if we're the bigger group,
1956 * the other task will join us.
1957 */
1958 if (my_grp->nr_tasks > grp->nr_tasks)
1959 goto no_join;
1960
1961 /*
1962 * Tie-break on the grp address.
1963 */
1964 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1965 goto no_join;
1966
1967 /* Always join threads in the same process. */
1968 if (tsk->mm == current->mm)
1969 join = true;
1970
1971 /* Simple filter to avoid false positives due to PID collisions */
1972 if (flags & TNF_SHARED)
1973 join = true;
1974
1975 /* Update priv based on whether false sharing was detected */
1976 *priv = !join;
1977
1978 if (join && !get_numa_group(grp))
1979 goto no_join;
1980
1981 rcu_read_unlock();
1982
1983 if (!join)
1984 return;
1985
1986 BUG_ON(irqs_disabled());
1987 double_lock_irq(&my_grp->lock, &grp->lock);
1988
1989 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
1990 my_grp->faults[i] -= p->numa_faults[i];
1991 grp->faults[i] += p->numa_faults[i];
1992 }
1993 my_grp->total_faults -= p->total_numa_faults;
1994 grp->total_faults += p->total_numa_faults;
1995
1996 my_grp->nr_tasks--;
1997 grp->nr_tasks++;
1998
1999 spin_unlock(&my_grp->lock);
2000 spin_unlock_irq(&grp->lock);
2001
2002 rcu_assign_pointer(p->numa_group, grp);
2003
2004 put_numa_group(my_grp);
2005 return;
2006
2007 no_join:
2008 rcu_read_unlock();
2009 return;
2010 }
2011
2012 void task_numa_free(struct task_struct *p)
2013 {
2014 struct numa_group *grp = p->numa_group;
2015 void *numa_faults = p->numa_faults;
2016 unsigned long flags;
2017 int i;
2018
2019 if (grp) {
2020 spin_lock_irqsave(&grp->lock, flags);
2021 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2022 grp->faults[i] -= p->numa_faults[i];
2023 grp->total_faults -= p->total_numa_faults;
2024
2025 grp->nr_tasks--;
2026 spin_unlock_irqrestore(&grp->lock, flags);
2027 RCU_INIT_POINTER(p->numa_group, NULL);
2028 put_numa_group(grp);
2029 }
2030
2031 p->numa_faults = NULL;
2032 kfree(numa_faults);
2033 }
2034
2035 /*
2036 * Got a PROT_NONE fault for a page on @node.
2037 */
2038 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2039 {
2040 struct task_struct *p = current;
2041 bool migrated = flags & TNF_MIGRATED;
2042 int cpu_node = task_node(current);
2043 int local = !!(flags & TNF_FAULT_LOCAL);
2044 int priv;
2045
2046 if (!numabalancing_enabled)
2047 return;
2048
2049 /* for example, ksmd faulting in a user's mm */
2050 if (!p->mm)
2051 return;
2052
2053 /* Allocate buffer to track faults on a per-node basis */
2054 if (unlikely(!p->numa_faults)) {
2055 int size = sizeof(*p->numa_faults) *
2056 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2057
2058 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2059 if (!p->numa_faults)
2060 return;
2061
2062 p->total_numa_faults = 0;
2063 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2064 }
2065
2066 /*
2067 * First accesses are treated as private, otherwise consider accesses
2068 * to be private if the accessing pid has not changed
2069 */
2070 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2071 priv = 1;
2072 } else {
2073 priv = cpupid_match_pid(p, last_cpupid);
2074 if (!priv && !(flags & TNF_NO_GROUP))
2075 task_numa_group(p, last_cpupid, flags, &priv);
2076 }
2077
2078 /*
2079 * If a workload spans multiple NUMA nodes, a shared fault that
2080 * occurs wholly within the set of nodes that the workload is
2081 * actively using should be counted as local. This allows the
2082 * scan rate to slow down when a workload has settled down.
2083 */
2084 if (!priv && !local && p->numa_group &&
2085 node_isset(cpu_node, p->numa_group->active_nodes) &&
2086 node_isset(mem_node, p->numa_group->active_nodes))
2087 local = 1;
2088
2089 task_numa_placement(p);
2090
2091 /*
2092 * Retry task to preferred node migration periodically, in case it
2093 * case it previously failed, or the scheduler moved us.
2094 */
2095 if (time_after(jiffies, p->numa_migrate_retry))
2096 numa_migrate_preferred(p);
2097
2098 if (migrated)
2099 p->numa_pages_migrated += pages;
2100 if (flags & TNF_MIGRATE_FAIL)
2101 p->numa_faults_locality[2] += pages;
2102
2103 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2104 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2105 p->numa_faults_locality[local] += pages;
2106 }
2107
2108 static void reset_ptenuma_scan(struct task_struct *p)
2109 {
2110 ACCESS_ONCE(p->mm->numa_scan_seq)++;
2111 p->mm->numa_scan_offset = 0;
2112 }
2113
2114 /*
2115 * The expensive part of numa migration is done from task_work context.
2116 * Triggered from task_tick_numa().
2117 */
2118 void task_numa_work(struct callback_head *work)
2119 {
2120 unsigned long migrate, next_scan, now = jiffies;
2121 struct task_struct *p = current;
2122 struct mm_struct *mm = p->mm;
2123 struct vm_area_struct *vma;
2124 unsigned long start, end;
2125 unsigned long nr_pte_updates = 0;
2126 long pages;
2127
2128 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2129
2130 work->next = work; /* protect against double add */
2131 /*
2132 * Who cares about NUMA placement when they're dying.
2133 *
2134 * NOTE: make sure not to dereference p->mm before this check,
2135 * exit_task_work() happens _after_ exit_mm() so we could be called
2136 * without p->mm even though we still had it when we enqueued this
2137 * work.
2138 */
2139 if (p->flags & PF_EXITING)
2140 return;
2141
2142 if (!mm->numa_next_scan) {
2143 mm->numa_next_scan = now +
2144 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2145 }
2146
2147 /*
2148 * Enforce maximal scan/migration frequency..
2149 */
2150 migrate = mm->numa_next_scan;
2151 if (time_before(now, migrate))
2152 return;
2153
2154 if (p->numa_scan_period == 0) {
2155 p->numa_scan_period_max = task_scan_max(p);
2156 p->numa_scan_period = task_scan_min(p);
2157 }
2158
2159 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2160 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2161 return;
2162
2163 /*
2164 * Delay this task enough that another task of this mm will likely win
2165 * the next time around.
2166 */
2167 p->node_stamp += 2 * TICK_NSEC;
2168
2169 start = mm->numa_scan_offset;
2170 pages = sysctl_numa_balancing_scan_size;
2171 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2172 if (!pages)
2173 return;
2174
2175 down_read(&mm->mmap_sem);
2176 vma = find_vma(mm, start);
2177 if (!vma) {
2178 reset_ptenuma_scan(p);
2179 start = 0;
2180 vma = mm->mmap;
2181 }
2182 for (; vma; vma = vma->vm_next) {
2183 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2184 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2185 continue;
2186 }
2187
2188 /*
2189 * Shared library pages mapped by multiple processes are not
2190 * migrated as it is expected they are cache replicated. Avoid
2191 * hinting faults in read-only file-backed mappings or the vdso
2192 * as migrating the pages will be of marginal benefit.
2193 */
2194 if (!vma->vm_mm ||
2195 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2196 continue;
2197
2198 /*
2199 * Skip inaccessible VMAs to avoid any confusion between
2200 * PROT_NONE and NUMA hinting ptes
2201 */
2202 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2203 continue;
2204
2205 do {
2206 start = max(start, vma->vm_start);
2207 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2208 end = min(end, vma->vm_end);
2209 nr_pte_updates += change_prot_numa(vma, start, end);
2210
2211 /*
2212 * Scan sysctl_numa_balancing_scan_size but ensure that
2213 * at least one PTE is updated so that unused virtual
2214 * address space is quickly skipped.
2215 */
2216 if (nr_pte_updates)
2217 pages -= (end - start) >> PAGE_SHIFT;
2218
2219 start = end;
2220 if (pages <= 0)
2221 goto out;
2222
2223 cond_resched();
2224 } while (end != vma->vm_end);
2225 }
2226
2227 out:
2228 /*
2229 * It is possible to reach the end of the VMA list but the last few
2230 * VMAs are not guaranteed to the vma_migratable. If they are not, we
2231 * would find the !migratable VMA on the next scan but not reset the
2232 * scanner to the start so check it now.
2233 */
2234 if (vma)
2235 mm->numa_scan_offset = start;
2236 else
2237 reset_ptenuma_scan(p);
2238 up_read(&mm->mmap_sem);
2239 }
2240
2241 /*
2242 * Drive the periodic memory faults..
2243 */
2244 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2245 {
2246 struct callback_head *work = &curr->numa_work;
2247 u64 period, now;
2248
2249 /*
2250 * We don't care about NUMA placement if we don't have memory.
2251 */
2252 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2253 return;
2254
2255 /*
2256 * Using runtime rather than walltime has the dual advantage that
2257 * we (mostly) drive the selection from busy threads and that the
2258 * task needs to have done some actual work before we bother with
2259 * NUMA placement.
2260 */
2261 now = curr->se.sum_exec_runtime;
2262 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2263
2264 if (now - curr->node_stamp > period) {
2265 if (!curr->node_stamp)
2266 curr->numa_scan_period = task_scan_min(curr);
2267 curr->node_stamp += period;
2268
2269 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2270 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2271 task_work_add(curr, work, true);
2272 }
2273 }
2274 }
2275 #else
2276 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2277 {
2278 }
2279
2280 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2281 {
2282 }
2283
2284 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2285 {
2286 }
2287 #endif /* CONFIG_NUMA_BALANCING */
2288
2289 static void
2290 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2291 {
2292 update_load_add(&cfs_rq->load, se->load.weight);
2293 if (!parent_entity(se))
2294 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2295 #ifdef CONFIG_SMP
2296 if (entity_is_task(se)) {
2297 struct rq *rq = rq_of(cfs_rq);
2298
2299 account_numa_enqueue(rq, task_of(se));
2300 list_add(&se->group_node, &rq->cfs_tasks);
2301 }
2302 #endif
2303 cfs_rq->nr_running++;
2304 }
2305
2306 static void
2307 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2308 {
2309 update_load_sub(&cfs_rq->load, se->load.weight);
2310 if (!parent_entity(se))
2311 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2312 if (entity_is_task(se)) {
2313 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2314 list_del_init(&se->group_node);
2315 }
2316 cfs_rq->nr_running--;
2317 }
2318
2319 #ifdef CONFIG_FAIR_GROUP_SCHED
2320 # ifdef CONFIG_SMP
2321 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2322 {
2323 long tg_weight;
2324
2325 /*
2326 * Use this CPU's actual weight instead of the last load_contribution
2327 * to gain a more accurate current total weight. See
2328 * update_cfs_rq_load_contribution().
2329 */
2330 tg_weight = atomic_long_read(&tg->load_avg);
2331 tg_weight -= cfs_rq->tg_load_contrib;
2332 tg_weight += cfs_rq->load.weight;
2333
2334 return tg_weight;
2335 }
2336
2337 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2338 {
2339 long tg_weight, load, shares;
2340
2341 tg_weight = calc_tg_weight(tg, cfs_rq);
2342 load = cfs_rq->load.weight;
2343
2344 shares = (tg->shares * load);
2345 if (tg_weight)
2346 shares /= tg_weight;
2347
2348 if (shares < MIN_SHARES)
2349 shares = MIN_SHARES;
2350 if (shares > tg->shares)
2351 shares = tg->shares;
2352
2353 return shares;
2354 }
2355 # else /* CONFIG_SMP */
2356 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2357 {
2358 return tg->shares;
2359 }
2360 # endif /* CONFIG_SMP */
2361 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2362 unsigned long weight)
2363 {
2364 if (se->on_rq) {
2365 /* commit outstanding execution time */
2366 if (cfs_rq->curr == se)
2367 update_curr(cfs_rq);
2368 account_entity_dequeue(cfs_rq, se);
2369 }
2370
2371 update_load_set(&se->load, weight);
2372
2373 if (se->on_rq)
2374 account_entity_enqueue(cfs_rq, se);
2375 }
2376
2377 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2378
2379 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2380 {
2381 struct task_group *tg;
2382 struct sched_entity *se;
2383 long shares;
2384
2385 tg = cfs_rq->tg;
2386 se = tg->se[cpu_of(rq_of(cfs_rq))];
2387 if (!se || throttled_hierarchy(cfs_rq))
2388 return;
2389 #ifndef CONFIG_SMP
2390 if (likely(se->load.weight == tg->shares))
2391 return;
2392 #endif
2393 shares = calc_cfs_shares(cfs_rq, tg);
2394
2395 reweight_entity(cfs_rq_of(se), se, shares);
2396 }
2397 #else /* CONFIG_FAIR_GROUP_SCHED */
2398 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2399 {
2400 }
2401 #endif /* CONFIG_FAIR_GROUP_SCHED */
2402
2403 #ifdef CONFIG_SMP
2404 /*
2405 * We choose a half-life close to 1 scheduling period.
2406 * Note: The tables below are dependent on this value.
2407 */
2408 #define LOAD_AVG_PERIOD 32
2409 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2410 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
2411
2412 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2413 static const u32 runnable_avg_yN_inv[] = {
2414 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2415 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2416 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2417 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2418 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2419 0x85aac367, 0x82cd8698,
2420 };
2421
2422 /*
2423 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2424 * over-estimates when re-combining.
2425 */
2426 static const u32 runnable_avg_yN_sum[] = {
2427 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2428 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2429 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2430 };
2431
2432 /*
2433 * Approximate:
2434 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2435 */
2436 static __always_inline u64 decay_load(u64 val, u64 n)
2437 {
2438 unsigned int local_n;
2439
2440 if (!n)
2441 return val;
2442 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2443 return 0;
2444
2445 /* after bounds checking we can collapse to 32-bit */
2446 local_n = n;
2447
2448 /*
2449 * As y^PERIOD = 1/2, we can combine
2450 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2451 * With a look-up table which covers y^n (n<PERIOD)
2452 *
2453 * To achieve constant time decay_load.
2454 */
2455 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2456 val >>= local_n / LOAD_AVG_PERIOD;
2457 local_n %= LOAD_AVG_PERIOD;
2458 }
2459
2460 val *= runnable_avg_yN_inv[local_n];
2461 /* We don't use SRR here since we always want to round down. */
2462 return val >> 32;
2463 }
2464
2465 /*
2466 * For updates fully spanning n periods, the contribution to runnable
2467 * average will be: \Sum 1024*y^n
2468 *
2469 * We can compute this reasonably efficiently by combining:
2470 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2471 */
2472 static u32 __compute_runnable_contrib(u64 n)
2473 {
2474 u32 contrib = 0;
2475
2476 if (likely(n <= LOAD_AVG_PERIOD))
2477 return runnable_avg_yN_sum[n];
2478 else if (unlikely(n >= LOAD_AVG_MAX_N))
2479 return LOAD_AVG_MAX;
2480
2481 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
2482 do {
2483 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2484 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2485
2486 n -= LOAD_AVG_PERIOD;
2487 } while (n > LOAD_AVG_PERIOD);
2488
2489 contrib = decay_load(contrib, n);
2490 return contrib + runnable_avg_yN_sum[n];
2491 }
2492
2493 /*
2494 * We can represent the historical contribution to runnable average as the
2495 * coefficients of a geometric series. To do this we sub-divide our runnable
2496 * history into segments of approximately 1ms (1024us); label the segment that
2497 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2498 *
2499 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2500 * p0 p1 p2
2501 * (now) (~1ms ago) (~2ms ago)
2502 *
2503 * Let u_i denote the fraction of p_i that the entity was runnable.
2504 *
2505 * We then designate the fractions u_i as our co-efficients, yielding the
2506 * following representation of historical load:
2507 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2508 *
2509 * We choose y based on the with of a reasonably scheduling period, fixing:
2510 * y^32 = 0.5
2511 *
2512 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2513 * approximately half as much as the contribution to load within the last ms
2514 * (u_0).
2515 *
2516 * When a period "rolls over" and we have new u_0`, multiplying the previous
2517 * sum again by y is sufficient to update:
2518 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2519 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2520 */
2521 static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
2522 struct sched_avg *sa,
2523 int runnable,
2524 int running)
2525 {
2526 u64 delta, periods;
2527 u32 runnable_contrib;
2528 int delta_w, decayed = 0;
2529 unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
2530
2531 delta = now - sa->last_runnable_update;
2532 /*
2533 * This should only happen when time goes backwards, which it
2534 * unfortunately does during sched clock init when we swap over to TSC.
2535 */
2536 if ((s64)delta < 0) {
2537 sa->last_runnable_update = now;
2538 return 0;
2539 }
2540
2541 /*
2542 * Use 1024ns as the unit of measurement since it's a reasonable
2543 * approximation of 1us and fast to compute.
2544 */
2545 delta >>= 10;
2546 if (!delta)
2547 return 0;
2548 sa->last_runnable_update = now;
2549
2550 /* delta_w is the amount already accumulated against our next period */
2551 delta_w = sa->avg_period % 1024;
2552 if (delta + delta_w >= 1024) {
2553 /* period roll-over */
2554 decayed = 1;
2555
2556 /*
2557 * Now that we know we're crossing a period boundary, figure
2558 * out how much from delta we need to complete the current
2559 * period and accrue it.
2560 */
2561 delta_w = 1024 - delta_w;
2562 if (runnable)
2563 sa->runnable_avg_sum += delta_w;
2564 if (running)
2565 sa->running_avg_sum += delta_w * scale_freq
2566 >> SCHED_CAPACITY_SHIFT;
2567 sa->avg_period += delta_w;
2568
2569 delta -= delta_w;
2570
2571 /* Figure out how many additional periods this update spans */
2572 periods = delta / 1024;
2573 delta %= 1024;
2574
2575 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2576 periods + 1);
2577 sa->running_avg_sum = decay_load(sa->running_avg_sum,
2578 periods + 1);
2579 sa->avg_period = decay_load(sa->avg_period,
2580 periods + 1);
2581
2582 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2583 runnable_contrib = __compute_runnable_contrib(periods);
2584 if (runnable)
2585 sa->runnable_avg_sum += runnable_contrib;
2586 if (running)
2587 sa->running_avg_sum += runnable_contrib * scale_freq
2588 >> SCHED_CAPACITY_SHIFT;
2589 sa->avg_period += runnable_contrib;
2590 }
2591
2592 /* Remainder of delta accrued against u_0` */
2593 if (runnable)
2594 sa->runnable_avg_sum += delta;
2595 if (running)
2596 sa->running_avg_sum += delta * scale_freq
2597 >> SCHED_CAPACITY_SHIFT;
2598 sa->avg_period += delta;
2599
2600 return decayed;
2601 }
2602
2603 /* Synchronize an entity's decay with its parenting cfs_rq.*/
2604 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2605 {
2606 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2607 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2608
2609 decays -= se->avg.decay_count;
2610 se->avg.decay_count = 0;
2611 if (!decays)
2612 return 0;
2613
2614 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2615 se->avg.utilization_avg_contrib =
2616 decay_load(se->avg.utilization_avg_contrib, decays);
2617
2618 return decays;
2619 }
2620
2621 #ifdef CONFIG_FAIR_GROUP_SCHED
2622 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2623 int force_update)
2624 {
2625 struct task_group *tg = cfs_rq->tg;
2626 long tg_contrib;
2627
2628 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2629 tg_contrib -= cfs_rq->tg_load_contrib;
2630
2631 if (!tg_contrib)
2632 return;
2633
2634 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2635 atomic_long_add(tg_contrib, &tg->load_avg);
2636 cfs_rq->tg_load_contrib += tg_contrib;
2637 }
2638 }
2639
2640 /*
2641 * Aggregate cfs_rq runnable averages into an equivalent task_group
2642 * representation for computing load contributions.
2643 */
2644 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2645 struct cfs_rq *cfs_rq)
2646 {
2647 struct task_group *tg = cfs_rq->tg;
2648 long contrib;
2649
2650 /* The fraction of a cpu used by this cfs_rq */
2651 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2652 sa->avg_period + 1);
2653 contrib -= cfs_rq->tg_runnable_contrib;
2654
2655 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2656 atomic_add(contrib, &tg->runnable_avg);
2657 cfs_rq->tg_runnable_contrib += contrib;
2658 }
2659 }
2660
2661 static inline void __update_group_entity_contrib(struct sched_entity *se)
2662 {
2663 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2664 struct task_group *tg = cfs_rq->tg;
2665 int runnable_avg;
2666
2667 u64 contrib;
2668
2669 contrib = cfs_rq->tg_load_contrib * tg->shares;
2670 se->avg.load_avg_contrib = div_u64(contrib,
2671 atomic_long_read(&tg->load_avg) + 1);
2672
2673 /*
2674 * For group entities we need to compute a correction term in the case
2675 * that they are consuming <1 cpu so that we would contribute the same
2676 * load as a task of equal weight.
2677 *
2678 * Explicitly co-ordinating this measurement would be expensive, but
2679 * fortunately the sum of each cpus contribution forms a usable
2680 * lower-bound on the true value.
2681 *
2682 * Consider the aggregate of 2 contributions. Either they are disjoint
2683 * (and the sum represents true value) or they are disjoint and we are
2684 * understating by the aggregate of their overlap.
2685 *
2686 * Extending this to N cpus, for a given overlap, the maximum amount we
2687 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
2688 * cpus that overlap for this interval and w_i is the interval width.
2689 *
2690 * On a small machine; the first term is well-bounded which bounds the
2691 * total error since w_i is a subset of the period. Whereas on a
2692 * larger machine, while this first term can be larger, if w_i is the
2693 * of consequential size guaranteed to see n_i*w_i quickly converge to
2694 * our upper bound of 1-cpu.
2695 */
2696 runnable_avg = atomic_read(&tg->runnable_avg);
2697 if (runnable_avg < NICE_0_LOAD) {
2698 se->avg.load_avg_contrib *= runnable_avg;
2699 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2700 }
2701 }
2702
2703 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2704 {
2705 __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
2706 runnable, runnable);
2707 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2708 }
2709 #else /* CONFIG_FAIR_GROUP_SCHED */
2710 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2711 int force_update) {}
2712 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2713 struct cfs_rq *cfs_rq) {}
2714 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2715 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2716 #endif /* CONFIG_FAIR_GROUP_SCHED */
2717
2718 static inline void __update_task_entity_contrib(struct sched_entity *se)
2719 {
2720 u32 contrib;
2721
2722 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2723 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2724 contrib /= (se->avg.avg_period + 1);
2725 se->avg.load_avg_contrib = scale_load(contrib);
2726 }
2727
2728 /* Compute the current contribution to load_avg by se, return any delta */
2729 static long __update_entity_load_avg_contrib(struct sched_entity *se)
2730 {
2731 long old_contrib = se->avg.load_avg_contrib;
2732
2733 if (entity_is_task(se)) {
2734 __update_task_entity_contrib(se);
2735 } else {
2736 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2737 __update_group_entity_contrib(se);
2738 }
2739
2740 return se->avg.load_avg_contrib - old_contrib;
2741 }
2742
2743
2744 static inline void __update_task_entity_utilization(struct sched_entity *se)
2745 {
2746 u32 contrib;
2747
2748 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2749 contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
2750 contrib /= (se->avg.avg_period + 1);
2751 se->avg.utilization_avg_contrib = scale_load(contrib);
2752 }
2753
2754 static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
2755 {
2756 long old_contrib = se->avg.utilization_avg_contrib;
2757
2758 if (entity_is_task(se))
2759 __update_task_entity_utilization(se);
2760 else
2761 se->avg.utilization_avg_contrib =
2762 group_cfs_rq(se)->utilization_load_avg;
2763
2764 return se->avg.utilization_avg_contrib - old_contrib;
2765 }
2766
2767 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2768 long load_contrib)
2769 {
2770 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2771 cfs_rq->blocked_load_avg -= load_contrib;
2772 else
2773 cfs_rq->blocked_load_avg = 0;
2774 }
2775
2776 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2777
2778 /* Update a sched_entity's runnable average */
2779 static inline void update_entity_load_avg(struct sched_entity *se,
2780 int update_cfs_rq)
2781 {
2782 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2783 long contrib_delta, utilization_delta;
2784 int cpu = cpu_of(rq_of(cfs_rq));
2785 u64 now;
2786
2787 /*
2788 * For a group entity we need to use their owned cfs_rq_clock_task() in
2789 * case they are the parent of a throttled hierarchy.
2790 */
2791 if (entity_is_task(se))
2792 now = cfs_rq_clock_task(cfs_rq);
2793 else
2794 now = cfs_rq_clock_task(group_cfs_rq(se));
2795
2796 if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
2797 cfs_rq->curr == se))
2798 return;
2799
2800 contrib_delta = __update_entity_load_avg_contrib(se);
2801 utilization_delta = __update_entity_utilization_avg_contrib(se);
2802
2803 if (!update_cfs_rq)
2804 return;
2805
2806 if (se->on_rq) {
2807 cfs_rq->runnable_load_avg += contrib_delta;
2808 cfs_rq->utilization_load_avg += utilization_delta;
2809 } else {
2810 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2811 }
2812 }
2813
2814 /*
2815 * Decay the load contributed by all blocked children and account this so that
2816 * their contribution may appropriately discounted when they wake up.
2817 */
2818 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2819 {
2820 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2821 u64 decays;
2822
2823 decays = now - cfs_rq->last_decay;
2824 if (!decays && !force_update)
2825 return;
2826
2827 if (atomic_long_read(&cfs_rq->removed_load)) {
2828 unsigned long removed_load;
2829 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2830 subtract_blocked_load_contrib(cfs_rq, removed_load);
2831 }
2832
2833 if (decays) {
2834 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2835 decays);
2836 atomic64_add(decays, &cfs_rq->decay_counter);
2837 cfs_rq->last_decay = now;
2838 }
2839
2840 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2841 }
2842
2843 /* Add the load generated by se into cfs_rq's child load-average */
2844 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2845 struct sched_entity *se,
2846 int wakeup)
2847 {
2848 /*
2849 * We track migrations using entity decay_count <= 0, on a wake-up
2850 * migration we use a negative decay count to track the remote decays
2851 * accumulated while sleeping.
2852 *
2853 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2854 * are seen by enqueue_entity_load_avg() as a migration with an already
2855 * constructed load_avg_contrib.
2856 */
2857 if (unlikely(se->avg.decay_count <= 0)) {
2858 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2859 if (se->avg.decay_count) {
2860 /*
2861 * In a wake-up migration we have to approximate the
2862 * time sleeping. This is because we can't synchronize
2863 * clock_task between the two cpus, and it is not
2864 * guaranteed to be read-safe. Instead, we can
2865 * approximate this using our carried decays, which are
2866 * explicitly atomically readable.
2867 */
2868 se->avg.last_runnable_update -= (-se->avg.decay_count)
2869 << 20;
2870 update_entity_load_avg(se, 0);
2871 /* Indicate that we're now synchronized and on-rq */
2872 se->avg.decay_count = 0;
2873 }
2874 wakeup = 0;
2875 } else {
2876 __synchronize_entity_decay(se);
2877 }
2878
2879 /* migrated tasks did not contribute to our blocked load */
2880 if (wakeup) {
2881 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2882 update_entity_load_avg(se, 0);
2883 }
2884
2885 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2886 cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
2887 /* we force update consideration on load-balancer moves */
2888 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2889 }
2890
2891 /*
2892 * Remove se's load from this cfs_rq child load-average, if the entity is
2893 * transitioning to a blocked state we track its projected decay using
2894 * blocked_load_avg.
2895 */
2896 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2897 struct sched_entity *se,
2898 int sleep)
2899 {
2900 update_entity_load_avg(se, 1);
2901 /* we force update consideration on load-balancer moves */
2902 update_cfs_rq_blocked_load(cfs_rq, !sleep);
2903
2904 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2905 cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
2906 if (sleep) {
2907 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2908 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2909 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2910 }
2911
2912 /*
2913 * Update the rq's load with the elapsed running time before entering
2914 * idle. if the last scheduled task is not a CFS task, idle_enter will
2915 * be the only way to update the runnable statistic.
2916 */
2917 void idle_enter_fair(struct rq *this_rq)
2918 {
2919 update_rq_runnable_avg(this_rq, 1);
2920 }
2921
2922 /*
2923 * Update the rq's load with the elapsed idle time before a task is
2924 * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
2925 * be the only way to update the runnable statistic.
2926 */
2927 void idle_exit_fair(struct rq *this_rq)
2928 {
2929 update_rq_runnable_avg(this_rq, 0);
2930 }
2931
2932 static int idle_balance(struct rq *this_rq);
2933
2934 #else /* CONFIG_SMP */
2935
2936 static inline void update_entity_load_avg(struct sched_entity *se,
2937 int update_cfs_rq) {}
2938 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2939 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2940 struct sched_entity *se,
2941 int wakeup) {}
2942 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2943 struct sched_entity *se,
2944 int sleep) {}
2945 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2946 int force_update) {}
2947
2948 static inline int idle_balance(struct rq *rq)
2949 {
2950 return 0;
2951 }
2952
2953 #endif /* CONFIG_SMP */
2954
2955 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2956 {
2957 #ifdef CONFIG_SCHEDSTATS
2958 struct task_struct *tsk = NULL;
2959
2960 if (entity_is_task(se))
2961 tsk = task_of(se);
2962
2963 if (se->statistics.sleep_start) {
2964 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2965
2966 if ((s64)delta < 0)
2967 delta = 0;
2968
2969 if (unlikely(delta > se->statistics.sleep_max))
2970 se->statistics.sleep_max = delta;
2971
2972 se->statistics.sleep_start = 0;
2973 se->statistics.sum_sleep_runtime += delta;
2974
2975 if (tsk) {
2976 account_scheduler_latency(tsk, delta >> 10, 1);
2977 trace_sched_stat_sleep(tsk, delta);
2978 }
2979 }
2980 if (se->statistics.block_start) {
2981 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2982
2983 if ((s64)delta < 0)
2984 delta = 0;
2985
2986 if (unlikely(delta > se->statistics.block_max))
2987 se->statistics.block_max = delta;
2988
2989 se->statistics.block_start = 0;
2990 se->statistics.sum_sleep_runtime += delta;
2991
2992 if (tsk) {
2993 if (tsk->in_iowait) {
2994 se->statistics.iowait_sum += delta;
2995 se->statistics.iowait_count++;
2996 trace_sched_stat_iowait(tsk, delta);
2997 }
2998
2999 trace_sched_stat_blocked(tsk, delta);
3000
3001 /*
3002 * Blocking time is in units of nanosecs, so shift by
3003 * 20 to get a milliseconds-range estimation of the
3004 * amount of time that the task spent sleeping:
3005 */
3006 if (unlikely(prof_on == SLEEP_PROFILING)) {
3007 profile_hits(SLEEP_PROFILING,
3008 (void *)get_wchan(tsk),
3009 delta >> 20);
3010 }
3011 account_scheduler_latency(tsk, delta >> 10, 0);
3012 }
3013 }
3014 #endif
3015 }
3016
3017 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3018 {
3019 #ifdef CONFIG_SCHED_DEBUG
3020 s64 d = se->vruntime - cfs_rq->min_vruntime;
3021
3022 if (d < 0)
3023 d = -d;
3024
3025 if (d > 3*sysctl_sched_latency)
3026 schedstat_inc(cfs_rq, nr_spread_over);
3027 #endif
3028 }
3029
3030 static void
3031 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3032 {
3033 u64 vruntime = cfs_rq->min_vruntime;
3034
3035 /*
3036 * The 'current' period is already promised to the current tasks,
3037 * however the extra weight of the new task will slow them down a
3038 * little, place the new task so that it fits in the slot that
3039 * stays open at the end.
3040 */
3041 if (initial && sched_feat(START_DEBIT))
3042 vruntime += sched_vslice(cfs_rq, se);
3043
3044 /* sleeps up to a single latency don't count. */
3045 if (!initial) {
3046 unsigned long thresh = sysctl_sched_latency;
3047
3048 /*
3049 * Halve their sleep time's effect, to allow
3050 * for a gentler effect of sleepers:
3051 */
3052 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3053 thresh >>= 1;
3054
3055 vruntime -= thresh;
3056 }
3057
3058 /* ensure we never gain time by being placed backwards. */
3059 se->vruntime = max_vruntime(se->vruntime, vruntime);
3060 }
3061
3062 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3063
3064 static void
3065 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3066 {
3067 /*
3068 * Update the normalized vruntime before updating min_vruntime
3069 * through calling update_curr().
3070 */
3071 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
3072 se->vruntime += cfs_rq->min_vruntime;
3073
3074 /*
3075 * Update run-time statistics of the 'current'.
3076 */
3077 update_curr(cfs_rq);
3078 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
3079 account_entity_enqueue(cfs_rq, se);
3080 update_cfs_shares(cfs_rq);
3081
3082 if (flags & ENQUEUE_WAKEUP) {
3083 place_entity(cfs_rq, se, 0);
3084 enqueue_sleeper(cfs_rq, se);
3085 }
3086
3087 update_stats_enqueue(cfs_rq, se);
3088 check_spread(cfs_rq, se);
3089 if (se != cfs_rq->curr)
3090 __enqueue_entity(cfs_rq, se);
3091 se->on_rq = 1;
3092
3093 if (cfs_rq->nr_running == 1) {
3094 list_add_leaf_cfs_rq(cfs_rq);
3095 check_enqueue_throttle(cfs_rq);
3096 }
3097 }
3098
3099 static void __clear_buddies_last(struct sched_entity *se)
3100 {
3101 for_each_sched_entity(se) {
3102 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3103 if (cfs_rq->last != se)
3104 break;
3105
3106 cfs_rq->last = NULL;
3107 }
3108 }
3109
3110 static void __clear_buddies_next(struct sched_entity *se)
3111 {
3112 for_each_sched_entity(se) {
3113 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3114 if (cfs_rq->next != se)
3115 break;
3116
3117 cfs_rq->next = NULL;
3118 }
3119 }
3120
3121 static void __clear_buddies_skip(struct sched_entity *se)
3122 {
3123 for_each_sched_entity(se) {
3124 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3125 if (cfs_rq->skip != se)
3126 break;
3127
3128 cfs_rq->skip = NULL;
3129 }
3130 }
3131
3132 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3133 {
3134 if (cfs_rq->last == se)
3135 __clear_buddies_last(se);
3136
3137 if (cfs_rq->next == se)
3138 __clear_buddies_next(se);
3139
3140 if (cfs_rq->skip == se)
3141 __clear_buddies_skip(se);
3142 }
3143
3144 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3145
3146 static void
3147 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3148 {
3149 /*
3150 * Update run-time statistics of the 'current'.
3151 */
3152 update_curr(cfs_rq);
3153 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
3154
3155 update_stats_dequeue(cfs_rq, se);
3156 if (flags & DEQUEUE_SLEEP) {
3157 #ifdef CONFIG_SCHEDSTATS
3158 if (entity_is_task(se)) {
3159 struct task_struct *tsk = task_of(se);
3160
3161 if (tsk->state & TASK_INTERRUPTIBLE)
3162 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
3163 if (tsk->state & TASK_UNINTERRUPTIBLE)
3164 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
3165 }
3166 #endif
3167 }
3168
3169 clear_buddies(cfs_rq, se);
3170
3171 if (se != cfs_rq->curr)
3172 __dequeue_entity(cfs_rq, se);
3173 se->on_rq = 0;
3174 account_entity_dequeue(cfs_rq, se);
3175
3176 /*
3177 * Normalize the entity after updating the min_vruntime because the
3178 * update can refer to the ->curr item and we need to reflect this
3179 * movement in our normalized position.
3180 */
3181 if (!(flags & DEQUEUE_SLEEP))
3182 se->vruntime -= cfs_rq->min_vruntime;
3183
3184 /* return excess runtime on last dequeue */
3185 return_cfs_rq_runtime(cfs_rq);
3186
3187 update_min_vruntime(cfs_rq);
3188 update_cfs_shares(cfs_rq);
3189 }
3190
3191 /*
3192 * Preempt the current task with a newly woken task if needed:
3193 */
3194 static void
3195 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3196 {
3197 unsigned long ideal_runtime, delta_exec;
3198 struct sched_entity *se;
3199 s64 delta;
3200
3201 ideal_runtime = sched_slice(cfs_rq, curr);
3202 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3203 if (delta_exec > ideal_runtime) {
3204 resched_curr(rq_of(cfs_rq));
3205 /*
3206 * The current task ran long enough, ensure it doesn't get
3207 * re-elected due to buddy favours.
3208 */
3209 clear_buddies(cfs_rq, curr);
3210 return;
3211 }
3212
3213 /*
3214 * Ensure that a task that missed wakeup preemption by a
3215 * narrow margin doesn't have to wait for a full slice.
3216 * This also mitigates buddy induced latencies under load.
3217 */
3218 if (delta_exec < sysctl_sched_min_granularity)
3219 return;
3220
3221 se = __pick_first_entity(cfs_rq);
3222 delta = curr->vruntime - se->vruntime;
3223
3224 if (delta < 0)
3225 return;
3226
3227 if (delta > ideal_runtime)
3228 resched_curr(rq_of(cfs_rq));
3229 }
3230
3231 static void
3232 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3233 {
3234 /* 'current' is not kept within the tree. */
3235 if (se->on_rq) {
3236 /*
3237 * Any task has to be enqueued before it get to execute on
3238 * a CPU. So account for the time it spent waiting on the
3239 * runqueue.
3240 */
3241 update_stats_wait_end(cfs_rq, se);
3242 __dequeue_entity(cfs_rq, se);
3243 update_entity_load_avg(se, 1);
3244 }
3245
3246 update_stats_curr_start(cfs_rq, se);
3247 cfs_rq->curr = se;
3248 #ifdef CONFIG_SCHEDSTATS
3249 /*
3250 * Track our maximum slice length, if the CPU's load is at
3251 * least twice that of our own weight (i.e. dont track it
3252 * when there are only lesser-weight tasks around):
3253 */
3254 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3255 se->statistics.slice_max = max(se->statistics.slice_max,
3256 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3257 }
3258 #endif
3259 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3260 }
3261
3262 static int
3263 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3264
3265 /*
3266 * Pick the next process, keeping these things in mind, in this order:
3267 * 1) keep things fair between processes/task groups
3268 * 2) pick the "next" process, since someone really wants that to run
3269 * 3) pick the "last" process, for cache locality
3270 * 4) do not run the "skip" process, if something else is available
3271 */
3272 static struct sched_entity *
3273 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3274 {
3275 struct sched_entity *left = __pick_first_entity(cfs_rq);
3276 struct sched_entity *se;
3277
3278 /*
3279 * If curr is set we have to see if its left of the leftmost entity
3280 * still in the tree, provided there was anything in the tree at all.
3281 */
3282 if (!left || (curr && entity_before(curr, left)))
3283 left = curr;
3284
3285 se = left; /* ideally we run the leftmost entity */
3286
3287 /*
3288 * Avoid running the skip buddy, if running something else can
3289 * be done without getting too unfair.
3290 */
3291 if (cfs_rq->skip == se) {
3292 struct sched_entity *second;
3293
3294 if (se == curr) {
3295 second = __pick_first_entity(cfs_rq);
3296 } else {
3297 second = __pick_next_entity(se);
3298 if (!second || (curr && entity_before(curr, second)))
3299 second = curr;
3300 }
3301
3302 if (second && wakeup_preempt_entity(second, left) < 1)
3303 se = second;
3304 }
3305
3306 /*
3307 * Prefer last buddy, try to return the CPU to a preempted task.
3308 */
3309 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3310 se = cfs_rq->last;
3311
3312 /*
3313 * Someone really wants this to run. If it's not unfair, run it.
3314 */
3315 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3316 se = cfs_rq->next;
3317
3318 clear_buddies(cfs_rq, se);
3319
3320 return se;
3321 }
3322
3323 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3324
3325 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3326 {
3327 /*
3328 * If still on the runqueue then deactivate_task()
3329 * was not called and update_curr() has to be done:
3330 */
3331 if (prev->on_rq)
3332 update_curr(cfs_rq);
3333
3334 /* throttle cfs_rqs exceeding runtime */
3335 check_cfs_rq_runtime(cfs_rq);
3336
3337 check_spread(cfs_rq, prev);
3338 if (prev->on_rq) {
3339 update_stats_wait_start(cfs_rq, prev);
3340 /* Put 'current' back into the tree. */
3341 __enqueue_entity(cfs_rq, prev);
3342 /* in !on_rq case, update occurred at dequeue */
3343 update_entity_load_avg(prev, 1);
3344 }
3345 cfs_rq->curr = NULL;
3346 }
3347
3348 static void
3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3350 {
3351 /*
3352 * Update run-time statistics of the 'current'.
3353 */
3354 update_curr(cfs_rq);
3355
3356 /*
3357 * Ensure that runnable average is periodically updated.
3358 */
3359 update_entity_load_avg(curr, 1);
3360 update_cfs_rq_blocked_load(cfs_rq, 1);
3361 update_cfs_shares(cfs_rq);
3362
3363 #ifdef CONFIG_SCHED_HRTICK
3364 /*
3365 * queued ticks are scheduled to match the slice, so don't bother
3366 * validating it and just reschedule.
3367 */
3368 if (queued) {
3369 resched_curr(rq_of(cfs_rq));
3370 return;
3371 }
3372 /*
3373 * don't let the period tick interfere with the hrtick preemption
3374 */
3375 if (!sched_feat(DOUBLE_TICK) &&
3376 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3377 return;
3378 #endif
3379
3380 if (cfs_rq->nr_running > 1)
3381 check_preempt_tick(cfs_rq, curr);
3382 }
3383
3384
3385 /**************************************************
3386 * CFS bandwidth control machinery
3387 */
3388
3389 #ifdef CONFIG_CFS_BANDWIDTH
3390
3391 #ifdef HAVE_JUMP_LABEL
3392 static struct static_key __cfs_bandwidth_used;
3393
3394 static inline bool cfs_bandwidth_used(void)
3395 {
3396 return static_key_false(&__cfs_bandwidth_used);
3397 }
3398
3399 void cfs_bandwidth_usage_inc(void)
3400 {
3401 static_key_slow_inc(&__cfs_bandwidth_used);
3402 }
3403
3404 void cfs_bandwidth_usage_dec(void)
3405 {
3406 static_key_slow_dec(&__cfs_bandwidth_used);
3407 }
3408 #else /* HAVE_JUMP_LABEL */
3409 static bool cfs_bandwidth_used(void)
3410 {
3411 return true;
3412 }
3413
3414 void cfs_bandwidth_usage_inc(void) {}
3415 void cfs_bandwidth_usage_dec(void) {}
3416 #endif /* HAVE_JUMP_LABEL */
3417
3418 /*
3419 * default period for cfs group bandwidth.
3420 * default: 0.1s, units: nanoseconds
3421 */
3422 static inline u64 default_cfs_period(void)
3423 {
3424 return 100000000ULL;
3425 }
3426
3427 static inline u64 sched_cfs_bandwidth_slice(void)
3428 {
3429 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3430 }
3431
3432 /*
3433 * Replenish runtime according to assigned quota and update expiration time.
3434 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3435 * additional synchronization around rq->lock.
3436 *
3437 * requires cfs_b->lock
3438 */
3439 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3440 {
3441 u64 now;
3442
3443 if (cfs_b->quota == RUNTIME_INF)
3444 return;
3445
3446 now = sched_clock_cpu(smp_processor_id());
3447 cfs_b->runtime = cfs_b->quota;
3448 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3449 }
3450
3451 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3452 {
3453 return &tg->cfs_bandwidth;
3454 }
3455
3456 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3457 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3458 {
3459 if (unlikely(cfs_rq->throttle_count))
3460 return cfs_rq->throttled_clock_task;
3461
3462 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3463 }
3464
3465 /* returns 0 on failure to allocate runtime */
3466 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3467 {
3468 struct task_group *tg = cfs_rq->tg;
3469 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3470 u64 amount = 0, min_amount, expires;
3471
3472 /* note: this is a positive sum as runtime_remaining <= 0 */
3473 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3474
3475 raw_spin_lock(&cfs_b->lock);
3476 if (cfs_b->quota == RUNTIME_INF)
3477 amount = min_amount;
3478 else {
3479 /*
3480 * If the bandwidth pool has become inactive, then at least one
3481 * period must have elapsed since the last consumption.
3482 * Refresh the global state and ensure bandwidth timer becomes
3483 * active.
3484 */
3485 if (!cfs_b->timer_active) {
3486 __refill_cfs_bandwidth_runtime(cfs_b);
3487 __start_cfs_bandwidth(cfs_b, false);
3488 }
3489
3490 if (cfs_b->runtime > 0) {
3491 amount = min(cfs_b->runtime, min_amount);
3492 cfs_b->runtime -= amount;
3493 cfs_b->idle = 0;
3494 }
3495 }
3496 expires = cfs_b->runtime_expires;
3497 raw_spin_unlock(&cfs_b->lock);
3498
3499 cfs_rq->runtime_remaining += amount;
3500 /*
3501 * we may have advanced our local expiration to account for allowed
3502 * spread between our sched_clock and the one on which runtime was
3503 * issued.
3504 */
3505 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3506 cfs_rq->runtime_expires = expires;
3507
3508 return cfs_rq->runtime_remaining > 0;
3509 }
3510
3511 /*
3512 * Note: This depends on the synchronization provided by sched_clock and the
3513 * fact that rq->clock snapshots this value.
3514 */
3515 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3516 {
3517 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3518
3519 /* if the deadline is ahead of our clock, nothing to do */
3520 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3521 return;
3522
3523 if (cfs_rq->runtime_remaining < 0)
3524 return;
3525
3526 /*
3527 * If the local deadline has passed we have to consider the
3528 * possibility that our sched_clock is 'fast' and the global deadline
3529 * has not truly expired.
3530 *
3531 * Fortunately we can check determine whether this the case by checking
3532 * whether the global deadline has advanced. It is valid to compare
3533 * cfs_b->runtime_expires without any locks since we only care about
3534 * exact equality, so a partial write will still work.
3535 */
3536
3537 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3538 /* extend local deadline, drift is bounded above by 2 ticks */
3539 cfs_rq->runtime_expires += TICK_NSEC;
3540 } else {
3541 /* global deadline is ahead, expiration has passed */
3542 cfs_rq->runtime_remaining = 0;
3543 }
3544 }
3545
3546 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3547 {
3548 /* dock delta_exec before expiring quota (as it could span periods) */
3549 cfs_rq->runtime_remaining -= delta_exec;
3550 expire_cfs_rq_runtime(cfs_rq);
3551
3552 if (likely(cfs_rq->runtime_remaining > 0))
3553 return;
3554
3555 /*
3556 * if we're unable to extend our runtime we resched so that the active
3557 * hierarchy can be throttled
3558 */
3559 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3560 resched_curr(rq_of(cfs_rq));
3561 }
3562
3563 static __always_inline
3564 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3565 {
3566 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3567 return;
3568
3569 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3570 }
3571
3572 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3573 {
3574 return cfs_bandwidth_used() && cfs_rq->throttled;
3575 }
3576
3577 /* check whether cfs_rq, or any parent, is throttled */
3578 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3579 {
3580 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3581 }
3582
3583 /*
3584 * Ensure that neither of the group entities corresponding to src_cpu or
3585 * dest_cpu are members of a throttled hierarchy when performing group
3586 * load-balance operations.
3587 */
3588 static inline int throttled_lb_pair(struct task_group *tg,
3589 int src_cpu, int dest_cpu)
3590 {
3591 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3592
3593 src_cfs_rq = tg->cfs_rq[src_cpu];
3594 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3595
3596 return throttled_hierarchy(src_cfs_rq) ||
3597 throttled_hierarchy(dest_cfs_rq);
3598 }
3599
3600 /* updated child weight may affect parent so we have to do this bottom up */
3601 static int tg_unthrottle_up(struct task_group *tg, void *data)
3602 {
3603 struct rq *rq = data;
3604 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3605
3606 cfs_rq->throttle_count--;
3607 #ifdef CONFIG_SMP
3608 if (!cfs_rq->throttle_count) {
3609 /* adjust cfs_rq_clock_task() */
3610 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3611 cfs_rq->throttled_clock_task;
3612 }
3613 #endif
3614
3615 return 0;
3616 }
3617
3618 static int tg_throttle_down(struct task_group *tg, void *data)
3619 {
3620 struct rq *rq = data;
3621 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3622
3623 /* group is entering throttled state, stop time */
3624 if (!cfs_rq->throttle_count)
3625 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3626 cfs_rq->throttle_count++;
3627
3628 return 0;
3629 }
3630
3631 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3632 {
3633 struct rq *rq = rq_of(cfs_rq);
3634 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3635 struct sched_entity *se;
3636 long task_delta, dequeue = 1;
3637
3638 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3639
3640 /* freeze hierarchy runnable averages while throttled */
3641 rcu_read_lock();
3642 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3643 rcu_read_unlock();
3644
3645 task_delta = cfs_rq->h_nr_running;
3646 for_each_sched_entity(se) {
3647 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3648 /* throttled entity or throttle-on-deactivate */
3649 if (!se->on_rq)
3650 break;
3651
3652 if (dequeue)
3653 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3654 qcfs_rq->h_nr_running -= task_delta;
3655
3656 if (qcfs_rq->load.weight)
3657 dequeue = 0;
3658 }
3659
3660 if (!se)
3661 sub_nr_running(rq, task_delta);
3662
3663 cfs_rq->throttled = 1;
3664 cfs_rq->throttled_clock = rq_clock(rq);
3665 raw_spin_lock(&cfs_b->lock);
3666 /*
3667 * Add to the _head_ of the list, so that an already-started
3668 * distribute_cfs_runtime will not see us
3669 */
3670 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3671 if (!cfs_b->timer_active)
3672 __start_cfs_bandwidth(cfs_b, false);
3673 raw_spin_unlock(&cfs_b->lock);
3674 }
3675
3676 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3677 {
3678 struct rq *rq = rq_of(cfs_rq);
3679 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3680 struct sched_entity *se;
3681 int enqueue = 1;
3682 long task_delta;
3683
3684 se = cfs_rq->tg->se[cpu_of(rq)];
3685
3686 cfs_rq->throttled = 0;
3687
3688 update_rq_clock(rq);
3689
3690 raw_spin_lock(&cfs_b->lock);
3691 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3692 list_del_rcu(&cfs_rq->throttled_list);
3693 raw_spin_unlock(&cfs_b->lock);
3694
3695 /* update hierarchical throttle state */
3696 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3697
3698 if (!cfs_rq->load.weight)
3699 return;
3700
3701 task_delta = cfs_rq->h_nr_running;
3702 for_each_sched_entity(se) {
3703 if (se->on_rq)
3704 enqueue = 0;
3705
3706 cfs_rq = cfs_rq_of(se);
3707 if (enqueue)
3708 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3709 cfs_rq->h_nr_running += task_delta;
3710
3711 if (cfs_rq_throttled(cfs_rq))
3712 break;
3713 }
3714
3715 if (!se)
3716 add_nr_running(rq, task_delta);
3717
3718 /* determine whether we need to wake up potentially idle cpu */
3719 if (rq->curr == rq->idle && rq->cfs.nr_running)
3720 resched_curr(rq);
3721 }
3722
3723 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3724 u64 remaining, u64 expires)
3725 {
3726 struct cfs_rq *cfs_rq;
3727 u64 runtime;
3728 u64 starting_runtime = remaining;
3729
3730 rcu_read_lock();
3731 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3732 throttled_list) {
3733 struct rq *rq = rq_of(cfs_rq);
3734
3735 raw_spin_lock(&rq->lock);
3736 if (!cfs_rq_throttled(cfs_rq))
3737 goto next;
3738
3739 runtime = -cfs_rq->runtime_remaining + 1;
3740 if (runtime > remaining)
3741 runtime = remaining;
3742 remaining -= runtime;
3743
3744 cfs_rq->runtime_remaining += runtime;
3745 cfs_rq->runtime_expires = expires;
3746
3747 /* we check whether we're throttled above */
3748 if (cfs_rq->runtime_remaining > 0)
3749 unthrottle_cfs_rq(cfs_rq);
3750
3751 next:
3752 raw_spin_unlock(&rq->lock);
3753
3754 if (!remaining)
3755 break;
3756 }
3757 rcu_read_unlock();
3758
3759 return starting_runtime - remaining;
3760 }
3761
3762 /*
3763 * Responsible for refilling a task_group's bandwidth and unthrottling its
3764 * cfs_rqs as appropriate. If there has been no activity within the last
3765 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3766 * used to track this state.
3767 */
3768 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3769 {
3770 u64 runtime, runtime_expires;
3771 int throttled;
3772
3773 /* no need to continue the timer with no bandwidth constraint */
3774 if (cfs_b->quota == RUNTIME_INF)
3775 goto out_deactivate;
3776
3777 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3778 cfs_b->nr_periods += overrun;
3779
3780 /*
3781 * idle depends on !throttled (for the case of a large deficit), and if
3782 * we're going inactive then everything else can be deferred
3783 */
3784 if (cfs_b->idle && !throttled)
3785 goto out_deactivate;
3786
3787 /*
3788 * if we have relooped after returning idle once, we need to update our
3789 * status as actually running, so that other cpus doing
3790 * __start_cfs_bandwidth will stop trying to cancel us.
3791 */
3792 cfs_b->timer_active = 1;
3793
3794 __refill_cfs_bandwidth_runtime(cfs_b);
3795
3796 if (!throttled) {
3797 /* mark as potentially idle for the upcoming period */
3798 cfs_b->idle = 1;
3799 return 0;
3800 }
3801
3802 /* account preceding periods in which throttling occurred */
3803 cfs_b->nr_throttled += overrun;
3804
3805 runtime_expires = cfs_b->runtime_expires;
3806
3807 /*
3808 * This check is repeated as we are holding onto the new bandwidth while
3809 * we unthrottle. This can potentially race with an unthrottled group
3810 * trying to acquire new bandwidth from the global pool. This can result
3811 * in us over-using our runtime if it is all used during this loop, but
3812 * only by limited amounts in that extreme case.
3813 */
3814 while (throttled && cfs_b->runtime > 0) {
3815 runtime = cfs_b->runtime;
3816 raw_spin_unlock(&cfs_b->lock);
3817 /* we can't nest cfs_b->lock while distributing bandwidth */
3818 runtime = distribute_cfs_runtime(cfs_b, runtime,
3819 runtime_expires);
3820 raw_spin_lock(&cfs_b->lock);
3821
3822 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3823
3824 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3825 }
3826
3827 /*
3828 * While we are ensured activity in the period following an
3829 * unthrottle, this also covers the case in which the new bandwidth is
3830 * insufficient to cover the existing bandwidth deficit. (Forcing the
3831 * timer to remain active while there are any throttled entities.)
3832 */
3833 cfs_b->idle = 0;
3834
3835 return 0;
3836
3837 out_deactivate:
3838 cfs_b->timer_active = 0;
3839 return 1;
3840 }
3841
3842 /* a cfs_rq won't donate quota below this amount */
3843 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3844 /* minimum remaining period time to redistribute slack quota */
3845 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3846 /* how long we wait to gather additional slack before distributing */
3847 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3848
3849 /*
3850 * Are we near the end of the current quota period?
3851 *
3852 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3853 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3854 * migrate_hrtimers, base is never cleared, so we are fine.
3855 */
3856 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3857 {
3858 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3859 u64 remaining;
3860
3861 /* if the call-back is running a quota refresh is already occurring */
3862 if (hrtimer_callback_running(refresh_timer))
3863 return 1;
3864
3865 /* is a quota refresh about to occur? */
3866 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3867 if (remaining < min_expire)
3868 return 1;
3869
3870 return 0;
3871 }
3872
3873 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3874 {
3875 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3876
3877 /* if there's a quota refresh soon don't bother with slack */
3878 if (runtime_refresh_within(cfs_b, min_left))
3879 return;
3880
3881 start_bandwidth_timer(&cfs_b->slack_timer,
3882 ns_to_ktime(cfs_bandwidth_slack_period));
3883 }
3884
3885 /* we know any runtime found here is valid as update_curr() precedes return */
3886 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3887 {
3888 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3889 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3890
3891 if (slack_runtime <= 0)
3892 return;
3893
3894 raw_spin_lock(&cfs_b->lock);
3895 if (cfs_b->quota != RUNTIME_INF &&
3896 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3897 cfs_b->runtime += slack_runtime;
3898
3899 /* we are under rq->lock, defer unthrottling using a timer */
3900 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3901 !list_empty(&cfs_b->throttled_cfs_rq))
3902 start_cfs_slack_bandwidth(cfs_b);
3903 }
3904 raw_spin_unlock(&cfs_b->lock);
3905
3906 /* even if it's not valid for return we don't want to try again */
3907 cfs_rq->runtime_remaining -= slack_runtime;
3908 }
3909
3910 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3911 {
3912 if (!cfs_bandwidth_used())
3913 return;
3914
3915 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3916 return;
3917
3918 __return_cfs_rq_runtime(cfs_rq);
3919 }
3920
3921 /*
3922 * This is done with a timer (instead of inline with bandwidth return) since
3923 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3924 */
3925 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3926 {
3927 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3928 u64 expires;
3929
3930 /* confirm we're still not at a refresh boundary */
3931 raw_spin_lock(&cfs_b->lock);
3932 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3933 raw_spin_unlock(&cfs_b->lock);
3934 return;
3935 }
3936
3937 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
3938 runtime = cfs_b->runtime;
3939
3940 expires = cfs_b->runtime_expires;
3941 raw_spin_unlock(&cfs_b->lock);
3942
3943 if (!runtime)
3944 return;
3945
3946 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3947
3948 raw_spin_lock(&cfs_b->lock);
3949 if (expires == cfs_b->runtime_expires)
3950 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3951 raw_spin_unlock(&cfs_b->lock);
3952 }
3953
3954 /*
3955 * When a group wakes up we want to make sure that its quota is not already
3956 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3957 * runtime as update_curr() throttling can not not trigger until it's on-rq.
3958 */
3959 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3960 {
3961 if (!cfs_bandwidth_used())
3962 return;
3963
3964 /* an active group must be handled by the update_curr()->put() path */
3965 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3966 return;
3967
3968 /* ensure the group is not already throttled */
3969 if (cfs_rq_throttled(cfs_rq))
3970 return;
3971
3972 /* update runtime allocation */
3973 account_cfs_rq_runtime(cfs_rq, 0);
3974 if (cfs_rq->runtime_remaining <= 0)
3975 throttle_cfs_rq(cfs_rq);
3976 }
3977
3978 /* conditionally throttle active cfs_rq's from put_prev_entity() */
3979 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3980 {
3981 if (!cfs_bandwidth_used())
3982 return false;
3983
3984 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3985 return false;
3986
3987 /*
3988 * it's possible for a throttled entity to be forced into a running
3989 * state (e.g. set_curr_task), in this case we're finished.
3990 */
3991 if (cfs_rq_throttled(cfs_rq))
3992 return true;
3993
3994 throttle_cfs_rq(cfs_rq);
3995 return true;
3996 }
3997
3998 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3999 {
4000 struct cfs_bandwidth *cfs_b =
4001 container_of(timer, struct cfs_bandwidth, slack_timer);
4002 do_sched_cfs_slack_timer(cfs_b);
4003
4004 return HRTIMER_NORESTART;
4005 }
4006
4007 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4008 {
4009 struct cfs_bandwidth *cfs_b =
4010 container_of(timer, struct cfs_bandwidth, period_timer);
4011 ktime_t now;
4012 int overrun;
4013 int idle = 0;
4014
4015 raw_spin_lock(&cfs_b->lock);
4016 for (;;) {
4017 now = hrtimer_cb_get_time(timer);
4018 overrun = hrtimer_forward(timer, now, cfs_b->period);
4019
4020 if (!overrun)
4021 break;
4022
4023 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4024 }
4025 raw_spin_unlock(&cfs_b->lock);
4026
4027 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4028 }
4029
4030 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4031 {
4032 raw_spin_lock_init(&cfs_b->lock);
4033 cfs_b->runtime = 0;
4034 cfs_b->quota = RUNTIME_INF;
4035 cfs_b->period = ns_to_ktime(default_cfs_period());
4036
4037 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4038 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4039 cfs_b->period_timer.function = sched_cfs_period_timer;
4040 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4041 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4042 }
4043
4044 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4045 {
4046 cfs_rq->runtime_enabled = 0;
4047 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4048 }
4049
4050 /* requires cfs_b->lock, may release to reprogram timer */
4051 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
4052 {
4053 /*
4054 * The timer may be active because we're trying to set a new bandwidth
4055 * period or because we're racing with the tear-down path
4056 * (timer_active==0 becomes visible before the hrtimer call-back
4057 * terminates). In either case we ensure that it's re-programmed
4058 */
4059 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
4060 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
4061 /* bounce the lock to allow do_sched_cfs_period_timer to run */
4062 raw_spin_unlock(&cfs_b->lock);
4063 cpu_relax();
4064 raw_spin_lock(&cfs_b->lock);
4065 /* if someone else restarted the timer then we're done */
4066 if (!force && cfs_b->timer_active)
4067 return;
4068 }
4069
4070 cfs_b->timer_active = 1;
4071 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
4072 }
4073
4074 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4075 {
4076 /* init_cfs_bandwidth() was not called */
4077 if (!cfs_b->throttled_cfs_rq.next)
4078 return;
4079
4080 hrtimer_cancel(&cfs_b->period_timer);
4081 hrtimer_cancel(&cfs_b->slack_timer);
4082 }
4083
4084 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4085 {
4086 struct cfs_rq *cfs_rq;
4087
4088 for_each_leaf_cfs_rq(rq, cfs_rq) {
4089 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4090
4091 raw_spin_lock(&cfs_b->lock);
4092 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4093 raw_spin_unlock(&cfs_b->lock);
4094 }
4095 }
4096
4097 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4098 {
4099 struct cfs_rq *cfs_rq;
4100
4101 for_each_leaf_cfs_rq(rq, cfs_rq) {
4102 if (!cfs_rq->runtime_enabled)
4103 continue;
4104
4105 /*
4106 * clock_task is not advancing so we just need to make sure
4107 * there's some valid quota amount
4108 */
4109 cfs_rq->runtime_remaining = 1;
4110 /*
4111 * Offline rq is schedulable till cpu is completely disabled
4112 * in take_cpu_down(), so we prevent new cfs throttling here.
4113 */
4114 cfs_rq->runtime_enabled = 0;
4115
4116 if (cfs_rq_throttled(cfs_rq))
4117 unthrottle_cfs_rq(cfs_rq);
4118 }
4119 }
4120
4121 #else /* CONFIG_CFS_BANDWIDTH */
4122 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4123 {
4124 return rq_clock_task(rq_of(cfs_rq));
4125 }
4126
4127 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4128 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4129 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4130 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4131
4132 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4133 {
4134 return 0;
4135 }
4136
4137 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4138 {
4139 return 0;
4140 }
4141
4142 static inline int throttled_lb_pair(struct task_group *tg,
4143 int src_cpu, int dest_cpu)
4144 {
4145 return 0;
4146 }
4147
4148 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4149
4150 #ifdef CONFIG_FAIR_GROUP_SCHED
4151 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4152 #endif
4153
4154 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4155 {
4156 return NULL;
4157 }
4158 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4159 static inline void update_runtime_enabled(struct rq *rq) {}
4160 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4161
4162 #endif /* CONFIG_CFS_BANDWIDTH */
4163
4164 /**************************************************
4165 * CFS operations on tasks:
4166 */
4167
4168 #ifdef CONFIG_SCHED_HRTICK
4169 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4170 {
4171 struct sched_entity *se = &p->se;
4172 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4173
4174 WARN_ON(task_rq(p) != rq);
4175
4176 if (cfs_rq->nr_running > 1) {
4177 u64 slice = sched_slice(cfs_rq, se);
4178 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4179 s64 delta = slice - ran;
4180
4181 if (delta < 0) {
4182 if (rq->curr == p)
4183 resched_curr(rq);
4184 return;
4185 }
4186 hrtick_start(rq, delta);
4187 }
4188 }
4189
4190 /*
4191 * called from enqueue/dequeue and updates the hrtick when the
4192 * current task is from our class and nr_running is low enough
4193 * to matter.
4194 */
4195 static void hrtick_update(struct rq *rq)
4196 {
4197 struct task_struct *curr = rq->curr;
4198
4199 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4200 return;
4201
4202 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4203 hrtick_start_fair(rq, curr);
4204 }
4205 #else /* !CONFIG_SCHED_HRTICK */
4206 static inline void
4207 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4208 {
4209 }
4210
4211 static inline void hrtick_update(struct rq *rq)
4212 {
4213 }
4214 #endif
4215
4216 /*
4217 * The enqueue_task method is called before nr_running is
4218 * increased. Here we update the fair scheduling stats and
4219 * then put the task into the rbtree:
4220 */
4221 static void
4222 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4223 {
4224 struct cfs_rq *cfs_rq;
4225 struct sched_entity *se = &p->se;
4226
4227 for_each_sched_entity(se) {
4228 if (se->on_rq)
4229 break;
4230 cfs_rq = cfs_rq_of(se);
4231 enqueue_entity(cfs_rq, se, flags);
4232
4233 /*
4234 * end evaluation on encountering a throttled cfs_rq
4235 *
4236 * note: in the case of encountering a throttled cfs_rq we will
4237 * post the final h_nr_running increment below.
4238 */
4239 if (cfs_rq_throttled(cfs_rq))
4240 break;
4241 cfs_rq->h_nr_running++;
4242
4243 flags = ENQUEUE_WAKEUP;
4244 }
4245
4246 for_each_sched_entity(se) {
4247 cfs_rq = cfs_rq_of(se);
4248 cfs_rq->h_nr_running++;
4249
4250 if (cfs_rq_throttled(cfs_rq))
4251 break;
4252
4253 update_cfs_shares(cfs_rq);
4254 update_entity_load_avg(se, 1);
4255 }
4256
4257 if (!se) {
4258 update_rq_runnable_avg(rq, rq->nr_running);
4259 add_nr_running(rq, 1);
4260 }
4261 hrtick_update(rq);
4262 }
4263
4264 static void set_next_buddy(struct sched_entity *se);
4265
4266 /*
4267 * The dequeue_task method is called before nr_running is
4268 * decreased. We remove the task from the rbtree and
4269 * update the fair scheduling stats:
4270 */
4271 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4272 {
4273 struct cfs_rq *cfs_rq;
4274 struct sched_entity *se = &p->se;
4275 int task_sleep = flags & DEQUEUE_SLEEP;
4276
4277 for_each_sched_entity(se) {
4278 cfs_rq = cfs_rq_of(se);
4279 dequeue_entity(cfs_rq, se, flags);
4280
4281 /*
4282 * end evaluation on encountering a throttled cfs_rq
4283 *
4284 * note: in the case of encountering a throttled cfs_rq we will
4285 * post the final h_nr_running decrement below.
4286 */
4287 if (cfs_rq_throttled(cfs_rq))
4288 break;
4289 cfs_rq->h_nr_running--;
4290
4291 /* Don't dequeue parent if it has other entities besides us */
4292 if (cfs_rq->load.weight) {
4293 /*
4294 * Bias pick_next to pick a task from this cfs_rq, as
4295 * p is sleeping when it is within its sched_slice.
4296 */
4297 if (task_sleep && parent_entity(se))
4298 set_next_buddy(parent_entity(se));
4299
4300 /* avoid re-evaluating load for this entity */
4301 se = parent_entity(se);
4302 break;
4303 }
4304 flags |= DEQUEUE_SLEEP;
4305 }
4306
4307 for_each_sched_entity(se) {
4308 cfs_rq = cfs_rq_of(se);
4309 cfs_rq->h_nr_running--;
4310
4311 if (cfs_rq_throttled(cfs_rq))
4312 break;
4313
4314 update_cfs_shares(cfs_rq);
4315 update_entity_load_avg(se, 1);
4316 }
4317
4318 if (!se) {
4319 sub_nr_running(rq, 1);
4320 update_rq_runnable_avg(rq, 1);
4321 }
4322 hrtick_update(rq);
4323 }
4324
4325 #ifdef CONFIG_SMP
4326 /* Used instead of source_load when we know the type == 0 */
4327 static unsigned long weighted_cpuload(const int cpu)
4328 {
4329 return cpu_rq(cpu)->cfs.runnable_load_avg;
4330 }
4331
4332 /*
4333 * Return a low guess at the load of a migration-source cpu weighted
4334 * according to the scheduling class and "nice" value.
4335 *
4336 * We want to under-estimate the load of migration sources, to
4337 * balance conservatively.
4338 */
4339 static unsigned long source_load(int cpu, int type)
4340 {
4341 struct rq *rq = cpu_rq(cpu);
4342 unsigned long total = weighted_cpuload(cpu);
4343
4344 if (type == 0 || !sched_feat(LB_BIAS))
4345 return total;
4346
4347 return min(rq->cpu_load[type-1], total);
4348 }
4349
4350 /*
4351 * Return a high guess at the load of a migration-target cpu weighted
4352 * according to the scheduling class and "nice" value.
4353 */
4354 static unsigned long target_load(int cpu, int type)
4355 {
4356 struct rq *rq = cpu_rq(cpu);
4357 unsigned long total = weighted_cpuload(cpu);
4358
4359 if (type == 0 || !sched_feat(LB_BIAS))
4360 return total;
4361
4362 return max(rq->cpu_load[type-1], total);
4363 }
4364
4365 static unsigned long capacity_of(int cpu)
4366 {
4367 return cpu_rq(cpu)->cpu_capacity;
4368 }
4369
4370 static unsigned long capacity_orig_of(int cpu)
4371 {
4372 return cpu_rq(cpu)->cpu_capacity_orig;
4373 }
4374
4375 static unsigned long cpu_avg_load_per_task(int cpu)
4376 {
4377 struct rq *rq = cpu_rq(cpu);
4378 unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
4379 unsigned long load_avg = rq->cfs.runnable_load_avg;
4380
4381 if (nr_running)
4382 return load_avg / nr_running;
4383
4384 return 0;
4385 }
4386
4387 static void record_wakee(struct task_struct *p)
4388 {
4389 /*
4390 * Rough decay (wiping) for cost saving, don't worry
4391 * about the boundary, really active task won't care
4392 * about the loss.
4393 */
4394 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
4395 current->wakee_flips >>= 1;
4396 current->wakee_flip_decay_ts = jiffies;
4397 }
4398
4399 if (current->last_wakee != p) {
4400 current->last_wakee = p;
4401 current->wakee_flips++;
4402 }
4403 }
4404
4405 static void task_waking_fair(struct task_struct *p)
4406 {
4407 struct sched_entity *se = &p->se;
4408 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4409 u64 min_vruntime;
4410
4411 #ifndef CONFIG_64BIT
4412 u64 min_vruntime_copy;
4413
4414 do {
4415 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4416 smp_rmb();
4417 min_vruntime = cfs_rq->min_vruntime;
4418 } while (min_vruntime != min_vruntime_copy);
4419 #else
4420 min_vruntime = cfs_rq->min_vruntime;
4421 #endif
4422
4423 se->vruntime -= min_vruntime;
4424 record_wakee(p);
4425 }
4426
4427 #ifdef CONFIG_FAIR_GROUP_SCHED
4428 /*
4429 * effective_load() calculates the load change as seen from the root_task_group
4430 *
4431 * Adding load to a group doesn't make a group heavier, but can cause movement
4432 * of group shares between cpus. Assuming the shares were perfectly aligned one
4433 * can calculate the shift in shares.
4434 *
4435 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4436 * on this @cpu and results in a total addition (subtraction) of @wg to the
4437 * total group weight.
4438 *
4439 * Given a runqueue weight distribution (rw_i) we can compute a shares
4440 * distribution (s_i) using:
4441 *
4442 * s_i = rw_i / \Sum rw_j (1)
4443 *
4444 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4445 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4446 * shares distribution (s_i):
4447 *
4448 * rw_i = { 2, 4, 1, 0 }
4449 * s_i = { 2/7, 4/7, 1/7, 0 }
4450 *
4451 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4452 * task used to run on and the CPU the waker is running on), we need to
4453 * compute the effect of waking a task on either CPU and, in case of a sync
4454 * wakeup, compute the effect of the current task going to sleep.
4455 *
4456 * So for a change of @wl to the local @cpu with an overall group weight change
4457 * of @wl we can compute the new shares distribution (s'_i) using:
4458 *
4459 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4460 *
4461 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4462 * differences in waking a task to CPU 0. The additional task changes the
4463 * weight and shares distributions like:
4464 *
4465 * rw'_i = { 3, 4, 1, 0 }
4466 * s'_i = { 3/8, 4/8, 1/8, 0 }
4467 *
4468 * We can then compute the difference in effective weight by using:
4469 *
4470 * dw_i = S * (s'_i - s_i) (3)
4471 *
4472 * Where 'S' is the group weight as seen by its parent.
4473 *
4474 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4475 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4476 * 4/7) times the weight of the group.
4477 */
4478 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4479 {
4480 struct sched_entity *se = tg->se[cpu];
4481
4482 if (!tg->parent) /* the trivial, non-cgroup case */
4483 return wl;
4484
4485 for_each_sched_entity(se) {
4486 long w, W;
4487
4488 tg = se->my_q->tg;
4489
4490 /*
4491 * W = @wg + \Sum rw_j
4492 */
4493 W = wg + calc_tg_weight(tg, se->my_q);
4494
4495 /*
4496 * w = rw_i + @wl
4497 */
4498 w = se->my_q->load.weight + wl;
4499
4500 /*
4501 * wl = S * s'_i; see (2)
4502 */
4503 if (W > 0 && w < W)
4504 wl = (w * (long)tg->shares) / W;
4505 else
4506 wl = tg->shares;
4507
4508 /*
4509 * Per the above, wl is the new se->load.weight value; since
4510 * those are clipped to [MIN_SHARES, ...) do so now. See
4511 * calc_cfs_shares().
4512 */
4513 if (wl < MIN_SHARES)
4514 wl = MIN_SHARES;
4515
4516 /*
4517 * wl = dw_i = S * (s'_i - s_i); see (3)
4518 */
4519 wl -= se->load.weight;
4520
4521 /*
4522 * Recursively apply this logic to all parent groups to compute
4523 * the final effective load change on the root group. Since
4524 * only the @tg group gets extra weight, all parent groups can
4525 * only redistribute existing shares. @wl is the shift in shares
4526 * resulting from this level per the above.
4527 */
4528 wg = 0;
4529 }
4530
4531 return wl;
4532 }
4533 #else
4534
4535 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4536 {
4537 return wl;
4538 }
4539
4540 #endif
4541
4542 static int wake_wide(struct task_struct *p)
4543 {
4544 int factor = this_cpu_read(sd_llc_size);
4545
4546 /*
4547 * Yeah, it's the switching-frequency, could means many wakee or
4548 * rapidly switch, use factor here will just help to automatically
4549 * adjust the loose-degree, so bigger node will lead to more pull.
4550 */
4551 if (p->wakee_flips > factor) {
4552 /*
4553 * wakee is somewhat hot, it needs certain amount of cpu
4554 * resource, so if waker is far more hot, prefer to leave
4555 * it alone.
4556 */
4557 if (current->wakee_flips > (factor * p->wakee_flips))
4558 return 1;
4559 }
4560
4561 return 0;
4562 }
4563
4564 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4565 {
4566 s64 this_load, load;
4567 s64 this_eff_load, prev_eff_load;
4568 int idx, this_cpu, prev_cpu;
4569 struct task_group *tg;
4570 unsigned long weight;
4571 int balanced;
4572
4573 /*
4574 * If we wake multiple tasks be careful to not bounce
4575 * ourselves around too much.
4576 */
4577 if (wake_wide(p))
4578 return 0;
4579
4580 idx = sd->wake_idx;
4581 this_cpu = smp_processor_id();
4582 prev_cpu = task_cpu(p);
4583 load = source_load(prev_cpu, idx);
4584 this_load = target_load(this_cpu, idx);
4585
4586 /*
4587 * If sync wakeup then subtract the (maximum possible)
4588 * effect of the currently running task from the load
4589 * of the current CPU:
4590 */
4591 if (sync) {
4592 tg = task_group(current);
4593 weight = current->se.load.weight;
4594
4595 this_load += effective_load(tg, this_cpu, -weight, -weight);
4596 load += effective_load(tg, prev_cpu, 0, -weight);
4597 }
4598
4599 tg = task_group(p);
4600 weight = p->se.load.weight;
4601
4602 /*
4603 * In low-load situations, where prev_cpu is idle and this_cpu is idle
4604 * due to the sync cause above having dropped this_load to 0, we'll
4605 * always have an imbalance, but there's really nothing you can do
4606 * about that, so that's good too.
4607 *
4608 * Otherwise check if either cpus are near enough in load to allow this
4609 * task to be woken on this_cpu.
4610 */
4611 this_eff_load = 100;
4612 this_eff_load *= capacity_of(prev_cpu);
4613
4614 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4615 prev_eff_load *= capacity_of(this_cpu);
4616
4617 if (this_load > 0) {
4618 this_eff_load *= this_load +
4619 effective_load(tg, this_cpu, weight, weight);
4620
4621 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4622 }
4623
4624 balanced = this_eff_load <= prev_eff_load;
4625
4626 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4627
4628 if (!balanced)
4629 return 0;
4630
4631 schedstat_inc(sd, ttwu_move_affine);
4632 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4633
4634 return 1;
4635 }
4636
4637 /*
4638 * find_idlest_group finds and returns the least busy CPU group within the
4639 * domain.
4640 */
4641 static struct sched_group *
4642 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4643 int this_cpu, int sd_flag)
4644 {
4645 struct sched_group *idlest = NULL, *group = sd->groups;
4646 unsigned long min_load = ULONG_MAX, this_load = 0;
4647 int load_idx = sd->forkexec_idx;
4648 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4649
4650 if (sd_flag & SD_BALANCE_WAKE)
4651 load_idx = sd->wake_idx;
4652
4653 do {
4654 unsigned long load, avg_load;
4655 int local_group;
4656 int i;
4657
4658 /* Skip over this group if it has no CPUs allowed */
4659 if (!cpumask_intersects(sched_group_cpus(group),
4660 tsk_cpus_allowed(p)))
4661 continue;
4662
4663 local_group = cpumask_test_cpu(this_cpu,
4664 sched_group_cpus(group));
4665
4666 /* Tally up the load of all CPUs in the group */
4667 avg_load = 0;
4668
4669 for_each_cpu(i, sched_group_cpus(group)) {
4670 /* Bias balancing toward cpus of our domain */
4671 if (local_group)
4672 load = source_load(i, load_idx);
4673 else
4674 load = target_load(i, load_idx);
4675
4676 avg_load += load;
4677 }
4678
4679 /* Adjust by relative CPU capacity of the group */
4680 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
4681
4682 if (local_group) {
4683 this_load = avg_load;
4684 } else if (avg_load < min_load) {
4685 min_load = avg_load;
4686 idlest = group;
4687 }
4688 } while (group = group->next, group != sd->groups);
4689
4690 if (!idlest || 100*this_load < imbalance*min_load)
4691 return NULL;
4692 return idlest;
4693 }
4694
4695 /*
4696 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4697 */
4698 static int
4699 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4700 {
4701 unsigned long load, min_load = ULONG_MAX;
4702 unsigned int min_exit_latency = UINT_MAX;
4703 u64 latest_idle_timestamp = 0;
4704 int least_loaded_cpu = this_cpu;
4705 int shallowest_idle_cpu = -1;
4706 int i;
4707
4708 /* Traverse only the allowed CPUs */
4709 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4710 if (idle_cpu(i)) {
4711 struct rq *rq = cpu_rq(i);
4712 struct cpuidle_state *idle = idle_get_state(rq);
4713 if (idle && idle->exit_latency < min_exit_latency) {
4714 /*
4715 * We give priority to a CPU whose idle state
4716 * has the smallest exit latency irrespective
4717 * of any idle timestamp.
4718 */
4719 min_exit_latency = idle->exit_latency;
4720 latest_idle_timestamp = rq->idle_stamp;
4721 shallowest_idle_cpu = i;
4722 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
4723 rq->idle_stamp > latest_idle_timestamp) {
4724 /*
4725 * If equal or no active idle state, then
4726 * the most recently idled CPU might have
4727 * a warmer cache.
4728 */
4729 latest_idle_timestamp = rq->idle_stamp;
4730 shallowest_idle_cpu = i;
4731 }
4732 } else if (shallowest_idle_cpu == -1) {
4733 load = weighted_cpuload(i);
4734 if (load < min_load || (load == min_load && i == this_cpu)) {
4735 min_load = load;
4736 least_loaded_cpu = i;
4737 }
4738 }
4739 }
4740
4741 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
4742 }
4743
4744 /*
4745 * Try and locate an idle CPU in the sched_domain.
4746 */
4747 static int select_idle_sibling(struct task_struct *p, int target)
4748 {
4749 struct sched_domain *sd;
4750 struct sched_group *sg;
4751 int i = task_cpu(p);
4752
4753 if (idle_cpu(target))
4754 return target;
4755
4756 /*
4757 * If the prevous cpu is cache affine and idle, don't be stupid.
4758 */
4759 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4760 return i;
4761
4762 /*
4763 * Otherwise, iterate the domains and find an elegible idle cpu.
4764 */
4765 sd = rcu_dereference(per_cpu(sd_llc, target));
4766 for_each_lower_domain(sd) {
4767 sg = sd->groups;
4768 do {
4769 if (!cpumask_intersects(sched_group_cpus(sg),
4770 tsk_cpus_allowed(p)))
4771 goto next;
4772
4773 for_each_cpu(i, sched_group_cpus(sg)) {
4774 if (i == target || !idle_cpu(i))
4775 goto next;
4776 }
4777
4778 target = cpumask_first_and(sched_group_cpus(sg),
4779 tsk_cpus_allowed(p));
4780 goto done;
4781 next:
4782 sg = sg->next;
4783 } while (sg != sd->groups);
4784 }
4785 done:
4786 return target;
4787 }
4788 /*
4789 * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
4790 * tasks. The unit of the return value must be the one of capacity so we can
4791 * compare the usage with the capacity of the CPU that is available for CFS
4792 * task (ie cpu_capacity).
4793 * cfs.utilization_load_avg is the sum of running time of runnable tasks on a
4794 * CPU. It represents the amount of utilization of a CPU in the range
4795 * [0..SCHED_LOAD_SCALE]. The usage of a CPU can't be higher than the full
4796 * capacity of the CPU because it's about the running time on this CPU.
4797 * Nevertheless, cfs.utilization_load_avg can be higher than SCHED_LOAD_SCALE
4798 * because of unfortunate rounding in avg_period and running_load_avg or just
4799 * after migrating tasks until the average stabilizes with the new running
4800 * time. So we need to check that the usage stays into the range
4801 * [0..cpu_capacity_orig] and cap if necessary.
4802 * Without capping the usage, a group could be seen as overloaded (CPU0 usage
4803 * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity
4804 */
4805 static int get_cpu_usage(int cpu)
4806 {
4807 unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
4808 unsigned long capacity = capacity_orig_of(cpu);
4809
4810 if (usage >= SCHED_LOAD_SCALE)
4811 return capacity;
4812
4813 return (usage * capacity) >> SCHED_LOAD_SHIFT;
4814 }
4815
4816 /*
4817 * select_task_rq_fair: Select target runqueue for the waking task in domains
4818 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4819 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
4820 *
4821 * Balances load by selecting the idlest cpu in the idlest group, or under
4822 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
4823 *
4824 * Returns the target cpu number.
4825 *
4826 * preempt must be disabled.
4827 */
4828 static int
4829 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4830 {
4831 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4832 int cpu = smp_processor_id();
4833 int new_cpu = cpu;
4834 int want_affine = 0;
4835 int sync = wake_flags & WF_SYNC;
4836
4837 if (sd_flag & SD_BALANCE_WAKE)
4838 want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
4839
4840 rcu_read_lock();
4841 for_each_domain(cpu, tmp) {
4842 if (!(tmp->flags & SD_LOAD_BALANCE))
4843 continue;
4844
4845 /*
4846 * If both cpu and prev_cpu are part of this domain,
4847 * cpu is a valid SD_WAKE_AFFINE target.
4848 */
4849 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4850 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4851 affine_sd = tmp;
4852 break;
4853 }
4854
4855 if (tmp->flags & sd_flag)
4856 sd = tmp;
4857 }
4858
4859 if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4860 prev_cpu = cpu;
4861
4862 if (sd_flag & SD_BALANCE_WAKE) {
4863 new_cpu = select_idle_sibling(p, prev_cpu);
4864 goto unlock;
4865 }
4866
4867 while (sd) {
4868 struct sched_group *group;
4869 int weight;
4870
4871 if (!(sd->flags & sd_flag)) {
4872 sd = sd->child;
4873 continue;
4874 }
4875
4876 group = find_idlest_group(sd, p, cpu, sd_flag);
4877 if (!group) {
4878 sd = sd->child;
4879 continue;
4880 }
4881
4882 new_cpu = find_idlest_cpu(group, p, cpu);
4883 if (new_cpu == -1 || new_cpu == cpu) {
4884 /* Now try balancing at a lower domain level of cpu */
4885 sd = sd->child;
4886 continue;
4887 }
4888
4889 /* Now try balancing at a lower domain level of new_cpu */
4890 cpu = new_cpu;
4891 weight = sd->span_weight;
4892 sd = NULL;
4893 for_each_domain(cpu, tmp) {
4894 if (weight <= tmp->span_weight)
4895 break;
4896 if (tmp->flags & sd_flag)
4897 sd = tmp;
4898 }
4899 /* while loop will break here if sd == NULL */
4900 }
4901 unlock:
4902 rcu_read_unlock();
4903
4904 return new_cpu;
4905 }
4906
4907 /*
4908 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4909 * cfs_rq_of(p) references at time of call are still valid and identify the
4910 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4911 * other assumptions, including the state of rq->lock, should be made.
4912 */
4913 static void
4914 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4915 {
4916 struct sched_entity *se = &p->se;
4917 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4918
4919 /*
4920 * Load tracking: accumulate removed load so that it can be processed
4921 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4922 * to blocked load iff they have a positive decay-count. It can never
4923 * be negative here since on-rq tasks have decay-count == 0.
4924 */
4925 if (se->avg.decay_count) {
4926 se->avg.decay_count = -__synchronize_entity_decay(se);
4927 atomic_long_add(se->avg.load_avg_contrib,
4928 &cfs_rq->removed_load);
4929 }
4930
4931 /* We have migrated, no longer consider this task hot */
4932 se->exec_start = 0;
4933 }
4934 #endif /* CONFIG_SMP */
4935
4936 static unsigned long
4937 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4938 {
4939 unsigned long gran = sysctl_sched_wakeup_granularity;
4940
4941 /*
4942 * Since its curr running now, convert the gran from real-time
4943 * to virtual-time in his units.
4944 *
4945 * By using 'se' instead of 'curr' we penalize light tasks, so
4946 * they get preempted easier. That is, if 'se' < 'curr' then
4947 * the resulting gran will be larger, therefore penalizing the
4948 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4949 * be smaller, again penalizing the lighter task.
4950 *
4951 * This is especially important for buddies when the leftmost
4952 * task is higher priority than the buddy.
4953 */
4954 return calc_delta_fair(gran, se);
4955 }
4956
4957 /*
4958 * Should 'se' preempt 'curr'.
4959 *
4960 * |s1
4961 * |s2
4962 * |s3
4963 * g
4964 * |<--->|c
4965 *
4966 * w(c, s1) = -1
4967 * w(c, s2) = 0
4968 * w(c, s3) = 1
4969 *
4970 */
4971 static int
4972 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4973 {
4974 s64 gran, vdiff = curr->vruntime - se->vruntime;
4975
4976 if (vdiff <= 0)
4977 return -1;
4978
4979 gran = wakeup_gran(curr, se);
4980 if (vdiff > gran)
4981 return 1;
4982
4983 return 0;
4984 }
4985
4986 static void set_last_buddy(struct sched_entity *se)
4987 {
4988 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4989 return;
4990
4991 for_each_sched_entity(se)
4992 cfs_rq_of(se)->last = se;
4993 }
4994
4995 static void set_next_buddy(struct sched_entity *se)
4996 {
4997 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4998 return;
4999
5000 for_each_sched_entity(se)
5001 cfs_rq_of(se)->next = se;
5002 }
5003
5004 static void set_skip_buddy(struct sched_entity *se)
5005 {
5006 for_each_sched_entity(se)
5007 cfs_rq_of(se)->skip = se;
5008 }
5009
5010 /*
5011 * Preempt the current task with a newly woken task if needed:
5012 */
5013 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5014 {
5015 struct task_struct *curr = rq->curr;
5016 struct sched_entity *se = &curr->se, *pse = &p->se;
5017 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5018 int scale = cfs_rq->nr_running >= sched_nr_latency;
5019 int next_buddy_marked = 0;
5020
5021 if (unlikely(se == pse))
5022 return;
5023
5024 /*
5025 * This is possible from callers such as attach_tasks(), in which we
5026 * unconditionally check_prempt_curr() after an enqueue (which may have
5027 * lead to a throttle). This both saves work and prevents false
5028 * next-buddy nomination below.
5029 */
5030 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5031 return;
5032
5033 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5034 set_next_buddy(pse);
5035 next_buddy_marked = 1;
5036 }
5037
5038 /*
5039 * We can come here with TIF_NEED_RESCHED already set from new task
5040 * wake up path.
5041 *
5042 * Note: this also catches the edge-case of curr being in a throttled
5043 * group (e.g. via set_curr_task), since update_curr() (in the
5044 * enqueue of curr) will have resulted in resched being set. This
5045 * prevents us from potentially nominating it as a false LAST_BUDDY
5046 * below.
5047 */
5048 if (test_tsk_need_resched(curr))
5049 return;
5050
5051 /* Idle tasks are by definition preempted by non-idle tasks. */
5052 if (unlikely(curr->policy == SCHED_IDLE) &&
5053 likely(p->policy != SCHED_IDLE))
5054 goto preempt;
5055
5056 /*
5057 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5058 * is driven by the tick):
5059 */
5060 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5061 return;
5062
5063 find_matching_se(&se, &pse);
5064 update_curr(cfs_rq_of(se));
5065 BUG_ON(!pse);
5066 if (wakeup_preempt_entity(se, pse) == 1) {
5067 /*
5068 * Bias pick_next to pick the sched entity that is
5069 * triggering this preemption.
5070 */
5071 if (!next_buddy_marked)
5072 set_next_buddy(pse);
5073 goto preempt;
5074 }
5075
5076 return;
5077
5078 preempt:
5079 resched_curr(rq);
5080 /*
5081 * Only set the backward buddy when the current task is still
5082 * on the rq. This can happen when a wakeup gets interleaved
5083 * with schedule on the ->pre_schedule() or idle_balance()
5084 * point, either of which can * drop the rq lock.
5085 *
5086 * Also, during early boot the idle thread is in the fair class,
5087 * for obvious reasons its a bad idea to schedule back to it.
5088 */
5089 if (unlikely(!se->on_rq || curr == rq->idle))
5090 return;
5091
5092 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5093 set_last_buddy(se);
5094 }
5095
5096 static struct task_struct *
5097 pick_next_task_fair(struct rq *rq, struct task_struct *prev)
5098 {
5099 struct cfs_rq *cfs_rq = &rq->cfs;
5100 struct sched_entity *se;
5101 struct task_struct *p;
5102 int new_tasks;
5103
5104 again:
5105 #ifdef CONFIG_FAIR_GROUP_SCHED
5106 if (!cfs_rq->nr_running)
5107 goto idle;
5108
5109 if (prev->sched_class != &fair_sched_class)
5110 goto simple;
5111
5112 /*
5113 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5114 * likely that a next task is from the same cgroup as the current.
5115 *
5116 * Therefore attempt to avoid putting and setting the entire cgroup
5117 * hierarchy, only change the part that actually changes.
5118 */
5119
5120 do {
5121 struct sched_entity *curr = cfs_rq->curr;
5122
5123 /*
5124 * Since we got here without doing put_prev_entity() we also
5125 * have to consider cfs_rq->curr. If it is still a runnable
5126 * entity, update_curr() will update its vruntime, otherwise
5127 * forget we've ever seen it.
5128 */
5129 if (curr && curr->on_rq)
5130 update_curr(cfs_rq);
5131 else
5132 curr = NULL;
5133
5134 /*
5135 * This call to check_cfs_rq_runtime() will do the throttle and
5136 * dequeue its entity in the parent(s). Therefore the 'simple'
5137 * nr_running test will indeed be correct.
5138 */
5139 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5140 goto simple;
5141
5142 se = pick_next_entity(cfs_rq, curr);
5143 cfs_rq = group_cfs_rq(se);
5144 } while (cfs_rq);
5145
5146 p = task_of(se);
5147
5148 /*
5149 * Since we haven't yet done put_prev_entity and if the selected task
5150 * is a different task than we started out with, try and touch the
5151 * least amount of cfs_rqs.
5152 */
5153 if (prev != p) {
5154 struct sched_entity *pse = &prev->se;
5155
5156 while (!(cfs_rq = is_same_group(se, pse))) {
5157 int se_depth = se->depth;
5158 int pse_depth = pse->depth;
5159
5160 if (se_depth <= pse_depth) {
5161 put_prev_entity(cfs_rq_of(pse), pse);
5162 pse = parent_entity(pse);
5163 }
5164 if (se_depth >= pse_depth) {
5165 set_next_entity(cfs_rq_of(se), se);
5166 se = parent_entity(se);
5167 }
5168 }
5169
5170 put_prev_entity(cfs_rq, pse);
5171 set_next_entity(cfs_rq, se);
5172 }
5173
5174 if (hrtick_enabled(rq))
5175 hrtick_start_fair(rq, p);
5176
5177 return p;
5178 simple:
5179 cfs_rq = &rq->cfs;
5180 #endif
5181
5182 if (!cfs_rq->nr_running)
5183 goto idle;
5184
5185 put_prev_task(rq, prev);
5186
5187 do {
5188 se = pick_next_entity(cfs_rq, NULL);
5189 set_next_entity(cfs_rq, se);
5190 cfs_rq = group_cfs_rq(se);
5191 } while (cfs_rq);
5192
5193 p = task_of(se);
5194
5195 if (hrtick_enabled(rq))
5196 hrtick_start_fair(rq, p);
5197
5198 return p;
5199
5200 idle:
5201 new_tasks = idle_balance(rq);
5202 /*
5203 * Because idle_balance() releases (and re-acquires) rq->lock, it is
5204 * possible for any higher priority task to appear. In that case we
5205 * must re-start the pick_next_entity() loop.
5206 */
5207 if (new_tasks < 0)
5208 return RETRY_TASK;
5209
5210 if (new_tasks > 0)
5211 goto again;
5212
5213 return NULL;
5214 }
5215
5216 /*
5217 * Account for a descheduled task:
5218 */
5219 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
5220 {
5221 struct sched_entity *se = &prev->se;
5222 struct cfs_rq *cfs_rq;
5223
5224 for_each_sched_entity(se) {
5225 cfs_rq = cfs_rq_of(se);
5226 put_prev_entity(cfs_rq, se);
5227 }
5228 }
5229
5230 /*
5231 * sched_yield() is very simple
5232 *
5233 * The magic of dealing with the ->skip buddy is in pick_next_entity.
5234 */
5235 static void yield_task_fair(struct rq *rq)
5236 {
5237 struct task_struct *curr = rq->curr;
5238 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5239 struct sched_entity *se = &curr->se;
5240
5241 /*
5242 * Are we the only task in the tree?
5243 */
5244 if (unlikely(rq->nr_running == 1))
5245 return;
5246
5247 clear_buddies(cfs_rq, se);
5248
5249 if (curr->policy != SCHED_BATCH) {
5250 update_rq_clock(rq);
5251 /*
5252 * Update run-time statistics of the 'current'.
5253 */
5254 update_curr(cfs_rq);
5255 /*
5256 * Tell update_rq_clock() that we've just updated,
5257 * so we don't do microscopic update in schedule()
5258 * and double the fastpath cost.
5259 */
5260 rq_clock_skip_update(rq, true);
5261 }
5262
5263 set_skip_buddy(se);
5264 }
5265
5266 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5267 {
5268 struct sched_entity *se = &p->se;
5269
5270 /* throttled hierarchies are not runnable */
5271 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
5272 return false;
5273
5274 /* Tell the scheduler that we'd really like pse to run next. */
5275 set_next_buddy(se);
5276
5277 yield_task_fair(rq);
5278
5279 return true;
5280 }
5281
5282 #ifdef CONFIG_SMP
5283 /**************************************************
5284 * Fair scheduling class load-balancing methods.
5285 *
5286 * BASICS
5287 *
5288 * The purpose of load-balancing is to achieve the same basic fairness the
5289 * per-cpu scheduler provides, namely provide a proportional amount of compute
5290 * time to each task. This is expressed in the following equation:
5291 *
5292 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
5293 *
5294 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
5295 * W_i,0 is defined as:
5296 *
5297 * W_i,0 = \Sum_j w_i,j (2)
5298 *
5299 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
5300 * is derived from the nice value as per prio_to_weight[].
5301 *
5302 * The weight average is an exponential decay average of the instantaneous
5303 * weight:
5304 *
5305 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
5306 *
5307 * C_i is the compute capacity of cpu i, typically it is the
5308 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
5309 * can also include other factors [XXX].
5310 *
5311 * To achieve this balance we define a measure of imbalance which follows
5312 * directly from (1):
5313 *
5314 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
5315 *
5316 * We them move tasks around to minimize the imbalance. In the continuous
5317 * function space it is obvious this converges, in the discrete case we get
5318 * a few fun cases generally called infeasible weight scenarios.
5319 *
5320 * [XXX expand on:
5321 * - infeasible weights;
5322 * - local vs global optima in the discrete case. ]
5323 *
5324 *
5325 * SCHED DOMAINS
5326 *
5327 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5328 * for all i,j solution, we create a tree of cpus that follows the hardware
5329 * topology where each level pairs two lower groups (or better). This results
5330 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5331 * tree to only the first of the previous level and we decrease the frequency
5332 * of load-balance at each level inv. proportional to the number of cpus in
5333 * the groups.
5334 *
5335 * This yields:
5336 *
5337 * log_2 n 1 n
5338 * \Sum { --- * --- * 2^i } = O(n) (5)
5339 * i = 0 2^i 2^i
5340 * `- size of each group
5341 * | | `- number of cpus doing load-balance
5342 * | `- freq
5343 * `- sum over all levels
5344 *
5345 * Coupled with a limit on how many tasks we can migrate every balance pass,
5346 * this makes (5) the runtime complexity of the balancer.
5347 *
5348 * An important property here is that each CPU is still (indirectly) connected
5349 * to every other cpu in at most O(log n) steps:
5350 *
5351 * The adjacency matrix of the resulting graph is given by:
5352 *
5353 * log_2 n
5354 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5355 * k = 0
5356 *
5357 * And you'll find that:
5358 *
5359 * A^(log_2 n)_i,j != 0 for all i,j (7)
5360 *
5361 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5362 * The task movement gives a factor of O(m), giving a convergence complexity
5363 * of:
5364 *
5365 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5366 *
5367 *
5368 * WORK CONSERVING
5369 *
5370 * In order to avoid CPUs going idle while there's still work to do, new idle
5371 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5372 * tree itself instead of relying on other CPUs to bring it work.
5373 *
5374 * This adds some complexity to both (5) and (8) but it reduces the total idle
5375 * time.
5376 *
5377 * [XXX more?]
5378 *
5379 *
5380 * CGROUPS
5381 *
5382 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5383 *
5384 * s_k,i
5385 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5386 * S_k
5387 *
5388 * Where
5389 *
5390 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5391 *
5392 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5393 *
5394 * The big problem is S_k, its a global sum needed to compute a local (W_i)
5395 * property.
5396 *
5397 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5398 * rewrite all of this once again.]
5399 */
5400
5401 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5402
5403 enum fbq_type { regular, remote, all };
5404
5405 #define LBF_ALL_PINNED 0x01
5406 #define LBF_NEED_BREAK 0x02
5407 #define LBF_DST_PINNED 0x04
5408 #define LBF_SOME_PINNED 0x08
5409
5410 struct lb_env {
5411 struct sched_domain *sd;
5412
5413 struct rq *src_rq;
5414 int src_cpu;
5415
5416 int dst_cpu;
5417 struct rq *dst_rq;
5418
5419 struct cpumask *dst_grpmask;
5420 int new_dst_cpu;
5421 enum cpu_idle_type idle;
5422 long imbalance;
5423 /* The set of CPUs under consideration for load-balancing */
5424 struct cpumask *cpus;
5425
5426 unsigned int flags;
5427
5428 unsigned int loop;
5429 unsigned int loop_break;
5430 unsigned int loop_max;
5431
5432 enum fbq_type fbq_type;
5433 struct list_head tasks;
5434 };
5435
5436 /*
5437 * Is this task likely cache-hot:
5438 */
5439 static int task_hot(struct task_struct *p, struct lb_env *env)
5440 {
5441 s64 delta;
5442
5443 lockdep_assert_held(&env->src_rq->lock);
5444
5445 if (p->sched_class != &fair_sched_class)
5446 return 0;
5447
5448 if (unlikely(p->policy == SCHED_IDLE))
5449 return 0;
5450
5451 /*
5452 * Buddy candidates are cache hot:
5453 */
5454 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
5455 (&p->se == cfs_rq_of(&p->se)->next ||
5456 &p->se == cfs_rq_of(&p->se)->last))
5457 return 1;
5458
5459 if (sysctl_sched_migration_cost == -1)
5460 return 1;
5461 if (sysctl_sched_migration_cost == 0)
5462 return 0;
5463
5464 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
5465
5466 return delta < (s64)sysctl_sched_migration_cost;
5467 }
5468
5469 #ifdef CONFIG_NUMA_BALANCING
5470 /* Returns true if the destination node has incurred more faults */
5471 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5472 {
5473 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5474 int src_nid, dst_nid;
5475
5476 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
5477 !(env->sd->flags & SD_NUMA)) {
5478 return false;
5479 }
5480
5481 src_nid = cpu_to_node(env->src_cpu);
5482 dst_nid = cpu_to_node(env->dst_cpu);
5483
5484 if (src_nid == dst_nid)
5485 return false;
5486
5487 if (numa_group) {
5488 /* Task is already in the group's interleave set. */
5489 if (node_isset(src_nid, numa_group->active_nodes))
5490 return false;
5491
5492 /* Task is moving into the group's interleave set. */
5493 if (node_isset(dst_nid, numa_group->active_nodes))
5494 return true;
5495
5496 return group_faults(p, dst_nid) > group_faults(p, src_nid);
5497 }
5498
5499 /* Encourage migration to the preferred node. */
5500 if (dst_nid == p->numa_preferred_nid)
5501 return true;
5502
5503 return task_faults(p, dst_nid) > task_faults(p, src_nid);
5504 }
5505
5506
5507 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5508 {
5509 struct numa_group *numa_group = rcu_dereference(p->numa_group);
5510 int src_nid, dst_nid;
5511
5512 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5513 return false;
5514
5515 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
5516 return false;
5517
5518 src_nid = cpu_to_node(env->src_cpu);
5519 dst_nid = cpu_to_node(env->dst_cpu);
5520
5521 if (src_nid == dst_nid)
5522 return false;
5523
5524 if (numa_group) {
5525 /* Task is moving within/into the group's interleave set. */
5526 if (node_isset(dst_nid, numa_group->active_nodes))
5527 return false;
5528
5529 /* Task is moving out of the group's interleave set. */
5530 if (node_isset(src_nid, numa_group->active_nodes))
5531 return true;
5532
5533 return group_faults(p, dst_nid) < group_faults(p, src_nid);
5534 }
5535
5536 /* Migrating away from the preferred node is always bad. */
5537 if (src_nid == p->numa_preferred_nid)
5538 return true;
5539
5540 return task_faults(p, dst_nid) < task_faults(p, src_nid);
5541 }
5542
5543 #else
5544 static inline bool migrate_improves_locality(struct task_struct *p,
5545 struct lb_env *env)
5546 {
5547 return false;
5548 }
5549
5550 static inline bool migrate_degrades_locality(struct task_struct *p,
5551 struct lb_env *env)
5552 {
5553 return false;
5554 }
5555 #endif
5556
5557 /*
5558 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5559 */
5560 static
5561 int can_migrate_task(struct task_struct *p, struct lb_env *env)
5562 {
5563 int tsk_cache_hot = 0;
5564
5565 lockdep_assert_held(&env->src_rq->lock);
5566
5567 /*
5568 * We do not migrate tasks that are:
5569 * 1) throttled_lb_pair, or
5570 * 2) cannot be migrated to this CPU due to cpus_allowed, or
5571 * 3) running (obviously), or
5572 * 4) are cache-hot on their current CPU.
5573 */
5574 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5575 return 0;
5576
5577 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5578 int cpu;
5579
5580 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5581
5582 env->flags |= LBF_SOME_PINNED;
5583
5584 /*
5585 * Remember if this task can be migrated to any other cpu in
5586 * our sched_group. We may want to revisit it if we couldn't
5587 * meet load balance goals by pulling other tasks on src_cpu.
5588 *
5589 * Also avoid computing new_dst_cpu if we have already computed
5590 * one in current iteration.
5591 */
5592 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5593 return 0;
5594
5595 /* Prevent to re-select dst_cpu via env's cpus */
5596 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5597 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5598 env->flags |= LBF_DST_PINNED;
5599 env->new_dst_cpu = cpu;
5600 break;
5601 }
5602 }
5603
5604 return 0;
5605 }
5606
5607 /* Record that we found atleast one task that could run on dst_cpu */
5608 env->flags &= ~LBF_ALL_PINNED;
5609
5610 if (task_running(env->src_rq, p)) {
5611 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5612 return 0;
5613 }
5614
5615 /*
5616 * Aggressive migration if:
5617 * 1) destination numa is preferred
5618 * 2) task is cache cold, or
5619 * 3) too many balance attempts have failed.
5620 */
5621 tsk_cache_hot = task_hot(p, env);
5622 if (!tsk_cache_hot)
5623 tsk_cache_hot = migrate_degrades_locality(p, env);
5624
5625 if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
5626 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5627 if (tsk_cache_hot) {
5628 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5629 schedstat_inc(p, se.statistics.nr_forced_migrations);
5630 }
5631 return 1;
5632 }
5633
5634 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5635 return 0;
5636 }
5637
5638 /*
5639 * detach_task() -- detach the task for the migration specified in env
5640 */
5641 static void detach_task(struct task_struct *p, struct lb_env *env)
5642 {
5643 lockdep_assert_held(&env->src_rq->lock);
5644
5645 deactivate_task(env->src_rq, p, 0);
5646 p->on_rq = TASK_ON_RQ_MIGRATING;
5647 set_task_cpu(p, env->dst_cpu);
5648 }
5649
5650 /*
5651 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
5652 * part of active balancing operations within "domain".
5653 *
5654 * Returns a task if successful and NULL otherwise.
5655 */
5656 static struct task_struct *detach_one_task(struct lb_env *env)
5657 {
5658 struct task_struct *p, *n;
5659
5660 lockdep_assert_held(&env->src_rq->lock);
5661
5662 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5663 if (!can_migrate_task(p, env))
5664 continue;
5665
5666 detach_task(p, env);
5667
5668 /*
5669 * Right now, this is only the second place where
5670 * lb_gained[env->idle] is updated (other is detach_tasks)
5671 * so we can safely collect stats here rather than
5672 * inside detach_tasks().
5673 */
5674 schedstat_inc(env->sd, lb_gained[env->idle]);
5675 return p;
5676 }
5677 return NULL;
5678 }
5679
5680 static const unsigned int sched_nr_migrate_break = 32;
5681
5682 /*
5683 * detach_tasks() -- tries to detach up to imbalance weighted load from
5684 * busiest_rq, as part of a balancing operation within domain "sd".
5685 *
5686 * Returns number of detached tasks if successful and 0 otherwise.
5687 */
5688 static int detach_tasks(struct lb_env *env)
5689 {
5690 struct list_head *tasks = &env->src_rq->cfs_tasks;
5691 struct task_struct *p;
5692 unsigned long load;
5693 int detached = 0;
5694
5695 lockdep_assert_held(&env->src_rq->lock);
5696
5697 if (env->imbalance <= 0)
5698 return 0;
5699
5700 while (!list_empty(tasks)) {
5701 p = list_first_entry(tasks, struct task_struct, se.group_node);
5702
5703 env->loop++;
5704 /* We've more or less seen every task there is, call it quits */
5705 if (env->loop > env->loop_max)
5706 break;
5707
5708 /* take a breather every nr_migrate tasks */
5709 if (env->loop > env->loop_break) {
5710 env->loop_break += sched_nr_migrate_break;
5711 env->flags |= LBF_NEED_BREAK;
5712 break;
5713 }
5714
5715 if (!can_migrate_task(p, env))
5716 goto next;
5717
5718 load = task_h_load(p);
5719
5720 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5721 goto next;
5722
5723 if ((load / 2) > env->imbalance)
5724 goto next;
5725
5726 detach_task(p, env);
5727 list_add(&p->se.group_node, &env->tasks);
5728
5729 detached++;
5730 env->imbalance -= load;
5731
5732 #ifdef CONFIG_PREEMPT
5733 /*
5734 * NEWIDLE balancing is a source of latency, so preemptible
5735 * kernels will stop after the first task is detached to minimize
5736 * the critical section.
5737 */
5738 if (env->idle == CPU_NEWLY_IDLE)
5739 break;
5740 #endif
5741
5742 /*
5743 * We only want to steal up to the prescribed amount of
5744 * weighted load.
5745 */
5746 if (env->imbalance <= 0)
5747 break;
5748
5749 continue;
5750 next:
5751 list_move_tail(&p->se.group_node, tasks);
5752 }
5753
5754 /*
5755 * Right now, this is one of only two places we collect this stat
5756 * so we can safely collect detach_one_task() stats here rather
5757 * than inside detach_one_task().
5758 */
5759 schedstat_add(env->sd, lb_gained[env->idle], detached);
5760
5761 return detached;
5762 }
5763
5764 /*
5765 * attach_task() -- attach the task detached by detach_task() to its new rq.
5766 */
5767 static void attach_task(struct rq *rq, struct task_struct *p)
5768 {
5769 lockdep_assert_held(&rq->lock);
5770
5771 BUG_ON(task_rq(p) != rq);
5772 p->on_rq = TASK_ON_RQ_QUEUED;
5773 activate_task(rq, p, 0);
5774 check_preempt_curr(rq, p, 0);
5775 }
5776
5777 /*
5778 * attach_one_task() -- attaches the task returned from detach_one_task() to
5779 * its new rq.
5780 */
5781 static void attach_one_task(struct rq *rq, struct task_struct *p)
5782 {
5783 raw_spin_lock(&rq->lock);
5784 attach_task(rq, p);
5785 raw_spin_unlock(&rq->lock);
5786 }
5787
5788 /*
5789 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
5790 * new rq.
5791 */
5792 static void attach_tasks(struct lb_env *env)
5793 {
5794 struct list_head *tasks = &env->tasks;
5795 struct task_struct *p;
5796
5797 raw_spin_lock(&env->dst_rq->lock);
5798
5799 while (!list_empty(tasks)) {
5800 p = list_first_entry(tasks, struct task_struct, se.group_node);
5801 list_del_init(&p->se.group_node);
5802
5803 attach_task(env->dst_rq, p);
5804 }
5805
5806 raw_spin_unlock(&env->dst_rq->lock);
5807 }
5808
5809 #ifdef CONFIG_FAIR_GROUP_SCHED
5810 /*
5811 * update tg->load_weight by folding this cpu's load_avg
5812 */
5813 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5814 {
5815 struct sched_entity *se = tg->se[cpu];
5816 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5817
5818 /* throttled entities do not contribute to load */
5819 if (throttled_hierarchy(cfs_rq))
5820 return;
5821
5822 update_cfs_rq_blocked_load(cfs_rq, 1);
5823
5824 if (se) {
5825 update_entity_load_avg(se, 1);
5826 /*
5827 * We pivot on our runnable average having decayed to zero for
5828 * list removal. This generally implies that all our children
5829 * have also been removed (modulo rounding error or bandwidth
5830 * control); however, such cases are rare and we can fix these
5831 * at enqueue.
5832 *
5833 * TODO: fix up out-of-order children on enqueue.
5834 */
5835 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5836 list_del_leaf_cfs_rq(cfs_rq);
5837 } else {
5838 struct rq *rq = rq_of(cfs_rq);
5839 update_rq_runnable_avg(rq, rq->nr_running);
5840 }
5841 }
5842
5843 static void update_blocked_averages(int cpu)
5844 {
5845 struct rq *rq = cpu_rq(cpu);
5846 struct cfs_rq *cfs_rq;
5847 unsigned long flags;
5848
5849 raw_spin_lock_irqsave(&rq->lock, flags);
5850 update_rq_clock(rq);
5851 /*
5852 * Iterates the task_group tree in a bottom up fashion, see
5853 * list_add_leaf_cfs_rq() for details.
5854 */
5855 for_each_leaf_cfs_rq(rq, cfs_rq) {
5856 /*
5857 * Note: We may want to consider periodically releasing
5858 * rq->lock about these updates so that creating many task
5859 * groups does not result in continually extending hold time.
5860 */
5861 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5862 }
5863
5864 raw_spin_unlock_irqrestore(&rq->lock, flags);
5865 }
5866
5867 /*
5868 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5869 * This needs to be done in a top-down fashion because the load of a child
5870 * group is a fraction of its parents load.
5871 */
5872 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5873 {
5874 struct rq *rq = rq_of(cfs_rq);
5875 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5876 unsigned long now = jiffies;
5877 unsigned long load;
5878
5879 if (cfs_rq->last_h_load_update == now)
5880 return;
5881
5882 cfs_rq->h_load_next = NULL;
5883 for_each_sched_entity(se) {
5884 cfs_rq = cfs_rq_of(se);
5885 cfs_rq->h_load_next = se;
5886 if (cfs_rq->last_h_load_update == now)
5887 break;
5888 }
5889
5890 if (!se) {
5891 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5892 cfs_rq->last_h_load_update = now;
5893 }
5894
5895 while ((se = cfs_rq->h_load_next) != NULL) {
5896 load = cfs_rq->h_load;
5897 load = div64_ul(load * se->avg.load_avg_contrib,
5898 cfs_rq->runnable_load_avg + 1);
5899 cfs_rq = group_cfs_rq(se);
5900 cfs_rq->h_load = load;
5901 cfs_rq->last_h_load_update = now;
5902 }
5903 }
5904
5905 static unsigned long task_h_load(struct task_struct *p)
5906 {
5907 struct cfs_rq *cfs_rq = task_cfs_rq(p);
5908
5909 update_cfs_rq_h_load(cfs_rq);
5910 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5911 cfs_rq->runnable_load_avg + 1);
5912 }
5913 #else
5914 static inline void update_blocked_averages(int cpu)
5915 {
5916 }
5917
5918 static unsigned long task_h_load(struct task_struct *p)
5919 {
5920 return p->se.avg.load_avg_contrib;
5921 }
5922 #endif
5923
5924 /********** Helpers for find_busiest_group ************************/
5925
5926 enum group_type {
5927 group_other = 0,
5928 group_imbalanced,
5929 group_overloaded,
5930 };
5931
5932 /*
5933 * sg_lb_stats - stats of a sched_group required for load_balancing
5934 */
5935 struct sg_lb_stats {
5936 unsigned long avg_load; /*Avg load across the CPUs of the group */
5937 unsigned long group_load; /* Total load over the CPUs of the group */
5938 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5939 unsigned long load_per_task;
5940 unsigned long group_capacity;
5941 unsigned long group_usage; /* Total usage of the group */
5942 unsigned int sum_nr_running; /* Nr tasks running in the group */
5943 unsigned int idle_cpus;
5944 unsigned int group_weight;
5945 enum group_type group_type;
5946 int group_no_capacity;
5947 #ifdef CONFIG_NUMA_BALANCING
5948 unsigned int nr_numa_running;
5949 unsigned int nr_preferred_running;
5950 #endif
5951 };
5952
5953 /*
5954 * sd_lb_stats - Structure to store the statistics of a sched_domain
5955 * during load balancing.
5956 */
5957 struct sd_lb_stats {
5958 struct sched_group *busiest; /* Busiest group in this sd */
5959 struct sched_group *local; /* Local group in this sd */
5960 unsigned long total_load; /* Total load of all groups in sd */
5961 unsigned long total_capacity; /* Total capacity of all groups in sd */
5962 unsigned long avg_load; /* Average load across all groups in sd */
5963
5964 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
5965 struct sg_lb_stats local_stat; /* Statistics of the local group */
5966 };
5967
5968 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5969 {
5970 /*
5971 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5972 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5973 * We must however clear busiest_stat::avg_load because
5974 * update_sd_pick_busiest() reads this before assignment.
5975 */
5976 *sds = (struct sd_lb_stats){
5977 .busiest = NULL,
5978 .local = NULL,
5979 .total_load = 0UL,
5980 .total_capacity = 0UL,
5981 .busiest_stat = {
5982 .avg_load = 0UL,
5983 .sum_nr_running = 0,
5984 .group_type = group_other,
5985 },
5986 };
5987 }
5988
5989 /**
5990 * get_sd_load_idx - Obtain the load index for a given sched domain.
5991 * @sd: The sched_domain whose load_idx is to be obtained.
5992 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
5993 *
5994 * Return: The load index.
5995 */
5996 static inline int get_sd_load_idx(struct sched_domain *sd,
5997 enum cpu_idle_type idle)
5998 {
5999 int load_idx;
6000
6001 switch (idle) {
6002 case CPU_NOT_IDLE:
6003 load_idx = sd->busy_idx;
6004 break;
6005
6006 case CPU_NEWLY_IDLE:
6007 load_idx = sd->newidle_idx;
6008 break;
6009 default:
6010 load_idx = sd->idle_idx;
6011 break;
6012 }
6013
6014 return load_idx;
6015 }
6016
6017 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6018 {
6019 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
6020 return sd->smt_gain / sd->span_weight;
6021
6022 return SCHED_CAPACITY_SCALE;
6023 }
6024
6025 unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6026 {
6027 return default_scale_cpu_capacity(sd, cpu);
6028 }
6029
6030 static unsigned long scale_rt_capacity(int cpu)
6031 {
6032 struct rq *rq = cpu_rq(cpu);
6033 u64 total, used, age_stamp, avg;
6034 s64 delta;
6035
6036 /*
6037 * Since we're reading these variables without serialization make sure
6038 * we read them once before doing sanity checks on them.
6039 */
6040 age_stamp = ACCESS_ONCE(rq->age_stamp);
6041 avg = ACCESS_ONCE(rq->rt_avg);
6042 delta = __rq_clock_broken(rq) - age_stamp;
6043
6044 if (unlikely(delta < 0))
6045 delta = 0;
6046
6047 total = sched_avg_period() + delta;
6048
6049 used = div_u64(avg, total);
6050
6051 if (likely(used < SCHED_CAPACITY_SCALE))
6052 return SCHED_CAPACITY_SCALE - used;
6053
6054 return 1;
6055 }
6056
6057 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6058 {
6059 unsigned long capacity = SCHED_CAPACITY_SCALE;
6060 struct sched_group *sdg = sd->groups;
6061
6062 if (sched_feat(ARCH_CAPACITY))
6063 capacity *= arch_scale_cpu_capacity(sd, cpu);
6064 else
6065 capacity *= default_scale_cpu_capacity(sd, cpu);
6066
6067 capacity >>= SCHED_CAPACITY_SHIFT;
6068
6069 cpu_rq(cpu)->cpu_capacity_orig = capacity;
6070
6071 capacity *= scale_rt_capacity(cpu);
6072 capacity >>= SCHED_CAPACITY_SHIFT;
6073
6074 if (!capacity)
6075 capacity = 1;
6076
6077 cpu_rq(cpu)->cpu_capacity = capacity;
6078 sdg->sgc->capacity = capacity;
6079 }
6080
6081 void update_group_capacity(struct sched_domain *sd, int cpu)
6082 {
6083 struct sched_domain *child = sd->child;
6084 struct sched_group *group, *sdg = sd->groups;
6085 unsigned long capacity;
6086 unsigned long interval;
6087
6088 interval = msecs_to_jiffies(sd->balance_interval);
6089 interval = clamp(interval, 1UL, max_load_balance_interval);
6090 sdg->sgc->next_update = jiffies + interval;
6091
6092 if (!child) {
6093 update_cpu_capacity(sd, cpu);
6094 return;
6095 }
6096
6097 capacity = 0;
6098
6099 if (child->flags & SD_OVERLAP) {
6100 /*
6101 * SD_OVERLAP domains cannot assume that child groups
6102 * span the current group.
6103 */
6104
6105 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6106 struct sched_group_capacity *sgc;
6107 struct rq *rq = cpu_rq(cpu);
6108
6109 /*
6110 * build_sched_domains() -> init_sched_groups_capacity()
6111 * gets here before we've attached the domains to the
6112 * runqueues.
6113 *
6114 * Use capacity_of(), which is set irrespective of domains
6115 * in update_cpu_capacity().
6116 *
6117 * This avoids capacity from being 0 and
6118 * causing divide-by-zero issues on boot.
6119 */
6120 if (unlikely(!rq->sd)) {
6121 capacity += capacity_of(cpu);
6122 continue;
6123 }
6124
6125 sgc = rq->sd->groups->sgc;
6126 capacity += sgc->capacity;
6127 }
6128 } else {
6129 /*
6130 * !SD_OVERLAP domains can assume that child groups
6131 * span the current group.
6132 */
6133
6134 group = child->groups;
6135 do {
6136 capacity += group->sgc->capacity;
6137 group = group->next;
6138 } while (group != child->groups);
6139 }
6140
6141 sdg->sgc->capacity = capacity;
6142 }
6143
6144 /*
6145 * Check whether the capacity of the rq has been noticeably reduced by side
6146 * activity. The imbalance_pct is used for the threshold.
6147 * Return true is the capacity is reduced
6148 */
6149 static inline int
6150 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6151 {
6152 return ((rq->cpu_capacity * sd->imbalance_pct) <
6153 (rq->cpu_capacity_orig * 100));
6154 }
6155
6156 /*
6157 * Group imbalance indicates (and tries to solve) the problem where balancing
6158 * groups is inadequate due to tsk_cpus_allowed() constraints.
6159 *
6160 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6161 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6162 * Something like:
6163 *
6164 * { 0 1 2 3 } { 4 5 6 7 }
6165 * * * * *
6166 *
6167 * If we were to balance group-wise we'd place two tasks in the first group and
6168 * two tasks in the second group. Clearly this is undesired as it will overload
6169 * cpu 3 and leave one of the cpus in the second group unused.
6170 *
6171 * The current solution to this issue is detecting the skew in the first group
6172 * by noticing the lower domain failed to reach balance and had difficulty
6173 * moving tasks due to affinity constraints.
6174 *
6175 * When this is so detected; this group becomes a candidate for busiest; see
6176 * update_sd_pick_busiest(). And calculate_imbalance() and
6177 * find_busiest_group() avoid some of the usual balance conditions to allow it
6178 * to create an effective group imbalance.
6179 *
6180 * This is a somewhat tricky proposition since the next run might not find the
6181 * group imbalance and decide the groups need to be balanced again. A most
6182 * subtle and fragile situation.
6183 */
6184
6185 static inline int sg_imbalanced(struct sched_group *group)
6186 {
6187 return group->sgc->imbalance;
6188 }
6189
6190 /*
6191 * group_has_capacity returns true if the group has spare capacity that could
6192 * be used by some tasks.
6193 * We consider that a group has spare capacity if the * number of task is
6194 * smaller than the number of CPUs or if the usage is lower than the available
6195 * capacity for CFS tasks.
6196 * For the latter, we use a threshold to stabilize the state, to take into
6197 * account the variance of the tasks' load and to return true if the available
6198 * capacity in meaningful for the load balancer.
6199 * As an example, an available capacity of 1% can appear but it doesn't make
6200 * any benefit for the load balance.
6201 */
6202 static inline bool
6203 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6204 {
6205 if (sgs->sum_nr_running < sgs->group_weight)
6206 return true;
6207
6208 if ((sgs->group_capacity * 100) >
6209 (sgs->group_usage * env->sd->imbalance_pct))
6210 return true;
6211
6212 return false;
6213 }
6214
6215 /*
6216 * group_is_overloaded returns true if the group has more tasks than it can
6217 * handle.
6218 * group_is_overloaded is not equals to !group_has_capacity because a group
6219 * with the exact right number of tasks, has no more spare capacity but is not
6220 * overloaded so both group_has_capacity and group_is_overloaded return
6221 * false.
6222 */
6223 static inline bool
6224 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6225 {
6226 if (sgs->sum_nr_running <= sgs->group_weight)
6227 return false;
6228
6229 if ((sgs->group_capacity * 100) <
6230 (sgs->group_usage * env->sd->imbalance_pct))
6231 return true;
6232
6233 return false;
6234 }
6235
6236 static enum group_type group_classify(struct lb_env *env,
6237 struct sched_group *group,
6238 struct sg_lb_stats *sgs)
6239 {
6240 if (sgs->group_no_capacity)
6241 return group_overloaded;
6242
6243 if (sg_imbalanced(group))
6244 return group_imbalanced;
6245
6246 return group_other;
6247 }
6248
6249 /**
6250 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
6251 * @env: The load balancing environment.
6252 * @group: sched_group whose statistics are to be updated.
6253 * @load_idx: Load index of sched_domain of this_cpu for load calc.
6254 * @local_group: Does group contain this_cpu.
6255 * @sgs: variable to hold the statistics for this group.
6256 * @overload: Indicate more than one runnable task for any CPU.
6257 */
6258 static inline void update_sg_lb_stats(struct lb_env *env,
6259 struct sched_group *group, int load_idx,
6260 int local_group, struct sg_lb_stats *sgs,
6261 bool *overload)
6262 {
6263 unsigned long load;
6264 int i;
6265
6266 memset(sgs, 0, sizeof(*sgs));
6267
6268 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6269 struct rq *rq = cpu_rq(i);
6270
6271 /* Bias balancing toward cpus of our domain */
6272 if (local_group)
6273 load = target_load(i, load_idx);
6274 else
6275 load = source_load(i, load_idx);
6276
6277 sgs->group_load += load;
6278 sgs->group_usage += get_cpu_usage(i);
6279 sgs->sum_nr_running += rq->cfs.h_nr_running;
6280
6281 if (rq->nr_running > 1)
6282 *overload = true;
6283
6284 #ifdef CONFIG_NUMA_BALANCING
6285 sgs->nr_numa_running += rq->nr_numa_running;
6286 sgs->nr_preferred_running += rq->nr_preferred_running;
6287 #endif
6288 sgs->sum_weighted_load += weighted_cpuload(i);
6289 if (idle_cpu(i))
6290 sgs->idle_cpus++;
6291 }
6292
6293 /* Adjust by relative CPU capacity of the group */
6294 sgs->group_capacity = group->sgc->capacity;
6295 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
6296
6297 if (sgs->sum_nr_running)
6298 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
6299
6300 sgs->group_weight = group->group_weight;
6301
6302 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6303 sgs->group_type = group_classify(env, group, sgs);
6304 }
6305
6306 /**
6307 * update_sd_pick_busiest - return 1 on busiest group
6308 * @env: The load balancing environment.
6309 * @sds: sched_domain statistics
6310 * @sg: sched_group candidate to be checked for being the busiest
6311 * @sgs: sched_group statistics
6312 *
6313 * Determine if @sg is a busier group than the previously selected
6314 * busiest group.
6315 *
6316 * Return: %true if @sg is a busier group than the previously selected
6317 * busiest group. %false otherwise.
6318 */
6319 static bool update_sd_pick_busiest(struct lb_env *env,
6320 struct sd_lb_stats *sds,
6321 struct sched_group *sg,
6322 struct sg_lb_stats *sgs)
6323 {
6324 struct sg_lb_stats *busiest = &sds->busiest_stat;
6325
6326 if (sgs->group_type > busiest->group_type)
6327 return true;
6328
6329 if (sgs->group_type < busiest->group_type)
6330 return false;
6331
6332 if (sgs->avg_load <= busiest->avg_load)
6333 return false;
6334
6335 /* This is the busiest node in its class. */
6336 if (!(env->sd->flags & SD_ASYM_PACKING))
6337 return true;
6338
6339 /*
6340 * ASYM_PACKING needs to move all the work to the lowest
6341 * numbered CPUs in the group, therefore mark all groups
6342 * higher than ourself as busy.
6343 */
6344 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
6345 if (!sds->busiest)
6346 return true;
6347
6348 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6349 return true;
6350 }
6351
6352 return false;
6353 }
6354
6355 #ifdef CONFIG_NUMA_BALANCING
6356 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6357 {
6358 if (sgs->sum_nr_running > sgs->nr_numa_running)
6359 return regular;
6360 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6361 return remote;
6362 return all;
6363 }
6364
6365 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6366 {
6367 if (rq->nr_running > rq->nr_numa_running)
6368 return regular;
6369 if (rq->nr_running > rq->nr_preferred_running)
6370 return remote;
6371 return all;
6372 }
6373 #else
6374 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6375 {
6376 return all;
6377 }
6378
6379 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6380 {
6381 return regular;
6382 }
6383 #endif /* CONFIG_NUMA_BALANCING */
6384
6385 /**
6386 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
6387 * @env: The load balancing environment.
6388 * @sds: variable to hold the statistics for this sched_domain.
6389 */
6390 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
6391 {
6392 struct sched_domain *child = env->sd->child;
6393 struct sched_group *sg = env->sd->groups;
6394 struct sg_lb_stats tmp_sgs;
6395 int load_idx, prefer_sibling = 0;
6396 bool overload = false;
6397
6398 if (child && child->flags & SD_PREFER_SIBLING)
6399 prefer_sibling = 1;
6400
6401 load_idx = get_sd_load_idx(env->sd, env->idle);
6402
6403 do {
6404 struct sg_lb_stats *sgs = &tmp_sgs;
6405 int local_group;
6406
6407 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
6408 if (local_group) {
6409 sds->local = sg;
6410 sgs = &sds->local_stat;
6411
6412 if (env->idle != CPU_NEWLY_IDLE ||
6413 time_after_eq(jiffies, sg->sgc->next_update))
6414 update_group_capacity(env->sd, env->dst_cpu);
6415 }
6416
6417 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6418 &overload);
6419
6420 if (local_group)
6421 goto next_group;
6422
6423 /*
6424 * In case the child domain prefers tasks go to siblings
6425 * first, lower the sg capacity so that we'll try
6426 * and move all the excess tasks away. We lower the capacity
6427 * of a group only if the local group has the capacity to fit
6428 * these excess tasks. The extra check prevents the case where
6429 * you always pull from the heaviest group when it is already
6430 * under-utilized (possible with a large weight task outweighs
6431 * the tasks on the system).
6432 */
6433 if (prefer_sibling && sds->local &&
6434 group_has_capacity(env, &sds->local_stat) &&
6435 (sgs->sum_nr_running > 1)) {
6436 sgs->group_no_capacity = 1;
6437 sgs->group_type = group_overloaded;
6438 }
6439
6440 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
6441 sds->busiest = sg;
6442 sds->busiest_stat = *sgs;
6443 }
6444
6445 next_group:
6446 /* Now, start updating sd_lb_stats */
6447 sds->total_load += sgs->group_load;
6448 sds->total_capacity += sgs->group_capacity;
6449
6450 sg = sg->next;
6451 } while (sg != env->sd->groups);
6452
6453 if (env->sd->flags & SD_NUMA)
6454 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
6455
6456 if (!env->sd->parent) {
6457 /* update overload indicator if we are at root domain */
6458 if (env->dst_rq->rd->overload != overload)
6459 env->dst_rq->rd->overload = overload;
6460 }
6461
6462 }
6463
6464 /**
6465 * check_asym_packing - Check to see if the group is packed into the
6466 * sched doman.
6467 *
6468 * This is primarily intended to used at the sibling level. Some
6469 * cores like POWER7 prefer to use lower numbered SMT threads. In the
6470 * case of POWER7, it can move to lower SMT modes only when higher
6471 * threads are idle. When in lower SMT modes, the threads will
6472 * perform better since they share less core resources. Hence when we
6473 * have idle threads, we want them to be the higher ones.
6474 *
6475 * This packing function is run on idle threads. It checks to see if
6476 * the busiest CPU in this domain (core in the P7 case) has a higher
6477 * CPU number than the packing function is being run on. Here we are
6478 * assuming lower CPU number will be equivalent to lower a SMT thread
6479 * number.
6480 *
6481 * Return: 1 when packing is required and a task should be moved to
6482 * this CPU. The amount of the imbalance is returned in *imbalance.
6483 *
6484 * @env: The load balancing environment.
6485 * @sds: Statistics of the sched_domain which is to be packed
6486 */
6487 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6488 {
6489 int busiest_cpu;
6490
6491 if (!(env->sd->flags & SD_ASYM_PACKING))
6492 return 0;
6493
6494 if (!sds->busiest)
6495 return 0;
6496
6497 busiest_cpu = group_first_cpu(sds->busiest);
6498 if (env->dst_cpu > busiest_cpu)
6499 return 0;
6500
6501 env->imbalance = DIV_ROUND_CLOSEST(
6502 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
6503 SCHED_CAPACITY_SCALE);
6504
6505 return 1;
6506 }
6507
6508 /**
6509 * fix_small_imbalance - Calculate the minor imbalance that exists
6510 * amongst the groups of a sched_domain, during
6511 * load balancing.
6512 * @env: The load balancing environment.
6513 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
6514 */
6515 static inline
6516 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6517 {
6518 unsigned long tmp, capa_now = 0, capa_move = 0;
6519 unsigned int imbn = 2;
6520 unsigned long scaled_busy_load_per_task;
6521 struct sg_lb_stats *local, *busiest;
6522
6523 local = &sds->local_stat;
6524 busiest = &sds->busiest_stat;
6525
6526 if (!local->sum_nr_running)
6527 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6528 else if (busiest->load_per_task > local->load_per_task)
6529 imbn = 1;
6530
6531 scaled_busy_load_per_task =
6532 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6533 busiest->group_capacity;
6534
6535 if (busiest->avg_load + scaled_busy_load_per_task >=
6536 local->avg_load + (scaled_busy_load_per_task * imbn)) {
6537 env->imbalance = busiest->load_per_task;
6538 return;
6539 }
6540
6541 /*
6542 * OK, we don't have enough imbalance to justify moving tasks,
6543 * however we may be able to increase total CPU capacity used by
6544 * moving them.
6545 */
6546
6547 capa_now += busiest->group_capacity *
6548 min(busiest->load_per_task, busiest->avg_load);
6549 capa_now += local->group_capacity *
6550 min(local->load_per_task, local->avg_load);
6551 capa_now /= SCHED_CAPACITY_SCALE;
6552
6553 /* Amount of load we'd subtract */
6554 if (busiest->avg_load > scaled_busy_load_per_task) {
6555 capa_move += busiest->group_capacity *
6556 min(busiest->load_per_task,
6557 busiest->avg_load - scaled_busy_load_per_task);
6558 }
6559
6560 /* Amount of load we'd add */
6561 if (busiest->avg_load * busiest->group_capacity <
6562 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
6563 tmp = (busiest->avg_load * busiest->group_capacity) /
6564 local->group_capacity;
6565 } else {
6566 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
6567 local->group_capacity;
6568 }
6569 capa_move += local->group_capacity *
6570 min(local->load_per_task, local->avg_load + tmp);
6571 capa_move /= SCHED_CAPACITY_SCALE;
6572
6573 /* Move if we gain throughput */
6574 if (capa_move > capa_now)
6575 env->imbalance = busiest->load_per_task;
6576 }
6577
6578 /**
6579 * calculate_imbalance - Calculate the amount of imbalance present within the
6580 * groups of a given sched_domain during load balance.
6581 * @env: load balance environment
6582 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
6583 */
6584 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6585 {
6586 unsigned long max_pull, load_above_capacity = ~0UL;
6587 struct sg_lb_stats *local, *busiest;
6588
6589 local = &sds->local_stat;
6590 busiest = &sds->busiest_stat;
6591
6592 if (busiest->group_type == group_imbalanced) {
6593 /*
6594 * In the group_imb case we cannot rely on group-wide averages
6595 * to ensure cpu-load equilibrium, look at wider averages. XXX
6596 */
6597 busiest->load_per_task =
6598 min(busiest->load_per_task, sds->avg_load);
6599 }
6600
6601 /*
6602 * In the presence of smp nice balancing, certain scenarios can have
6603 * max load less than avg load(as we skip the groups at or below
6604 * its cpu_capacity, while calculating max_load..)
6605 */
6606 if (busiest->avg_load <= sds->avg_load ||
6607 local->avg_load >= sds->avg_load) {
6608 env->imbalance = 0;
6609 return fix_small_imbalance(env, sds);
6610 }
6611
6612 /*
6613 * If there aren't any idle cpus, avoid creating some.
6614 */
6615 if (busiest->group_type == group_overloaded &&
6616 local->group_type == group_overloaded) {
6617 load_above_capacity = busiest->sum_nr_running *
6618 SCHED_LOAD_SCALE;
6619 if (load_above_capacity > busiest->group_capacity)
6620 load_above_capacity -= busiest->group_capacity;
6621 else
6622 load_above_capacity = ~0UL;
6623 }
6624
6625 /*
6626 * We're trying to get all the cpus to the average_load, so we don't
6627 * want to push ourselves above the average load, nor do we wish to
6628 * reduce the max loaded cpu below the average load. At the same time,
6629 * we also don't want to reduce the group load below the group capacity
6630 * (so that we can implement power-savings policies etc). Thus we look
6631 * for the minimum possible imbalance.
6632 */
6633 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6634
6635 /* How much load to actually move to equalise the imbalance */
6636 env->imbalance = min(
6637 max_pull * busiest->group_capacity,
6638 (sds->avg_load - local->avg_load) * local->group_capacity
6639 ) / SCHED_CAPACITY_SCALE;
6640
6641 /*
6642 * if *imbalance is less than the average load per runnable task
6643 * there is no guarantee that any tasks will be moved so we'll have
6644 * a think about bumping its value to force at least one task to be
6645 * moved
6646 */
6647 if (env->imbalance < busiest->load_per_task)
6648 return fix_small_imbalance(env, sds);
6649 }
6650
6651 /******* find_busiest_group() helpers end here *********************/
6652
6653 /**
6654 * find_busiest_group - Returns the busiest group within the sched_domain
6655 * if there is an imbalance. If there isn't an imbalance, and
6656 * the user has opted for power-savings, it returns a group whose
6657 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6658 * such a group exists.
6659 *
6660 * Also calculates the amount of weighted load which should be moved
6661 * to restore balance.
6662 *
6663 * @env: The load balancing environment.
6664 *
6665 * Return: - The busiest group if imbalance exists.
6666 * - If no imbalance and user has opted for power-savings balance,
6667 * return the least loaded group whose CPUs can be
6668 * put to idle by rebalancing its tasks onto our group.
6669 */
6670 static struct sched_group *find_busiest_group(struct lb_env *env)
6671 {
6672 struct sg_lb_stats *local, *busiest;
6673 struct sd_lb_stats sds;
6674
6675 init_sd_lb_stats(&sds);
6676
6677 /*
6678 * Compute the various statistics relavent for load balancing at
6679 * this level.
6680 */
6681 update_sd_lb_stats(env, &sds);
6682 local = &sds.local_stat;
6683 busiest = &sds.busiest_stat;
6684
6685 /* ASYM feature bypasses nice load balance check */
6686 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6687 check_asym_packing(env, &sds))
6688 return sds.busiest;
6689
6690 /* There is no busy sibling group to pull tasks from */
6691 if (!sds.busiest || busiest->sum_nr_running == 0)
6692 goto out_balanced;
6693
6694 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6695 / sds.total_capacity;
6696
6697 /*
6698 * If the busiest group is imbalanced the below checks don't
6699 * work because they assume all things are equal, which typically
6700 * isn't true due to cpus_allowed constraints and the like.
6701 */
6702 if (busiest->group_type == group_imbalanced)
6703 goto force_balance;
6704
6705 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6706 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
6707 busiest->group_no_capacity)
6708 goto force_balance;
6709
6710 /*
6711 * If the local group is busier than the selected busiest group
6712 * don't try and pull any tasks.
6713 */
6714 if (local->avg_load >= busiest->avg_load)
6715 goto out_balanced;
6716
6717 /*
6718 * Don't pull any tasks if this group is already above the domain
6719 * average load.
6720 */
6721 if (local->avg_load >= sds.avg_load)
6722 goto out_balanced;
6723
6724 if (env->idle == CPU_IDLE) {
6725 /*
6726 * This cpu is idle. If the busiest group is not overloaded
6727 * and there is no imbalance between this and busiest group
6728 * wrt idle cpus, it is balanced. The imbalance becomes
6729 * significant if the diff is greater than 1 otherwise we
6730 * might end up to just move the imbalance on another group
6731 */
6732 if ((busiest->group_type != group_overloaded) &&
6733 (local->idle_cpus <= (busiest->idle_cpus + 1)))
6734 goto out_balanced;
6735 } else {
6736 /*
6737 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6738 * imbalance_pct to be conservative.
6739 */
6740 if (100 * busiest->avg_load <=
6741 env->sd->imbalance_pct * local->avg_load)
6742 goto out_balanced;
6743 }
6744
6745 force_balance:
6746 /* Looks like there is an imbalance. Compute it */
6747 calculate_imbalance(env, &sds);
6748 return sds.busiest;
6749
6750 out_balanced:
6751 env->imbalance = 0;
6752 return NULL;
6753 }
6754
6755 /*
6756 * find_busiest_queue - find the busiest runqueue among the cpus in group.
6757 */
6758 static struct rq *find_busiest_queue(struct lb_env *env,
6759 struct sched_group *group)
6760 {
6761 struct rq *busiest = NULL, *rq;
6762 unsigned long busiest_load = 0, busiest_capacity = 1;
6763 int i;
6764
6765 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6766 unsigned long capacity, wl;
6767 enum fbq_type rt;
6768
6769 rq = cpu_rq(i);
6770 rt = fbq_classify_rq(rq);
6771
6772 /*
6773 * We classify groups/runqueues into three groups:
6774 * - regular: there are !numa tasks
6775 * - remote: there are numa tasks that run on the 'wrong' node
6776 * - all: there is no distinction
6777 *
6778 * In order to avoid migrating ideally placed numa tasks,
6779 * ignore those when there's better options.
6780 *
6781 * If we ignore the actual busiest queue to migrate another
6782 * task, the next balance pass can still reduce the busiest
6783 * queue by moving tasks around inside the node.
6784 *
6785 * If we cannot move enough load due to this classification
6786 * the next pass will adjust the group classification and
6787 * allow migration of more tasks.
6788 *
6789 * Both cases only affect the total convergence complexity.
6790 */
6791 if (rt > env->fbq_type)
6792 continue;
6793
6794 capacity = capacity_of(i);
6795
6796 wl = weighted_cpuload(i);
6797
6798 /*
6799 * When comparing with imbalance, use weighted_cpuload()
6800 * which is not scaled with the cpu capacity.
6801 */
6802
6803 if (rq->nr_running == 1 && wl > env->imbalance &&
6804 !check_cpu_capacity(rq, env->sd))
6805 continue;
6806
6807 /*
6808 * For the load comparisons with the other cpu's, consider
6809 * the weighted_cpuload() scaled with the cpu capacity, so
6810 * that the load can be moved away from the cpu that is
6811 * potentially running at a lower capacity.
6812 *
6813 * Thus we're looking for max(wl_i / capacity_i), crosswise
6814 * multiplication to rid ourselves of the division works out
6815 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
6816 * our previous maximum.
6817 */
6818 if (wl * busiest_capacity > busiest_load * capacity) {
6819 busiest_load = wl;
6820 busiest_capacity = capacity;
6821 busiest = rq;
6822 }
6823 }
6824
6825 return busiest;
6826 }
6827
6828 /*
6829 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6830 * so long as it is large enough.
6831 */
6832 #define MAX_PINNED_INTERVAL 512
6833
6834 /* Working cpumask for load_balance and load_balance_newidle. */
6835 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6836
6837 static int need_active_balance(struct lb_env *env)
6838 {
6839 struct sched_domain *sd = env->sd;
6840
6841 if (env->idle == CPU_NEWLY_IDLE) {
6842
6843 /*
6844 * ASYM_PACKING needs to force migrate tasks from busy but
6845 * higher numbered CPUs in order to pack all tasks in the
6846 * lowest numbered CPUs.
6847 */
6848 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6849 return 1;
6850 }
6851
6852 /*
6853 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
6854 * It's worth migrating the task if the src_cpu's capacity is reduced
6855 * because of other sched_class or IRQs if more capacity stays
6856 * available on dst_cpu.
6857 */
6858 if ((env->idle != CPU_NOT_IDLE) &&
6859 (env->src_rq->cfs.h_nr_running == 1)) {
6860 if ((check_cpu_capacity(env->src_rq, sd)) &&
6861 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
6862 return 1;
6863 }
6864
6865 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6866 }
6867
6868 static int active_load_balance_cpu_stop(void *data);
6869
6870 static int should_we_balance(struct lb_env *env)
6871 {
6872 struct sched_group *sg = env->sd->groups;
6873 struct cpumask *sg_cpus, *sg_mask;
6874 int cpu, balance_cpu = -1;
6875
6876 /*
6877 * In the newly idle case, we will allow all the cpu's
6878 * to do the newly idle load balance.
6879 */
6880 if (env->idle == CPU_NEWLY_IDLE)
6881 return 1;
6882
6883 sg_cpus = sched_group_cpus(sg);
6884 sg_mask = sched_group_mask(sg);
6885 /* Try to find first idle cpu */
6886 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6887 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6888 continue;
6889
6890 balance_cpu = cpu;
6891 break;
6892 }
6893
6894 if (balance_cpu == -1)
6895 balance_cpu = group_balance_cpu(sg);
6896
6897 /*
6898 * First idle cpu or the first cpu(busiest) in this sched group
6899 * is eligible for doing load balancing at this and above domains.
6900 */
6901 return balance_cpu == env->dst_cpu;
6902 }
6903
6904 /*
6905 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6906 * tasks if there is an imbalance.
6907 */
6908 static int load_balance(int this_cpu, struct rq *this_rq,
6909 struct sched_domain *sd, enum cpu_idle_type idle,
6910 int *continue_balancing)
6911 {
6912 int ld_moved, cur_ld_moved, active_balance = 0;
6913 struct sched_domain *sd_parent = sd->parent;
6914 struct sched_group *group;
6915 struct rq *busiest;
6916 unsigned long flags;
6917 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
6918
6919 struct lb_env env = {
6920 .sd = sd,
6921 .dst_cpu = this_cpu,
6922 .dst_rq = this_rq,
6923 .dst_grpmask = sched_group_cpus(sd->groups),
6924 .idle = idle,
6925 .loop_break = sched_nr_migrate_break,
6926 .cpus = cpus,
6927 .fbq_type = all,
6928 .tasks = LIST_HEAD_INIT(env.tasks),
6929 };
6930
6931 /*
6932 * For NEWLY_IDLE load_balancing, we don't need to consider
6933 * other cpus in our group
6934 */
6935 if (idle == CPU_NEWLY_IDLE)
6936 env.dst_grpmask = NULL;
6937
6938 cpumask_copy(cpus, cpu_active_mask);
6939
6940 schedstat_inc(sd, lb_count[idle]);
6941
6942 redo:
6943 if (!should_we_balance(&env)) {
6944 *continue_balancing = 0;
6945 goto out_balanced;
6946 }
6947
6948 group = find_busiest_group(&env);
6949 if (!group) {
6950 schedstat_inc(sd, lb_nobusyg[idle]);
6951 goto out_balanced;
6952 }
6953
6954 busiest = find_busiest_queue(&env, group);
6955 if (!busiest) {
6956 schedstat_inc(sd, lb_nobusyq[idle]);
6957 goto out_balanced;
6958 }
6959
6960 BUG_ON(busiest == env.dst_rq);
6961
6962 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6963
6964 env.src_cpu = busiest->cpu;
6965 env.src_rq = busiest;
6966
6967 ld_moved = 0;
6968 if (busiest->nr_running > 1) {
6969 /*
6970 * Attempt to move tasks. If find_busiest_group has found
6971 * an imbalance but busiest->nr_running <= 1, the group is
6972 * still unbalanced. ld_moved simply stays zero, so it is
6973 * correctly treated as an imbalance.
6974 */
6975 env.flags |= LBF_ALL_PINNED;
6976 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
6977
6978 more_balance:
6979 raw_spin_lock_irqsave(&busiest->lock, flags);
6980
6981 /*
6982 * cur_ld_moved - load moved in current iteration
6983 * ld_moved - cumulative load moved across iterations
6984 */
6985 cur_ld_moved = detach_tasks(&env);
6986
6987 /*
6988 * We've detached some tasks from busiest_rq. Every
6989 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
6990 * unlock busiest->lock, and we are able to be sure
6991 * that nobody can manipulate the tasks in parallel.
6992 * See task_rq_lock() family for the details.
6993 */
6994
6995 raw_spin_unlock(&busiest->lock);
6996
6997 if (cur_ld_moved) {
6998 attach_tasks(&env);
6999 ld_moved += cur_ld_moved;
7000 }
7001
7002 local_irq_restore(flags);
7003
7004 if (env.flags & LBF_NEED_BREAK) {
7005 env.flags &= ~LBF_NEED_BREAK;
7006 goto more_balance;
7007 }
7008
7009 /*
7010 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7011 * us and move them to an alternate dst_cpu in our sched_group
7012 * where they can run. The upper limit on how many times we
7013 * iterate on same src_cpu is dependent on number of cpus in our
7014 * sched_group.
7015 *
7016 * This changes load balance semantics a bit on who can move
7017 * load to a given_cpu. In addition to the given_cpu itself
7018 * (or a ilb_cpu acting on its behalf where given_cpu is
7019 * nohz-idle), we now have balance_cpu in a position to move
7020 * load to given_cpu. In rare situations, this may cause
7021 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7022 * _independently_ and at _same_ time to move some load to
7023 * given_cpu) causing exceess load to be moved to given_cpu.
7024 * This however should not happen so much in practice and
7025 * moreover subsequent load balance cycles should correct the
7026 * excess load moved.
7027 */
7028 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7029
7030 /* Prevent to re-select dst_cpu via env's cpus */
7031 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7032
7033 env.dst_rq = cpu_rq(env.new_dst_cpu);
7034 env.dst_cpu = env.new_dst_cpu;
7035 env.flags &= ~LBF_DST_PINNED;
7036 env.loop = 0;
7037 env.loop_break = sched_nr_migrate_break;
7038
7039 /*
7040 * Go back to "more_balance" rather than "redo" since we
7041 * need to continue with same src_cpu.
7042 */
7043 goto more_balance;
7044 }
7045
7046 /*
7047 * We failed to reach balance because of affinity.
7048 */
7049 if (sd_parent) {
7050 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7051
7052 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7053 *group_imbalance = 1;
7054 }
7055
7056 /* All tasks on this runqueue were pinned by CPU affinity */
7057 if (unlikely(env.flags & LBF_ALL_PINNED)) {
7058 cpumask_clear_cpu(cpu_of(busiest), cpus);
7059 if (!cpumask_empty(cpus)) {
7060 env.loop = 0;
7061 env.loop_break = sched_nr_migrate_break;
7062 goto redo;
7063 }
7064 goto out_all_pinned;
7065 }
7066 }
7067
7068 if (!ld_moved) {
7069 schedstat_inc(sd, lb_failed[idle]);
7070 /*
7071 * Increment the failure counter only on periodic balance.
7072 * We do not want newidle balance, which can be very
7073 * frequent, pollute the failure counter causing
7074 * excessive cache_hot migrations and active balances.
7075 */
7076 if (idle != CPU_NEWLY_IDLE)
7077 sd->nr_balance_failed++;
7078
7079 if (need_active_balance(&env)) {
7080 raw_spin_lock_irqsave(&busiest->lock, flags);
7081
7082 /* don't kick the active_load_balance_cpu_stop,
7083 * if the curr task on busiest cpu can't be
7084 * moved to this_cpu
7085 */
7086 if (!cpumask_test_cpu(this_cpu,
7087 tsk_cpus_allowed(busiest->curr))) {
7088 raw_spin_unlock_irqrestore(&busiest->lock,
7089 flags);
7090 env.flags |= LBF_ALL_PINNED;
7091 goto out_one_pinned;
7092 }
7093
7094 /*
7095 * ->active_balance synchronizes accesses to
7096 * ->active_balance_work. Once set, it's cleared
7097 * only after active load balance is finished.
7098 */
7099 if (!busiest->active_balance) {
7100 busiest->active_balance = 1;
7101 busiest->push_cpu = this_cpu;
7102 active_balance = 1;
7103 }
7104 raw_spin_unlock_irqrestore(&busiest->lock, flags);
7105
7106 if (active_balance) {
7107 stop_one_cpu_nowait(cpu_of(busiest),
7108 active_load_balance_cpu_stop, busiest,
7109 &busiest->active_balance_work);
7110 }
7111
7112 /*
7113 * We've kicked active balancing, reset the failure
7114 * counter.
7115 */
7116 sd->nr_balance_failed = sd->cache_nice_tries+1;
7117 }
7118 } else
7119 sd->nr_balance_failed = 0;
7120
7121 if (likely(!active_balance)) {
7122 /* We were unbalanced, so reset the balancing interval */
7123 sd->balance_interval = sd->min_interval;
7124 } else {
7125 /*
7126 * If we've begun active balancing, start to back off. This
7127 * case may not be covered by the all_pinned logic if there
7128 * is only 1 task on the busy runqueue (because we don't call
7129 * detach_tasks).
7130 */
7131 if (sd->balance_interval < sd->max_interval)
7132 sd->balance_interval *= 2;
7133 }
7134
7135 goto out;
7136
7137 out_balanced:
7138 /*
7139 * We reach balance although we may have faced some affinity
7140 * constraints. Clear the imbalance flag if it was set.
7141 */
7142 if (sd_parent) {
7143 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7144
7145 if (*group_imbalance)
7146 *group_imbalance = 0;
7147 }
7148
7149 out_all_pinned:
7150 /*
7151 * We reach balance because all tasks are pinned at this level so
7152 * we can't migrate them. Let the imbalance flag set so parent level
7153 * can try to migrate them.
7154 */
7155 schedstat_inc(sd, lb_balanced[idle]);
7156
7157 sd->nr_balance_failed = 0;
7158
7159 out_one_pinned:
7160 /* tune up the balancing interval */
7161 if (((env.flags & LBF_ALL_PINNED) &&
7162 sd->balance_interval < MAX_PINNED_INTERVAL) ||
7163 (sd->balance_interval < sd->max_interval))
7164 sd->balance_interval *= 2;
7165
7166 ld_moved = 0;
7167 out:
7168 return ld_moved;
7169 }
7170
7171 static inline unsigned long
7172 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7173 {
7174 unsigned long interval = sd->balance_interval;
7175
7176 if (cpu_busy)
7177 interval *= sd->busy_factor;
7178
7179 /* scale ms to jiffies */
7180 interval = msecs_to_jiffies(interval);
7181 interval = clamp(interval, 1UL, max_load_balance_interval);
7182
7183 return interval;
7184 }
7185
7186 static inline void
7187 update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7188 {
7189 unsigned long interval, next;
7190
7191 interval = get_sd_balance_interval(sd, cpu_busy);
7192 next = sd->last_balance + interval;
7193
7194 if (time_after(*next_balance, next))
7195 *next_balance = next;
7196 }
7197
7198 /*
7199 * idle_balance is called by schedule() if this_cpu is about to become
7200 * idle. Attempts to pull tasks from other CPUs.
7201 */
7202 static int idle_balance(struct rq *this_rq)
7203 {
7204 unsigned long next_balance = jiffies + HZ;
7205 int this_cpu = this_rq->cpu;
7206 struct sched_domain *sd;
7207 int pulled_task = 0;
7208 u64 curr_cost = 0;
7209
7210 idle_enter_fair(this_rq);
7211
7212 /*
7213 * We must set idle_stamp _before_ calling idle_balance(), such that we
7214 * measure the duration of idle_balance() as idle time.
7215 */
7216 this_rq->idle_stamp = rq_clock(this_rq);
7217
7218 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7219 !this_rq->rd->overload) {
7220 rcu_read_lock();
7221 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7222 if (sd)
7223 update_next_balance(sd, 0, &next_balance);
7224 rcu_read_unlock();
7225
7226 goto out;
7227 }
7228
7229 /*
7230 * Drop the rq->lock, but keep IRQ/preempt disabled.
7231 */
7232 raw_spin_unlock(&this_rq->lock);
7233
7234 update_blocked_averages(this_cpu);
7235 rcu_read_lock();
7236 for_each_domain(this_cpu, sd) {
7237 int continue_balancing = 1;
7238 u64 t0, domain_cost;
7239
7240 if (!(sd->flags & SD_LOAD_BALANCE))
7241 continue;
7242
7243 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7244 update_next_balance(sd, 0, &next_balance);
7245 break;
7246 }
7247
7248 if (sd->flags & SD_BALANCE_NEWIDLE) {
7249 t0 = sched_clock_cpu(this_cpu);
7250
7251 pulled_task = load_balance(this_cpu, this_rq,
7252 sd, CPU_NEWLY_IDLE,
7253 &continue_balancing);
7254
7255 domain_cost = sched_clock_cpu(this_cpu) - t0;
7256 if (domain_cost > sd->max_newidle_lb_cost)
7257 sd->max_newidle_lb_cost = domain_cost;
7258
7259 curr_cost += domain_cost;
7260 }
7261
7262 update_next_balance(sd, 0, &next_balance);
7263
7264 /*
7265 * Stop searching for tasks to pull if there are
7266 * now runnable tasks on this rq.
7267 */
7268 if (pulled_task || this_rq->nr_running > 0)
7269 break;
7270 }
7271 rcu_read_unlock();
7272
7273 raw_spin_lock(&this_rq->lock);
7274
7275 if (curr_cost > this_rq->max_idle_balance_cost)
7276 this_rq->max_idle_balance_cost = curr_cost;
7277
7278 /*
7279 * While browsing the domains, we released the rq lock, a task could
7280 * have been enqueued in the meantime. Since we're not going idle,
7281 * pretend we pulled a task.
7282 */
7283 if (this_rq->cfs.h_nr_running && !pulled_task)
7284 pulled_task = 1;
7285
7286 out:
7287 /* Move the next balance forward */
7288 if (time_after(this_rq->next_balance, next_balance))
7289 this_rq->next_balance = next_balance;
7290
7291 /* Is there a task of a high priority class? */
7292 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7293 pulled_task = -1;
7294
7295 if (pulled_task) {
7296 idle_exit_fair(this_rq);
7297 this_rq->idle_stamp = 0;
7298 }
7299
7300 return pulled_task;
7301 }
7302
7303 /*
7304 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
7305 * running tasks off the busiest CPU onto idle CPUs. It requires at
7306 * least 1 task to be running on each physical CPU where possible, and
7307 * avoids physical / logical imbalances.
7308 */
7309 static int active_load_balance_cpu_stop(void *data)
7310 {
7311 struct rq *busiest_rq = data;
7312 int busiest_cpu = cpu_of(busiest_rq);
7313 int target_cpu = busiest_rq->push_cpu;
7314 struct rq *target_rq = cpu_rq(target_cpu);
7315 struct sched_domain *sd;
7316 struct task_struct *p = NULL;
7317
7318 raw_spin_lock_irq(&busiest_rq->lock);
7319
7320 /* make sure the requested cpu hasn't gone down in the meantime */
7321 if (unlikely(busiest_cpu != smp_processor_id() ||
7322 !busiest_rq->active_balance))
7323 goto out_unlock;
7324
7325 /* Is there any task to move? */
7326 if (busiest_rq->nr_running <= 1)
7327 goto out_unlock;
7328
7329 /*
7330 * This condition is "impossible", if it occurs
7331 * we need to fix it. Originally reported by
7332 * Bjorn Helgaas on a 128-cpu setup.
7333 */
7334 BUG_ON(busiest_rq == target_rq);
7335
7336 /* Search for an sd spanning us and the target CPU. */
7337 rcu_read_lock();
7338 for_each_domain(target_cpu, sd) {
7339 if ((sd->flags & SD_LOAD_BALANCE) &&
7340 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7341 break;
7342 }
7343
7344 if (likely(sd)) {
7345 struct lb_env env = {
7346 .sd = sd,
7347 .dst_cpu = target_cpu,
7348 .dst_rq = target_rq,
7349 .src_cpu = busiest_rq->cpu,
7350 .src_rq = busiest_rq,
7351 .idle = CPU_IDLE,
7352 };
7353
7354 schedstat_inc(sd, alb_count);
7355
7356 p = detach_one_task(&env);
7357 if (p)
7358 schedstat_inc(sd, alb_pushed);
7359 else
7360 schedstat_inc(sd, alb_failed);
7361 }
7362 rcu_read_unlock();
7363 out_unlock:
7364 busiest_rq->active_balance = 0;
7365 raw_spin_unlock(&busiest_rq->lock);
7366
7367 if (p)
7368 attach_one_task(target_rq, p);
7369
7370 local_irq_enable();
7371
7372 return 0;
7373 }
7374
7375 static inline int on_null_domain(struct rq *rq)
7376 {
7377 return unlikely(!rcu_dereference_sched(rq->sd));
7378 }
7379
7380 #ifdef CONFIG_NO_HZ_COMMON
7381 /*
7382 * idle load balancing details
7383 * - When one of the busy CPUs notice that there may be an idle rebalancing
7384 * needed, they will kick the idle load balancer, which then does idle
7385 * load balancing for all the idle CPUs.
7386 */
7387 static struct {
7388 cpumask_var_t idle_cpus_mask;
7389 atomic_t nr_cpus;
7390 unsigned long next_balance; /* in jiffy units */
7391 } nohz ____cacheline_aligned;
7392
7393 static inline int find_new_ilb(void)
7394 {
7395 int ilb = cpumask_first(nohz.idle_cpus_mask);
7396
7397 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7398 return ilb;
7399
7400 return nr_cpu_ids;
7401 }
7402
7403 /*
7404 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
7405 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
7406 * CPU (if there is one).
7407 */
7408 static void nohz_balancer_kick(void)
7409 {
7410 int ilb_cpu;
7411
7412 nohz.next_balance++;
7413
7414 ilb_cpu = find_new_ilb();
7415
7416 if (ilb_cpu >= nr_cpu_ids)
7417 return;
7418
7419 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
7420 return;
7421 /*
7422 * Use smp_send_reschedule() instead of resched_cpu().
7423 * This way we generate a sched IPI on the target cpu which
7424 * is idle. And the softirq performing nohz idle load balance
7425 * will be run before returning from the IPI.
7426 */
7427 smp_send_reschedule(ilb_cpu);
7428 return;
7429 }
7430
7431 static inline void nohz_balance_exit_idle(int cpu)
7432 {
7433 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
7434 /*
7435 * Completely isolated CPUs don't ever set, so we must test.
7436 */
7437 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7438 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7439 atomic_dec(&nohz.nr_cpus);
7440 }
7441 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7442 }
7443 }
7444
7445 static inline void set_cpu_sd_state_busy(void)
7446 {
7447 struct sched_domain *sd;
7448 int cpu = smp_processor_id();
7449
7450 rcu_read_lock();
7451 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7452
7453 if (!sd || !sd->nohz_idle)
7454 goto unlock;
7455 sd->nohz_idle = 0;
7456
7457 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
7458 unlock:
7459 rcu_read_unlock();
7460 }
7461
7462 void set_cpu_sd_state_idle(void)
7463 {
7464 struct sched_domain *sd;
7465 int cpu = smp_processor_id();
7466
7467 rcu_read_lock();
7468 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7469
7470 if (!sd || sd->nohz_idle)
7471 goto unlock;
7472 sd->nohz_idle = 1;
7473
7474 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
7475 unlock:
7476 rcu_read_unlock();
7477 }
7478
7479 /*
7480 * This routine will record that the cpu is going idle with tick stopped.
7481 * This info will be used in performing idle load balancing in the future.
7482 */
7483 void nohz_balance_enter_idle(int cpu)
7484 {
7485 /*
7486 * If this cpu is going down, then nothing needs to be done.
7487 */
7488 if (!cpu_active(cpu))
7489 return;
7490
7491 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7492 return;
7493
7494 /*
7495 * If we're a completely isolated CPU, we don't play.
7496 */
7497 if (on_null_domain(cpu_rq(cpu)))
7498 return;
7499
7500 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7501 atomic_inc(&nohz.nr_cpus);
7502 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7503 }
7504
7505 static int sched_ilb_notifier(struct notifier_block *nfb,
7506 unsigned long action, void *hcpu)
7507 {
7508 switch (action & ~CPU_TASKS_FROZEN) {
7509 case CPU_DYING:
7510 nohz_balance_exit_idle(smp_processor_id());
7511 return NOTIFY_OK;
7512 default:
7513 return NOTIFY_DONE;
7514 }
7515 }
7516 #endif
7517
7518 static DEFINE_SPINLOCK(balancing);
7519
7520 /*
7521 * Scale the max load_balance interval with the number of CPUs in the system.
7522 * This trades load-balance latency on larger machines for less cross talk.
7523 */
7524 void update_max_interval(void)
7525 {
7526 max_load_balance_interval = HZ*num_online_cpus()/10;
7527 }
7528
7529 /*
7530 * It checks each scheduling domain to see if it is due to be balanced,
7531 * and initiates a balancing operation if so.
7532 *
7533 * Balancing parameters are set up in init_sched_domains.
7534 */
7535 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
7536 {
7537 int continue_balancing = 1;
7538 int cpu = rq->cpu;
7539 unsigned long interval;
7540 struct sched_domain *sd;
7541 /* Earliest time when we have to do rebalance again */
7542 unsigned long next_balance = jiffies + 60*HZ;
7543 int update_next_balance = 0;
7544 int need_serialize, need_decay = 0;
7545 u64 max_cost = 0;
7546
7547 update_blocked_averages(cpu);
7548
7549 rcu_read_lock();
7550 for_each_domain(cpu, sd) {
7551 /*
7552 * Decay the newidle max times here because this is a regular
7553 * visit to all the domains. Decay ~1% per second.
7554 */
7555 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7556 sd->max_newidle_lb_cost =
7557 (sd->max_newidle_lb_cost * 253) / 256;
7558 sd->next_decay_max_lb_cost = jiffies + HZ;
7559 need_decay = 1;
7560 }
7561 max_cost += sd->max_newidle_lb_cost;
7562
7563 if (!(sd->flags & SD_LOAD_BALANCE))
7564 continue;
7565
7566 /*
7567 * Stop the load balance at this level. There is another
7568 * CPU in our sched group which is doing load balancing more
7569 * actively.
7570 */
7571 if (!continue_balancing) {
7572 if (need_decay)
7573 continue;
7574 break;
7575 }
7576
7577 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7578
7579 need_serialize = sd->flags & SD_SERIALIZE;
7580 if (need_serialize) {
7581 if (!spin_trylock(&balancing))
7582 goto out;
7583 }
7584
7585 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7586 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7587 /*
7588 * The LBF_DST_PINNED logic could have changed
7589 * env->dst_cpu, so we can't know our idle
7590 * state even if we migrated tasks. Update it.
7591 */
7592 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7593 }
7594 sd->last_balance = jiffies;
7595 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
7596 }
7597 if (need_serialize)
7598 spin_unlock(&balancing);
7599 out:
7600 if (time_after(next_balance, sd->last_balance + interval)) {
7601 next_balance = sd->last_balance + interval;
7602 update_next_balance = 1;
7603 }
7604 }
7605 if (need_decay) {
7606 /*
7607 * Ensure the rq-wide value also decays but keep it at a
7608 * reasonable floor to avoid funnies with rq->avg_idle.
7609 */
7610 rq->max_idle_balance_cost =
7611 max((u64)sysctl_sched_migration_cost, max_cost);
7612 }
7613 rcu_read_unlock();
7614
7615 /*
7616 * next_balance will be updated only when there is a need.
7617 * When the cpu is attached to null domain for ex, it will not be
7618 * updated.
7619 */
7620 if (likely(update_next_balance))
7621 rq->next_balance = next_balance;
7622 }
7623
7624 #ifdef CONFIG_NO_HZ_COMMON
7625 /*
7626 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
7627 * rebalancing for all the cpus for whom scheduler ticks are stopped.
7628 */
7629 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7630 {
7631 int this_cpu = this_rq->cpu;
7632 struct rq *rq;
7633 int balance_cpu;
7634
7635 if (idle != CPU_IDLE ||
7636 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7637 goto end;
7638
7639 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7640 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7641 continue;
7642
7643 /*
7644 * If this cpu gets work to do, stop the load balancing
7645 * work being done for other cpus. Next load
7646 * balancing owner will pick it up.
7647 */
7648 if (need_resched())
7649 break;
7650
7651 rq = cpu_rq(balance_cpu);
7652
7653 /*
7654 * If time for next balance is due,
7655 * do the balance.
7656 */
7657 if (time_after_eq(jiffies, rq->next_balance)) {
7658 raw_spin_lock_irq(&rq->lock);
7659 update_rq_clock(rq);
7660 update_idle_cpu_load(rq);
7661 raw_spin_unlock_irq(&rq->lock);
7662 rebalance_domains(rq, CPU_IDLE);
7663 }
7664
7665 if (time_after(this_rq->next_balance, rq->next_balance))
7666 this_rq->next_balance = rq->next_balance;
7667 }
7668 nohz.next_balance = this_rq->next_balance;
7669 end:
7670 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7671 }
7672
7673 /*
7674 * Current heuristic for kicking the idle load balancer in the presence
7675 * of an idle cpu in the system.
7676 * - This rq has more than one task.
7677 * - This rq has at least one CFS task and the capacity of the CPU is
7678 * significantly reduced because of RT tasks or IRQs.
7679 * - At parent of LLC scheduler domain level, this cpu's scheduler group has
7680 * multiple busy cpu.
7681 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
7682 * domain span are idle.
7683 */
7684 static inline bool nohz_kick_needed(struct rq *rq)
7685 {
7686 unsigned long now = jiffies;
7687 struct sched_domain *sd;
7688 struct sched_group_capacity *sgc;
7689 int nr_busy, cpu = rq->cpu;
7690 bool kick = false;
7691
7692 if (unlikely(rq->idle_balance))
7693 return false;
7694
7695 /*
7696 * We may be recently in ticked or tickless idle mode. At the first
7697 * busy tick after returning from idle, we will update the busy stats.
7698 */
7699 set_cpu_sd_state_busy();
7700 nohz_balance_exit_idle(cpu);
7701
7702 /*
7703 * None are in tickless mode and hence no need for NOHZ idle load
7704 * balancing.
7705 */
7706 if (likely(!atomic_read(&nohz.nr_cpus)))
7707 return false;
7708
7709 if (time_before(now, nohz.next_balance))
7710 return false;
7711
7712 if (rq->nr_running >= 2)
7713 return true;
7714
7715 rcu_read_lock();
7716 sd = rcu_dereference(per_cpu(sd_busy, cpu));
7717 if (sd) {
7718 sgc = sd->groups->sgc;
7719 nr_busy = atomic_read(&sgc->nr_busy_cpus);
7720
7721 if (nr_busy > 1) {
7722 kick = true;
7723 goto unlock;
7724 }
7725
7726 }
7727
7728 sd = rcu_dereference(rq->sd);
7729 if (sd) {
7730 if ((rq->cfs.h_nr_running >= 1) &&
7731 check_cpu_capacity(rq, sd)) {
7732 kick = true;
7733 goto unlock;
7734 }
7735 }
7736
7737 sd = rcu_dereference(per_cpu(sd_asym, cpu));
7738 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7739 sched_domain_span(sd)) < cpu)) {
7740 kick = true;
7741 goto unlock;
7742 }
7743
7744 unlock:
7745 rcu_read_unlock();
7746 return kick;
7747 }
7748 #else
7749 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
7750 #endif
7751
7752 /*
7753 * run_rebalance_domains is triggered when needed from the scheduler tick.
7754 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
7755 */
7756 static void run_rebalance_domains(struct softirq_action *h)
7757 {
7758 struct rq *this_rq = this_rq();
7759 enum cpu_idle_type idle = this_rq->idle_balance ?
7760 CPU_IDLE : CPU_NOT_IDLE;
7761
7762 /*
7763 * If this cpu has a pending nohz_balance_kick, then do the
7764 * balancing on behalf of the other idle cpus whose ticks are
7765 * stopped. Do nohz_idle_balance *before* rebalance_domains to
7766 * give the idle cpus a chance to load balance. Else we may
7767 * load balance only within the local sched_domain hierarchy
7768 * and abort nohz_idle_balance altogether if we pull some load.
7769 */
7770 nohz_idle_balance(this_rq, idle);
7771 rebalance_domains(this_rq, idle);
7772 }
7773
7774 /*
7775 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
7776 */
7777 void trigger_load_balance(struct rq *rq)
7778 {
7779 /* Don't need to rebalance while attached to NULL domain */
7780 if (unlikely(on_null_domain(rq)))
7781 return;
7782
7783 if (time_after_eq(jiffies, rq->next_balance))
7784 raise_softirq(SCHED_SOFTIRQ);
7785 #ifdef CONFIG_NO_HZ_COMMON
7786 if (nohz_kick_needed(rq))
7787 nohz_balancer_kick();
7788 #endif
7789 }
7790
7791 static void rq_online_fair(struct rq *rq)
7792 {
7793 update_sysctl();
7794
7795 update_runtime_enabled(rq);
7796 }
7797
7798 static void rq_offline_fair(struct rq *rq)
7799 {
7800 update_sysctl();
7801
7802 /* Ensure any throttled groups are reachable by pick_next_task */
7803 unthrottle_offline_cfs_rqs(rq);
7804 }
7805
7806 #endif /* CONFIG_SMP */
7807
7808 /*
7809 * scheduler tick hitting a task of our scheduling class:
7810 */
7811 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7812 {
7813 struct cfs_rq *cfs_rq;
7814 struct sched_entity *se = &curr->se;
7815
7816 for_each_sched_entity(se) {
7817 cfs_rq = cfs_rq_of(se);
7818 entity_tick(cfs_rq, se, queued);
7819 }
7820
7821 if (numabalancing_enabled)
7822 task_tick_numa(rq, curr);
7823
7824 update_rq_runnable_avg(rq, 1);
7825 }
7826
7827 /*
7828 * called on fork with the child task as argument from the parent's context
7829 * - child not yet on the tasklist
7830 * - preemption disabled
7831 */
7832 static void task_fork_fair(struct task_struct *p)
7833 {
7834 struct cfs_rq *cfs_rq;
7835 struct sched_entity *se = &p->se, *curr;
7836 int this_cpu = smp_processor_id();
7837 struct rq *rq = this_rq();
7838 unsigned long flags;
7839
7840 raw_spin_lock_irqsave(&rq->lock, flags);
7841
7842 update_rq_clock(rq);
7843
7844 cfs_rq = task_cfs_rq(current);
7845 curr = cfs_rq->curr;
7846
7847 /*
7848 * Not only the cpu but also the task_group of the parent might have
7849 * been changed after parent->se.parent,cfs_rq were copied to
7850 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
7851 * of child point to valid ones.
7852 */
7853 rcu_read_lock();
7854 __set_task_cpu(p, this_cpu);
7855 rcu_read_unlock();
7856
7857 update_curr(cfs_rq);
7858
7859 if (curr)
7860 se->vruntime = curr->vruntime;
7861 place_entity(cfs_rq, se, 1);
7862
7863 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
7864 /*
7865 * Upon rescheduling, sched_class::put_prev_task() will place
7866 * 'current' within the tree based on its new key value.
7867 */
7868 swap(curr->vruntime, se->vruntime);
7869 resched_curr(rq);
7870 }
7871
7872 se->vruntime -= cfs_rq->min_vruntime;
7873
7874 raw_spin_unlock_irqrestore(&rq->lock, flags);
7875 }
7876
7877 /*
7878 * Priority of the task has changed. Check to see if we preempt
7879 * the current task.
7880 */
7881 static void
7882 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7883 {
7884 if (!task_on_rq_queued(p))
7885 return;
7886
7887 /*
7888 * Reschedule if we are currently running on this runqueue and
7889 * our priority decreased, or if we are not currently running on
7890 * this runqueue and our priority is higher than the current's
7891 */
7892 if (rq->curr == p) {
7893 if (p->prio > oldprio)
7894 resched_curr(rq);
7895 } else
7896 check_preempt_curr(rq, p, 0);
7897 }
7898
7899 static void switched_from_fair(struct rq *rq, struct task_struct *p)
7900 {
7901 struct sched_entity *se = &p->se;
7902 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7903
7904 /*
7905 * Ensure the task's vruntime is normalized, so that when it's
7906 * switched back to the fair class the enqueue_entity(.flags=0) will
7907 * do the right thing.
7908 *
7909 * If it's queued, then the dequeue_entity(.flags=0) will already
7910 * have normalized the vruntime, if it's !queued, then only when
7911 * the task is sleeping will it still have non-normalized vruntime.
7912 */
7913 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
7914 /*
7915 * Fix up our vruntime so that the current sleep doesn't
7916 * cause 'unlimited' sleep bonus.
7917 */
7918 place_entity(cfs_rq, se, 0);
7919 se->vruntime -= cfs_rq->min_vruntime;
7920 }
7921
7922 #ifdef CONFIG_SMP
7923 /*
7924 * Remove our load from contribution when we leave sched_fair
7925 * and ensure we don't carry in an old decay_count if we
7926 * switch back.
7927 */
7928 if (se->avg.decay_count) {
7929 __synchronize_entity_decay(se);
7930 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
7931 }
7932 #endif
7933 }
7934
7935 /*
7936 * We switched to the sched_fair class.
7937 */
7938 static void switched_to_fair(struct rq *rq, struct task_struct *p)
7939 {
7940 #ifdef CONFIG_FAIR_GROUP_SCHED
7941 struct sched_entity *se = &p->se;
7942 /*
7943 * Since the real-depth could have been changed (only FAIR
7944 * class maintain depth value), reset depth properly.
7945 */
7946 se->depth = se->parent ? se->parent->depth + 1 : 0;
7947 #endif
7948 if (!task_on_rq_queued(p))
7949 return;
7950
7951 /*
7952 * We were most likely switched from sched_rt, so
7953 * kick off the schedule if running, otherwise just see
7954 * if we can still preempt the current task.
7955 */
7956 if (rq->curr == p)
7957 resched_curr(rq);
7958 else
7959 check_preempt_curr(rq, p, 0);
7960 }
7961
7962 /* Account for a task changing its policy or group.
7963 *
7964 * This routine is mostly called to set cfs_rq->curr field when a task
7965 * migrates between groups/classes.
7966 */
7967 static void set_curr_task_fair(struct rq *rq)
7968 {
7969 struct sched_entity *se = &rq->curr->se;
7970
7971 for_each_sched_entity(se) {
7972 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7973
7974 set_next_entity(cfs_rq, se);
7975 /* ensure bandwidth has been allocated on our new cfs_rq */
7976 account_cfs_rq_runtime(cfs_rq, 0);
7977 }
7978 }
7979
7980 void init_cfs_rq(struct cfs_rq *cfs_rq)
7981 {
7982 cfs_rq->tasks_timeline = RB_ROOT;
7983 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7984 #ifndef CONFIG_64BIT
7985 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7986 #endif
7987 #ifdef CONFIG_SMP
7988 atomic64_set(&cfs_rq->decay_counter, 1);
7989 atomic_long_set(&cfs_rq->removed_load, 0);
7990 #endif
7991 }
7992
7993 #ifdef CONFIG_FAIR_GROUP_SCHED
7994 static void task_move_group_fair(struct task_struct *p, int queued)
7995 {
7996 struct sched_entity *se = &p->se;
7997 struct cfs_rq *cfs_rq;
7998
7999 /*
8000 * If the task was not on the rq at the time of this cgroup movement
8001 * it must have been asleep, sleeping tasks keep their ->vruntime
8002 * absolute on their old rq until wakeup (needed for the fair sleeper
8003 * bonus in place_entity()).
8004 *
8005 * If it was on the rq, we've just 'preempted' it, which does convert
8006 * ->vruntime to a relative base.
8007 *
8008 * Make sure both cases convert their relative position when migrating
8009 * to another cgroup's rq. This does somewhat interfere with the
8010 * fair sleeper stuff for the first placement, but who cares.
8011 */
8012 /*
8013 * When !queued, vruntime of the task has usually NOT been normalized.
8014 * But there are some cases where it has already been normalized:
8015 *
8016 * - Moving a forked child which is waiting for being woken up by
8017 * wake_up_new_task().
8018 * - Moving a task which has been woken up by try_to_wake_up() and
8019 * waiting for actually being woken up by sched_ttwu_pending().
8020 *
8021 * To prevent boost or penalty in the new cfs_rq caused by delta
8022 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
8023 */
8024 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
8025 queued = 1;
8026
8027 if (!queued)
8028 se->vruntime -= cfs_rq_of(se)->min_vruntime;
8029 set_task_rq(p, task_cpu(p));
8030 se->depth = se->parent ? se->parent->depth + 1 : 0;
8031 if (!queued) {
8032 cfs_rq = cfs_rq_of(se);
8033 se->vruntime += cfs_rq->min_vruntime;
8034 #ifdef CONFIG_SMP
8035 /*
8036 * migrate_task_rq_fair() will have removed our previous
8037 * contribution, but we must synchronize for ongoing future
8038 * decay.
8039 */
8040 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
8041 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
8042 #endif
8043 }
8044 }
8045
8046 void free_fair_sched_group(struct task_group *tg)
8047 {
8048 int i;
8049
8050 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8051
8052 for_each_possible_cpu(i) {
8053 if (tg->cfs_rq)
8054 kfree(tg->cfs_rq[i]);
8055 if (tg->se)
8056 kfree(tg->se[i]);
8057 }
8058
8059 kfree(tg->cfs_rq);
8060 kfree(tg->se);
8061 }
8062
8063 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8064 {
8065 struct cfs_rq *cfs_rq;
8066 struct sched_entity *se;
8067 int i;
8068
8069 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8070 if (!tg->cfs_rq)
8071 goto err;
8072 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8073 if (!tg->se)
8074 goto err;
8075
8076 tg->shares = NICE_0_LOAD;
8077
8078 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8079
8080 for_each_possible_cpu(i) {
8081 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8082 GFP_KERNEL, cpu_to_node(i));
8083 if (!cfs_rq)
8084 goto err;
8085
8086 se = kzalloc_node(sizeof(struct sched_entity),
8087 GFP_KERNEL, cpu_to_node(i));
8088 if (!se)
8089 goto err_free_rq;
8090
8091 init_cfs_rq(cfs_rq);
8092 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8093 }
8094
8095 return 1;
8096
8097 err_free_rq:
8098 kfree(cfs_rq);
8099 err:
8100 return 0;
8101 }
8102
8103 void unregister_fair_sched_group(struct task_group *tg, int cpu)
8104 {
8105 struct rq *rq = cpu_rq(cpu);
8106 unsigned long flags;
8107
8108 /*
8109 * Only empty task groups can be destroyed; so we can speculatively
8110 * check on_list without danger of it being re-added.
8111 */
8112 if (!tg->cfs_rq[cpu]->on_list)
8113 return;
8114
8115 raw_spin_lock_irqsave(&rq->lock, flags);
8116 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8117 raw_spin_unlock_irqrestore(&rq->lock, flags);
8118 }
8119
8120 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8121 struct sched_entity *se, int cpu,
8122 struct sched_entity *parent)
8123 {
8124 struct rq *rq = cpu_rq(cpu);
8125
8126 cfs_rq->tg = tg;
8127 cfs_rq->rq = rq;
8128 init_cfs_rq_runtime(cfs_rq);
8129
8130 tg->cfs_rq[cpu] = cfs_rq;
8131 tg->se[cpu] = se;
8132
8133 /* se could be NULL for root_task_group */
8134 if (!se)
8135 return;
8136
8137 if (!parent) {
8138 se->cfs_rq = &rq->cfs;
8139 se->depth = 0;
8140 } else {
8141 se->cfs_rq = parent->my_q;
8142 se->depth = parent->depth + 1;
8143 }
8144
8145 se->my_q = cfs_rq;
8146 /* guarantee group entities always have weight */
8147 update_load_set(&se->load, NICE_0_LOAD);
8148 se->parent = parent;
8149 }
8150
8151 static DEFINE_MUTEX(shares_mutex);
8152
8153 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8154 {
8155 int i;
8156 unsigned long flags;
8157
8158 /*
8159 * We can't change the weight of the root cgroup.
8160 */
8161 if (!tg->se[0])
8162 return -EINVAL;
8163
8164 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8165
8166 mutex_lock(&shares_mutex);
8167 if (tg->shares == shares)
8168 goto done;
8169
8170 tg->shares = shares;
8171 for_each_possible_cpu(i) {
8172 struct rq *rq = cpu_rq(i);
8173 struct sched_entity *se;
8174
8175 se = tg->se[i];
8176 /* Propagate contribution to hierarchy */
8177 raw_spin_lock_irqsave(&rq->lock, flags);
8178
8179 /* Possible calls to update_curr() need rq clock */
8180 update_rq_clock(rq);
8181 for_each_sched_entity(se)
8182 update_cfs_shares(group_cfs_rq(se));
8183 raw_spin_unlock_irqrestore(&rq->lock, flags);
8184 }
8185
8186 done:
8187 mutex_unlock(&shares_mutex);
8188 return 0;
8189 }
8190 #else /* CONFIG_FAIR_GROUP_SCHED */
8191
8192 void free_fair_sched_group(struct task_group *tg) { }
8193
8194 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8195 {
8196 return 1;
8197 }
8198
8199 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
8200
8201 #endif /* CONFIG_FAIR_GROUP_SCHED */
8202
8203
8204 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
8205 {
8206 struct sched_entity *se = &task->se;
8207 unsigned int rr_interval = 0;
8208
8209 /*
8210 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
8211 * idle runqueue:
8212 */
8213 if (rq->cfs.load.weight)
8214 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
8215
8216 return rr_interval;
8217 }
8218
8219 /*
8220 * All the scheduling class methods:
8221 */
8222 const struct sched_class fair_sched_class = {
8223 .next = &idle_sched_class,
8224 .enqueue_task = enqueue_task_fair,
8225 .dequeue_task = dequeue_task_fair,
8226 .yield_task = yield_task_fair,
8227 .yield_to_task = yield_to_task_fair,
8228
8229 .check_preempt_curr = check_preempt_wakeup,
8230
8231 .pick_next_task = pick_next_task_fair,
8232 .put_prev_task = put_prev_task_fair,
8233
8234 #ifdef CONFIG_SMP
8235 .select_task_rq = select_task_rq_fair,
8236 .migrate_task_rq = migrate_task_rq_fair,
8237
8238 .rq_online = rq_online_fair,
8239 .rq_offline = rq_offline_fair,
8240
8241 .task_waking = task_waking_fair,
8242 #endif
8243
8244 .set_curr_task = set_curr_task_fair,
8245 .task_tick = task_tick_fair,
8246 .task_fork = task_fork_fair,
8247
8248 .prio_changed = prio_changed_fair,
8249 .switched_from = switched_from_fair,
8250 .switched_to = switched_to_fair,
8251
8252 .get_rr_interval = get_rr_interval_fair,
8253
8254 .update_curr = update_curr_fair,
8255
8256 #ifdef CONFIG_FAIR_GROUP_SCHED
8257 .task_move_group = task_move_group_fair,
8258 #endif
8259 };
8260
8261 #ifdef CONFIG_SCHED_DEBUG
8262 void print_cfs_stats(struct seq_file *m, int cpu)
8263 {
8264 struct cfs_rq *cfs_rq;
8265
8266 rcu_read_lock();
8267 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
8268 print_cfs_rq(m, cpu, cfs_rq);
8269 rcu_read_unlock();
8270 }
8271 #endif
8272
8273 __init void init_sched_fair_class(void)
8274 {
8275 #ifdef CONFIG_SMP
8276 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8277
8278 #ifdef CONFIG_NO_HZ_COMMON
8279 nohz.next_balance = jiffies;
8280 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8281 cpu_notifier(sched_ilb_notifier, 0);
8282 #endif
8283 #endif /* SMP */
8284
8285 }