1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Scheduler internal types and methods:
4 */
5 #include <linux/sched.h>
6
7 #include <linux/sched/autogroup.h>
8 #include <linux/sched/clock.h>
9 #include <linux/sched/coredump.h>
10 #include <linux/sched/cpufreq.h>
11 #include <linux/sched/cputime.h>
12 #include <linux/sched/deadline.h>
13 #include <linux/sched/debug.h>
14 #include <linux/sched/hotplug.h>
15 #include <linux/sched/idle.h>
16 #include <linux/sched/init.h>
17 #include <linux/sched/isolation.h>
18 #include <linux/sched/jobctl.h>
19 #include <linux/sched/loadavg.h>
20 #include <linux/sched/mm.h>
21 #include <linux/sched/nohz.h>
22 #include <linux/sched/numa_balancing.h>
23 #include <linux/sched/prio.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/stat.h>
27 #include <linux/sched/sysctl.h>
28 #include <linux/sched/task.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/sched/topology.h>
31 #include <linux/sched/user.h>
32 #include <linux/sched/wake_q.h>
33 #include <linux/sched/xacct.h>
34
35 #include <uapi/linux/sched/types.h>
36
37 #include <linux/binfmts.h>
38 #include <linux/blkdev.h>
39 #include <linux/compat.h>
40 #include <linux/context_tracking.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpuidle.h>
43 #include <linux/cpuset.h>
44 #include <linux/ctype.h>
45 #include <linux/debugfs.h>
46 #include <linux/delayacct.h>
47 #include <linux/init_task.h>
48 #include <linux/kprobes.h>
49 #include <linux/kthread.h>
50 #include <linux/membarrier.h>
51 #include <linux/migrate.h>
52 #include <linux/mmu_context.h>
53 #include <linux/nmi.h>
54 #include <linux/proc_fs.h>
55 #include <linux/prefetch.h>
56 #include <linux/profile.h>
57 #include <linux/rcupdate_wait.h>
58 #include <linux/security.h>
59 #include <linux/stackprotector.h>
60 #include <linux/stop_machine.h>
61 #include <linux/suspend.h>
62 #include <linux/swait.h>
63 #include <linux/syscalls.h>
64 #include <linux/task_work.h>
65 #include <linux/tsacct_kern.h>
66
67 #include <asm/tlb.h>
68
69 #ifdef CONFIG_PARAVIRT
70 # include <asm/paravirt.h>
71 #endif
72
73 #include "cpupri.h"
74 #include "cpudeadline.h"
75
76 #ifdef CONFIG_SCHED_DEBUG
77 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
78 #else
79 # define SCHED_WARN_ON(x) ({ (void)(x), 0; })
80 #endif
81
82 struct rq;
83 struct cpuidle_state;
84
85 /* task_struct::on_rq states: */
86 #define TASK_ON_RQ_QUEUED 1
87 #define TASK_ON_RQ_MIGRATING 2
88
89 extern __read_mostly int scheduler_running;
90
91 extern unsigned long calc_load_update;
92 extern atomic_long_t calc_load_tasks;
93
94 extern void calc_global_load_tick(struct rq *this_rq);
95 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
96
97 #ifdef CONFIG_SMP
98 extern void cpu_load_update_active(struct rq *this_rq);
99 #else
100 static inline void cpu_load_update_active(struct rq *this_rq) { }
101 #endif
102
103 /*
104 * Helpers for converting nanosecond timing to jiffy resolution
105 */
106 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
107
108 /*
109 * Increase resolution of nice-level calculations for 64-bit architectures.
110 * The extra resolution improves shares distribution and load balancing of
111 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
112 * hierarchies, especially on larger systems. This is not a user-visible change
113 * and does not change the user-interface for setting shares/weights.
114 *
115 * We increase resolution only if we have enough bits to allow this increased
116 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
117 * are pretty high and the returns do not justify the increased costs.
118 *
119 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
120 * increase coverage and consistency always enable it on 64-bit platforms.
121 */
122 #ifdef CONFIG_64BIT
123 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
124 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
125 # define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
126 #else
127 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
128 # define scale_load(w) (w)
129 # define scale_load_down(w) (w)
130 #endif
131
132 /*
133 * Task weight (visible to users) and its load (invisible to users) have
134 * independent resolution, but they should be well calibrated. We use
135 * scale_load() and scale_load_down(w) to convert between them. The
136 * following must be true:
137 *
138 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
139 *
140 */
141 #define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
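
/*
 * Illustrative sketch (hypothetical helper, not used anywhere in the
 * scheduler; assumes the usual SCHED_FIXEDPOINT_SHIFT of 10 and a nice-0
 * weight of 1024): on 64-bit, scale_load(1024) == 1024 << 10 == 1 << 20 ==
 * NICE_0_LOAD, while on 32-bit scale_load() is a no-op and NICE_0_LOAD ==
 * 1024, so the invariant above holds either way.
 */
static inline bool nice_0_load_invariant_holds(void)
{
	/* 1024 is sched_prio_to_weight[20], the weight of a nice-0 task */
	return scale_load(1024) == NICE_0_LOAD;
}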
142
143 /*
144 * Single value that decides SCHED_DEADLINE internal math precision.
145 * 10 -> just above 1us
146 * 9 -> just above 0.5us
147 */
148 #define DL_SCALE 10
149
150 /*
151  * Single value that denotes runtime == period, i.e. unlimited time.
152 */
153 #define RUNTIME_INF ((u64)~0ULL)
154
155 static inline int idle_policy(int policy)
156 {
157 return policy == SCHED_IDLE;
158 }
159 static inline int fair_policy(int policy)
160 {
161 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
162 }
163
164 static inline int rt_policy(int policy)
165 {
166 return policy == SCHED_FIFO || policy == SCHED_RR;
167 }
168
169 static inline int dl_policy(int policy)
170 {
171 return policy == SCHED_DEADLINE;
172 }
173 static inline bool valid_policy(int policy)
174 {
175 return idle_policy(policy) || fair_policy(policy) ||
176 rt_policy(policy) || dl_policy(policy);
177 }
178
179 static inline int task_has_rt_policy(struct task_struct *p)
180 {
181 return rt_policy(p->policy);
182 }
183
184 static inline int task_has_dl_policy(struct task_struct *p)
185 {
186 return dl_policy(p->policy);
187 }
188
189 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
190
191 /*
192 * !! For sched_setattr_nocheck() (kernel) only !!
193 *
194 * This is actually gross. :(
195 *
196 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
197 * tasks, but still be able to sleep. We need this on platforms that cannot
198  * atomically change clock frequency. Remove this once fast switching is
199  * available on such platforms.
200 *
201 * SUGOV stands for SchedUtil GOVernor.
202 */
203 #define SCHED_FLAG_SUGOV 0x10000000
204
205 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
206 {
207 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
208 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
209 #else
210 return false;
211 #endif
212 }
213
214 /*
215 * Tells if entity @a should preempt entity @b.
216 */
217 static inline bool
218 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
219 {
220 return dl_entity_is_special(a) ||
221 dl_time_before(a->deadline, b->deadline);
222 }
223
224 /*
225 * This is the priority-queue data structure of the RT scheduling class:
226 */
227 struct rt_prio_array {
228 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
229 struct list_head queue[MAX_RT_PRIO];
230 };
231
232 struct rt_bandwidth {
233 /* nests inside the rq lock: */
234 raw_spinlock_t rt_runtime_lock;
235 ktime_t rt_period;
236 u64 rt_runtime;
237 struct hrtimer rt_period_timer;
238 unsigned int rt_period_active;
239 };
240
241 void __dl_clear_params(struct task_struct *p);
242
243 /*
244 * To keep the bandwidth of -deadline tasks and groups under control
245 * we need some place where:
246 * - store the maximum -deadline bandwidth of the system (the group);
247 * - cache the fraction of that bandwidth that is currently allocated.
248 *
249 * This is all done in the data structure below. It is similar to the
250 * one used for RT-throttling (rt_bandwidth), with the main difference
251  * that, since here we are only interested in admission control, we
252  * do not decrease any runtime while the group "executes", nor do we
253  * need a timer to replenish it.
254 *
255 * With respect to SMP, the bandwidth is given on a per-CPU basis,
256 * meaning that:
257 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
258  *  - the dl_total_bw array contains, in its i-th element, the bandwidth
259  *    currently allocated on the i-th CPU.
260  * Moreover, groups consume bandwidth on each CPU, while tasks only
261  * consume bandwidth on the CPU they're running on.
262  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
263  * that will be shown the next time the proc or cgroup controls are
264  * read. It can in turn be changed by writing to its own
265  * control.
266 */
267 struct dl_bandwidth {
268 raw_spinlock_t dl_runtime_lock;
269 u64 dl_runtime;
270 u64 dl_period;
271 };
272
273 static inline int dl_bandwidth_enabled(void)
274 {
275 return sysctl_sched_rt_runtime >= 0;
276 }
277
278 struct dl_bw {
279 raw_spinlock_t lock;
280 u64 bw;
281 u64 total_bw;
282 };
283
284 static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
285
286 static inline
287 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
288 {
289 dl_b->total_bw -= tsk_bw;
290 __dl_update(dl_b, (s32)tsk_bw / cpus);
291 }
292
293 static inline
294 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
295 {
296 dl_b->total_bw += tsk_bw;
297 __dl_update(dl_b, -((s32)tsk_bw / cpus));
298 }
299
300 static inline
301 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
302 {
303 return dl_b->bw != -1 &&
304 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
305 }
306
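/*
 * A hedged usage sketch (hypothetical helper, not the actual admission
 * path in deadline.c): the helpers above combine into a test-and-commit
 * step that replaces a task's old bandwidth with a new one if the
 * per-root-domain budget allows it. The caller is assumed to hold
 * dl_b->lock; @cpus is the number of CPUs the bandwidth is spread over,
 * and the bandwidths are in the fixed-point format produced by to_ratio().
 */
static inline int example_dl_bw_try_commit(struct dl_bw *dl_b, int cpus,
					    u64 old_bw, u64 new_bw)
{
	if (__dl_overflow(dl_b, cpus, old_bw, new_bw))
		return -EBUSY;

	__dl_sub(dl_b, old_bw, cpus);
	__dl_add(dl_b, new_bw, cpus);

	return 0;
}
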
307 extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
308 extern void init_dl_bw(struct dl_bw *dl_b);
309 extern int sched_dl_global_validate(void);
310 extern void sched_dl_do_global(void);
311 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
312 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
313 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
314 extern bool __checkparam_dl(const struct sched_attr *attr);
315 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
316 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
317 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
318 extern bool dl_cpu_busy(unsigned int cpu);
319
320 #ifdef CONFIG_CGROUP_SCHED
321
322 #include <linux/cgroup.h>
323
324 struct cfs_rq;
325 struct rt_rq;
326
327 extern struct list_head task_groups;
328
329 struct cfs_bandwidth {
330 #ifdef CONFIG_CFS_BANDWIDTH
331 raw_spinlock_t lock;
332 ktime_t period;
333 u64 quota;
334 u64 runtime;
335 s64 hierarchical_quota;
336 u64 runtime_expires;
337
338 int idle;
339 int period_active;
340 struct hrtimer period_timer;
341 struct hrtimer slack_timer;
342 struct list_head throttled_cfs_rq;
343
344 /* Statistics: */
345 int nr_periods;
346 int nr_throttled;
347 u64 throttled_time;
348 #endif
349 };
350
351 /* Task group related information */
352 struct task_group {
353 struct cgroup_subsys_state css;
354
355 #ifdef CONFIG_FAIR_GROUP_SCHED
356 /* schedulable entities of this group on each CPU */
357 struct sched_entity **se;
358 /* runqueue "owned" by this group on each CPU */
359 struct cfs_rq **cfs_rq;
360 unsigned long shares;
361
362 #ifdef CONFIG_SMP
363 /*
364 * load_avg can be heavily contended at clock tick time, so put
365 * it in its own cacheline separated from the fields above which
366 * will also be accessed at each tick.
367 */
368 atomic_long_t load_avg ____cacheline_aligned;
369 #endif
370 #endif
371
372 #ifdef CONFIG_RT_GROUP_SCHED
373 struct sched_rt_entity **rt_se;
374 struct rt_rq **rt_rq;
375
376 struct rt_bandwidth rt_bandwidth;
377 #endif
378
379 struct rcu_head rcu;
380 struct list_head list;
381
382 struct task_group *parent;
383 struct list_head siblings;
384 struct list_head children;
385
386 #ifdef CONFIG_SCHED_AUTOGROUP
387 struct autogroup *autogroup;
388 #endif
389
390 struct cfs_bandwidth cfs_bandwidth;
391 };
392
393 #ifdef CONFIG_FAIR_GROUP_SCHED
394 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
395
396 /*
397  * A weight of 0 or 1 can cause arithmetic problems.
398  * The weight of a cfs_rq is the sum of the weights of the entities
399  * queued on it, so the weight of an entity should not be too large,
400  * and neither should the shares value of a task group.
401  * (The default weight is 1024 - so there's no practical
402  *  limitation from this.)
403 */
404 #define MIN_SHARES (1UL << 1)
405 #define MAX_SHARES (1UL << 18)
406 #endif
407
408 typedef int (*tg_visitor)(struct task_group *, void *);
409
410 extern int walk_tg_tree_from(struct task_group *from,
411 tg_visitor down, tg_visitor up, void *data);
412
413 /*
414 * Iterate the full tree, calling @down when first entering a node and @up when
415 * leaving it for the final time.
416 *
417 * Caller must hold rcu_lock or sufficient equivalent.
418 */
419 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
420 {
421 return walk_tg_tree_from(&root_task_group, down, up, data);
422 }
423
424 extern int tg_nop(struct task_group *tg, void *data);
425
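/*
 * Minimal usage sketch (hypothetical helpers, not used by the scheduler):
 * count every task_group in the hierarchy with a down-visitor, using
 * tg_nop() as the up-visitor. As noted above, the walk must run inside an
 * RCU read-side section.
 */
static inline int example_tg_count_visitor(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

static inline int example_count_task_groups(void)
{
	int nr = 0;

	rcu_read_lock();
	walk_tg_tree(example_tg_count_visitor, tg_nop, &nr);
	rcu_read_unlock();

	return nr;
}
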
426 extern void free_fair_sched_group(struct task_group *tg);
427 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
428 extern void online_fair_sched_group(struct task_group *tg);
429 extern void unregister_fair_sched_group(struct task_group *tg);
430 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
431 struct sched_entity *se, int cpu,
432 struct sched_entity *parent);
433 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
434
435 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
436 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
437 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
438
439 extern void free_rt_sched_group(struct task_group *tg);
440 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
441 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
442 struct sched_rt_entity *rt_se, int cpu,
443 struct sched_rt_entity *parent);
444 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
445 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
446 extern long sched_group_rt_runtime(struct task_group *tg);
447 extern long sched_group_rt_period(struct task_group *tg);
448 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
449
450 extern struct task_group *sched_create_group(struct task_group *parent);
451 extern void sched_online_group(struct task_group *tg,
452 struct task_group *parent);
453 extern void sched_destroy_group(struct task_group *tg);
454 extern void sched_offline_group(struct task_group *tg);
455
456 extern void sched_move_task(struct task_struct *tsk);
457
458 #ifdef CONFIG_FAIR_GROUP_SCHED
459 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
460
461 #ifdef CONFIG_SMP
462 extern void set_task_rq_fair(struct sched_entity *se,
463 struct cfs_rq *prev, struct cfs_rq *next);
464 #else /* !CONFIG_SMP */
465 static inline void set_task_rq_fair(struct sched_entity *se,
466 struct cfs_rq *prev, struct cfs_rq *next) { }
467 #endif /* CONFIG_SMP */
468 #endif /* CONFIG_FAIR_GROUP_SCHED */
469
470 #else /* CONFIG_CGROUP_SCHED */
471
472 struct cfs_bandwidth { };
473
474 #endif /* CONFIG_CGROUP_SCHED */
475
476 /* CFS-related fields in a runqueue */
477 struct cfs_rq {
478 struct load_weight load;
479 unsigned long runnable_weight;
480 unsigned int nr_running;
481 unsigned int h_nr_running;
482
483 u64 exec_clock;
484 u64 min_vruntime;
485 #ifndef CONFIG_64BIT
486 u64 min_vruntime_copy;
487 #endif
488
489 struct rb_root_cached tasks_timeline;
490
491 /*
492 * 'curr' points to currently running entity on this cfs_rq.
493  * It is set to NULL otherwise (i.e., when none are currently running).
494 */
495 struct sched_entity *curr;
496 struct sched_entity *next;
497 struct sched_entity *last;
498 struct sched_entity *skip;
499
500 #ifdef CONFIG_SCHED_DEBUG
501 unsigned int nr_spread_over;
502 #endif
503
504 #ifdef CONFIG_SMP
505 /*
506 * CFS load tracking
507 */
508 struct sched_avg avg;
509 #ifndef CONFIG_64BIT
510 u64 load_last_update_time_copy;
511 #endif
512 struct {
513 raw_spinlock_t lock ____cacheline_aligned;
514 int nr;
515 unsigned long load_avg;
516 unsigned long util_avg;
517 unsigned long runnable_sum;
518 } removed;
519
520 #ifdef CONFIG_FAIR_GROUP_SCHED
521 unsigned long tg_load_avg_contrib;
522 long propagate;
523 long prop_runnable_sum;
524
525 /*
526 * h_load = weight * f(tg)
527 *
528 * Where f(tg) is the recursive weight fraction assigned to
529 * this group.
530 */
531 unsigned long h_load;
532 u64 last_h_load_update;
533 struct sched_entity *h_load_next;
534 #endif /* CONFIG_FAIR_GROUP_SCHED */
535 #endif /* CONFIG_SMP */
536
537 #ifdef CONFIG_FAIR_GROUP_SCHED
538 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
539
540 /*
541 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
542 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
543 * (like users, containers etc.)
544 *
545 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
546 * This list is used during load balance.
547 */
548 int on_list;
549 struct list_head leaf_cfs_rq_list;
550 struct task_group *tg; /* group that "owns" this runqueue */
551
552 #ifdef CONFIG_CFS_BANDWIDTH
553 int runtime_enabled;
554 u64 runtime_expires;
555 s64 runtime_remaining;
556
557 u64 throttled_clock;
558 u64 throttled_clock_task;
559 u64 throttled_clock_task_time;
560 int throttled;
561 int throttle_count;
562 struct list_head throttled_list;
563 #endif /* CONFIG_CFS_BANDWIDTH */
564 #endif /* CONFIG_FAIR_GROUP_SCHED */
565 };
566
567 static inline int rt_bandwidth_enabled(void)
568 {
569 return sysctl_sched_rt_runtime >= 0;
570 }
571
572 /* RT IPI pull logic requires IRQ_WORK */
573 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
574 # define HAVE_RT_PUSH_IPI
575 #endif
576
577 /* Real-Time classes' related field in a runqueue: */
578 struct rt_rq {
579 struct rt_prio_array active;
580 unsigned int rt_nr_running;
581 unsigned int rr_nr_running;
582 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
583 struct {
584 int curr; /* highest queued rt task prio */
585 #ifdef CONFIG_SMP
586 int next; /* next highest */
587 #endif
588 } highest_prio;
589 #endif
590 #ifdef CONFIG_SMP
591 unsigned long rt_nr_migratory;
592 unsigned long rt_nr_total;
593 int overloaded;
594 struct plist_head pushable_tasks;
595 #endif /* CONFIG_SMP */
596 int rt_queued;
597
598 int rt_throttled;
599 u64 rt_time;
600 u64 rt_runtime;
601 /* Nests inside the rq lock: */
602 raw_spinlock_t rt_runtime_lock;
603
604 #ifdef CONFIG_RT_GROUP_SCHED
605 unsigned long rt_nr_boosted;
606
607 struct rq *rq;
608 struct task_group *tg;
609 #endif
610 };
611
612 /* Deadline class' related fields in a runqueue */
613 struct dl_rq {
614 /* runqueue is an rbtree, ordered by deadline */
615 struct rb_root_cached root;
616
617 unsigned long dl_nr_running;
618
619 #ifdef CONFIG_SMP
620 /*
621 * Deadline values of the currently executing and the
622 * earliest ready task on this rq. Caching these facilitates
623  * the decision whether or not a ready but not running task
624 * should migrate somewhere else.
625 */
626 struct {
627 u64 curr;
628 u64 next;
629 } earliest_dl;
630
631 unsigned long dl_nr_migratory;
632 int overloaded;
633
634 /*
635 * Tasks on this rq that can be pushed away. They are kept in
636 * an rb-tree, ordered by tasks' deadlines, with caching
637 * of the leftmost (earliest deadline) element.
638 */
639 struct rb_root_cached pushable_dl_tasks_root;
640 #else
641 struct dl_bw dl_bw;
642 #endif
643 /*
644 * "Active utilization" for this runqueue: increased when a
645 * task wakes up (becomes TASK_RUNNING) and decreased when a
646 * task blocks
647 */
648 u64 running_bw;
649
650 /*
651 * Utilization of the tasks "assigned" to this runqueue (including
652 * the tasks that are in runqueue and the tasks that executed on this
653 * CPU and blocked). Increased when a task moves to this runqueue, and
654 * decreased when the task moves away (migrates, changes scheduling
655 * policy, or terminates).
656 * This is needed to compute the "inactive utilization" for the
657 * runqueue (inactive utilization = this_bw - running_bw).
658 */
659 u64 this_bw;
660 u64 extra_bw;
661
662 /*
663 * Inverse of the fraction of CPU utilization that can be reclaimed
664 * by the GRUB algorithm.
665 */
666 u64 bw_ratio;
667 };
668
669 #ifdef CONFIG_SMP
670
671 static inline bool sched_asym_prefer(int a, int b)
672 {
673 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
674 }
675
676 /*
677 * We add the notion of a root-domain which will be used to define per-domain
678 * variables. Each exclusive cpuset essentially defines an island domain by
679 * fully partitioning the member CPUs from any other cpuset. Whenever a new
680 * exclusive cpuset is created, we also create and attach a new root-domain
681 * object.
682 *
683 */
684 struct root_domain {
685 atomic_t refcount;
686 atomic_t rto_count;
687 struct rcu_head rcu;
688 cpumask_var_t span;
689 cpumask_var_t online;
690
691 /* Indicate more than one runnable task for any CPU */
692 bool overload;
693
694 /*
695 * The bit corresponding to a CPU gets set here if such CPU has more
696 * than one runnable -deadline task (as it is below for RT tasks).
697 */
698 cpumask_var_t dlo_mask;
699 atomic_t dlo_count;
700 struct dl_bw dl_bw;
701 struct cpudl cpudl;
702
703 #ifdef HAVE_RT_PUSH_IPI
704 /*
705 * For IPI pull requests, loop across the rto_mask.
706 */
707 struct irq_work rto_push_work;
708 raw_spinlock_t rto_lock;
709 /* These are only updated and read within rto_lock */
710 int rto_loop;
711 int rto_cpu;
712 /* These atomics are updated outside of a lock */
713 atomic_t rto_loop_next;
714 atomic_t rto_loop_start;
715 #endif
716 /*
717 * The "RT overload" flag: it gets set if a CPU has more than
718 * one runnable RT task.
719 */
720 cpumask_var_t rto_mask;
721 struct cpupri cpupri;
722
723 unsigned long max_cpu_capacity;
724 };
725
726 extern struct root_domain def_root_domain;
727 extern struct mutex sched_domains_mutex;
728
729 extern void init_defrootdomain(void);
730 extern int sched_init_domains(const struct cpumask *cpu_map);
731 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
732 extern void sched_get_rd(struct root_domain *rd);
733 extern void sched_put_rd(struct root_domain *rd);
734
735 #ifdef HAVE_RT_PUSH_IPI
736 extern void rto_push_irq_work_func(struct irq_work *work);
737 #endif
738 #endif /* CONFIG_SMP */
739
740 /*
741 * This is the main, per-CPU runqueue data structure.
742 *
743  * Locking rule: code that wants to lock multiple runqueues (such as
744  * the load balancing or the thread migration code) must acquire the
745  * locks in ascending &runqueue order.
746 */
747 struct rq {
748 /* runqueue lock: */
749 raw_spinlock_t lock;
750
751 /*
752 * nr_running and cpu_load should be in the same cacheline because
753 * remote CPUs use both these fields when doing load calculation.
754 */
755 unsigned int nr_running;
756 #ifdef CONFIG_NUMA_BALANCING
757 unsigned int nr_numa_running;
758 unsigned int nr_preferred_running;
759 #endif
760 #define CPU_LOAD_IDX_MAX 5
761 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
762 #ifdef CONFIG_NO_HZ_COMMON
763 #ifdef CONFIG_SMP
764 unsigned long last_load_update_tick;
765 unsigned long last_blocked_load_update_tick;
766 unsigned int has_blocked_load;
767 #endif /* CONFIG_SMP */
768 unsigned int nohz_tick_stopped;
769 atomic_t nohz_flags;
770 #endif /* CONFIG_NO_HZ_COMMON */
771
772 /* capture load from *all* tasks on this CPU: */
773 struct load_weight load;
774 unsigned long nr_load_updates;
775 u64 nr_switches;
776
777 struct cfs_rq cfs;
778 struct rt_rq rt;
779 struct dl_rq dl;
780
781 #ifdef CONFIG_FAIR_GROUP_SCHED
782 /* list of leaf cfs_rq on this CPU: */
783 struct list_head leaf_cfs_rq_list;
784 struct list_head *tmp_alone_branch;
785 #endif /* CONFIG_FAIR_GROUP_SCHED */
786
787 /*
788 * This is part of a global counter where only the total sum
789 * over all CPUs matters. A task can increase this counter on
790 * one CPU and if it got migrated afterwards it may decrease
791 * it on another CPU. Always updated under the runqueue lock:
792 */
793 unsigned long nr_uninterruptible;
794
795 struct task_struct *curr;
796 struct task_struct *idle;
797 struct task_struct *stop;
798 unsigned long next_balance;
799 struct mm_struct *prev_mm;
800
801 unsigned int clock_update_flags;
802 u64 clock;
803 u64 clock_task;
804
805 atomic_t nr_iowait;
806
807 #ifdef CONFIG_SMP
808 struct root_domain *rd;
809 struct sched_domain *sd;
810
811 unsigned long cpu_capacity;
812 unsigned long cpu_capacity_orig;
813
814 struct callback_head *balance_callback;
815
816 unsigned char idle_balance;
817
818 /* For active balancing */
819 int active_balance;
820 int push_cpu;
821 struct cpu_stop_work active_balance_work;
822
823 /* CPU of this runqueue: */
824 int cpu;
825 int online;
826
827 struct list_head cfs_tasks;
828
829 u64 rt_avg;
830 u64 age_stamp;
831 u64 idle_stamp;
832 u64 avg_idle;
833
834 /* This is used to determine avg_idle's max value */
835 u64 max_idle_balance_cost;
836 #endif
837
838 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
839 u64 prev_irq_time;
840 #endif
841 #ifdef CONFIG_PARAVIRT
842 u64 prev_steal_time;
843 #endif
844 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
845 u64 prev_steal_time_rq;
846 #endif
847
848 /* calc_load related fields */
849 unsigned long calc_load_update;
850 long calc_load_active;
851
852 #ifdef CONFIG_SCHED_HRTICK
853 #ifdef CONFIG_SMP
854 int hrtick_csd_pending;
855 call_single_data_t hrtick_csd;
856 #endif
857 struct hrtimer hrtick_timer;
858 #endif
859
860 #ifdef CONFIG_SCHEDSTATS
861 /* latency stats */
862 struct sched_info rq_sched_info;
863 unsigned long long rq_cpu_time;
864 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
865
866 /* sys_sched_yield() stats */
867 unsigned int yld_count;
868
869 /* schedule() stats */
870 unsigned int sched_count;
871 unsigned int sched_goidle;
872
873 /* try_to_wake_up() stats */
874 unsigned int ttwu_count;
875 unsigned int ttwu_local;
876 #endif
877
878 #ifdef CONFIG_SMP
879 struct llist_head wake_list;
880 #endif
881
882 #ifdef CONFIG_CPU_IDLE
883 /* Must be inspected within a rcu lock section */
884 struct cpuidle_state *idle_state;
885 #endif
886 };
887
888 static inline int cpu_of(struct rq *rq)
889 {
890 #ifdef CONFIG_SMP
891 return rq->cpu;
892 #else
893 return 0;
894 #endif
895 }
896
897
898 #ifdef CONFIG_SCHED_SMT
899
900 extern struct static_key_false sched_smt_present;
901
902 extern void __update_idle_core(struct rq *rq);
903
904 static inline void update_idle_core(struct rq *rq)
905 {
906 if (static_branch_unlikely(&sched_smt_present))
907 __update_idle_core(rq);
908 }
909
910 #else
911 static inline void update_idle_core(struct rq *rq) { }
912 #endif
913
914 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
915
916 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
917 #define this_rq() this_cpu_ptr(&runqueues)
918 #define task_rq(p) cpu_rq(task_cpu(p))
919 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
920 #define raw_rq() raw_cpu_ptr(&runqueues)
921
922 static inline u64 __rq_clock_broken(struct rq *rq)
923 {
924 return READ_ONCE(rq->clock);
925 }
926
927 /*
928 * rq::clock_update_flags bits
929 *
930 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
931 * call to __schedule(). This is an optimisation to avoid
932 * neighbouring rq clock updates.
933 *
934 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
935 * in effect and calls to update_rq_clock() are being ignored.
936 *
937 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
938 * made to update_rq_clock() since the last time rq::lock was pinned.
939 *
940 * If inside of __schedule(), clock_update_flags will have been
941 * shifted left (a left shift is a cheap operation for the fast path
942 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
943 *
944  *	if (rq->clock_update_flags >= RQCF_UPDATED)
945  *
946  * to check if %RQCF_UPDATED is set. It'll never be shifted more than
947 * one position though, because the next rq_unpin_lock() will shift it
948 * back.
949 */
950 #define RQCF_REQ_SKIP 0x01
951 #define RQCF_ACT_SKIP 0x02
952 #define RQCF_UPDATED 0x04
953
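/*
 * Illustrative sketch of the check described above (hypothetical helper;
 * RQCF_UPDATED is only ever set under CONFIG_SCHED_DEBUG). Because
 * __schedule() shifts the flags left by one position, testing for equality
 * with RQCF_UPDATED would miss the shifted case, hence the ">=" form.
 */
static inline bool example_rq_clock_was_updated(struct rq *rq)
{
	return rq->clock_update_flags >= RQCF_UPDATED;
}
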
954 static inline void assert_clock_updated(struct rq *rq)
955 {
956 /*
957 * The only reason for not seeing a clock update since the
958 * last rq_pin_lock() is if we're currently skipping updates.
959 */
960 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
961 }
962
963 static inline u64 rq_clock(struct rq *rq)
964 {
965 lockdep_assert_held(&rq->lock);
966 assert_clock_updated(rq);
967
968 return rq->clock;
969 }
970
971 static inline u64 rq_clock_task(struct rq *rq)
972 {
973 lockdep_assert_held(&rq->lock);
974 assert_clock_updated(rq);
975
976 return rq->clock_task;
977 }
978
979 static inline void rq_clock_skip_update(struct rq *rq)
980 {
981 lockdep_assert_held(&rq->lock);
982 rq->clock_update_flags |= RQCF_REQ_SKIP;
983 }
984
985 /*
986  * See rt task throttling, which is the only time a skip
987 * request is cancelled.
988 */
989 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
990 {
991 lockdep_assert_held(&rq->lock);
992 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
993 }
994
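/*
 * Usage sketch (hypothetical caller): a path that knows it is about to go
 * through __schedule() anyway can request that the next clock update be
 * skipped, and a later path may cancel that request if the rq state
 * changes before __schedule() runs. Both require rq->lock.
 */
static inline void example_toggle_clock_skip(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);

	if (skip)
		rq_clock_skip_update(rq);
	else
		rq_clock_cancel_skipupdate(rq);
}
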
995 struct rq_flags {
996 unsigned long flags;
997 struct pin_cookie cookie;
998 #ifdef CONFIG_SCHED_DEBUG
999 /*
1000 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1001 * current pin context is stashed here in case it needs to be
1002 * restored in rq_repin_lock().
1003 */
1004 unsigned int clock_update_flags;
1005 #endif
1006 };
1007
1008 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1009 {
1010 rf->cookie = lockdep_pin_lock(&rq->lock);
1011
1012 #ifdef CONFIG_SCHED_DEBUG
1013 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1014 rf->clock_update_flags = 0;
1015 #endif
1016 }
1017
1018 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1019 {
1020 #ifdef CONFIG_SCHED_DEBUG
1021 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1022 rf->clock_update_flags = RQCF_UPDATED;
1023 #endif
1024
1025 lockdep_unpin_lock(&rq->lock, rf->cookie);
1026 }
1027
1028 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1029 {
1030 lockdep_repin_lock(&rq->lock, rf->cookie);
1031
1032 #ifdef CONFIG_SCHED_DEBUG
1033 /*
1034 * Restore the value we stashed in @rf for this pin context.
1035 */
1036 rq->clock_update_flags |= rf->clock_update_flags;
1037 #endif
1038 }
1039
1040 #ifdef CONFIG_NUMA
1041 enum numa_topology_type {
1042 NUMA_DIRECT,
1043 NUMA_GLUELESS_MESH,
1044 NUMA_BACKPLANE,
1045 };
1046 extern enum numa_topology_type sched_numa_topology_type;
1047 extern int sched_max_numa_distance;
1048 extern bool find_numa_distance(int distance);
1049 #endif
1050
1051 #ifdef CONFIG_NUMA
1052 extern void sched_init_numa(void);
1053 extern void sched_domains_numa_masks_set(unsigned int cpu);
1054 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1055 #else
1056 static inline void sched_init_numa(void) { }
1057 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1058 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1059 #endif
1060
1061 #ifdef CONFIG_NUMA_BALANCING
1062 /* The regions in numa_faults array from task_struct */
1063 enum numa_faults_stats {
1064 NUMA_MEM = 0,
1065 NUMA_CPU,
1066 NUMA_MEMBUF,
1067 NUMA_CPUBUF
1068 };
1069 extern void sched_setnuma(struct task_struct *p, int node);
1070 extern int migrate_task_to(struct task_struct *p, int cpu);
1071 extern int migrate_swap(struct task_struct *, struct task_struct *);
1072 #endif /* CONFIG_NUMA_BALANCING */
1073
1074 #ifdef CONFIG_SMP
1075
1076 static inline void
1077 queue_balance_callback(struct rq *rq,
1078 struct callback_head *head,
1079 void (*func)(struct rq *rq))
1080 {
1081 lockdep_assert_held(&rq->lock);
1082
1083 if (unlikely(head->next))
1084 return;
1085
1086 head->func = (void (*)(struct callback_head *))func;
1087 head->next = rq->balance_callback;
1088 rq->balance_callback = head;
1089 }
1090
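/*
 * Usage sketch (hypothetical names, kept under "#if 0" because a per-CPU
 * definition does not belong in a header): a scheduling class queues work
 * that the core scheduler runs later, after the critical section that
 * queued it, e.g. to push tasks to other CPUs. Re-queueing from the same
 * CPU is idempotent thanks to the head->next check above.
 */
#if 0
static DEFINE_PER_CPU(struct callback_head, example_balance_head);

static void example_push_tasks(struct rq *rq)
{
	/* Invoked later from the balance-callback machinery. */
}

static inline void example_queue_push(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(example_balance_head, cpu_of(rq)),
			       example_push_tasks);
}
#endif
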
1091 extern void sched_ttwu_pending(void);
1092
1093 #define rcu_dereference_check_sched_domain(p) \
1094 rcu_dereference_check((p), \
1095 lockdep_is_held(&sched_domains_mutex))
1096
1097 /*
1098 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1099 * See detach_destroy_domains: synchronize_sched for details.
1100 *
1101 * The domain tree of any CPU may only be accessed from within
1102 * preempt-disabled sections.
1103 */
1104 #define for_each_domain(cpu, __sd) \
1105 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1106 __sd; __sd = __sd->parent)
1107
1108 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1109
1110 /**
1111 * highest_flag_domain - Return highest sched_domain containing flag.
1112 * @cpu: The CPU whose highest level of sched domain is to
1113 * be returned.
1114 * @flag: The flag to check for the highest sched_domain
1115 * for the given CPU.
1116 *
1117 * Returns the highest sched_domain of a CPU which contains the given flag.
1118 */
1119 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1120 {
1121 struct sched_domain *sd, *hsd = NULL;
1122
1123 for_each_domain(cpu, sd) {
1124 if (!(sd->flags & flag))
1125 break;
1126 hsd = sd;
1127 }
1128
1129 return hsd;
1130 }
1131
1132 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1133 {
1134 struct sched_domain *sd;
1135
1136 for_each_domain(cpu, sd) {
1137 if (sd->flags & flag)
1138 break;
1139 }
1140
1141 return sd;
1142 }
1143
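/*
 * Usage sketch: this is roughly how the sd_llc pointer declared below is
 * derived by the topology code - the highest domain whose CPUs still share
 * a last-level cache. Shown for illustration only; the caller must be in a
 * preempt-disabled (or equivalent RCU) section, as for_each_domain()
 * requires.
 */
static inline struct sched_domain *example_find_llc_domain(int cpu)
{
	return highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
}
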
1144 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
1145 DECLARE_PER_CPU(int, sd_llc_size);
1146 DECLARE_PER_CPU(int, sd_llc_id);
1147 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1148 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
1149 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
1150
1151 struct sched_group_capacity {
1152 atomic_t ref;
1153 /*
1154 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
1155 * for a single CPU.
1156 */
1157 unsigned long capacity;
1158 unsigned long min_capacity; /* Min per-CPU capacity in group */
1159 unsigned long next_update;
1160 int imbalance; /* XXX unrelated to capacity but shared group state */
1161
1162 #ifdef CONFIG_SCHED_DEBUG
1163 int id;
1164 #endif
1165
1166 unsigned long cpumask[0]; /* Balance mask */
1167 };
1168
1169 struct sched_group {
1170 struct sched_group *next; /* Must be a circular list */
1171 atomic_t ref;
1172
1173 unsigned int group_weight;
1174 struct sched_group_capacity *sgc;
1175 int asym_prefer_cpu; /* CPU of highest priority in group */
1176
1177 /*
1178 * The CPUs this group covers.
1179 *
1180 * NOTE: this field is variable length. (Allocated dynamically
1181 * by attaching extra space to the end of the structure,
1182 * depending on how many CPUs the kernel has booted up with)
1183 */
1184 unsigned long cpumask[0];
1185 };
1186
1187 static inline struct cpumask *sched_group_span(struct sched_group *sg)
1188 {
1189 return to_cpumask(sg->cpumask);
1190 }
1191
1192 /*
1193 * See build_balance_mask().
1194 */
1195 static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1196 {
1197 return to_cpumask(sg->sgc->cpumask);
1198 }
1199
1200 /**
1201 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
1202 * @group: The group whose first CPU is to be returned.
1203 */
1204 static inline unsigned int group_first_cpu(struct sched_group *group)
1205 {
1206 return cpumask_first(sched_group_span(group));
1207 }
1208
1209 extern int group_balance_cpu(struct sched_group *sg);
1210
1211 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1212 void register_sched_domain_sysctl(void);
1213 void dirty_sched_domain_sysctl(int cpu);
1214 void unregister_sched_domain_sysctl(void);
1215 #else
1216 static inline void register_sched_domain_sysctl(void)
1217 {
1218 }
1219 static inline void dirty_sched_domain_sysctl(int cpu)
1220 {
1221 }
1222 static inline void unregister_sched_domain_sysctl(void)
1223 {
1224 }
1225 #endif
1226
1227 #else
1228
1229 static inline void sched_ttwu_pending(void) { }
1230
1231 #endif /* CONFIG_SMP */
1232
1233 #include "stats.h"
1234 #include "autogroup.h"
1235
1236 #ifdef CONFIG_CGROUP_SCHED
1237
1238 /*
1239  * Return the group to which this task belongs.
1240 *
1241 * We cannot use task_css() and friends because the cgroup subsystem
1242 * changes that value before the cgroup_subsys::attach() method is called,
1243 * therefore we cannot pin it and might observe the wrong value.
1244 *
1245 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1246 * core changes this before calling sched_move_task().
1247 *
1248 * Instead we use a 'copy' which is updated from sched_move_task() while
1249 * holding both task_struct::pi_lock and rq::lock.
1250 */
1251 static inline struct task_group *task_group(struct task_struct *p)
1252 {
1253 return p->sched_task_group;
1254 }
1255
1256 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1257 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1258 {
1259 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1260 struct task_group *tg = task_group(p);
1261 #endif
1262
1263 #ifdef CONFIG_FAIR_GROUP_SCHED
1264 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1265 p->se.cfs_rq = tg->cfs_rq[cpu];
1266 p->se.parent = tg->se[cpu];
1267 #endif
1268
1269 #ifdef CONFIG_RT_GROUP_SCHED
1270 p->rt.rt_rq = tg->rt_rq[cpu];
1271 p->rt.parent = tg->rt_se[cpu];
1272 #endif
1273 }
1274
1275 #else /* CONFIG_CGROUP_SCHED */
1276
1277 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1278 static inline struct task_group *task_group(struct task_struct *p)
1279 {
1280 return NULL;
1281 }
1282
1283 #endif /* CONFIG_CGROUP_SCHED */
1284
1285 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1286 {
1287 set_task_rq(p, cpu);
1288 #ifdef CONFIG_SMP
1289 /*
1290 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1291  * successfully executed on another CPU. We must ensure that updates of
1292 * per-task data have been completed by this moment.
1293 */
1294 smp_wmb();
1295 #ifdef CONFIG_THREAD_INFO_IN_TASK
1296 p->cpu = cpu;
1297 #else
1298 task_thread_info(p)->cpu = cpu;
1299 #endif
1300 p->wake_cpu = cpu;
1301 #endif
1302 }
1303
1304 /*
1305 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1306 */
1307 #ifdef CONFIG_SCHED_DEBUG
1308 # include <linux/static_key.h>
1309 # define const_debug __read_mostly
1310 #else
1311 # define const_debug const
1312 #endif
1313
1314 #define SCHED_FEAT(name, enabled) \
1315 __SCHED_FEAT_##name ,
1316
1317 enum {
1318 #include "features.h"
1319 __SCHED_FEAT_NR,
1320 };
1321
1322 #undef SCHED_FEAT
1323
1324 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1325
1326 /*
1327 * To support run-time toggling of sched features, all the translation units
1328 * (but core.c) reference the sysctl_sched_features defined in core.c.
1329 */
1330 extern const_debug unsigned int sysctl_sched_features;
1331
1332 #define SCHED_FEAT(name, enabled) \
1333 static __always_inline bool static_branch_##name(struct static_key *key) \
1334 { \
1335 return static_key_##enabled(key); \
1336 }
1337
1338 #include "features.h"
1339 #undef SCHED_FEAT
1340
1341 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1342 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1343
1344 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1345
1346 /*
1347 * Each translation unit has its own copy of sysctl_sched_features to allow
1348 * constants propagation at compile time and compiler optimization based on
1349 * features default.
1350 */
1351 #define SCHED_FEAT(name, enabled) \
1352 (1UL << __SCHED_FEAT_##name) * enabled |
1353 static const_debug __maybe_unused unsigned int sysctl_sched_features =
1354 #include "features.h"
1355 0;
1356 #undef SCHED_FEAT
1357
1358 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1359
1360 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1361
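/*
 * Usage sketch (hypothetical helper): regardless of which implementation
 * above is in effect (jump labels or a compile-time constant mask),
 * sched_feat() reads as a plain conditional. HRTICK is one of the features
 * defined in features.h.
 */
static inline bool example_hrtick_feature_enabled(void)
{
	return sched_feat(HRTICK);
}
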
1362 extern struct static_key_false sched_numa_balancing;
1363 extern struct static_key_false sched_schedstats;
1364
1365 static inline u64 global_rt_period(void)
1366 {
1367 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1368 }
1369
1370 static inline u64 global_rt_runtime(void)
1371 {
1372 if (sysctl_sched_rt_runtime < 0)
1373 return RUNTIME_INF;
1374
1375 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1376 }
1377
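/*
 * Worked example (hypothetical helper; assumes the default sysctl values
 * of a 1s period and 950ms runtime): the global RT bandwidth then comes
 * out to 95% of each CPU. RUNTIME_INF means the RT class is not throttled
 * at all.
 */
static inline u64 example_global_rt_percent(void)
{
	if (global_rt_runtime() == RUNTIME_INF)
		return 100;

	return div64_u64(global_rt_runtime() * 100, global_rt_period());
}
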
1378 static inline int task_current(struct rq *rq, struct task_struct *p)
1379 {
1380 return rq->curr == p;
1381 }
1382
1383 static inline int task_running(struct rq *rq, struct task_struct *p)
1384 {
1385 #ifdef CONFIG_SMP
1386 return p->on_cpu;
1387 #else
1388 return task_current(rq, p);
1389 #endif
1390 }
1391
1392 static inline int task_on_rq_queued(struct task_struct *p)
1393 {
1394 return p->on_rq == TASK_ON_RQ_QUEUED;
1395 }
1396
1397 static inline int task_on_rq_migrating(struct task_struct *p)
1398 {
1399 return p->on_rq == TASK_ON_RQ_MIGRATING;
1400 }
1401
1402 /*
1403 * wake flags
1404 */
1405 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
1406 #define WF_FORK 0x02 /* Child wakeup after fork */
1407 #define WF_MIGRATED		0x04		/* Internal use, task got migrated */
1408
1409 /*
1410 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1411 * of tasks with abnormal "nice" values across CPUs the contribution that
1412 * each task makes to its run queue's load is weighted according to its
1413 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1414 * scaled version of the new time slice allocation that they receive on time
1415 * slice expiry etc.
1416 */
1417
1418 #define WEIGHT_IDLEPRIO 3
1419 #define WMULT_IDLEPRIO 1431655765
1420
1421 extern const int sched_prio_to_weight[40];
1422 extern const u32 sched_prio_to_wmult[40];
1423
1424 /*
1425 * {de,en}queue flags:
1426 *
1427 * DEQUEUE_SLEEP - task is no longer runnable
1428 * ENQUEUE_WAKEUP - task just became runnable
1429 *
1430 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1431 * are in a known state which allows modification. Such pairs
1432 * should preserve as much state as possible.
1433 *
1434 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1435 * in the runqueue.
1436 *
1437 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
1438 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1439 * ENQUEUE_MIGRATED - the task was migrated during wakeup
1440 *
1441 */
1442
1443 #define DEQUEUE_SLEEP 0x01
1444 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
1445 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
1446 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
1447
1448 #define ENQUEUE_WAKEUP 0x01
1449 #define ENQUEUE_RESTORE 0x02
1450 #define ENQUEUE_MOVE 0x04
1451 #define ENQUEUE_NOCLOCK 0x08
1452
1453 #define ENQUEUE_HEAD 0x10
1454 #define ENQUEUE_REPLENISH 0x20
1455 #ifdef CONFIG_SMP
1456 #define ENQUEUE_MIGRATED 0x40
1457 #else
1458 #define ENQUEUE_MIGRATED 0x00
1459 #endif
1460
1461 #define RETRY_TASK ((void *)-1UL)
1462
1463 struct sched_class {
1464 const struct sched_class *next;
1465
1466 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1467 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1468 void (*yield_task) (struct rq *rq);
1469 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1470
1471 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1472
1473 /*
1474 * It is the responsibility of the pick_next_task() method that will
1475 * return the next task to call put_prev_task() on the @prev task or
1476 * something equivalent.
1477 *
1478 * May return RETRY_TASK when it finds a higher prio class has runnable
1479 * tasks.
1480 */
1481 struct task_struct * (*pick_next_task)(struct rq *rq,
1482 struct task_struct *prev,
1483 struct rq_flags *rf);
1484 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1485
1486 #ifdef CONFIG_SMP
1487 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1488 void (*migrate_task_rq)(struct task_struct *p);
1489
1490 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1491
1492 void (*set_cpus_allowed)(struct task_struct *p,
1493 const struct cpumask *newmask);
1494
1495 void (*rq_online)(struct rq *rq);
1496 void (*rq_offline)(struct rq *rq);
1497 #endif
1498
1499 void (*set_curr_task)(struct rq *rq);
1500 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1501 void (*task_fork)(struct task_struct *p);
1502 void (*task_dead)(struct task_struct *p);
1503
1504 /*
1505 * The switched_from() call is allowed to drop rq->lock, therefore we
1506	 * cannot assume the switched_from/switched_to pair is serialized by
1507 * rq->lock. They are however serialized by p->pi_lock.
1508 */
1509 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1510 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1511 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1512 int oldprio);
1513
1514 unsigned int (*get_rr_interval)(struct rq *rq,
1515 struct task_struct *task);
1516
1517 void (*update_curr)(struct rq *rq);
1518
1519 #define TASK_SET_GROUP 0
1520 #define TASK_MOVE_GROUP 1
1521
1522 #ifdef CONFIG_FAIR_GROUP_SCHED
1523 void (*task_change_group)(struct task_struct *p, int type);
1524 #endif
1525 };
1526
1527 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1528 {
1529 prev->sched_class->put_prev_task(rq, prev);
1530 }
1531
1532 static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1533 {
1534 curr->sched_class->set_curr_task(rq);
1535 }
1536
1537 #ifdef CONFIG_SMP
1538 #define sched_class_highest (&stop_sched_class)
1539 #else
1540 #define sched_class_highest (&dl_sched_class)
1541 #endif
1542 #define for_each_class(class) \
1543 for (class = sched_class_highest; class; class = class->next)
1544
1545 extern const struct sched_class stop_sched_class;
1546 extern const struct sched_class dl_sched_class;
1547 extern const struct sched_class rt_sched_class;
1548 extern const struct sched_class fair_sched_class;
1549 extern const struct sched_class idle_sched_class;
1550
1551
1552 #ifdef CONFIG_SMP
1553
1554 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1555
1556 extern void trigger_load_balance(struct rq *rq);
1557
1558 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1559
1560 #endif
1561
1562 #ifdef CONFIG_CPU_IDLE
1563 static inline void idle_set_state(struct rq *rq,
1564 struct cpuidle_state *idle_state)
1565 {
1566 rq->idle_state = idle_state;
1567 }
1568
1569 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1570 {
1571 SCHED_WARN_ON(!rcu_read_lock_held());
1572
1573 return rq->idle_state;
1574 }
1575 #else
1576 static inline void idle_set_state(struct rq *rq,
1577 struct cpuidle_state *idle_state)
1578 {
1579 }
1580
1581 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1582 {
1583 return NULL;
1584 }
1585 #endif
1586
1587 extern void schedule_idle(void);
1588
1589 extern void sysrq_sched_debug_show(void);
1590 extern void sched_init_granularity(void);
1591 extern void update_max_interval(void);
1592
1593 extern void init_sched_dl_class(void);
1594 extern void init_sched_rt_class(void);
1595 extern void init_sched_fair_class(void);
1596
1597 extern void reweight_task(struct task_struct *p, int prio);
1598
1599 extern void resched_curr(struct rq *rq);
1600 extern void resched_cpu(int cpu);
1601
1602 extern struct rt_bandwidth def_rt_bandwidth;
1603 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1604
1605 extern struct dl_bandwidth def_dl_bandwidth;
1606 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1607 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1608 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1609 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1610
1611 #define BW_SHIFT 20
1612 #define BW_UNIT (1 << BW_SHIFT)
1613 #define RATIO_SHIFT 8
1614 unsigned long to_ratio(u64 period, u64 runtime);
1615
1616 extern void init_entity_runnable_average(struct sched_entity *se);
1617 extern void post_init_entity_util_avg(struct sched_entity *se);
1618
1619 #ifdef CONFIG_NO_HZ_FULL
1620 extern bool sched_can_stop_tick(struct rq *rq);
1621 extern int __init sched_tick_offload_init(void);
1622
1623 /*
1624 * Tick may be needed by tasks in the runqueue depending on their policy and
1625  * requirements. If the tick is needed, let's send the target an IPI to kick it out of
1626 * nohz mode if necessary.
1627 */
1628 static inline void sched_update_tick_dependency(struct rq *rq)
1629 {
1630 int cpu;
1631
1632 if (!tick_nohz_full_enabled())
1633 return;
1634
1635 cpu = cpu_of(rq);
1636
1637 if (!tick_nohz_full_cpu(cpu))
1638 return;
1639
1640 if (sched_can_stop_tick(rq))
1641 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1642 else
1643 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1644 }
1645 #else
1646 static inline int sched_tick_offload_init(void) { return 0; }
1647 static inline void sched_update_tick_dependency(struct rq *rq) { }
1648 #endif
1649
1650 static inline void add_nr_running(struct rq *rq, unsigned count)
1651 {
1652 unsigned prev_nr = rq->nr_running;
1653
1654 rq->nr_running = prev_nr + count;
1655
1656 if (prev_nr < 2 && rq->nr_running >= 2) {
1657 #ifdef CONFIG_SMP
1658 if (!rq->rd->overload)
1659 rq->rd->overload = true;
1660 #endif
1661 }
1662
1663 sched_update_tick_dependency(rq);
1664 }
1665
1666 static inline void sub_nr_running(struct rq *rq, unsigned count)
1667 {
1668 rq->nr_running -= count;
1669 /* Check if we still need preemption */
1670 sched_update_tick_dependency(rq);
1671 }
1672
1673 extern void update_rq_clock(struct rq *rq);
1674
1675 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1676 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1677
1678 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1679
1680 extern const_debug unsigned int sysctl_sched_time_avg;
1681 extern const_debug unsigned int sysctl_sched_nr_migrate;
1682 extern const_debug unsigned int sysctl_sched_migration_cost;
1683
1684 static inline u64 sched_avg_period(void)
1685 {
1686 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1687 }
1688
1689 #ifdef CONFIG_SCHED_HRTICK
1690
1691 /*
1692 * Use hrtick when:
1693 * - enabled by features
1694 * - hrtimer is actually high res
1695 */
1696 static inline int hrtick_enabled(struct rq *rq)
1697 {
1698 if (!sched_feat(HRTICK))
1699 return 0;
1700 if (!cpu_active(cpu_of(rq)))
1701 return 0;
1702 return hrtimer_is_hres_active(&rq->hrtick_timer);
1703 }
1704
1705 void hrtick_start(struct rq *rq, u64 delay);
1706
1707 #else
1708
1709 static inline int hrtick_enabled(struct rq *rq)
1710 {
1711 return 0;
1712 }
1713
1714 #endif /* CONFIG_SCHED_HRTICK */
1715
1716 #ifndef arch_scale_freq_capacity
1717 static __always_inline
1718 unsigned long arch_scale_freq_capacity(int cpu)
1719 {
1720 return SCHED_CAPACITY_SCALE;
1721 }
1722 #endif
1723
1724 #ifdef CONFIG_SMP
1725 extern void sched_avg_update(struct rq *rq);
1726
1727 #ifndef arch_scale_cpu_capacity
1728 static __always_inline
1729 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1730 {
1731 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1732 return sd->smt_gain / sd->span_weight;
1733
1734 return SCHED_CAPACITY_SCALE;
1735 }
1736 #endif
1737
1738 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1739 {
1740 rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
1741 sched_avg_update(rq);
1742 }
1743 #else
1744 #ifndef arch_scale_cpu_capacity
1745 static __always_inline
1746 unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
1747 {
1748 return SCHED_CAPACITY_SCALE;
1749 }
1750 #endif
1751 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1752 static inline void sched_avg_update(struct rq *rq) { }
1753 #endif
1754
1755 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1756 __acquires(rq->lock);
1757
1758 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1759 __acquires(p->pi_lock)
1760 __acquires(rq->lock);
1761
1762 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1763 __releases(rq->lock)
1764 {
1765 rq_unpin_lock(rq, rf);
1766 raw_spin_unlock(&rq->lock);
1767 }
1768
1769 static inline void
1770 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1771 __releases(rq->lock)
1772 __releases(p->pi_lock)
1773 {
1774 rq_unpin_lock(rq, rf);
1775 raw_spin_unlock(&rq->lock);
1776 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1777 }
1778
1779 static inline void
1780 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1781 __acquires(rq->lock)
1782 {
1783 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1784 rq_pin_lock(rq, rf);
1785 }
1786
1787 static inline void
1788 rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1789 __acquires(rq->lock)
1790 {
1791 raw_spin_lock_irq(&rq->lock);
1792 rq_pin_lock(rq, rf);
1793 }
1794
1795 static inline void
1796 rq_lock(struct rq *rq, struct rq_flags *rf)
1797 __acquires(rq->lock)
1798 {
1799 raw_spin_lock(&rq->lock);
1800 rq_pin_lock(rq, rf);
1801 }
1802
1803 static inline void
1804 rq_relock(struct rq *rq, struct rq_flags *rf)
1805 __acquires(rq->lock)
1806 {
1807 raw_spin_lock(&rq->lock);
1808 rq_repin_lock(rq, rf);
1809 }
1810
1811 static inline void
1812 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1813 __releases(rq->lock)
1814 {
1815 rq_unpin_lock(rq, rf);
1816 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1817 }
1818
1819 static inline void
1820 rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1821 __releases(rq->lock)
1822 {
1823 rq_unpin_lock(rq, rf);
1824 raw_spin_unlock_irq(&rq->lock);
1825 }
1826
1827 static inline void
1828 rq_unlock(struct rq *rq, struct rq_flags *rf)
1829 __releases(rq->lock)
1830 {
1831 rq_unpin_lock(rq, rf);
1832 raw_spin_unlock(&rq->lock);
1833 }
1834
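/*
 * Usage sketch (hypothetical helper): the rq_lock()/rq_unlock() family
 * pairs the raw spinlock with lockdep pinning, so the clock-update debug
 * state in struct rq_flags travels with the lock. A typical critical
 * section updates the clock first, then works with a consistent rq->clock.
 */
static inline void example_touch_rq_clock(struct rq *rq)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);
	/* ... operate on rq with a fresh rq_clock(rq) ... */
	rq_unlock_irqrestore(rq, &rf);
}
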
1835 #ifdef CONFIG_SMP
1836 #ifdef CONFIG_PREEMPT
1837
1838 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1839
1840 /*
1841 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1842 * way at the expense of forcing extra atomic operations in all
1843 * invocations. This assures that the double_lock is acquired using the
1844 * same underlying policy as the spinlock_t on this architecture, which
1845 * reduces latency compared to the unfair variant below. However, it
1846 * also adds more overhead and therefore may reduce throughput.
1847 */
1848 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1849 __releases(this_rq->lock)
1850 __acquires(busiest->lock)
1851 __acquires(this_rq->lock)
1852 {
1853 raw_spin_unlock(&this_rq->lock);
1854 double_rq_lock(this_rq, busiest);
1855
1856 return 1;
1857 }
1858
1859 #else
1860 /*
1861 * Unfair double_lock_balance: Optimizes throughput at the expense of
1862 * latency by eliminating extra atomic operations when the locks are
1863 * already in proper order on entry. This favors lower CPU-ids and will
1864 * grant the double lock to lower CPUs over higher ids under contention,
1865 * regardless of entry order into the function.
1866 */
1867 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1868 __releases(this_rq->lock)
1869 __acquires(busiest->lock)
1870 __acquires(this_rq->lock)
1871 {
1872 int ret = 0;
1873
1874 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1875 if (busiest < this_rq) {
1876 raw_spin_unlock(&this_rq->lock);
1877 raw_spin_lock(&busiest->lock);
1878 raw_spin_lock_nested(&this_rq->lock,
1879 SINGLE_DEPTH_NESTING);
1880 ret = 1;
1881 } else
1882 raw_spin_lock_nested(&busiest->lock,
1883 SINGLE_DEPTH_NESTING);
1884 }
1885 return ret;
1886 }
1887
1888 #endif /* CONFIG_PREEMPT */
1889
1890 /*
1891 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1892 */
1893 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1894 {
1895 if (unlikely(!irqs_disabled())) {
1896 /* printk() doesn't work well under rq->lock */
1897 raw_spin_unlock(&this_rq->lock);
1898 BUG_ON(1);
1899 }
1900
1901 return _double_lock_balance(this_rq, busiest);
1902 }
1903
1904 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1905 __releases(busiest->lock)
1906 {
1907 raw_spin_unlock(&busiest->lock);
1908 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1909 }
1910
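/*
 * Illustrative sketch: typical balancing pattern around the helpers above.
 * The caller already holds this_rq->lock; a non-zero return from
 * double_lock_balance() means that lock was dropped and re-taken, so any
 * state sampled before the call must be re-validated. The helper below is
 * hypothetical and only shows the pattern.
 */
#if 0	/* example only, not compiled */
static void example_pull_from(struct rq *this_rq, struct rq *busiest)
{
	lockdep_assert_held(&this_rq->lock);

	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was released while acquiring busiest->lock:
		 * re-check whatever made 'busiest' worth pulling from.
		 */
	}

	/* ... move tasks from busiest to this_rq ... */

	double_unlock_balance(this_rq, busiest);
}
#endif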
1911 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1912 {
1913 if (l1 > l2)
1914 swap(l1, l2);
1915
1916 spin_lock(l1);
1917 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1918 }
1919
1920 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1921 {
1922 if (l1 > l2)
1923 swap(l1, l2);
1924
1925 spin_lock_irq(l1);
1926 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1927 }
1928
1929 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1930 {
1931 if (l1 > l2)
1932 swap(l1, l2);
1933
1934 raw_spin_lock(l1);
1935 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1936 }
1937
1938 /*
1939 * double_rq_lock - safely lock two runqueues
1940 *
1941 * Note this does not disable interrupts like task_rq_lock,
1942 * you need to do so manually before calling.
1943 */
1944 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1945 __acquires(rq1->lock)
1946 __acquires(rq2->lock)
1947 {
1948 BUG_ON(!irqs_disabled());
1949 if (rq1 == rq2) {
1950 raw_spin_lock(&rq1->lock);
1951 __acquire(rq2->lock); /* Fake it out ;) */
1952 } else {
1953 if (rq1 < rq2) {
1954 raw_spin_lock(&rq1->lock);
1955 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1956 } else {
1957 raw_spin_lock(&rq2->lock);
1958 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1959 }
1960 }
1961 }
1962
1963 /*
1964 * double_rq_unlock - safely unlock two runqueues
1965 *
1966 * Note this does not restore interrupts like task_rq_unlock,
1967 * you need to do so manually after calling.
1968 */
1969 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1970 __releases(rq1->lock)
1971 __releases(rq2->lock)
1972 {
1973 raw_spin_unlock(&rq1->lock);
1974 if (rq1 != rq2)
1975 raw_spin_unlock(&rq2->lock);
1976 else
1977 __release(rq2->lock);
1978 }
1979
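/*
 * Illustrative sketch: double_rq_lock()/double_rq_unlock() leave the IRQ
 * state alone, so callers bracket them explicitly. The helper below is
 * hypothetical and only shows the pattern.
 */
#if 0	/* example only, not compiled */
static void example_lock_both(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);
	double_rq_lock(rq1, rq2);	/* ordered by rq address, no deadlock */
	/* ... operate on both runqueues ... */
	double_rq_unlock(rq1, rq2);
	local_irq_restore(flags);
}
#endif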
1980 extern void set_rq_online (struct rq *rq);
1981 extern void set_rq_offline(struct rq *rq);
1982 extern bool sched_smp_initialized;
1983
1984 #else /* CONFIG_SMP */
1985
1986 /*
1987 * double_rq_lock - safely lock two runqueues
1988 *
1989 * Note this does not disable interrupts like task_rq_lock,
1990 * you need to do so manually before calling.
1991 */
1992 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1993 __acquires(rq1->lock)
1994 __acquires(rq2->lock)
1995 {
1996 BUG_ON(!irqs_disabled());
1997 BUG_ON(rq1 != rq2);
1998 raw_spin_lock(&rq1->lock);
1999 __acquire(rq2->lock); /* Fake it out ;) */
2000 }
2001
2002 /*
2003 * double_rq_unlock - safely unlock two runqueues
2004 *
2005 * Note this does not restore interrupts like task_rq_unlock,
2006 * you need to do so manually after calling.
2007 */
2008 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2009 __releases(rq1->lock)
2010 __releases(rq2->lock)
2011 {
2012 BUG_ON(rq1 != rq2);
2013 raw_spin_unlock(&rq1->lock);
2014 __release(rq2->lock);
2015 }
2016
2017 #endif
2018
2019 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2020 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2021
2022 #ifdef CONFIG_SCHED_DEBUG
2023 extern bool sched_debug_enabled;
2024
2025 extern void print_cfs_stats(struct seq_file *m, int cpu);
2026 extern void print_rt_stats(struct seq_file *m, int cpu);
2027 extern void print_dl_stats(struct seq_file *m, int cpu);
2028 extern void
2029 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2030 #ifdef CONFIG_NUMA_BALANCING
2031 extern void
2032 show_numa_stats(struct task_struct *p, struct seq_file *m);
2033 extern void
2034 print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2035 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2036 #endif /* CONFIG_NUMA_BALANCING */
2037 #endif /* CONFIG_SCHED_DEBUG */
2038
2039 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2040 extern void init_rt_rq(struct rt_rq *rt_rq);
2041 extern void init_dl_rq(struct dl_rq *dl_rq);
2042
2043 extern void cfs_bandwidth_usage_inc(void);
2044 extern void cfs_bandwidth_usage_dec(void);
2045
2046 #ifdef CONFIG_NO_HZ_COMMON
2047 #define NOHZ_BALANCE_KICK_BIT 0
2048 #define NOHZ_STATS_KICK_BIT 1
2049
2050 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2051 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2052
2053 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2054
2055 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2056
2057 extern void nohz_balance_exit_idle(struct rq *rq);
2058 #else
2059 static inline void nohz_balance_exit_idle(struct rq *rq) { }
2060 #endif
2061
2062
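/*
 * __dl_update() applies a deadline-bandwidth delta @bw to the extra_bw
 * accounting of every runqueue covered by @dl_b: on SMP that is each active
 * CPU in the root domain the dl_bw is embedded in, on UP the single dl_rq.
 */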
2063 #ifdef CONFIG_SMP
2064 static inline
2065 void __dl_update(struct dl_bw *dl_b, s64 bw)
2066 {
2067 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2068 int i;
2069
2070 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2071 "sched RCU must be held");
2072 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2073 struct rq *rq = cpu_rq(i);
2074
2075 rq->dl.extra_bw += bw;
2076 }
2077 }
2078 #else
2079 static inline
2080 void __dl_update(struct dl_bw *dl_b, s64 bw)
2081 {
2082 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2083
2084 dl->extra_bw += bw;
2085 }
2086 #endif
2087
2088
2089 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2090 struct irqtime {
2091 u64 total;
2092 u64 tick_delta;
2093 u64 irq_start_time;
2094 struct u64_stats_sync sync;
2095 };
2096
2097 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2098
2099 /*
2100 * Returns the irqtime minus the softirq time computed by ksoftirqd.
2101 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
2102 * subtracted from it and would never move forward.
2103 */
2104 static inline u64 irq_time_read(int cpu)
2105 {
2106 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2107 unsigned int seq;
2108 u64 total;
2109
2110 do {
2111 seq = __u64_stats_fetch_begin(&irqtime->sync);
2112 total = irqtime->total;
2113 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2114
2115 return total;
2116 }
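/*
 * Illustrative sketch: irq_time_read() returns an accumulating counter, so
 * consumers typically work with the delta between two snapshots. The helper
 * below is hypothetical and only shows the pattern.
 */
#if 0	/* example only, not compiled */
static u64 example_irq_time_delta(int cpu, u64 *last)
{
	u64 now = irq_time_read(cpu);
	u64 delta = now - *last;

	*last = now;
	return delta;
}
#endif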
2117 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2118
2119 #ifdef CONFIG_CPU_FREQ
2120 DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2121
2122 /**
2123 * cpufreq_update_util - Take a note about CPU utilization changes.
2124 * @rq: Runqueue to carry out the update for.
2125 * @flags: Update reason flags.
2126 *
2127 * This function is called by the scheduler on the CPU whose utilization is
2128 * being updated.
2129 *
2130 * It can only be called from RCU-sched read-side critical sections.
2131 *
2132 * The way cpufreq is currently arranged requires it to evaluate the CPU
2133 * performance state (frequency/voltage) on a regular basis to prevent it from
2134 * being stuck in a completely inadequate performance level for too long.
2135 * That is not guaranteed to happen if the updates are only triggered from CFS
2136 * and DL, though, because those updates may stop coming in altogether
2137 * while only RT tasks are active.
2138 *
2139 * As a workaround for that issue, this function is called periodically by the
2140 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2141 * but that really is a band-aid. Going forward it should be replaced with
2142 * solutions targeted more specifically at RT tasks.
2143 */
2144 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2145 {
2146 struct update_util_data *data;
2147
2148 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2149 cpu_of(rq)));
2150 if (data)
2151 data->func(data, rq_clock(rq), flags);
2152 }
2153 #else
2154 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2155 #endif /* CONFIG_CPU_FREQ */
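/*
 * Illustrative sketch: a scheduling class would invoke the hook right after
 * the point where the runqueue's utilization changed, while holding
 * rq->lock (which already implies an RCU-sched read-side critical section).
 * The helper below is hypothetical; real callers pass SCHED_CPUFREQ_* flags
 * as appropriate instead of 0.
 */
#if 0	/* example only, not compiled */
static void example_util_changed(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);

	/* ... rq utilization was just updated ... */
	cpufreq_update_util(rq, 0);
}
#endif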
2156
2157 #ifdef arch_scale_freq_capacity
2158 # ifndef arch_scale_freq_invariant
2159 # define arch_scale_freq_invariant() true
2160 # endif
2161 #else
2162 # define arch_scale_freq_invariant() false
2163 #endif
2164
2165 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
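/*
 * cpu_util_dl() reports the deadline class's running bandwidth scaled to
 * SCHED_CAPACITY_SCALE units; cpu_util_cfs() reports the CFS utilization,
 * taking the larger of the PELT average and the util_est enqueued estimate
 * when the UTIL_EST feature is enabled.
 */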
2166 static inline unsigned long cpu_util_dl(struct rq *rq)
2167 {
2168 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2169 }
2170
2171 static inline unsigned long cpu_util_cfs(struct rq *rq)
2172 {
2173 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2174
2175 if (sched_feat(UTIL_EST)) {
2176 util = max_t(unsigned long, util,
2177 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2178 }
2179
2180 return util;
2181 }
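/*
 * Illustrative sketch: a schedutil-style consumer might combine the two
 * signals and cap the sum, here simply at SCHED_CAPACITY_SCALE. The capping
 * choice is an assumption for illustration, not the governor's actual
 * aggregation.
 */
#if 0	/* example only, not compiled */
static unsigned long example_total_util(struct rq *rq)
{
	unsigned long util = cpu_util_cfs(rq) + cpu_util_dl(rq);

	return min_t(unsigned long, util, SCHED_CAPACITY_SCALE);
}
#endif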
2182 #endif