/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/clock.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/mm.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sched/init.h>

#include <linux/u64_stats_sync.h>
#include <linux/kernel_stat.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper task-group
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down() to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
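
/*
 * Illustrative sketch of the calibration above (assuming
 * SCHED_FIXEDPOINT_SHIFT is 10, as defined in <linux/sched.h>): the nice-0
 * weight visible to users is 1024, and on 64-bit scale_load() shifts it up
 * by another 10 bits:
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(scale_load(w)) == w	(for any weight w)
 *
 * On 32-bit both macros are the identity and NICE_0_LOAD is simply 1024.
 */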

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control file.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

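/*
 * Illustrative sketch of the admission test above (a worked example, not
 * code from this file): bandwidths are stored in BW_SHIFT (20-bit) fixed
 * point, so a 95% per-CPU limit is roughly dl_b->bw = 0.95 * BW_UNIT. On a
 * 4-CPU root domain with 3.5 CPUs' worth already allocated
 * (total_bw = 3.5 * BW_UNIT), admitting a task with new_bw = 0.3 * BW_UNIT
 * evaluates (all in BW_UNIT units):
 *
 *	0.95 * 4 < 3.5 - 0 + 0.3
 *	3.8	 < 3.8			-> false: no overflow, admit.
 */
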
void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int sched_dl_overflow(struct task_struct *p, int policy,
			     const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern void __dl_clear_params(struct task_struct *p);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_task_can_attach(struct task_struct *p,
			      const struct cpumask *cs_cpus_allowed);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
					const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
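
/*
 * Illustrative sketch (hypothetical visitor, for this example only): a
 * caller that counts task groups. Visitors return 0 to continue the walk;
 * tg_nop() can stand in for the direction you don't care about.
 *
 *	static int tg_count(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count, tg_nop, &nr);
 *	rcu_read_unlock();
 */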

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root_cached tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
	unsigned long propagate_avg;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached root;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached pushable_dl_tasks_root;
#else
	struct dl_bw dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64 running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64 this_bw;
	u64 extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64 bw_ratio;
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or thread migration code) must acquire the rq locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_update_flags;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	call_single_data_t hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}
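
/*
 * Illustrative sketch of the flag dance described above (an example, not a
 * specific kernel code path): a caller that knows the clock will be updated
 * again shortly can ask for the next update to be skipped:
 *
 *	rq_clock_skip_update(rq, true);	// sets RQCF_REQ_SKIP
 *
 * __schedule() then shifts the flags left, promoting RQCF_REQ_SKIP (0x01)
 * to RQCF_ACT_SKIP (0x02), so the next update_rq_clock() becomes a no-op
 * until rq_unpin_lock() shifts the flags back.
 */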

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

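/*
 * Illustrative sketch (an example idiom, not a quote from the scheduler):
 * the pin cookie catches code that drops rq->lock while a caller up the
 * chain assumes it stayed held. A callee that legitimately needs to drop
 * the lock brackets the unlock with unpin/repin:
 *
 *	rq_unpin_lock(rq, rf);
 *	raw_spin_unlock(&rq->lock);
 *	...				// rq->lock released here
 *	raw_spin_lock(&rq->lock);
 *	rq_repin_lock(rq, rf);
 */
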
#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

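/*
 * Illustrative sketch (hypothetical names, for this example only): a
 * scheduling class typically embeds a per-CPU callback head and uses it
 * to defer balancing work until rq->lock is released:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_push_head);
 *
 *	// with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu),
 *			       my_push_callback);
 *
 * where my_push_callback() is a function of type void (*)(struct rq *).
 */
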
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

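/*
 * Illustrative sketch (an example usage, not code from this file): this is
 * how a per-CPU pointer like sd_llc below is derived when the topology is
 * (re)built - the widest domain that still shares a last-level cache:
 *
 *	rcu_read_lock();
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_read_unlock();
 */
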
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long min_capacity; /* Min per-CPU capacity in group */
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int id;
#endif

	unsigned long cpumask[0]; /* balance mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	int asym_prefer_cpu;		/* cpu of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

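/*
 * Illustrative sketch of the expansion above (assuming features.h contains
 * SCHED_FEAT(HRTICK, false), which it does in this tree): the enum gains
 * __SCHED_FEAT_HRTICK, and a test site simply writes
 *
 *	if (sched_feat(HRTICK))
 *		...
 *
 * which compiles to a static-key branch when jump labels are available,
 * and to a bitmask test against sysctl_sched_features otherwise.
 */
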
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

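/*
 * Worked example (using the default sysctl values, an assumption that holds
 * unless /proc/sys/kernel/sched_rt_{period,runtime}_us were tuned):
 * sysctl_sched_rt_period = 1000000 and sysctl_sched_rt_runtime = 950000,
 * so global_rt_period() returns 1e9 ns and global_rt_runtime() 9.5e8 ns:
 * RT tasks may consume at most 95% of each second, leaving 5% for non-RT
 * work. Writing -1 to sched_rt_runtime_us yields RUNTIME_INF.
 */
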
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct rq_flags *rf);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP  0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
#else
#define sched_class_highest (&dl_sched_class)
#endif
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

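/*
 * Illustrative sketch (an example of the idiom, not a quote from core.c):
 * the core scheduler polls each class in priority order until one of them
 * supplies a runnable task:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, rf);
 *		if (p) {
 *			if (unlikely(p == RETRY_TASK))
 *				goto again;
 *			return p;
 *		}
 *	}
 */
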
#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)
#define RATIO_SHIFT	8
unsigned long to_ratio(u64 period, u64 runtime);

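/*
 * Worked example for to_ratio() (based on its definition in core.c, where
 * the result is runtime/period in BW_SHIFT fixed point): a SCHED_DEADLINE
 * task with runtime = 10ms and period = 100ms gets
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *		== (10 << 20) / 100 == ~104857, i.e. ~0.1 * BW_UNIT,
 *
 * one tenth of a CPU in the units used by struct dl_bw above.
 */
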
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it
 * out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *	- enabled by features
 *	- hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

8a8c69c3
PZ
1711static inline void
1712rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1713 __acquires(rq->lock)
1714{
1715 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1716 rq_pin_lock(rq, rf);
1717}
1718
1719static inline void
1720rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1721 __acquires(rq->lock)
1722{
1723 raw_spin_lock_irq(&rq->lock);
1724 rq_pin_lock(rq, rf);
1725}
1726
1727static inline void
1728rq_lock(struct rq *rq, struct rq_flags *rf)
1729 __acquires(rq->lock)
1730{
1731 raw_spin_lock(&rq->lock);
1732 rq_pin_lock(rq, rf);
1733}
1734
1735static inline void
1736rq_relock(struct rq *rq, struct rq_flags *rf)
1737 __acquires(rq->lock)
1738{
1739 raw_spin_lock(&rq->lock);
1740 rq_repin_lock(rq, rf);
1741}
1742
1743static inline void
1744rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1745 __releases(rq->lock)
1746{
1747 rq_unpin_lock(rq, rf);
1748 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1749}
1750
1751static inline void
1752rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1753 __releases(rq->lock)
1754{
1755 rq_unpin_lock(rq, rf);
1756 raw_spin_unlock_irq(&rq->lock);
1757}
1758
1759static inline void
1760rq_unlock(struct rq *rq, struct rq_flags *rf)
1761 __releases(rq->lock)
1762{
1763 rq_unpin_lock(rq, rf);
1764 raw_spin_unlock(&rq->lock);
1765}
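/*
 * Usage sketch (hypothetical helper): rq_lock_irqsave()/rq_unlock_irqrestore()
 * is the safe pairing when the caller may run with interrupts either enabled
 * or disabled; rf.flags carries the saved interrupt state across the pair.
 */
static inline u64 example_read_rq_clock(struct rq *rq)
{
	struct rq_flags rf;
	u64 now;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);	/* the rq clock only advances under rq->lock */
	now = rq_clock(rq);
	rq_unlock_irqrestore(rq, &rf);

	return now;
}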
1766
029632fb
PZ
1767#ifdef CONFIG_SMP
1768#ifdef CONFIG_PREEMPT
1769
1770static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1771
1772/*
1773 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1774 * way at the expense of forcing extra atomic operations in all
1775 * invocations. This ensures that the double_lock is acquired using the
1776 * same underlying policy as the spinlock_t on this architecture, which
1777 * reduces latency compared to the unfair variant below. However, it
1778 * also adds more overhead and therefore may reduce throughput.
1779 */
1780static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1781 __releases(this_rq->lock)
1782 __acquires(busiest->lock)
1783 __acquires(this_rq->lock)
1784{
1785 raw_spin_unlock(&this_rq->lock);
1786 double_rq_lock(this_rq, busiest);
1787
1788 return 1;
1789}
1790
1791#else
1792/*
1793 * Unfair double_lock_balance: Optimizes throughput at the expense of
1794 * latency by eliminating extra atomic operations when the locks are
1795 * already in proper order on entry. This favors lower cpu-ids and will
1796 * grant the double lock to lower cpus over higher ids under contention,
1797 * regardless of entry order into the function.
1798 */
1799static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1800 __releases(this_rq->lock)
1801 __acquires(busiest->lock)
1802 __acquires(this_rq->lock)
1803{
1804 int ret = 0;
1805
1806 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1807 if (busiest < this_rq) {
1808 raw_spin_unlock(&this_rq->lock);
1809 raw_spin_lock(&busiest->lock);
1810 raw_spin_lock_nested(&this_rq->lock,
1811 SINGLE_DEPTH_NESTING);
1812 ret = 1;
1813 } else
1814 raw_spin_lock_nested(&busiest->lock,
1815 SINGLE_DEPTH_NESTING);
1816 }
1817 return ret;
1818}
1819
1820#endif /* CONFIG_PREEMPT */
1821
1822/*
1823 * double_lock_balance - lock the busiest runqueue; this_rq is locked already.
1824 */
1825static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1826{
1827 if (unlikely(!irqs_disabled())) {
1828 /* printk() doesn't work well under rq->lock */
1829 raw_spin_unlock(&this_rq->lock);
1830 BUG_ON(1);
1831 }
1832
1833 return _double_lock_balance(this_rq, busiest);
1834}
1835
1836static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1837 __releases(busiest->lock)
1838{
1839 raw_spin_unlock(&busiest->lock);
1840 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1841}
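/*
 * Usage sketch (hypothetical, modeled on the RT/DL push paths): the caller
 * already holds this_rq->lock with interrupts off.  A non-zero return from
 * double_lock_balance() means this_rq->lock was dropped and retaken, so any
 * state derived under it must be revalidated before use.
 */
static inline void example_balance_step(struct rq *this_rq, struct rq *busiest)
{
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was released: re-check cached state here */
	}

	/* ... pull or push work between the two runqueues ... */

	double_unlock_balance(this_rq, busiest);
}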
1842
74602315
PZ
1843static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1844{
1845 if (l1 > l2)
1846 swap(l1, l2);
1847
1848 spin_lock(l1);
1849 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1850}
1851
60e69eed
MG
1852static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1853{
1854 if (l1 > l2)
1855 swap(l1, l2);
1856
1857 spin_lock_irq(l1);
1858 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1859}
1860
74602315
PZ
1861static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1862{
1863 if (l1 > l2)
1864 swap(l1, l2);
1865
1866 raw_spin_lock(l1);
1867 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1868}
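/*
 * Example: the pointer-order rule above is what prevents ABBA deadlock.  If
 * one CPU calls double_lock(a, b) while another calls double_lock(b, a), both
 * acquire the lower-addressed lock first; SINGLE_DEPTH_NESTING tells lockdep
 * that the second acquisition within the same lock class is intentional.
 */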
1869
029632fb
PZ
1870/*
1871 * double_rq_lock - safely lock two runqueues
1872 *
1873 * Note this does not disable interrupts like task_rq_lock;
1874 * you need to do so manually before calling.
1875 */
1876static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1877 __acquires(rq1->lock)
1878 __acquires(rq2->lock)
1879{
1880 BUG_ON(!irqs_disabled());
1881 if (rq1 == rq2) {
1882 raw_spin_lock(&rq1->lock);
1883 __acquire(rq2->lock); /* Fake it out ;) */
1884 } else {
1885 if (rq1 < rq2) {
1886 raw_spin_lock(&rq1->lock);
1887 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1888 } else {
1889 raw_spin_lock(&rq2->lock);
1890 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1891 }
1892 }
1893}
1894
1895/*
1896 * double_rq_unlock - safely unlock two runqueues
1897 *
1898 * Note this does not restore interrupts like task_rq_unlock;
1899 * you need to do so manually after calling.
1900 */
1901static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1902 __releases(rq1->lock)
1903 __releases(rq2->lock)
1904{
1905 raw_spin_unlock(&rq1->lock);
1906 if (rq1 != rq2)
1907 raw_spin_unlock(&rq2->lock);
1908 else
1909 __release(rq2->lock);
1910}
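/*
 * Usage sketch (hypothetical helper): double_rq_lock() requires interrupts
 * to be off already, so a bare caller brackets it explicitly.
 */
static inline void example_with_both_rqs(struct rq *src, struct rq *dst)
{
	local_irq_disable();
	double_rq_lock(src, dst);
	/* ... both runqueues are stable here ... */
	double_rq_unlock(src, dst);
	local_irq_enable();
}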
1911
f2cb1360
IM
1912extern void set_rq_online(struct rq *rq);
1913extern void set_rq_offline(struct rq *rq);
1914extern bool sched_smp_initialized;
1915
029632fb
PZ
1916#else /* CONFIG_SMP */
1917
1918/*
1919 * double_rq_lock - safely lock two runqueues
1920 *
1921 * Note this does not disable interrupts like task_rq_lock;
1922 * you need to do so manually before calling.
1923 */
1924static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1925 __acquires(rq1->lock)
1926 __acquires(rq2->lock)
1927{
1928 BUG_ON(!irqs_disabled());
1929 BUG_ON(rq1 != rq2);
1930 raw_spin_lock(&rq1->lock);
1931 __acquire(rq2->lock); /* Fake it out ;) */
1932}
1933
1934/*
1935 * double_rq_unlock - safely unlock two runqueues
1936 *
1937 * Note this does not restore interrupts like task_rq_unlock;
1938 * you need to do so manually after calling.
1939 */
1940static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1941 __releases(rq1->lock)
1942 __releases(rq2->lock)
1943{
1944 BUG_ON(rq1 != rq2);
1945 raw_spin_unlock(&rq1->lock);
1946 __release(rq2->lock);
1947}
1948
1949#endif
1950
1951extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1952extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
6b55c965
SD
1953
1954#ifdef CONFIG_SCHED_DEBUG
9469eb01
PZ
1955extern bool sched_debug_enabled;
1956
029632fb
PZ
1957extern void print_cfs_stats(struct seq_file *m, int cpu);
1958extern void print_rt_stats(struct seq_file *m, int cpu);
acb32132 1959extern void print_dl_stats(struct seq_file *m, int cpu);
6b55c965
SD
1960extern void
1961print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
397f2378
SD
1962#ifdef CONFIG_NUMA_BALANCING
1963extern void
1964show_numa_stats(struct task_struct *p, struct seq_file *m);
1965extern void
1966print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1967 unsigned long tpf, unsigned long gsf, unsigned long gpf);
1968#endif /* CONFIG_NUMA_BALANCING */
1969#endif /* CONFIG_SCHED_DEBUG */
029632fb
PZ
1970
1971extern void init_cfs_rq(struct cfs_rq *cfs_rq);
07c54f7a
AV
1972extern void init_rt_rq(struct rt_rq *rt_rq);
1973extern void init_dl_rq(struct dl_rq *dl_rq);
029632fb 1974
1ee14e6c
BS
1975extern void cfs_bandwidth_usage_inc(void);
1976extern void cfs_bandwidth_usage_dec(void);
1c792db7 1977
3451d024 1978#ifdef CONFIG_NO_HZ_COMMON
1c792db7
SS
1979enum rq_nohz_flag_bits {
1980 NOHZ_TICK_STOPPED,
1981 NOHZ_BALANCE_KICK,
1982};
1983
1984#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
20a5c8cc
TG
1985
1986extern void nohz_balance_exit_idle(unsigned int cpu);
1987#else
1988static inline void nohz_balance_exit_idle(unsigned int cpu) { }
1c792db7 1989#endif
73fbec60 1990
daec5798
LA
1991
1992#ifdef CONFIG_SMP
1993static inline
1994void __dl_update(struct dl_bw *dl_b, s64 bw)
1995{
1996 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
1997 int i;
1998
1999 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2000 "sched RCU must be held");
2001 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2002 struct rq *rq = cpu_rq(i);
2003
2004 rq->dl.extra_bw += bw;
2005 }
2006}
2007#else
2008static inline
2009void __dl_update(struct dl_bw *dl_b, s64 bw)
2010{
2011 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2012
2013 dl->extra_bw += bw;
2014}
2015#endif
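/*
 * Worked example (sketch): callers hand __dl_update() the per-CPU share of a
 * bandwidth change.  Releasing a deadline task's bandwidth tsk_bw on a 4-CPU
 * root domain would be __dl_update(dl_b, (s64)tsk_bw / 4), crediting a
 * quarter of it as reclaimable extra_bw to each active CPU in the domain.
 */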
2016
2017
73fbec60 2018#ifdef CONFIG_IRQ_TIME_ACCOUNTING
19d23dbf 2019struct irqtime {
25e2d8c1 2020 u64 total;
a499a5a1 2021 u64 tick_delta;
19d23dbf
FW
2022 u64 irq_start_time;
2023 struct u64_stats_sync sync;
2024};
73fbec60 2025
19d23dbf 2026DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
73fbec60 2027
25e2d8c1
FW
2028/*
2029 * Returns the irqtime minus the softirq time computed by ksoftirqd.
2030 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
2031 * subtracted from it and would never move forward.
2032 */
73fbec60
FW
2033static inline u64 irq_time_read(int cpu)
2034{
19d23dbf
FW
2035 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2036 unsigned int seq;
2037 u64 total;
73fbec60
FW
2038
2039 do {
19d23dbf 2040 seq = __u64_stats_fetch_begin(&irqtime->sync);
25e2d8c1 2041 total = irqtime->total;
19d23dbf 2042 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
73fbec60 2043
19d23dbf 2044 return total;
73fbec60 2045}
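/*
 * Writer-side sketch (for symmetry with the read loop above): updates to
 * irqtime->total are published under u64_stats_update_begin()/_end() on
 * irqtime->sync, so 32-bit readers never observe a torn 64-bit value.
 */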
73fbec60 2046#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
adaf9fcd
RW
2047
2048#ifdef CONFIG_CPU_FREQ
2049DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2050
2051/**
2052 * cpufreq_update_util - Take a note about CPU utilization changes.
12bde33d 2053 * @rq: Runqueue to carry out the update for.
58919e83 2054 * @flags: Update reason flags.
adaf9fcd 2055 *
58919e83
RW
2056 * This function is called by the scheduler on the CPU whose utilization is
2057 * being updated.
adaf9fcd
RW
2058 *
2059 * It can only be called from RCU-sched read-side critical sections.
adaf9fcd
RW
2060 *
2061 * The way cpufreq is currently arranged requires it to evaluate the CPU
2062 * performance state (frequency/voltage) on a regular basis to prevent it from
2063 * being stuck in a completely inadequate performance level for too long.
2064 * That is not guaranteed to happen if the updates are only triggered from CFS,
2065 * though, because they may not be coming in if RT or deadline tasks are active
2066 * all the time (or there are RT and DL tasks only).
2067 *
2068 * As a workaround for that issue, this function is called by the RT and DL
2069 * sched classes to trigger extra cpufreq updates to keep that evaluation from stalling,
2070 * but that really is a band-aid. Going forward it should be replaced with
2071 * solutions targeted more specifically at RT and DL tasks.
2072 */
12bde33d 2073static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
adaf9fcd 2074{
58919e83
RW
2075 struct update_util_data *data;
2076
674e7541
VK
2077 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2078 cpu_of(rq)));
58919e83 2079 if (data)
12bde33d
RW
2080 data->func(data, rq_clock(rq), flags);
2081}
adaf9fcd 2082#else
12bde33d 2083static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
adaf9fcd 2084#endif /* CONFIG_CPU_FREQ */
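/*
 * Usage sketch (hypothetical helper): sched classes invoke this with the rq
 * lock held, typically from enqueue or tick paths.  SCHED_CPUFREQ_RT (from
 * <linux/sched/cpufreq.h>) marks the update as RT-driven so a governor such
 * as schedutil can react accordingly.
 */
static inline void example_note_rt_activity(struct rq *rq)
{
	cpufreq_update_util(rq, SCHED_CPUFREQ_RT);
}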
be53f58f 2085
9bdcb44e
RW
2086#ifdef arch_scale_freq_capacity
2087#ifndef arch_scale_freq_invariant
2088#define arch_scale_freq_invariant() (true)
2089#endif
2090#else /* arch_scale_freq_capacity */
2091#define arch_scale_freq_invariant() (false)
2092#endif