#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/rt.h>
#include <linux/sched/clock.h>
#include <linux/sched/wake_q.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/kernel_stat.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

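/*
 * Worked example (illustrative): with HZ == 250, one jiffy is
 * NSEC_PER_SEC / HZ == 4,000,000 ns, so NS_TO_JIFFIES(10000000)
 * evaluates to 2 - the integer division truncates the remainder.
 */
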
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

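/*
 * Worked example (illustrative, assuming SCHED_FIXEDPOINT_SHIFT == 10):
 * on 64-bit, NICE_0_LOAD_SHIFT == 20 and NICE_0_LOAD == 1 << 20, so
 * scale_load(1024) == 1048576 == NICE_0_LOAD, matching the invariant
 * above for the nice-0 weight of 1024. On 32-bit both macros are
 * identity operations and NICE_0_LOAD stays at 1024.
 */
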
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE	(10)

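/*
 * Worked example (illustrative): deadline parameters are kept in
 * nanoseconds and the admission math drops the DL_SCALE low-order
 * bits, so the effective resolution is 1 << 10 == 1024 ns (just above
 * 1us); with DL_SCALE == 9 it would be 512 ns (just above 0.5us).
 */
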
/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 * - we store the maximum -deadline bandwidth of the system (the group);
 * - we cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 * - the dl_total_bw array contains, in the i-th element, the currently
 *   allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

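/*
 * Worked example (illustrative): bandwidths are fixed-point fractions
 * of a CPU (see to_ratio() below). With a 95% per-CPU limit on a
 * 4-CPU root domain, the total budget is dl_b->bw * 4, i.e. 3.8 CPUs
 * worth; __dl_overflow() admits a change only while
 * total_bw - old_bw + new_bw stays within that budget. A dl_b->bw of
 * -1 means admission control is disabled entirely.
 */
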
extern void init_dl_bw(struct dl_bw *dl_b);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL << 1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

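/*
 * Illustrative use (hypothetical visitor, shown only as a sketch; a
 * visitor returning non-zero aborts the walk):
 *
 *	static int tg_count(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count, tg_nop, &n);
 *	rcu_read_unlock();
 */
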
extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
	unsigned long propagate_avg;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's on a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern cpumask_var_t fallback_doms;
extern cpumask_var_t sched_domains_tmpmask;

extern void init_defrootdomain(void);
extern int init_sched_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: places that want to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must order their
 * lock acquisitions by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_update_flags;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU read-side section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
#define this_rq()	this_cpu_ptr(&runqueues)
#define task_rq(p)	cpu_rq(task_cpu(p))
#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
#define raw_rq()	raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

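/*
 * Worked example (illustrative): the promotion is a single left shift,
 * so a pending RQCF_REQ_SKIP (0x01) becomes RQCF_ACT_SKIP (0x02),
 * while a stale RQCF_UPDATED (0x04) would become 0x08 - which is why
 * the check above uses ">= RQCF_UPDATED" instead of a bitwise test.
 */
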
static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

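/*
 * Note (editorial): callbacks are pushed LIFO onto rq->balance_callback
 * and run later, once it is safe to drop rq->lock. The head->next
 * check above makes queueing idempotent while a callback is still
 * pending.
 */
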
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
	     __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu: The cpu whose highest level of sched domain is to
 *       be returned.
 * @flag: The flag to check for the highest sched_domain
 *        for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

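/*
 * Illustrative use (this is how the sd_llc pointers below are derived;
 * see update_top_cache_domain() in the topology code):
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * returns the widest domain whose CPUs all share a last-level cache.
 */
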
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long min_capacity; /* Min per-CPU capacity in group */
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	int asym_prefer_cpu;		/* cpu of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

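/*
 * Worked example (illustrative): features.h contains lines such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true). Included inside the enum
 * above, the x-macro expands each of those to an enumerator like
 * __SCHED_FEAT_GENTLE_FAIR_SLEEPERS; below, the same header is
 * re-included to instead generate one static_branch_<name>() helper
 * per feature.
 */
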
#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

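/*
 * Worked example (illustrative): with the default sysctls
 * sched_rt_period_us == 1000000 and sched_rt_runtime_us == 950000,
 * global_rt_period() returns 1e9 ns and global_rt_runtime() 9.5e8 ns,
 * i.e. RT tasks may consume at most 0.95s of every 1s period.
 */
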
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
#define WF_FORK		0x02	/* child wakeup after fork */
#define WF_MIGRATED	0x04	/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

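/*
 * Worked example (illustrative): the two tables are indexed by
 * priority - MAX_RT_PRIO, so nice 0 maps to index 20, whose weight is
 * 1024 (NICE_0_LOAD before scaling). Each nice step changes the weight
 * by roughly 1.25x, and sched_prio_to_wmult[i] caches
 * 2^32 / sched_prio_to_weight[i] so the hot path can replace a
 * division with a multiply and shift.
 */
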
/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04

#define ENQUEUE_HEAD		0x08
#define ENQUEUE_REPLENISH	0x10
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x20
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct rq_flags *rf);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP	0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it
 * out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

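/*
 * Worked example (illustrative, assuming the default smt_gain of 1178):
 * a 2-thread SMT core's domain has span_weight == 2 and
 * SD_SHARE_CPUCAPACITY set, so each hardware thread reports a capacity
 * of 1178 / 2 == 589, against SCHED_CAPACITY_SCALE == 1024 for a full
 * non-SMT CPU.
 */
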
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

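/*
 * Typical usage (illustrative sketch):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... @p is pinned and its runqueue locked here ...
 *	task_rq_unlock(rq, p, &rf);
 */
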
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64 tick_delta;
	u64 irq_start_time;
	struct u64_stats_sync sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

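/*
 * Note (editorial): the loop below is the standard u64_stats_sync
 * read-side pattern - re-read if a writer updated the counters
 * concurrently. On 64-bit kernels the begin/retry helpers compile
 * away; on 32-bit they prevent torn reads of the 64-bit counters.
 */
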
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	u64 *cpustat = kcpustat_cpu(cpu).cpustat;
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS,
 * though, because they may not be coming in if RT or deadline tasks are active
 * all the time (or there are RT and DL tasks only).
 *
 * As a workaround for that issue, this function is called by the RT and DL
 * sched classes to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT and DL tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, rq_clock(rq), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
{
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_update_util(rq, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant()	(false)
#endif