git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - kernel/sched/pelt.h
Merge tag 'powerpc-4.20-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[mirror_ubuntu-eoan-kernel.git] / kernel / sched / pelt.h
CommitLineData
c0796298
VG
1#ifdef CONFIG_SMP
2
3int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
4int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
5int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
371bf427 6int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
3727e0e1 7int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
c0796298 8
11d4afd4 9#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
91c27493
VG
10int update_irq_load_avg(struct rq *rq, u64 running);
11#else
12static inline int
13update_irq_load_avg(struct rq *rq, u64 running)
14{
15 return 0;
16}
17#endif
18
c0796298
VG
19/*
20 * When a task is dequeued, its estimated utilization should not be update if
21 * its util_avg has not been updated at least once.
22 * This flag is used to synchronize util_avg updates with util_est updates.
23 * We map this information into the LSB bit of the utilization saved at
24 * dequeue time (i.e. util_est.dequeued).
25 */
26#define UTIL_AVG_UNCHANGED 0x1
27
28static inline void cfs_se_util_change(struct sched_avg *avg)
29{
30 unsigned int enqueued;
31
32 if (!sched_feat(UTIL_EST))
33 return;
34
35 /* Avoid store if the flag has been already set */
36 enqueued = avg->util_est.enqueued;
37 if (!(enqueued & UTIL_AVG_UNCHANGED))
38 return;
39
40 /* Reset flag to report util_avg has been updated */
41 enqueued &= ~UTIL_AVG_UNCHANGED;
42 WRITE_ONCE(avg->util_est.enqueued, enqueued);
43}
44
45#else
46
47static inline int
48update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
49{
50 return 0;
51}
52
371bf427
VG
53static inline int
54update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
55{
56 return 0;
57}
58
3727e0e1
VG
59static inline int
60update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
61{
62 return 0;
63}
91c27493
VG
64
65static inline int
66update_irq_load_avg(struct rq *rq, u64 running)
67{
68 return 0;
69}
c0796298
VG
70#endif
71
72