[mirror_ubuntu-bionic-kernel.git] / kernel / sched / deadline.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
aab03e05
DF
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
7 * Tasks that periodically execute their instances for less than their
8 * runtime won't miss any of their deadlines.
9 * Tasks that are not periodic or sporadic or that try to execute more
10 * than their reserved bandwidth will be slowed down (and may potentially
11 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
1baca4ce 14 * Juri Lelli <juri.lelli@gmail.com>,
aab03e05
DF
15 * Michael Trimarchi <michael@amarulasolutions.com>,
16 * Fabio Checconi <fchecconi@gmail.com>
17 */
18#include "sched.h"
19
6bfd6d72 20#include <linux/slab.h>
06a76fe0 21#include <uapi/linux/sched/types.h>
6bfd6d72 22
332ac17e
DF
23struct dl_bandwidth def_dl_bandwidth;
24
aab03e05
DF
25static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
26{
27 return container_of(dl_se, struct task_struct, dl);
28}
29
30static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
31{
32 return container_of(dl_rq, struct rq, dl);
33}
34
35static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
36{
37 struct task_struct *p = dl_task_of(dl_se);
38 struct rq *rq = task_rq(p);
39
40 return &rq->dl;
41}
42
43static inline int on_dl_rq(struct sched_dl_entity *dl_se)
44{
45 return !RB_EMPTY_NODE(&dl_se->rb_node);
46}
47
06a76fe0
NP
48#ifdef CONFIG_SMP
49static inline struct dl_bw *dl_bw_of(int i)
50{
51 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
52 "sched RCU must be held");
53 return &cpu_rq(i)->rd->dl_bw;
54}
55
56static inline int dl_bw_cpus(int i)
57{
58 struct root_domain *rd = cpu_rq(i)->rd;
59 int cpus = 0;
60
61 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
62 "sched RCU must be held");
63 for_each_cpu_and(i, rd->span, cpu_active_mask)
64 cpus++;
65
66 return cpus;
67}
68#else
69static inline struct dl_bw *dl_bw_of(int i)
70{
71 return &cpu_rq(i)->dl.dl_bw;
72}
73
74static inline int dl_bw_cpus(int i)
75{
76 return 1;
77}
78#endif
79
e36d8677
LA
80static inline
81void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
82{
83 u64 old = dl_rq->running_bw;
84
85 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
86 dl_rq->running_bw += dl_bw;
87 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
8fd27231 88 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
e36d8677
LA
89}
90
91static inline
92void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
93{
94 u64 old = dl_rq->running_bw;
95
96 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
97 dl_rq->running_bw -= dl_bw;
98 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
99 if (dl_rq->running_bw > old)
100 dl_rq->running_bw = 0;
101}
102
8fd27231
LA
103static inline
104void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
105{
106 u64 old = dl_rq->this_bw;
107
108 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
109 dl_rq->this_bw += dl_bw;
110 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
111}
112
113static inline
114void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
115{
116 u64 old = dl_rq->this_bw;
117
118 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
119 dl_rq->this_bw -= dl_bw;
120 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
121 if (dl_rq->this_bw > old)
122 dl_rq->this_bw = 0;
123 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
124}
125
209a0cbd
LA
126void dl_change_utilization(struct task_struct *p, u64 new_bw)
127{
8fd27231 128 struct rq *rq;
209a0cbd 129
8fd27231 130 if (task_on_rq_queued(p))
209a0cbd
LA
131 return;
132
8fd27231
LA
133 rq = task_rq(p);
134 if (p->dl.dl_non_contending) {
135 sub_running_bw(p->dl.dl_bw, &rq->dl);
136 p->dl.dl_non_contending = 0;
137 /*
138 * If the timer handler is currently running and the
139 * timer cannot be cancelled, inactive_task_timer()
140 * will see that dl_non_contending is not set, and
141 * will not touch the rq's active utilization,
142 * so we are still safe.
143 */
144 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
145 put_task_struct(p);
146 }
147 sub_rq_bw(p->dl.dl_bw, &rq->dl);
148 add_rq_bw(new_bw, &rq->dl);
209a0cbd
LA
149}
150
151/*
152 * The utilization of a task cannot be immediately removed from
153 * the rq active utilization (running_bw) when the task blocks.
154 * Instead, we have to wait for the so-called "0-lag time".
155 *
156 * If a task blocks before the "0-lag time", a timer (the inactive
157 * timer) is armed, and running_bw is decreased when the timer
158 * fires.
159 *
160 * If the task wakes up again before the inactive timer fires,
161 * the timer is cancelled, whereas if the task wakes up after the
162 * inactive timer fired (and running_bw has been decreased) the
163 * task's utilization has to be added to running_bw again.
164 * A flag in the deadline scheduling entity (dl_non_contending)
165 * is used to avoid race conditions between the inactive timer handler
166 * and task wakeups.
167 *
168 * The following diagram shows how running_bw is updated. A task is
169 * "ACTIVE" when its utilization contributes to running_bw; an
170 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
171 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
172 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
173 * time has already passed, and which does not contribute to running_bw anymore.
174 * +------------------+
175 * wakeup | ACTIVE |
176 * +------------------>+ contending |
177 * | add_running_bw | |
178 * | +----+------+------+
179 * | | ^
180 * | dequeue | |
181 * +--------+-------+ | |
182 * | | t >= 0-lag | | wakeup
183 * | INACTIVE |<---------------+ |
184 * | | sub_running_bw | |
185 * +--------+-------+ | |
186 * ^ | |
187 * | t < 0-lag | |
188 * | | |
189 * | V |
190 * | +----+------+------+
191 * | sub_running_bw | ACTIVE |
192 * +-------------------+ |
193 * inactive timer | non contending |
194 * fired +------------------+
195 *
196 * The task_non_contending() function is invoked when a task
197 * blocks, and checks if the 0-lag time already passed or
198 * not (in the first case, it directly updates running_bw;
199 * in the second case, it arms the inactive timer).
200 *
201 * The task_contending() function is invoked when a task wakes
202 * up, and checks if the task is still in the "ACTIVE non contending"
203 * state or not (in the second case, it updates running_bw).
204 */
205static void task_non_contending(struct task_struct *p)
206{
207 struct sched_dl_entity *dl_se = &p->dl;
208 struct hrtimer *timer = &dl_se->inactive_timer;
209 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
210 struct rq *rq = rq_of_dl_rq(dl_rq);
211 s64 zerolag_time;
212
213 /*
214 * If this is a non-deadline task that has been boosted,
215 * do nothing
216 */
217 if (dl_se->dl_runtime == 0)
218 return;
219
220 WARN_ON(hrtimer_active(&dl_se->inactive_timer));
221 WARN_ON(dl_se->dl_non_contending);
222
223 zerolag_time = dl_se->deadline -
224 div64_long((dl_se->runtime * dl_se->dl_period),
225 dl_se->dl_runtime);
226
227 /*
228 * Using relative times instead of the absolute "0-lag time"
229 * allows us to simplify the code
230 */
231 zerolag_time -= rq_clock(rq);
232
233 /*
234 * If the "0-lag time" already passed, decrease the active
235 * utilization now, instead of starting a timer
236 */
237 if (zerolag_time < 0) {
238 if (dl_task(p))
239 sub_running_bw(dl_se->dl_bw, dl_rq);
387e3130
LA
240 if (!dl_task(p) || p->state == TASK_DEAD) {
241 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
242
8fd27231
LA
243 if (p->state == TASK_DEAD)
244 sub_rq_bw(p->dl.dl_bw, &rq->dl);
387e3130 245 raw_spin_lock(&dl_b->lock);
8c0944ce 246 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
209a0cbd 247 __dl_clear_params(p);
387e3130
LA
248 raw_spin_unlock(&dl_b->lock);
249 }
209a0cbd
LA
250
251 return;
252 }
253
254 dl_se->dl_non_contending = 1;
255 get_task_struct(p);
256 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
257}
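/*
 * Illustrative sketch (not part of the original file): the "0-lag time" used
 * by task_non_contending() above is the instant at which the task's remaining
 * runtime, consumed at its reserved rate dl_runtime/dl_period, would reach
 * zero:
 *
 *	t_0lag = deadline - runtime * dl_period / dl_runtime
 *
 * The hypothetical helper below just repeats that computation, returning the
 * 0-lag instant relative to "now" (a negative result means the 0-lag time has
 * already passed and running_bw can be decreased immediately).
 */
static inline s64 dl_zerolag_delta_sketch(struct sched_dl_entity *dl_se, u64 now)
{
	s64 t_0lag = dl_se->deadline -
		     div64_long(dl_se->runtime * dl_se->dl_period,
				dl_se->dl_runtime);

	return t_0lag - (s64)now;
}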
258
8fd27231 259static void task_contending(struct sched_dl_entity *dl_se, int flags)
209a0cbd
LA
260{
261 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
262
263 /*
264 * If this is a non-deadline task that has been boosted,
265 * do nothing
266 */
267 if (dl_se->dl_runtime == 0)
268 return;
269
8fd27231
LA
270 if (flags & ENQUEUE_MIGRATED)
271 add_rq_bw(dl_se->dl_bw, dl_rq);
272
209a0cbd
LA
273 if (dl_se->dl_non_contending) {
274 dl_se->dl_non_contending = 0;
275 /*
276 * If the timer handler is currently running and the
277 * timer cannot be cancelled, inactive_task_timer()
278 * will see that dl_non_contending is not set, and
279 * will not touch the rq's active utilization,
280 * so we are still safe.
281 */
282 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
283 put_task_struct(dl_task_of(dl_se));
284 } else {
285 /*
286 * Since "dl_non_contending" is not set, the
287 * task's utilization has already been removed from
288 * active utilization (either when the task blocked,
289 * or when the "inactive timer" fired).
290 * So, add it back.
291 */
292 add_running_bw(dl_se->dl_bw, dl_rq);
293 }
294}
295
aab03e05
DF
296static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
297{
298 struct sched_dl_entity *dl_se = &p->dl;
299
2161573e 300 return dl_rq->root.rb_leftmost == &dl_se->rb_node;
aab03e05
DF
301}
302
332ac17e
DF
303void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
304{
305 raw_spin_lock_init(&dl_b->dl_runtime_lock);
306 dl_b->dl_period = period;
307 dl_b->dl_runtime = runtime;
308}
309
332ac17e
DF
310void init_dl_bw(struct dl_bw *dl_b)
311{
312 raw_spin_lock_init(&dl_b->lock);
313 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
1724813d 314 if (global_rt_runtime() == RUNTIME_INF)
332ac17e
DF
315 dl_b->bw = -1;
316 else
1724813d 317 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
332ac17e
DF
318 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
319 dl_b->total_bw = 0;
320}
321
07c54f7a 322void init_dl_rq(struct dl_rq *dl_rq)
aab03e05 323{
2161573e 324 dl_rq->root = RB_ROOT_CACHED;
1baca4ce
JL
325
326#ifdef CONFIG_SMP
327 /* zero means no -deadline tasks */
328 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
329
330 dl_rq->dl_nr_migratory = 0;
331 dl_rq->overloaded = 0;
2161573e 332 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
332ac17e
DF
333#else
334 init_dl_bw(&dl_rq->dl_bw);
1baca4ce 335#endif
e36d8677
LA
336
337 dl_rq->running_bw = 0;
8fd27231 338 dl_rq->this_bw = 0;
4da3abce 339 init_dl_rq_bw_ratio(dl_rq);
1baca4ce
JL
340}
341
342#ifdef CONFIG_SMP
343
344static inline int dl_overloaded(struct rq *rq)
345{
346 return atomic_read(&rq->rd->dlo_count);
347}
348
349static inline void dl_set_overload(struct rq *rq)
350{
351 if (!rq->online)
352 return;
353
354 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
355 /*
356 * Must be visible before the overload count is
357 * set (as in sched_rt.c).
358 *
359 * Matched by the barrier in pull_dl_task().
360 */
361 smp_wmb();
362 atomic_inc(&rq->rd->dlo_count);
363}
364
365static inline void dl_clear_overload(struct rq *rq)
366{
367 if (!rq->online)
368 return;
369
370 atomic_dec(&rq->rd->dlo_count);
371 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
372}
373
374static void update_dl_migration(struct dl_rq *dl_rq)
375{
995b9ea4 376 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
1baca4ce
JL
377 if (!dl_rq->overloaded) {
378 dl_set_overload(rq_of_dl_rq(dl_rq));
379 dl_rq->overloaded = 1;
380 }
381 } else if (dl_rq->overloaded) {
382 dl_clear_overload(rq_of_dl_rq(dl_rq));
383 dl_rq->overloaded = 0;
384 }
385}
386
387static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
388{
389 struct task_struct *p = dl_task_of(dl_se);
1baca4ce 390
4b53a341 391 if (p->nr_cpus_allowed > 1)
1baca4ce
JL
392 dl_rq->dl_nr_migratory++;
393
394 update_dl_migration(dl_rq);
395}
396
397static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
398{
399 struct task_struct *p = dl_task_of(dl_se);
1baca4ce 400
4b53a341 401 if (p->nr_cpus_allowed > 1)
1baca4ce
JL
402 dl_rq->dl_nr_migratory--;
403
404 update_dl_migration(dl_rq);
405}
406
407/*
408 * The list of pushable -deadline tasks is not a plist, like in
409 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
410 */
411static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
412{
413 struct dl_rq *dl_rq = &rq->dl;
2161573e 414 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
1baca4ce
JL
415 struct rb_node *parent = NULL;
416 struct task_struct *entry;
2161573e 417 bool leftmost = true;
1baca4ce
JL
418
419 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
420
421 while (*link) {
422 parent = *link;
423 entry = rb_entry(parent, struct task_struct,
424 pushable_dl_tasks);
425 if (dl_entity_preempt(&p->dl, &entry->dl))
426 link = &parent->rb_left;
427 else {
428 link = &parent->rb_right;
2161573e 429 leftmost = false;
1baca4ce
JL
430 }
431 }
432
2161573e 433 if (leftmost)
7d92de3a 434 dl_rq->earliest_dl.next = p->dl.deadline;
1baca4ce
JL
435
436 rb_link_node(&p->pushable_dl_tasks, parent, link);
2161573e
DB
437 rb_insert_color_cached(&p->pushable_dl_tasks,
438 &dl_rq->pushable_dl_tasks_root, leftmost);
aab03e05
DF
439}
440
1baca4ce
JL
441static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
442{
443 struct dl_rq *dl_rq = &rq->dl;
444
445 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
446 return;
447
2161573e 448 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
1baca4ce
JL
449 struct rb_node *next_node;
450
451 next_node = rb_next(&p->pushable_dl_tasks);
7d92de3a
WL
452 if (next_node) {
453 dl_rq->earliest_dl.next = rb_entry(next_node,
454 struct task_struct, pushable_dl_tasks)->dl.deadline;
455 }
1baca4ce
JL
456 }
457
2161573e 458 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
1baca4ce
JL
459 RB_CLEAR_NODE(&p->pushable_dl_tasks);
460}
461
462static inline int has_pushable_dl_tasks(struct rq *rq)
463{
2161573e 464 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
1baca4ce
JL
465}
466
467static int push_dl_task(struct rq *rq);
468
dc877341
PZ
469static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
470{
471 return dl_task(prev);
472}
473
9916e214
PZ
474static DEFINE_PER_CPU(struct callback_head, dl_push_head);
475static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
e3fca9e7
PZ
476
477static void push_dl_tasks(struct rq *);
9916e214 478static void pull_dl_task(struct rq *);
e3fca9e7
PZ
479
480static inline void queue_push_tasks(struct rq *rq)
dc877341 481{
e3fca9e7
PZ
482 if (!has_pushable_dl_tasks(rq))
483 return;
484
9916e214
PZ
485 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
486}
487
488static inline void queue_pull_task(struct rq *rq)
489{
490 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
dc877341
PZ
491}
492
fa9c9d10
WL
493static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
494
a649f237 495static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
fa9c9d10
WL
496{
497 struct rq *later_rq = NULL;
fa9c9d10
WL
498
499 later_rq = find_lock_later_rq(p, rq);
fa9c9d10
WL
500 if (!later_rq) {
501 int cpu;
502
503 /*
504 * If we cannot preempt any rq, fall back to pick any
505 * online cpu.
506 */
0c98d344 507 cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
fa9c9d10
WL
508 if (cpu >= nr_cpu_ids) {
509 /*
510 * Failed to find any suitable CPU.
511 * The task will never come back!
512 */
513 BUG_ON(dl_bandwidth_enabled());
514
515 /*
516 * If admission control is disabled we
517 * try a little harder to let the task
518 * run.
519 */
520 cpu = cpumask_any(cpu_active_mask);
521 }
522 later_rq = cpu_rq(cpu);
523 double_lock_balance(rq, later_rq);
524 }
525
fa9c9d10 526 set_task_cpu(p, later_rq->cpu);
a649f237
PZ
527 double_unlock_balance(later_rq, rq);
528
529 return later_rq;
fa9c9d10
WL
530}
531
1baca4ce
JL
532#else
533
534static inline
535void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
536{
537}
538
539static inline
540void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
541{
542}
543
544static inline
545void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
546{
547}
548
549static inline
550void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
551{
552}
553
dc877341
PZ
554static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
555{
556 return false;
557}
558
0ea60c20 559static inline void pull_dl_task(struct rq *rq)
dc877341 560{
dc877341
PZ
561}
562
e3fca9e7 563static inline void queue_push_tasks(struct rq *rq)
dc877341 564{
dc877341
PZ
565}
566
9916e214 567static inline void queue_pull_task(struct rq *rq)
dc877341
PZ
568{
569}
1baca4ce
JL
570#endif /* CONFIG_SMP */
571
aab03e05
DF
572static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
573static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
574static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
575 int flags);
576
577/*
578 * We are being explicitly informed that a new instance is starting,
579 * and this means that:
580 * - the absolute deadline of the entity has to be placed at
581 * current time + relative deadline;
582 * - the runtime of the entity has to be set to the maximum value.
583 *
584 * The capability of specifying such an event is useful whenever a -deadline
585 * entity wants to (try to!) synchronize its behaviour with the scheduler's
586 * one, and to (try to!) reconcile itself with its own scheduling
587 * parameters.
588 */
98b0a857 589static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
aab03e05
DF
590{
591 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
592 struct rq *rq = rq_of_dl_rq(dl_rq);
593
98b0a857 594 WARN_ON(dl_se->dl_boosted);
72f9f3fd
LA
595 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
596
597 /*
598 * We are racing with the deadline timer. So, do nothing because
599 * the deadline timer handler will take care of properly recharging
600 * the runtime and postponing the deadline
601 */
602 if (dl_se->dl_throttled)
603 return;
aab03e05
DF
604
605 /*
606 * We use the regular wall clock time to set deadlines in the
607 * future; in fact, we must consider execution overheads (time
608 * spent on hardirq context, etc.).
609 */
98b0a857
JL
610 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
611 dl_se->runtime = dl_se->dl_runtime;
aab03e05
DF
612}
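/*
 * Worked example (illustrative, hypothetical numbers): a task declared with
 * dl_runtime = 2ms and dl_deadline = 20ms that signals a new instance while
 * rq_clock(rq) = 1.000s leaves setup_new_dl_entity() with deadline = 1.020s
 * and a full 2ms budget, exactly as if the instance had just been released at
 * the current time.
 */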
613
614/*
615 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
616 * possibility of an entity lasting more than what it declared, and thus
617 * exhausting its runtime.
618 *
619 * Here we are interested in making runtime overrun possible, but we do
620 * not want an entity which is misbehaving to affect the scheduling of all
621 * other entities.
622 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
623 * is used, in order to confine each entity within its own bandwidth.
624 *
625 * This function deals exactly with that, and ensures that when the runtime
626 * of an entity is replenished, its deadline is also postponed. That ensures
627 * the overrunning entity can't interfere with other entities in the system and
628 * can't make them miss their deadlines. Reasons why this kind of overrun
629 * could happen are, typically, an entity voluntarily trying to exceed its
1b09d29b 630 * runtime, or it just underestimated it during sched_setattr().
aab03e05 631 */
2d3d891d
DF
632static void replenish_dl_entity(struct sched_dl_entity *dl_se,
633 struct sched_dl_entity *pi_se)
aab03e05
DF
634{
635 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
636 struct rq *rq = rq_of_dl_rq(dl_rq);
637
2d3d891d
DF
638 BUG_ON(pi_se->dl_runtime <= 0);
639
640 /*
641 * This could be the case for a !-dl task that is boosted.
642 * Just go with full inherited parameters.
643 */
644 if (dl_se->dl_deadline == 0) {
645 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
646 dl_se->runtime = pi_se->dl_runtime;
647 }
648
48be3a67
PZ
649 if (dl_se->dl_yielded && dl_se->runtime > 0)
650 dl_se->runtime = 0;
651
aab03e05
DF
652 /*
653 * We keep moving the deadline away until we get some
654 * available runtime for the entity. This ensures correct
655 * handling of situations where the runtime overrun is
656 * arbitrarily large.
657 */
658 while (dl_se->runtime <= 0) {
2d3d891d
DF
659 dl_se->deadline += pi_se->dl_period;
660 dl_se->runtime += pi_se->dl_runtime;
aab03e05
DF
661 }
662
663 /*
664 * At this point, the deadline really should be "in
665 * the future" with respect to rq->clock. If it's
666 * not, we are, for some reason, lagging too much!
667 * Anyway, after having warned userspace about that,
668 * we still try to keep things running by
669 * resetting the deadline and the budget of the
670 * entity.
671 */
672 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
c219b7dd 673 printk_deferred_once("sched: DL replenish lagged too much\n");
2d3d891d
DF
674 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
675 dl_se->runtime = pi_se->dl_runtime;
aab03e05 676 }
1019a359
PZ
677
678 if (dl_se->dl_yielded)
679 dl_se->dl_yielded = 0;
680 if (dl_se->dl_throttled)
681 dl_se->dl_throttled = 0;
aab03e05
DF
682}
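/*
 * Illustrative sketch (not part of the original file): the replenishment loop
 * above in isolation. For every dl_runtime worth of budget that is refilled,
 * the deadline moves one full period ahead, so the bandwidth actually granted
 * stays dl_runtime/dl_period no matter how large the overrun was. The helper
 * name is hypothetical.
 */
static inline void dl_replenish_sketch(s64 *runtime, u64 *deadline,
				       u64 dl_runtime, u64 dl_period)
{
	while (*runtime <= 0) {
		*deadline += dl_period;
		*runtime += dl_runtime;
	}
	/*
	 * Example: runtime = -25ms with dl_runtime = 10ms and
	 * dl_period = 100ms ends with runtime = +5ms and the deadline
	 * pushed 300ms into the future.
	 */
}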
683
684/*
685 * Here we check if --at time t-- an entity (which is probably being
686 * [re]activated or, in general, enqueued) can use its remaining runtime
687 * and its current deadline _without_ exceeding the bandwidth it is
688 * assigned (function returns true if it can't). We are in fact applying
689 * one of the CBS rules: when a task wakes up, if the residual runtime
690 * over residual deadline fits within the allocated bandwidth, then we
691 * can keep the current (absolute) deadline and residual budget without
692 * disrupting the schedulability of the system. Otherwise, we should
693 * refill the runtime and set the deadline a period in the future,
694 * because keeping the current (absolute) deadline of the task would
712e5e34
DF
695 * result in breaking guarantees promised to other tasks (refer to
696 * Documentation/scheduler/sched-deadline.txt for more information).
aab03e05
DF
697 *
698 * This function returns true if:
699 *
2317d5f1 700 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
aab03e05
DF
701 *
702 * IOW we can't recycle current parameters.
755378a4 703 *
2317d5f1 704 * Notice that the bandwidth check is done against the deadline. For
755378a4 705 * task with deadline equal to period this is the same of using
2317d5f1 706 * dl_period instead of dl_deadline in the equation above.
aab03e05 707 */
2d3d891d
DF
708static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
709 struct sched_dl_entity *pi_se, u64 t)
aab03e05
DF
710{
711 u64 left, right;
712
713 /*
714 * left and right are the two sides of the equation above,
715 * after a bit of shuffling to use multiplications instead
716 * of divisions.
717 *
718 * Note that none of the time values involved in the two
719 * multiplications are absolute: dl_deadline and dl_runtime
720 * are the relative deadline and the maximum runtime of each
721 * instance, runtime is the runtime left for the last instance
722 * and (deadline - t), since t is rq->clock, is the time left
723 * to the (absolute) deadline. Even if overflowing the u64 type
724 * is very unlikely to occur in both cases, here we scale down
725 * as we want to avoid that risk at all. Scaling down by 10 bits
726 * (DL_SCALE) means that we reduce granularity to 1us. We are fine with it,
727 * since this is only a true/false check and, anyway, thinking
728 * of anything below microseconds resolution is actually fiction
729 * (but still we want to give the user that illusion >;).
730 */
2317d5f1 731 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
332ac17e
DF
732 right = ((dl_se->deadline - t) >> DL_SCALE) *
733 (pi_se->dl_runtime >> DL_SCALE);
aab03e05
DF
734
735 return dl_time_before(right, left);
736}
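/*
 * Illustrative sketch (not part of the original file): the test above is the
 * cross-multiplied form of
 *
 *	runtime / (deadline - t)  >  dl_runtime / dl_deadline
 *
 * i.e. "would spending the residual budget over the residual time to the
 * deadline exceed the reserved bandwidth?". The hypothetical helper below
 * performs the same comparison without the >> DL_SCALE scaling, which is more
 * readable but could overflow u64 for extreme parameters -- exactly the risk
 * the scaling above avoids.
 */
static inline bool dl_entity_overflow_sketch(u64 runtime, u64 deadline, u64 t,
					     u64 dl_runtime, u64 dl_deadline)
{
	/* runtime * dl_deadline > dl_runtime * (deadline - t) ? */
	return runtime * dl_deadline > dl_runtime * (deadline - t);
}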
737
738/*
3effcb42
DBO
739 * Revised wakeup rule [1]: For self-suspending tasks, rather than
740 * re-initializing the task's runtime and deadline, the revised wakeup
741 * rule adjusts the task's runtime so that the task does not overrun its
742 * density.
aab03e05 743 *
3effcb42
DBO
744 * Reasoning: a task may overrun the density if:
745 * runtime / (deadline - t) > dl_runtime / dl_deadline
746 *
747 * Therefore, runtime can be adjusted to:
748 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
749 *
750 * In this way, runtime will be equal to the maximum density
751 * the task can use without breaking any rule.
752 *
753 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
754 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
755 */
756static void
757update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
758{
759 u64 laxity = dl_se->deadline - rq_clock(rq);
760
761 /*
762 * If the task has deadline < period, and the deadline is in the past,
763 * it should already be throttled before this check.
764 *
765 * See update_dl_entity() comments for further details.
766 */
767 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
768
769 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
770}
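/*
 * Worked example (illustrative, hypothetical numbers): dl_density is the
 * task's density dl_runtime/dl_deadline stored as a fixed-point value scaled
 * by 2^BW_SHIFT, so the assignment above computes
 *
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * With dl_runtime = 5ms and dl_deadline = 10ms (density 1/2), a task waking
 * up 6ms before its old deadline is granted runtime = 3ms, so it cannot use
 * more than its declared density over the remaining window.
 */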
771
772/*
773 * Regarding the deadline, a task with implicit deadline has a relative
774 * deadline == relative period. A task with constrained deadline has a
775 * relative deadline <= relative period.
776 *
777 * We support constrained deadline tasks. However, there are some restrictions
778 * applied only for tasks which do not have an implicit deadline. See
779 * update_dl_entity() to know more about such restrictions.
780 *
781 * dl_is_implicit() returns true if the task has an implicit deadline.
782 */
783static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
784{
785 return dl_se->dl_deadline == dl_se->dl_period;
786}
787
788/*
789 * When a deadline entity is placed in the runqueue, its runtime and deadline
790 * might need to be updated. This is done by a CBS wake up rule. There are two
791 * different rules: 1) the original CBS; and 2) the Revised CBS.
792 *
793 * When the task is starting a new period, the Original CBS is used. In this
794 * case, the runtime is replenished and a new absolute deadline is set.
795 *
796 * When a task is queued before the beginning of the next period, using the
797 * remaining runtime and deadline could make the entity overflow; see
798 * dl_entity_overflow() to find more about runtime overflow. When such case
799 * is detected, the runtime and deadline need to be updated.
800 *
801 * If the task has an implicit deadline, i.e., deadline == period, the Original
802 * CBS is applied: the runtime is replenished and a new absolute deadline is
803 * set, as in the previous cases.
804 *
805 * However, the Original CBS does not work properly for tasks with
806 * deadline < period, which are said to have a constrained deadline. By
807 * applying the Original CBS, a constrained deadline task would be able to run
808 * runtime/deadline in a period. With deadline < period, the task would
809 * overrun the runtime/period allowed bandwidth, breaking the admission test.
810 *
811 * In order to prevent this misbehavior, the Revised CBS is used for
812 * constrained deadline tasks when a runtime overflow is detected. In the
813 * Revised CBS, rather than replenishing & setting a new absolute deadline,
814 * the remaining runtime of the task is reduced to avoid runtime overflow.
815 * Please refer to the comments of update_dl_revised_wakeup() to find
816 * more about the Revised CBS rule.
aab03e05 817 */
2d3d891d
DF
818static void update_dl_entity(struct sched_dl_entity *dl_se,
819 struct sched_dl_entity *pi_se)
aab03e05
DF
820{
821 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
822 struct rq *rq = rq_of_dl_rq(dl_rq);
823
aab03e05 824 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
2d3d891d 825 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
3effcb42
DBO
826
827 if (unlikely(!dl_is_implicit(dl_se) &&
828 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
829 !dl_se->dl_boosted)){
830 update_dl_revised_wakeup(dl_se, rq);
831 return;
832 }
833
2d3d891d
DF
834 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
835 dl_se->runtime = pi_se->dl_runtime;
aab03e05
DF
836 }
837}
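/*
 * Illustrative summary (not part of the original file) of the wakeup decision
 * implemented above:
 *
 *	if the deadline is in the past, or keeping it would overflow the
 *	reserved bandwidth:
 *		if the task is constrained (deadline < period), its deadline
 *		is still in the future, and it is not boosted:
 *			shrink the runtime (Revised CBS,
 *			update_dl_revised_wakeup())
 *		else:
 *			deadline = now + dl_deadline; runtime = dl_runtime
 *			(original CBS)
 *	else:
 *		keep the current deadline and residual runtime
 */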
838
5ac69d37
DBO
839static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
840{
841 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
842}
843
aab03e05
DF
844/*
845 * If the entity depleted all its runtime, and if we want it to sleep
846 * while waiting for some new execution time to become available, we
5ac69d37 847 * set the bandwidth replenishment timer to the replenishment instant
aab03e05
DF
848 * and try to activate it.
849 *
850 * Notice that it is important for the caller to know if the timer
851 * actually started or not (i.e., the replenishment instant is in
852 * the future or in the past).
853 */
a649f237 854static int start_dl_timer(struct task_struct *p)
aab03e05 855{
a649f237
PZ
856 struct sched_dl_entity *dl_se = &p->dl;
857 struct hrtimer *timer = &dl_se->dl_timer;
858 struct rq *rq = task_rq(p);
aab03e05 859 ktime_t now, act;
aab03e05
DF
860 s64 delta;
861
a649f237
PZ
862 lockdep_assert_held(&rq->lock);
863
aab03e05
DF
864 /*
865 * We want the timer to fire at the deadline, but considering
866 * that it is actually coming from rq->clock and not from
867 * hrtimer's time base reading.
868 */
5ac69d37 869 act = ns_to_ktime(dl_next_period(dl_se));
a649f237 870 now = hrtimer_cb_get_time(timer);
aab03e05
DF
871 delta = ktime_to_ns(now) - rq_clock(rq);
872 act = ktime_add_ns(act, delta);
873
874 /*
875 * If the expiry time already passed, e.g., because the value
876 * chosen as the deadline is too small, don't even try to
877 * start the timer in the past!
878 */
879 if (ktime_us_delta(act, now) < 0)
880 return 0;
881
a649f237
PZ
882 /*
883 * !enqueued will guarantee another callback; even if one is already in
884 * progress. This ensures a balanced {get,put}_task_struct().
885 *
886 * The race against __run_timer() clearing the enqueued state is
887 * harmless because we're holding task_rq()->lock, therefore the timer
888 * expiring after we've done the check will wait on its task_rq_lock()
889 * and observe our state.
890 */
891 if (!hrtimer_is_queued(timer)) {
892 get_task_struct(p);
893 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
894 }
aab03e05 895
cc9684d3 896 return 1;
aab03e05
DF
897}
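/*
 * Illustrative note (not from the original file): the expiry computed above
 * translates an instant expressed on the rq clock into the hrtimer time base:
 *
 *	act = dl_next_period(dl_se) + (hrtimer_now - rq_clock(rq))
 *
 * Example with hypothetical numbers: if the replenishment instant is at
 * rq_clock = 1000us, rq_clock currently reads 400us and the hrtimer base
 * reads 900us, the timer is armed at 1000us + (900us - 400us) = 1500us on
 * the hrtimer base -- 600us in the future on both clocks.
 */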
898
899/*
900 * This is the bandwidth enforcement timer callback. If here, we know
901 * a task is not on its dl_rq, since the fact that the timer was running
902 * means the task is throttled and needs a runtime replenishment.
903 *
904 * However, what we actually do depends on whether the task is active
905 * (it is on its rq) or has been removed from there by a call to
906 * dequeue_task_dl(). In the former case we must issue the runtime
907 * replenishment and add the task back to the dl_rq; in the latter, we just
908 * do nothing but clearing dl_throttled, so that runtime and deadline
909 * updating (and the queueing back to dl_rq) will be done by the
910 * next call to enqueue_task_dl().
911 */
912static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
913{
914 struct sched_dl_entity *dl_se = container_of(timer,
915 struct sched_dl_entity,
916 dl_timer);
917 struct task_struct *p = dl_task_of(dl_se);
eb580751 918 struct rq_flags rf;
0f397f2c 919 struct rq *rq;
3960c8c0 920
eb580751 921 rq = task_rq_lock(p, &rf);
0f397f2c 922
aab03e05 923 /*
a649f237 924 * The task might have changed its scheduling policy to something
9846d50d 925 * different than SCHED_DEADLINE (through switched_from_dl()).
a649f237 926 */
209a0cbd 927 if (!dl_task(p))
a649f237 928 goto unlock;
a649f237 929
a649f237
PZ
930 /*
931 * The task might have been boosted by someone else and might be in the
932 * boosting/deboosting path; it's not throttled.
933 */
934 if (dl_se->dl_boosted)
935 goto unlock;
a79ec89f 936
fa9c9d10 937 /*
a649f237
PZ
938 * Spurious timer due to start_dl_timer() race; or we already received
939 * a replenishment from rt_mutex_setprio().
fa9c9d10 940 */
a649f237 941 if (!dl_se->dl_throttled)
fa9c9d10 942 goto unlock;
a649f237
PZ
943
944 sched_clock_tick();
945 update_rq_clock(rq);
fa9c9d10 946
a79ec89f
KT
947 /*
948 * If the throttle happened during sched-out; like:
949 *
950 * schedule()
951 * deactivate_task()
952 * dequeue_task_dl()
953 * update_curr_dl()
954 * start_dl_timer()
955 * __dequeue_task_dl()
956 * prev->on_rq = 0;
957 *
958 * We can be both throttled and !queued. Replenish the counter
959 * but do not enqueue -- wait for our wakeup to do that.
960 */
961 if (!task_on_rq_queued(p)) {
962 replenish_dl_entity(dl_se, dl_se);
963 goto unlock;
964 }
965
1baca4ce 966#ifdef CONFIG_SMP
c0c8c9fa 967 if (unlikely(!rq->online)) {
61c7aca6
WL
968 /*
969 * If the runqueue is no longer available, migrate the
970 * task elsewhere. This necessarily changes rq.
971 */
c0c8c9fa 972 lockdep_unpin_lock(&rq->lock, rf.cookie);
a649f237 973 rq = dl_task_offline_migration(rq, p);
c0c8c9fa 974 rf.cookie = lockdep_pin_lock(&rq->lock);
dcc3b5ff 975 update_rq_clock(rq);
61c7aca6
WL
976
977 /*
978 * Now that the task has been migrated to the new RQ and we
979 * have that locked, proceed as normal and enqueue the task
980 * there.
981 */
c0c8c9fa 982 }
61c7aca6 983#endif
a649f237 984
61c7aca6
WL
985 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
986 if (dl_task(rq->curr))
987 check_preempt_curr_dl(rq, p, 0);
988 else
989 resched_curr(rq);
a649f237 990
61c7aca6 991#ifdef CONFIG_SMP
a649f237
PZ
992 /*
993 * Queueing this task back might have overloaded rq, check if we need
994 * to kick someone away.
1019a359 995 */
0aaafaab
PZ
996 if (has_pushable_dl_tasks(rq)) {
997 /*
998 * Nothing relies on rq->lock after this, so it's safe to drop
999 * rq->lock.
1000 */
d8ac8971 1001 rq_unpin_lock(rq, &rf);
1019a359 1002 push_dl_task(rq);
d8ac8971 1003 rq_repin_lock(rq, &rf);
0aaafaab 1004 }
1baca4ce 1005#endif
a649f237 1006
aab03e05 1007unlock:
eb580751 1008 task_rq_unlock(rq, p, &rf);
aab03e05 1009
a649f237
PZ
1010 /*
1011 * This can free the task_struct, including this hrtimer, do not touch
1012 * anything related to that after this.
1013 */
1014 put_task_struct(p);
1015
aab03e05
DF
1016 return HRTIMER_NORESTART;
1017}
1018
1019void init_dl_task_timer(struct sched_dl_entity *dl_se)
1020{
1021 struct hrtimer *timer = &dl_se->dl_timer;
1022
aab03e05
DF
1023 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1024 timer->function = dl_task_timer;
1025}
1026
df8eac8c
DBO
1027/*
1028 * During the activation, CBS checks if it can reuse the current task's
1029 * runtime and period. If the deadline of the task is in the past, CBS
1030 * cannot use the runtime, and so it replenishes the task. This rule
1031 * works fine for implicit deadline tasks (deadline == period), and the
1032 * CBS was designed for implicit deadline tasks. However, a task with
1033 * constrained deadline (deadline < period) might be awakened after the
1034 * deadline, but before the next period. In this case, replenishing the
1035 * task would allow it to run for runtime / deadline. As in this case
1036 * deadline < period, CBS enables a task to run for more than the
1037 * runtime / period. In a very loaded system, this can cause a domino
1038 * effect, making other tasks miss their deadlines.
1039 *
1040 * To avoid this problem, in the activation of a constrained deadline
1041 * task after the deadline but before the next period, throttle the
1042 * task and set the replenishing timer to the beginning of the next period,
1043 * unless it is boosted.
1044 */
1045static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1046{
1047 struct task_struct *p = dl_task_of(dl_se);
1048 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1049
1050 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1051 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1052 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1053 return;
1054 dl_se->dl_throttled = 1;
ae83b56a
XP
1055 if (dl_se->runtime > 0)
1056 dl_se->runtime = 0;
df8eac8c
DBO
1057 }
1058}
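/*
 * Worked example (illustrative, hypothetical numbers): take a constrained
 * task with dl_runtime = 5ms, dl_deadline = 10ms and dl_period = 100ms whose
 * current absolute deadline was at t = 50ms, waking up at t = 70ms. Then
 *
 *	deadline (50ms) < rq_clock (70ms) < next period (50 - 10 + 100 = 140ms)
 *
 * so replenishing it now would let it run 5ms every 10ms for the rest of the
 * period, exceeding its 5ms/100ms reservation; dl_check_constrained_dl()
 * instead throttles it and arms the timer for the 140ms period boundary.
 */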
1059
aab03e05 1060static
6fab5410 1061int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
aab03e05 1062{
269ad801 1063 return (dl_se->runtime <= 0);
aab03e05
DF
1064}
1065
faa59937
JL
1066extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1067
c52f14d3
LA
1068/*
1069 * This function implements the GRUB accounting rule:
1070 * according to the GRUB reclaiming algorithm, the runtime is
daec5798
LA
1071 * not decreased as "dq = -dt", but as
1072 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1073 * where u is the utilization of the task, Umax is the maximum reclaimable
1074 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1075 * as the difference between the "total runqueue utilization" and the
1076 * runqueue active utilization, and Uextra is the (per runqueue) extra
1077 * reclaimable utilization.
9f0d1a50 1078 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
daec5798
LA
1079 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1080 * BW_SHIFT.
1081 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1082 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1083 * Since delta is a 64 bit variable, to have an overflow its value
1084 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1085 * So, overflow is not an issue here.
c52f14d3 1086 */
9f0d1a50 1087u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
c52f14d3 1088{
9f0d1a50
LA
1089 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1090 u64 u_act;
daec5798 1091 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
c52f14d3 1092
9f0d1a50 1093 /*
daec5798
LA
1094 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1095 * we compare u_inact + rq->dl.extra_bw with
1096 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1097 * u_inact + rq->dl.extra_bw can be larger than
1098 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative
1099 * leading to wrong results)
9f0d1a50 1100 */
daec5798
LA
1101 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1102 u_act = u_act_min;
9f0d1a50 1103 else
daec5798 1104 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
9f0d1a50
LA
1105
1106 return (delta * u_act) >> BW_SHIFT;
c52f14d3
LA
1107}
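/*
 * Worked example (illustrative, hypothetical numbers): suppose Umax = 95%
 * (bw_ratio ~ 1/0.95), a task with utilization u = 0.25, and a runqueue where
 * Uinact = 0.10 and Uextra = 0.05. Then
 *
 *	u / Umax                ~= 0.263
 *	1 - Uinact - Uextra      = 0.85
 *	u_act = max(0.263, 0.85) = 0.85
 *
 * so 10ms of measured execution consumes only 10ms * 0.85 = 8.5ms of budget,
 * letting the task reclaim bandwidth that would otherwise go unused. In the
 * code all of these values are fixed-point, scaled by 2^BW_SHIFT (and by
 * 2^RATIO_SHIFT for bw_ratio).
 */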
1108
aab03e05
DF
1109/*
1110 * Update the current task's runtime statistics (provided it is still
1111 * a -deadline task and has not been removed from the dl_rq).
1112 */
1113static void update_curr_dl(struct rq *rq)
1114{
1115 struct task_struct *curr = rq->curr;
1116 struct sched_dl_entity *dl_se = &curr->dl;
1117 u64 delta_exec;
1118
1119 if (!dl_task(curr) || !on_dl_rq(dl_se))
1120 return;
1121
1122 /*
1123 * Consumed budget is computed considering the time as
1124 * observed by schedulable tasks (excluding time spent
1125 * in hardirq context, etc.). Deadlines are instead
1126 * computed using hard walltime. This seems to be the more
1127 * natural solution, but the full ramifications of this
1128 * approach need further study.
1129 */
1130 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
48be3a67
PZ
1131 if (unlikely((s64)delta_exec <= 0)) {
1132 if (unlikely(dl_se->dl_yielded))
1133 goto throttle;
734ff2a7 1134 return;
48be3a67 1135 }
aab03e05 1136
58919e83 1137 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
674e7541 1138 cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
594dd290 1139
aab03e05
DF
1140 schedstat_set(curr->se.statistics.exec_max,
1141 max(curr->se.statistics.exec_max, delta_exec));
1142
1143 curr->se.sum_exec_runtime += delta_exec;
1144 account_group_exec_runtime(curr, delta_exec);
1145
1146 curr->se.exec_start = rq_clock_task(rq);
d2cc5ed6 1147 cgroup_account_cputime(curr, delta_exec);
aab03e05 1148
239be4a9
DF
1149 sched_rt_avg_update(rq, delta_exec);
1150
2d4283e9 1151 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
9f0d1a50 1152 delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
48be3a67
PZ
1153 dl_se->runtime -= delta_exec;
1154
1155throttle:
1156 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1019a359 1157 dl_se->dl_throttled = 1;
aab03e05 1158 __dequeue_task_dl(rq, curr, 0);
a649f237 1159 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
aab03e05
DF
1160 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1161
1162 if (!is_leftmost(curr, &rq->dl))
8875125e 1163 resched_curr(rq);
aab03e05 1164 }
1724813d
PZ
1165
1166 /*
1167 * Because -- for now -- we share the rt bandwidth, we need to
1168 * account our runtime there too, otherwise actual rt tasks
1169 * would be able to exceed the shared quota.
1170 *
1171 * Account to the root rt group for now.
1172 *
1173 * The solution we're working towards is having the RT groups scheduled
1174 * using deadline servers -- however there are a few nasties to figure
1175 * out before that can happen.
1176 */
1177 if (rt_bandwidth_enabled()) {
1178 struct rt_rq *rt_rq = &rq->rt;
1179
1180 raw_spin_lock(&rt_rq->rt_runtime_lock);
1724813d
PZ
1181 /*
1182 * We'll let actual RT tasks worry about the overflow here, we
faa59937
JL
1183 * have our own CBS to keep us inline; only account when RT
1184 * bandwidth is relevant.
1724813d 1185 */
faa59937
JL
1186 if (sched_rt_bandwidth_account(rt_rq))
1187 rt_rq->rt_time += delta_exec;
1724813d
PZ
1188 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1189 }
aab03e05
DF
1190}
1191
209a0cbd
LA
1192static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1193{
1194 struct sched_dl_entity *dl_se = container_of(timer,
1195 struct sched_dl_entity,
1196 inactive_timer);
1197 struct task_struct *p = dl_task_of(dl_se);
1198 struct rq_flags rf;
1199 struct rq *rq;
1200
1201 rq = task_rq_lock(p, &rf);
1202
1203 if (!dl_task(p) || p->state == TASK_DEAD) {
387e3130
LA
1204 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1205
209a0cbd
LA
1206 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1207 sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
8fd27231 1208 sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
209a0cbd
LA
1209 dl_se->dl_non_contending = 0;
1210 }
387e3130
LA
1211
1212 raw_spin_lock(&dl_b->lock);
8c0944ce 1213 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
387e3130 1214 raw_spin_unlock(&dl_b->lock);
209a0cbd
LA
1215 __dl_clear_params(p);
1216
1217 goto unlock;
1218 }
1219 if (dl_se->dl_non_contending == 0)
1220 goto unlock;
1221
1222 sched_clock_tick();
1223 update_rq_clock(rq);
1224
1225 sub_running_bw(dl_se->dl_bw, &rq->dl);
1226 dl_se->dl_non_contending = 0;
1227unlock:
1228 task_rq_unlock(rq, p, &rf);
1229 put_task_struct(p);
1230
1231 return HRTIMER_NORESTART;
1232}
1233
1234void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1235{
1236 struct hrtimer *timer = &dl_se->inactive_timer;
1237
1238 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1239 timer->function = inactive_task_timer;
1240}
1241
1baca4ce
JL
1242#ifdef CONFIG_SMP
1243
1baca4ce
JL
1244static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1245{
1246 struct rq *rq = rq_of_dl_rq(dl_rq);
1247
1248 if (dl_rq->earliest_dl.curr == 0 ||
1249 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1baca4ce 1250 dl_rq->earliest_dl.curr = deadline;
d8206bb3 1251 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1baca4ce
JL
1252 }
1253}
1254
1255static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1256{
1257 struct rq *rq = rq_of_dl_rq(dl_rq);
1258
1259 /*
1260 * Since we may have removed our earliest (and/or next earliest)
1261 * task we must recompute them.
1262 */
1263 if (!dl_rq->dl_nr_running) {
1264 dl_rq->earliest_dl.curr = 0;
1265 dl_rq->earliest_dl.next = 0;
d8206bb3 1266 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1baca4ce 1267 } else {
2161573e 1268 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1baca4ce
JL
1269 struct sched_dl_entity *entry;
1270
1271 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1272 dl_rq->earliest_dl.curr = entry->deadline;
d8206bb3 1273 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1baca4ce
JL
1274 }
1275}
1276
1277#else
1278
1279static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1280static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1281
1282#endif /* CONFIG_SMP */
1283
1284static inline
1285void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1286{
1287 int prio = dl_task_of(dl_se)->prio;
1288 u64 deadline = dl_se->deadline;
1289
1290 WARN_ON(!dl_prio(prio));
1291 dl_rq->dl_nr_running++;
72465447 1292 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1293
1294 inc_dl_deadline(dl_rq, deadline);
1295 inc_dl_migration(dl_se, dl_rq);
1296}
1297
1298static inline
1299void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1300{
1301 int prio = dl_task_of(dl_se)->prio;
1302
1303 WARN_ON(!dl_prio(prio));
1304 WARN_ON(!dl_rq->dl_nr_running);
1305 dl_rq->dl_nr_running--;
72465447 1306 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1307
1308 dec_dl_deadline(dl_rq, dl_se->deadline);
1309 dec_dl_migration(dl_se, dl_rq);
1310}
1311
aab03e05
DF
1312static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1313{
1314 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2161573e 1315 struct rb_node **link = &dl_rq->root.rb_root.rb_node;
aab03e05
DF
1316 struct rb_node *parent = NULL;
1317 struct sched_dl_entity *entry;
1318 int leftmost = 1;
1319
1320 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1321
1322 while (*link) {
1323 parent = *link;
1324 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1325 if (dl_time_before(dl_se->deadline, entry->deadline))
1326 link = &parent->rb_left;
1327 else {
1328 link = &parent->rb_right;
1329 leftmost = 0;
1330 }
1331 }
1332
aab03e05 1333 rb_link_node(&dl_se->rb_node, parent, link);
2161573e 1334 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
aab03e05 1335
1baca4ce 1336 inc_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1337}
1338
1339static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1340{
1341 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1342
1343 if (RB_EMPTY_NODE(&dl_se->rb_node))
1344 return;
1345
2161573e 1346 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
aab03e05
DF
1347 RB_CLEAR_NODE(&dl_se->rb_node);
1348
1baca4ce 1349 dec_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1350}
1351
1352static void
2d3d891d
DF
1353enqueue_dl_entity(struct sched_dl_entity *dl_se,
1354 struct sched_dl_entity *pi_se, int flags)
aab03e05
DF
1355{
1356 BUG_ON(on_dl_rq(dl_se));
1357
1358 /*
1359 * If this is a wakeup or a new instance, the scheduling
1360 * parameters of the task might need updating. Otherwise,
1361 * we want a replenishment of its runtime.
1362 */
e36d8677 1363 if (flags & ENQUEUE_WAKEUP) {
8fd27231 1364 task_contending(dl_se, flags);
2d3d891d 1365 update_dl_entity(dl_se, pi_se);
e36d8677 1366 } else if (flags & ENQUEUE_REPLENISH) {
6a503c3b 1367 replenish_dl_entity(dl_se, pi_se);
295d6d5e
LA
1368 } else if ((flags & ENQUEUE_RESTORE) &&
1369 dl_time_before(dl_se->deadline,
1370 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1371 setup_new_dl_entity(dl_se);
e36d8677 1372 }
aab03e05
DF
1373
1374 __enqueue_dl_entity(dl_se);
1375}
1376
1377static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1378{
1379 __dequeue_dl_entity(dl_se);
1380}
1381
1382static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1383{
2d3d891d
DF
1384 struct task_struct *pi_task = rt_mutex_get_top_task(p);
1385 struct sched_dl_entity *pi_se = &p->dl;
1386
1387 /*
193be41e
JF
1388 * Use the scheduling parameters of the top pi-waiter task if:
1389 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1390 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1391 * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1392 * boosted due to a SCHED_DEADLINE pi-waiter).
1393 * Otherwise we keep our runtime and deadline.
2d3d891d 1394 */
193be41e 1395 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
2d3d891d 1396 pi_se = &pi_task->dl;
64be6f1f
JL
1397 } else if (!dl_prio(p->normal_prio)) {
1398 /*
1399 * Special case in which we have a !SCHED_DEADLINE task
193be41e 1400 * that is going to be deboosted, but exceeds its
64be6f1f
JL
1401 * runtime while doing so. No point in replenishing
1402 * it, as it's going to return to its original
1403 * scheduling class after this.
1404 */
1405 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1406 return;
1407 }
2d3d891d 1408
df8eac8c
DBO
1409 /*
1410 * Check if a constrained deadline task was activated
1411 * after the deadline but before the next period.
1412 * If that is the case, the task will be throttled and
1413 * the replenishment timer will be set to the next period.
1414 */
3effcb42 1415 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
df8eac8c
DBO
1416 dl_check_constrained_dl(&p->dl);
1417
8fd27231
LA
1418 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1419 add_rq_bw(p->dl.dl_bw, &rq->dl);
e36d8677 1420 add_running_bw(p->dl.dl_bw, &rq->dl);
8fd27231 1421 }
e36d8677 1422
aab03e05 1423 /*
e36d8677 1424 * If p is throttled, we do not enqueue it. In fact, if it exhausted
aab03e05
DF
1425 * its budget it needs a replenishment and, since it now is on
1426 * its rq, the bandwidth timer callback (which clearly has not
1427 * run yet) will take care of this.
e36d8677
LA
1428 * However, the active utilization does not depend on the fact
1429 * that the task is on the runqueue or not (but depends on the
1430 * task's state - in GRUB parlance, "inactive" vs "active contending").
1431 * In other words, even if a task is throttled its utilization must
1432 * be counted in the active utilization; hence, we need to call
1433 * add_running_bw().
aab03e05 1434 */
e36d8677 1435 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
209a0cbd 1436 if (flags & ENQUEUE_WAKEUP)
8fd27231 1437 task_contending(&p->dl, flags);
209a0cbd 1438
aab03e05 1439 return;
e36d8677 1440 }
aab03e05 1441
2d3d891d 1442 enqueue_dl_entity(&p->dl, pi_se, flags);
1baca4ce 1443
4b53a341 1444 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1baca4ce 1445 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
1446}
1447
1448static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1449{
1450 dequeue_dl_entity(&p->dl);
1baca4ce 1451 dequeue_pushable_dl_task(rq, p);
aab03e05
DF
1452}
1453
1454static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1455{
1456 update_curr_dl(rq);
1457 __dequeue_task_dl(rq, p, flags);
e36d8677 1458
8fd27231 1459 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
e36d8677 1460 sub_running_bw(p->dl.dl_bw, &rq->dl);
8fd27231
LA
1461 sub_rq_bw(p->dl.dl_bw, &rq->dl);
1462 }
e36d8677
LA
1463
1464 /*
209a0cbd
LA
1465 * This check allows us to start the inactive timer (or to immediately
1466 * decrease the active utilization, if needed) in two cases:
e36d8677
LA
1467 * when the task blocks and when it is terminating
1468 * (p->state == TASK_DEAD). We can handle the two cases in the same
1469 * way, because from GRUB's point of view the same thing is happening
1470 * (the task moves from "active contending" to "active non contending"
1471 * or "inactive")
1472 */
1473 if (flags & DEQUEUE_SLEEP)
209a0cbd 1474 task_non_contending(p);
aab03e05
DF
1475}
1476
1477/*
1478 * Yield task semantic for -deadline tasks is:
1479 *
1480 * get off from the CPU until our next instance, with
1481 * a new runtime. This is of little use now, since we
1482 * don't have a bandwidth reclaiming mechanism. Anyway,
1483 * bandwidth reclaiming is planned for the future, and
1484 * yield_task_dl will indicate that some spare budget
1486 * is available for other task instances to use.
1486 */
1487static void yield_task_dl(struct rq *rq)
1488{
aab03e05
DF
1489 /*
1490 * We make the task go to sleep until its current deadline by
1491 * forcing its runtime to zero. This way, update_curr_dl() stops
1492 * it and the bandwidth timer will wake it up and will give it
5bfd126e 1493 * new scheduling parameters (thanks to dl_yielded=1).
aab03e05 1494 */
48be3a67
PZ
1495 rq->curr->dl.dl_yielded = 1;
1496
6f1607f1 1497 update_rq_clock(rq);
aab03e05 1498 update_curr_dl(rq);
44fb085b
WL
1499 /*
1500 * Tell update_rq_clock() that we've just updated,
1501 * so we don't do microscopic update in schedule()
1502 * and double the fastpath cost.
1503 */
1504 rq_clock_skip_update(rq, true);
aab03e05
DF
1505}
1506
1baca4ce
JL
1507#ifdef CONFIG_SMP
1508
1509static int find_later_rq(struct task_struct *task);
1baca4ce
JL
1510
1511static int
1512select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1513{
1514 struct task_struct *curr;
1515 struct rq *rq;
1516
1d7e974c 1517 if (sd_flag != SD_BALANCE_WAKE)
1baca4ce
JL
1518 goto out;
1519
1520 rq = cpu_rq(cpu);
1521
1522 rcu_read_lock();
316c1608 1523 curr = READ_ONCE(rq->curr); /* unlocked access */
1baca4ce
JL
1524
1525 /*
1526 * If we are dealing with a -deadline task, we must
1527 * decide where to wake it up.
1528 * If it has a later deadline and the current task
1529 * on this rq can't move (provided the waking task
1530 * can!) we prefer to send it somewhere else. On the
1531 * other hand, if it has a shorter deadline, we
1532 * try to make it stay here, as it might be important.
1533 */
1534 if (unlikely(dl_task(curr)) &&
4b53a341 1535 (curr->nr_cpus_allowed < 2 ||
1baca4ce 1536 !dl_entity_preempt(&p->dl, &curr->dl)) &&
4b53a341 1537 (p->nr_cpus_allowed > 1)) {
1baca4ce
JL
1538 int target = find_later_rq(p);
1539
9d514262 1540 if (target != -1 &&
5aa50507
LA
1541 (dl_time_before(p->dl.deadline,
1542 cpu_rq(target)->dl.earliest_dl.curr) ||
1543 (cpu_rq(target)->dl.dl_nr_running == 0)))
1baca4ce
JL
1544 cpu = target;
1545 }
1546 rcu_read_unlock();
1547
1548out:
1549 return cpu;
1550}
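/*
 * Illustrative example (not from the original file): suppose p wakes on a CPU
 * whose rq->curr is a -deadline task pinned there (nr_cpus_allowed == 1) with
 * deadline 10ms, while p has deadline 15ms and may run on several CPUs. Since
 * p would not preempt curr and curr cannot be moved, find_later_rq() is asked
 * for a CPU whose earliest deadline is later than 15ms (or which has no
 * -deadline tasks at all), and p is woken there instead.
 */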
1551
209a0cbd
LA
1552static void migrate_task_rq_dl(struct task_struct *p)
1553{
1554 struct rq *rq;
1555
8fd27231 1556 if (p->state != TASK_WAKING)
209a0cbd
LA
1557 return;
1558
1559 rq = task_rq(p);
1560 /*
1561 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1562 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1563 * rq->lock is not... So, lock it
1564 */
1565 raw_spin_lock(&rq->lock);
8fd27231
LA
1566 if (p->dl.dl_non_contending) {
1567 sub_running_bw(p->dl.dl_bw, &rq->dl);
1568 p->dl.dl_non_contending = 0;
1569 /*
1570 * If the timer handler is currently running and the
1571 * timer cannot be cancelled, inactive_task_timer()
1572 * will see that dl_non_contending is not set, and
1573 * will not touch the rq's active utilization,
1574 * so we are still safe.
1575 */
1576 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1577 put_task_struct(p);
1578 }
1579 sub_rq_bw(p->dl.dl_bw, &rq->dl);
209a0cbd
LA
1580 raw_spin_unlock(&rq->lock);
1581}
1582
1baca4ce
JL
1583static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1584{
1585 /*
1586 * Current can't be migrated, useless to reschedule,
1587 * let's hope p can move out.
1588 */
4b53a341 1589 if (rq->curr->nr_cpus_allowed == 1 ||
3261ed0b 1590 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1baca4ce
JL
1591 return;
1592
1593 /*
1594 * p is migratable, so let's not schedule it and
1595 * see if it is pushed or pulled somewhere else.
1596 */
4b53a341 1597 if (p->nr_cpus_allowed != 1 &&
3261ed0b 1598 cpudl_find(&rq->rd->cpudl, p, NULL))
1baca4ce
JL
1599 return;
1600
8875125e 1601 resched_curr(rq);
1baca4ce
JL
1602}
1603
1604#endif /* CONFIG_SMP */
1605
aab03e05
DF
1606/*
1607 * Only called when both the current and waking task are -deadline
1608 * tasks.
1609 */
1610static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1611 int flags)
1612{
1baca4ce 1613 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
8875125e 1614 resched_curr(rq);
1baca4ce
JL
1615 return;
1616 }
1617
1618#ifdef CONFIG_SMP
1619 /*
1620 * In the unlikely case current and p have the same deadline
1621 * let us try to decide what's the best thing to do...
1622 */
332ac17e
DF
1623 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1624 !test_tsk_need_resched(rq->curr))
1baca4ce
JL
1625 check_preempt_equal_dl(rq, p);
1626#endif /* CONFIG_SMP */
aab03e05
DF
1627}
1628
1629#ifdef CONFIG_SCHED_HRTICK
1630static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1631{
177ef2a6 1632 hrtick_start(rq, p->dl.runtime);
aab03e05 1633}
36ce9881
WL
1634#else /* !CONFIG_SCHED_HRTICK */
1635static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1636{
1637}
aab03e05
DF
1638#endif
1639
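/*
 * Return the leftmost (earliest deadline) entity on the dl runqueue,
 * or NULL if the tree is empty.
 */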
1640static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1641 struct dl_rq *dl_rq)
1642{
2161573e 1643 struct rb_node *left = rb_first_cached(&dl_rq->root);
aab03e05
DF
1644
1645 if (!left)
1646 return NULL;
1647
1648 return rb_entry(left, struct sched_dl_entity, rb_node);
1649}
1650
181a80d1 1651static struct task_struct *
d8ac8971 1652pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
aab03e05
DF
1653{
1654 struct sched_dl_entity *dl_se;
1655 struct task_struct *p;
1656 struct dl_rq *dl_rq;
1657
1658 dl_rq = &rq->dl;
1659
a1d9a323 1660 if (need_pull_dl_task(rq, prev)) {
cbce1a68
PZ
1661 /*
1662 * This is OK, because current is on_cpu, which avoids it being
1663 * picked for load-balance; preemption/IRQs are still disabled,
1664 * avoiding further scheduler activity on it, and we're being
1665 * very careful to re-start the picking loop.
1666 */
d8ac8971 1667 rq_unpin_lock(rq, rf);
38033c37 1668 pull_dl_task(rq);
d8ac8971 1669 rq_repin_lock(rq, rf);
a1d9a323 1670 /*
176cedc4 1671 * pull_dl_task() can drop (and re-acquire) rq->lock; this
a1d9a323
KT
1672 * means a stop task can slip in, in which case we need to
1673 * re-start task selection.
1674 */
da0c1e65 1675 if (rq->stop && task_on_rq_queued(rq->stop))
a1d9a323
KT
1676 return RETRY_TASK;
1677 }
1678
734ff2a7
KT
1679 /*
1680 * When prev is DL, we may throttle it in put_prev_task().
1681 * So, we update time before we check for dl_nr_running.
1682 */
1683 if (prev->sched_class == &dl_sched_class)
1684 update_curr_dl(rq);
38033c37 1685
aab03e05
DF
1686 if (unlikely(!dl_rq->dl_nr_running))
1687 return NULL;
1688
3f1d2a31 1689 put_prev_task(rq, prev);
606dba2e 1690
aab03e05
DF
1691 dl_se = pick_next_dl_entity(rq, dl_rq);
1692 BUG_ON(!dl_se);
1693
1694 p = dl_task_of(dl_se);
1695 p->se.exec_start = rq_clock_task(rq);
1baca4ce
JL
1696
1697 /* Running task will never be pushed. */
71362650 1698 dequeue_pushable_dl_task(rq, p);
1baca4ce 1699
aab03e05
DF
1700 if (hrtick_enabled(rq))
1701 start_hrtick_dl(rq, p);
1baca4ce 1702
e3fca9e7 1703 queue_push_tasks(rq);
1baca4ce 1704
aab03e05
DF
1705 return p;
1706}
1707
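/*
 * Update the outgoing task's runtime accounting and, if it is still
 * queued and can migrate, make it a push candidate again.
 */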
1708static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1709{
1710 update_curr_dl(rq);
1baca4ce 1711
4b53a341 1712 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1baca4ce 1713 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
1714}
1715
1716static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1717{
1718 update_curr_dl(rq);
1719
a7bebf48
WL
1720 /*
1721 * Even when we have runtime, update_curr_dl() might have resulted in us
1722 * not being the leftmost task anymore. In that case NEED_RESCHED will
1723 * be set and schedule() will start a new hrtick for the next task.
1724 */
1725 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1726 is_leftmost(p, &rq->dl))
aab03e05 1727 start_hrtick_dl(rq, p);
aab03e05
DF
1728}
1729
1730static void task_fork_dl(struct task_struct *p)
1731{
1732 /*
1733 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1734 * sched_fork()
1735 */
1736}
1737
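/*
 * @rq->curr has just become a -deadline task without going through
 * pick_next_task_dl(): refresh its exec_start and take it off the
 * pushable tree.
 */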
aab03e05
DF
1738static void set_curr_task_dl(struct rq *rq)
1739{
1740 struct task_struct *p = rq->curr;
1741
1742 p->se.exec_start = rq_clock_task(rq);
1baca4ce
JL
1743
1744 /* You can't push away the running task */
1745 dequeue_pushable_dl_task(rq, p);
1746}
1747
1748#ifdef CONFIG_SMP
1749
1750/* Only try algorithms three times */
1751#define DL_MAX_TRIES 3
1752
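/*
 * Return 1 if @p is not currently running and is allowed to run on @cpu.
 */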
1753static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1754{
1755 if (!task_running(rq, p) &&
0c98d344 1756 cpumask_test_cpu(cpu, &p->cpus_allowed))
1baca4ce 1757 return 1;
1baca4ce
JL
1758 return 0;
1759}
1760
8b5e770e
WL
1761/*
1762 * Return the earliest pushable task of @rq which is suitable to be executed
1763 * on @cpu, NULL otherwise:
1764 */
1765static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1766{
2161573e 1767 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
8b5e770e
WL
1768 struct task_struct *p = NULL;
1769
1770 if (!has_pushable_dl_tasks(rq))
1771 return NULL;
1772
1773next_node:
1774 if (next_node) {
1775 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1776
1777 if (pick_dl_task(rq, p, cpu))
1778 return p;
1779
1780 next_node = rb_next(next_node);
1781 goto next_node;
1782 }
1783
1784 return NULL;
1785}
1786
1baca4ce
JL
1787static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1788
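/*
 * Find a CPU whose runqueue has no -deadline tasks or whose earliest
 * deadline is later than @task's, preferring the task's previous CPU
 * and CPUs topologically close to it; return -1 if none is found.
 */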
1789static int find_later_rq(struct task_struct *task)
1790{
1791 struct sched_domain *sd;
4ba29684 1792 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1baca4ce 1793 int this_cpu = smp_processor_id();
b18c3ca1 1794 int cpu = task_cpu(task);
1baca4ce
JL
1795
1796 /* Make sure the mask is initialized first */
1797 if (unlikely(!later_mask))
1798 return -1;
1799
4b53a341 1800 if (task->nr_cpus_allowed == 1)
1baca4ce
JL
1801 return -1;
1802
91ec6778
JL
1803 /*
1804 * We have to consider system topology and task affinity
1805 * first, then we can look for a suitable cpu.
1806 */
3261ed0b 1807 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1baca4ce
JL
1808 return -1;
1809
1810 /*
b18c3ca1
BP
1811 * If we are here, some targets have been found, including
1812 * the most suitable which is, among the runqueues where the
1813 * current tasks have later deadlines than the task's one, the
1814 * rq with the latest possible one.
1baca4ce
JL
1815 *
1816 * Now we check how well this matches with task's
1817 * affinity and system topology.
1818 *
1819 * The last cpu where the task ran is our first
1820 * guess, since it is most likely cache-hot there.
1821 */
1822 if (cpumask_test_cpu(cpu, later_mask))
1823 return cpu;
1824 /*
1825 * Check if this_cpu is to be skipped (i.e., it is
1826 * not in the mask) or not.
1827 */
1828 if (!cpumask_test_cpu(this_cpu, later_mask))
1829 this_cpu = -1;
1830
1831 rcu_read_lock();
1832 for_each_domain(cpu, sd) {
1833 if (sd->flags & SD_WAKE_AFFINE) {
b18c3ca1 1834 int best_cpu;
1baca4ce
JL
1835
1836 /*
1837 * If possible, preempting this_cpu is
1838 * cheaper than migrating.
1839 */
1840 if (this_cpu != -1 &&
1841 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1842 rcu_read_unlock();
1843 return this_cpu;
1844 }
1845
b18c3ca1
BP
1846 best_cpu = cpumask_first_and(later_mask,
1847 sched_domain_span(sd));
1baca4ce 1848 /*
b18c3ca1
BP
1849 * Last chance: if a cpu that is in both later_mask
1850 * and the current sd span is valid, that becomes our
1851 * choice. Of course, the latest possible cpu is
1852 * already under consideration through later_mask.
1baca4ce 1853 */
b18c3ca1 1854 if (best_cpu < nr_cpu_ids) {
1baca4ce
JL
1855 rcu_read_unlock();
1856 return best_cpu;
1857 }
1858 }
1859 }
1860 rcu_read_unlock();
1861
1862 /*
1863 * At this point, all our guesses failed, we just return
1864 * 'something', and let the caller sort the things out.
1865 */
1866 if (this_cpu != -1)
1867 return this_cpu;
1868
1869 cpu = cpumask_any(later_mask);
1870 if (cpu < nr_cpu_ids)
1871 return cpu;
1872
1873 return -1;
1874}
1875
1876/* Locks the rq it finds */
1877static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1878{
1879 struct rq *later_rq = NULL;
1880 int tries;
1881 int cpu;
1882
1883 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1884 cpu = find_later_rq(task);
1885
1886 if ((cpu == -1) || (cpu == rq->cpu))
1887 break;
1888
1889 later_rq = cpu_rq(cpu);
1890
5aa50507
LA
1891 if (later_rq->dl.dl_nr_running &&
1892 !dl_time_before(task->dl.deadline,
9d514262
WL
1893 later_rq->dl.earliest_dl.curr)) {
1894 /*
1895 * Target rq has tasks of equal or earlier deadline,
1896 * retrying does not release any lock and is unlikely
1897 * to yield a different result.
1898 */
1899 later_rq = NULL;
1900 break;
1901 }
1902
1baca4ce
JL
1903 /* Retry if something changed. */
1904 if (double_lock_balance(rq, later_rq)) {
1905 if (unlikely(task_rq(task) != rq ||
0c98d344 1906 !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
da0c1e65 1907 task_running(rq, task) ||
13b5ab02 1908 !dl_task(task) ||
da0c1e65 1909 !task_on_rq_queued(task))) {
1baca4ce
JL
1910 double_unlock_balance(rq, later_rq);
1911 later_rq = NULL;
1912 break;
1913 }
1914 }
1915
1916 /*
1917 * If the rq we found has no -deadline task, or
1918 * its earliest one has a later deadline than our
1919 * task, the rq is a good one.
1920 */
1921 if (!later_rq->dl.dl_nr_running ||
1922 dl_time_before(task->dl.deadline,
1923 later_rq->dl.earliest_dl.curr))
1924 break;
1925
1926 /* Otherwise we try again. */
1927 double_unlock_balance(rq, later_rq);
1928 later_rq = NULL;
1929 }
1930
1931 return later_rq;
1932}
1933
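/*
 * Return the earliest-deadline task queued on @rq's pushable tree, or
 * NULL if there is nothing to push.
 */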
1934static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1935{
1936 struct task_struct *p;
1937
1938 if (!has_pushable_dl_tasks(rq))
1939 return NULL;
1940
2161573e 1941 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
1baca4ce
JL
1942 struct task_struct, pushable_dl_tasks);
1943
1944 BUG_ON(rq->cpu != task_cpu(p));
1945 BUG_ON(task_current(rq, p));
4b53a341 1946 BUG_ON(p->nr_cpus_allowed <= 1);
1baca4ce 1947
da0c1e65 1948 BUG_ON(!task_on_rq_queued(p));
1baca4ce
JL
1949 BUG_ON(!dl_task(p));
1950
1951 return p;
1952}
1953
1954/*
1955 * See if the non-running -deadline tasks on this rq
1956 * can be sent to some other CPU where they can preempt
1957 * and start executing.
1958 */
1959static int push_dl_task(struct rq *rq)
1960{
1961 struct task_struct *next_task;
1962 struct rq *later_rq;
c51b8ab5 1963 int ret = 0;
1baca4ce
JL
1964
1965 if (!rq->dl.overloaded)
1966 return 0;
1967
1968 next_task = pick_next_pushable_dl_task(rq);
1969 if (!next_task)
1970 return 0;
1971
1972retry:
1973 if (unlikely(next_task == rq->curr)) {
1974 WARN_ON(1);
1975 return 0;
1976 }
1977
1978 /*
1979 * If next_task preempts rq->curr, and rq->curr
1980 * can move away, it makes sense to just reschedule
1981 * without going further in pushing next_task.
1982 */
1983 if (dl_task(rq->curr) &&
1984 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
4b53a341 1985 rq->curr->nr_cpus_allowed > 1) {
8875125e 1986 resched_curr(rq);
1baca4ce
JL
1987 return 0;
1988 }
1989
1990 /* We might release rq lock */
1991 get_task_struct(next_task);
1992
1993 /* Will lock the rq it'll find */
1994 later_rq = find_lock_later_rq(next_task, rq);
1995 if (!later_rq) {
1996 struct task_struct *task;
1997
1998 /*
1999 * We must check all this again, since
2000 * find_lock_later_rq releases rq->lock and it is
2001 * then possible that next_task has migrated.
2002 */
2003 task = pick_next_pushable_dl_task(rq);
a776b968 2004 if (task == next_task) {
1baca4ce
JL
2005 /*
2006 * The task is still there. We don't try
2007 * again, some other cpu will pull it when ready.
2008 */
1baca4ce
JL
2009 goto out;
2010 }
2011
2012 if (!task)
2013 /* No more tasks */
2014 goto out;
2015
2016 put_task_struct(next_task);
2017 next_task = task;
2018 goto retry;
2019 }
2020
2021 deactivate_task(rq, next_task, 0);
e36d8677 2022 sub_running_bw(next_task->dl.dl_bw, &rq->dl);
8fd27231 2023 sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
1baca4ce 2024 set_task_cpu(next_task, later_rq->cpu);
8fd27231 2025 add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
e36d8677 2026 add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
1baca4ce 2027 activate_task(later_rq, next_task, 0);
c51b8ab5 2028 ret = 1;
1baca4ce 2029
8875125e 2030 resched_curr(later_rq);
1baca4ce
JL
2031
2032 double_unlock_balance(rq, later_rq);
2033
2034out:
2035 put_task_struct(next_task);
2036
c51b8ab5 2037 return ret;
1baca4ce
JL
2038}
2039
2040static void push_dl_tasks(struct rq *rq)
2041{
4ffa08ed 2042 /* push_dl_task() will return true if it moved a -deadline task */
1baca4ce
JL
2043 while (push_dl_task(rq))
2044 ;
aab03e05
DF
2045}
2046
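/*
 * Scan the overloaded runqueues in this root domain and pull over the
 * earliest-deadline pushable task that would preempt what this_rq is
 * currently running (or has queued), if any.
 */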
0ea60c20 2047static void pull_dl_task(struct rq *this_rq)
1baca4ce 2048{
0ea60c20 2049 int this_cpu = this_rq->cpu, cpu;
1baca4ce 2050 struct task_struct *p;
0ea60c20 2051 bool resched = false;
1baca4ce
JL
2052 struct rq *src_rq;
2053 u64 dmin = LONG_MAX;
2054
2055 if (likely(!dl_overloaded(this_rq)))
0ea60c20 2056 return;
1baca4ce
JL
2057
2058 /*
2059 * Match the barrier from dl_set_overloaded; this guarantees that if we
2060 * see overloaded we must also see the dlo_mask bit.
2061 */
2062 smp_rmb();
2063
2064 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2065 if (this_cpu == cpu)
2066 continue;
2067
2068 src_rq = cpu_rq(cpu);
2069
2070 /*
2071 * It looks racy, and it is! However, as in sched_rt.c,
2072 * we are fine with this.
2073 */
2074 if (this_rq->dl.dl_nr_running &&
2075 dl_time_before(this_rq->dl.earliest_dl.curr,
2076 src_rq->dl.earliest_dl.next))
2077 continue;
2078
2079 /* Might drop this_rq->lock */
2080 double_lock_balance(this_rq, src_rq);
2081
2082 /*
2083 * If there are no more pullable tasks on the
2084 * rq, we're done with it.
2085 */
2086 if (src_rq->dl.dl_nr_running <= 1)
2087 goto skip;
2088
8b5e770e 2089 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1baca4ce
JL
2090
2091 /*
2092 * We found a task to be pulled if:
2093 * - it preempts our current (if there's one),
2094 * - it will preempt the last one we pulled (if any).
2095 */
2096 if (p && dl_time_before(p->dl.deadline, dmin) &&
2097 (!this_rq->dl.dl_nr_running ||
2098 dl_time_before(p->dl.deadline,
2099 this_rq->dl.earliest_dl.curr))) {
2100 WARN_ON(p == src_rq->curr);
da0c1e65 2101 WARN_ON(!task_on_rq_queued(p));
1baca4ce
JL
2102
2103 /*
2104 * Then we pull iff p has actually an earlier
2105 * deadline than the current task of its runqueue.
2106 */
2107 if (dl_time_before(p->dl.deadline,
2108 src_rq->curr->dl.deadline))
2109 goto skip;
2110
0ea60c20 2111 resched = true;
1baca4ce
JL
2112
2113 deactivate_task(src_rq, p, 0);
e36d8677 2114 sub_running_bw(p->dl.dl_bw, &src_rq->dl);
8fd27231 2115 sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
1baca4ce 2116 set_task_cpu(p, this_cpu);
8fd27231 2117 add_rq_bw(p->dl.dl_bw, &this_rq->dl);
e36d8677 2118 add_running_bw(p->dl.dl_bw, &this_rq->dl);
1baca4ce
JL
2119 activate_task(this_rq, p, 0);
2120 dmin = p->dl.deadline;
2121
2122 /* Is there any other task even earlier? */
2123 }
2124skip:
2125 double_unlock_balance(this_rq, src_rq);
2126 }
2127
0ea60c20
PZ
2128 if (resched)
2129 resched_curr(this_rq);
1baca4ce
JL
2130}
2131
2132/*
2133 * Since the task is not running and a reschedule is not going to happen
2134 * anytime soon on its runqueue, we try pushing it away now.
2135 */
2136static void task_woken_dl(struct rq *rq, struct task_struct *p)
2137{
2138 if (!task_running(rq, p) &&
2139 !test_tsk_need_resched(rq->curr) &&
4b53a341 2140 p->nr_cpus_allowed > 1 &&
1baca4ce 2141 dl_task(rq->curr) &&
4b53a341 2142 (rq->curr->nr_cpus_allowed < 2 ||
6b0a563f 2143 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1baca4ce
JL
2144 push_dl_tasks(rq);
2145 }
2146}
2147
2148static void set_cpus_allowed_dl(struct task_struct *p,
2149 const struct cpumask *new_mask)
2150{
7f51412a 2151 struct root_domain *src_rd;
6c37067e 2152 struct rq *rq;
1baca4ce
JL
2153
2154 BUG_ON(!dl_task(p));
2155
7f51412a
JL
2156 rq = task_rq(p);
2157 src_rd = rq->rd;
2158 /*
2159 * Migrating a SCHED_DEADLINE task between exclusive
2160 * cpusets (different root_domains) entails a bandwidth
2161 * update. We already made space for us in the destination
2162 * domain (see cpuset_can_attach()).
2163 */
2164 if (!cpumask_intersects(src_rd->span, new_mask)) {
2165 struct dl_bw *src_dl_b;
2166
2167 src_dl_b = dl_bw_of(cpu_of(rq));
2168 /*
2169 * We now free resources of the root_domain we are migrating
2170 * off. In the worst case, sched_setattr() may temporarily fail
2171 * until we complete the update.
2172 */
2173 raw_spin_lock(&src_dl_b->lock);
8c0944ce 2174 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
7f51412a
JL
2175 raw_spin_unlock(&src_dl_b->lock);
2176 }
2177
6c37067e 2178 set_cpus_allowed_common(p, new_mask);
1baca4ce
JL
2179}
2180
2181/* Assumes rq->lock is held */
2182static void rq_online_dl(struct rq *rq)
2183{
2184 if (rq->dl.overloaded)
2185 dl_set_overload(rq);
6bfd6d72 2186
16b26943 2187 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
6bfd6d72 2188 if (rq->dl.dl_nr_running > 0)
d8206bb3 2189 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1baca4ce
JL
2190}
2191
2192/* Assumes rq->lock is held */
2193static void rq_offline_dl(struct rq *rq)
2194{
2195 if (rq->dl.overloaded)
2196 dl_clear_overload(rq);
6bfd6d72 2197
d8206bb3 2198 cpudl_clear(&rq->rd->cpudl, rq->cpu);
16b26943 2199 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1baca4ce
JL
2200}
2201
a6c0e746 2202void __init init_sched_dl_class(void)
1baca4ce
JL
2203{
2204 unsigned int i;
2205
2206 for_each_possible_cpu(i)
2207 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2208 GFP_KERNEL, cpu_to_node(i));
2209}
2210
2211#endif /* CONFIG_SMP */
2212
aab03e05
DF
2213static void switched_from_dl(struct rq *rq, struct task_struct *p)
2214{
a649f237 2215 /*
209a0cbd
LA
2216 * task_non_contending() can start the "inactive timer" (if the 0-lag
2217 * time is in the future). If the task switches back to dl before
2218 * the "inactive timer" fires, it can continue to consume its current
2219 * runtime using its current deadline. If it stays outside of
2220 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2221 * will reset the task parameters.
a649f237 2222 */
209a0cbd
LA
2223 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2224 task_non_contending(p);
2225
8fd27231
LA
2226 if (!task_on_rq_queued(p))
2227 sub_rq_bw(p->dl.dl_bw, &rq->dl);
2228
209a0cbd
LA
2229 /*
2230 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2231 * at the 0-lag time, because the task could have been migrated
2232 * while it was SCHED_OTHER in the meantime.
2233 */
2234 if (p->dl.dl_non_contending)
2235 p->dl.dl_non_contending = 0;
a5e7be3b 2236
1baca4ce
JL
2237 /*
2238 * Since this might be the only -deadline task on the rq,
2239 * this is the right place to try to pull some other one
2240 * from an overloaded cpu, if any.
2241 */
cd660911
WL
2242 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2243 return;
2244
9916e214 2245 queue_pull_task(rq);
aab03e05
DF
2246}
2247
1baca4ce
JL
2248/*
2249 * When switching to -deadline, we may overload the rq, then
2250 * we try to push someone off, if possible.
2251 */
aab03e05
DF
2252static void switched_to_dl(struct rq *rq, struct task_struct *p)
2253{
209a0cbd
LA
2254 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2255 put_task_struct(p);
98b0a857
JL
2256
2257 /* If p is not queued we will update its parameters at next wakeup. */
8fd27231
LA
2258 if (!task_on_rq_queued(p)) {
2259 add_rq_bw(p->dl.dl_bw, &rq->dl);
98b0a857 2260
8fd27231
LA
2261 return;
2262 }
72f9f3fd 2263
98b0a857 2264 if (rq->curr != p) {
1baca4ce 2265#ifdef CONFIG_SMP
4b53a341 2266 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
9916e214 2267 queue_push_tasks(rq);
619bd4a7 2268#endif
9916e214
PZ
2269 if (dl_task(rq->curr))
2270 check_preempt_curr_dl(rq, p, 0);
2271 else
2272 resched_curr(rq);
aab03e05
DF
2273 }
2274}
2275
1baca4ce
JL
2276/*
2277 * If the scheduling parameters of a -deadline task changed,
2278 * a push or pull operation might be needed.
2279 */
aab03e05
DF
2280static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2281 int oldprio)
2282{
da0c1e65 2283 if (task_on_rq_queued(p) || rq->curr == p) {
aab03e05 2284#ifdef CONFIG_SMP
1baca4ce
JL
2285 /*
2286 * This might be too much, but unfortunately
2287 * we don't have the old deadline value, and
2288 * we can't argue if the task is increasing
2289 * or lowering its prio, so...
2290 */
2291 if (!rq->dl.overloaded)
9916e214 2292 queue_pull_task(rq);
1baca4ce
JL
2293
2294 /*
2295 * If we now have an earlier deadline task than p,
2296 * then reschedule, provided p is still on this
2297 * runqueue.
2298 */
9916e214 2299 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
8875125e 2300 resched_curr(rq);
1baca4ce
JL
2301#else
2302 /*
2303 * Again, we don't know if p has an earlier
2304 * or later deadline, so let's blindly set a
2305 * (maybe not needed) rescheduling point.
2306 */
8875125e 2307 resched_curr(rq);
1baca4ce 2308#endif /* CONFIG_SMP */
801ccdbf 2309 }
aab03e05 2310}
aab03e05
DF
2311
2312const struct sched_class dl_sched_class = {
2313 .next = &rt_sched_class,
2314 .enqueue_task = enqueue_task_dl,
2315 .dequeue_task = dequeue_task_dl,
2316 .yield_task = yield_task_dl,
2317
2318 .check_preempt_curr = check_preempt_curr_dl,
2319
2320 .pick_next_task = pick_next_task_dl,
2321 .put_prev_task = put_prev_task_dl,
2322
2323#ifdef CONFIG_SMP
2324 .select_task_rq = select_task_rq_dl,
209a0cbd 2325 .migrate_task_rq = migrate_task_rq_dl,
1baca4ce
JL
2326 .set_cpus_allowed = set_cpus_allowed_dl,
2327 .rq_online = rq_online_dl,
2328 .rq_offline = rq_offline_dl,
1baca4ce 2329 .task_woken = task_woken_dl,
aab03e05
DF
2330#endif
2331
2332 .set_curr_task = set_curr_task_dl,
2333 .task_tick = task_tick_dl,
2334 .task_fork = task_fork_dl,
aab03e05
DF
2335
2336 .prio_changed = prio_changed_dl,
2337 .switched_from = switched_from_dl,
2338 .switched_to = switched_to_dl,
6e998916
SG
2339
2340 .update_curr = update_curr_dl,
aab03e05 2341};
acb32132 2342
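/*
 * Validate a change of the global RT bandwidth sysctls: the new
 * bandwidth must not be smaller than what -deadline tasks have already
 * reserved in any root domain.
 */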
06a76fe0
NP
2343int sched_dl_global_validate(void)
2344{
2345 u64 runtime = global_rt_runtime();
2346 u64 period = global_rt_period();
2347 u64 new_bw = to_ratio(period, runtime);
2348 struct dl_bw *dl_b;
2349 int cpu, ret = 0;
2350 unsigned long flags;
2351
2352 /*
2353 * Here we want to check the bandwidth not being set to some
2354 * value smaller than the currently allocated bandwidth in
2355 * any of the root_domains.
2356 *
2357 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
2358 * cycling on root_domains... Discussion on different/better
2359 * solutions is welcome!
2360 */
2361 for_each_possible_cpu(cpu) {
2362 rcu_read_lock_sched();
2363 dl_b = dl_bw_of(cpu);
2364
2365 raw_spin_lock_irqsave(&dl_b->lock, flags);
2366 if (new_bw < dl_b->total_bw)
2367 ret = -EBUSY;
2368 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2369
2370 rcu_read_unlock_sched();
2371
2372 if (ret)
2373 break;
2374 }
2375
2376 return ret;
2377}
2378
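/*
 * Derive the per-rq bandwidth ratios used by the runtime reclaiming
 * (GRUB) logic from the global RT bandwidth settings.
 */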
2379void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2380{
2381 if (global_rt_runtime() == RUNTIME_INF) {
2382 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2383 dl_rq->extra_bw = 1 << BW_SHIFT;
2384 } else {
2385 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2386 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2387 dl_rq->extra_bw = to_ratio(global_rt_period(),
2388 global_rt_runtime());
2389 }
2390}
2391
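/*
 * Apply new global RT bandwidth settings: update the default -deadline
 * bandwidth, every root domain's dl_bw and every rq's reclaiming ratios.
 */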
2392void sched_dl_do_global(void)
2393{
2394 u64 new_bw = -1;
2395 struct dl_bw *dl_b;
2396 int cpu;
2397 unsigned long flags;
2398
2399 def_dl_bandwidth.dl_period = global_rt_period();
2400 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2401
2402 if (global_rt_runtime() != RUNTIME_INF)
2403 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2404
2405 /*
2406 * FIXME: As above...
2407 */
2408 for_each_possible_cpu(cpu) {
2409 rcu_read_lock_sched();
2410 dl_b = dl_bw_of(cpu);
2411
2412 raw_spin_lock_irqsave(&dl_b->lock, flags);
2413 dl_b->bw = new_bw;
2414 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2415
2416 rcu_read_unlock_sched();
2417 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2418 }
2419}
2420
2421/*
2422 * We must be sure that accepting a new task (or allowing changing the
2423 * parameters of an existing one) is consistent with the bandwidth
2424 * constraints. If yes, this function also accordingly updates the currently
2425 * allocated bandwidth to reflect the new situation.
2426 *
2427 * This function is called while holding p's rq->lock.
2428 */
2429int sched_dl_overflow(struct task_struct *p, int policy,
2430 const struct sched_attr *attr)
2431{
2432 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2433 u64 period = attr->sched_period ?: attr->sched_deadline;
2434 u64 runtime = attr->sched_runtime;
2435 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2436 int cpus, err = -1;
2437
2438 /* !deadline task may carry old deadline bandwidth */
2439 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2440 return 0;
2441
2442 /*
2443 * Whether a task enters, leaves, or stays -deadline but changes its
2444 * parameters, we may need to update the total allocated bandwidth of
2445 * the container accordingly.
2446 */
2447 raw_spin_lock(&dl_b->lock);
2448 cpus = dl_bw_cpus(task_cpu(p));
2449 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2450 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2451 if (hrtimer_active(&p->dl.inactive_timer))
8c0944ce 2452 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2453 __dl_add(dl_b, new_bw, cpus);
2454 err = 0;
2455 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2456 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2457 /*
2458 * XXX this is slightly incorrect: when the task
2459 * utilization decreases, we should delay the total
2460 * utilization change until the task's 0-lag point.
2461 * But this would require to set the task's "inactive
2462 * timer" when the task is not inactive.
2463 */
8c0944ce 2464 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2465 __dl_add(dl_b, new_bw, cpus);
2466 dl_change_utilization(p, new_bw);
2467 err = 0;
2468 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2469 /*
2470 * Do not decrease the total deadline utilization here,
2471 * switched_from_dl() will take care to do it at the correct
2472 * (0-lag) time.
2473 */
2474 err = 0;
2475 }
2476 raw_spin_unlock(&dl_b->lock);
2477
2478 return err;
2479}
2480
2481/*
2482 * This function initializes the sched_dl_entity of a newly becoming
2483 * SCHED_DEADLINE task.
2484 *
2485 * Only the static values are considered here, the actual runtime and the
2486 * absolute deadline will be properly calculated when the task is enqueued
2487 * for the first time with its new policy.
2488 */
2489void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2490{
2491 struct sched_dl_entity *dl_se = &p->dl;
2492
2493 dl_se->dl_runtime = attr->sched_runtime;
2494 dl_se->dl_deadline = attr->sched_deadline;
2495 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2496 dl_se->flags = attr->sched_flags;
2497 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2498 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2499}
2500
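/*
 * Fill @attr with the scheduling parameters currently set for the
 * -deadline task @p (the inverse of __setparam_dl()).
 */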
2501void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2502{
2503 struct sched_dl_entity *dl_se = &p->dl;
2504
2505 attr->sched_priority = p->rt_priority;
2506 attr->sched_runtime = dl_se->dl_runtime;
2507 attr->sched_deadline = dl_se->dl_deadline;
2508 attr->sched_period = dl_se->dl_period;
2509 attr->sched_flags = dl_se->flags;
2510}
2511
2512/*
2513 * This function validates the new parameters of a -deadline task.
2514 * We require the deadline to be non-zero and greater than or equal
2515 * to the runtime, and the period to be either zero or greater than
2516 * or equal to the deadline. Furthermore, we have to be sure that
2517 * user parameters are above the internal resolution of 1us (we
2518 * check sched_runtime only since it is always the smaller one) and
2519 * below 2^63 ns (we have to check both sched_deadline and
2520 * sched_period, as the latter can be zero).
2521 */
2522bool __checkparam_dl(const struct sched_attr *attr)
2523{
2524 /* deadline != 0 */
2525 if (attr->sched_deadline == 0)
2526 return false;
2527
2528 /*
2529 * Since we truncate DL_SCALE bits, make sure we're at least
2530 * that big.
2531 */
2532 if (attr->sched_runtime < (1ULL << DL_SCALE))
2533 return false;
2534
2535 /*
2536 * Since we use the MSB for wrap-around and sign issues, make
2537 * sure it's not set (mind that period can be equal to zero).
2538 */
2539 if (attr->sched_deadline & (1ULL << 63) ||
2540 attr->sched_period & (1ULL << 63))
2541 return false;
2542
2543 /* runtime <= deadline <= period (if period != 0) */
2544 if ((attr->sched_period != 0 &&
2545 attr->sched_period < attr->sched_deadline) ||
2546 attr->sched_deadline < attr->sched_runtime)
2547 return false;
2548
2549 return true;
2550}
2551
2552/*
2553 * This function clears the sched_dl_entity static params.
2554 */
2555void __dl_clear_params(struct task_struct *p)
2556{
2557 struct sched_dl_entity *dl_se = &p->dl;
2558
2559 dl_se->dl_runtime = 0;
2560 dl_se->dl_deadline = 0;
2561 dl_se->dl_period = 0;
2562 dl_se->flags = 0;
2563 dl_se->dl_bw = 0;
2564 dl_se->dl_density = 0;
2565
2566 dl_se->dl_throttled = 0;
2567 dl_se->dl_yielded = 0;
2568 dl_se->dl_non_contending = 0;
2569}
2570
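/*
 * Return true if @attr carries -deadline parameters different from the
 * ones currently set for @p.
 */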
2571bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2572{
2573 struct sched_dl_entity *dl_se = &p->dl;
2574
2575 if (dl_se->dl_runtime != attr->sched_runtime ||
2576 dl_se->dl_deadline != attr->sched_deadline ||
2577 dl_se->dl_period != attr->sched_period ||
2578 dl_se->flags != attr->sched_flags)
2579 return true;
2580
2581 return false;
2582}
2583
2584#ifdef CONFIG_SMP
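/*
 * Check whether the destination cpuset's root domain has enough spare
 * bandwidth to admit @p; reserve it right away if so, since we cannot
 * fail later in the attach path.
 */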
2585int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2586{
2587 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
2588 cs_cpus_allowed);
2589 struct dl_bw *dl_b;
2590 bool overflow;
2591 int cpus, ret;
2592 unsigned long flags;
2593
2594 rcu_read_lock_sched();
2595 dl_b = dl_bw_of(dest_cpu);
2596 raw_spin_lock_irqsave(&dl_b->lock, flags);
2597 cpus = dl_bw_cpus(dest_cpu);
2598 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2599 if (overflow)
2600 ret = -EBUSY;
2601 else {
2602 /*
2603 * We reserve space for this task in the destination
2604 * root_domain, as we can't fail after this point.
2605 * We will free resources in the source root_domain
2606 * later on (see set_cpus_allowed_dl()).
2607 */
2608 __dl_add(dl_b, p->dl.dl_bw, cpus);
2609 ret = 0;
2610 }
2611 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2612 rcu_read_unlock_sched();
2613 return ret;
2614}
2615
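/*
 * Return 1 if the CPUs in @trial still provide enough bandwidth for the
 * -deadline tasks currently admitted in @cur's root domain, 0 otherwise.
 */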
2616int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2617 const struct cpumask *trial)
2618{
2619 int ret = 1, trial_cpus;
2620 struct dl_bw *cur_dl_b;
2621 unsigned long flags;
2622
2623 rcu_read_lock_sched();
2624 cur_dl_b = dl_bw_of(cpumask_any(cur));
2625 trial_cpus = cpumask_weight(trial);
2626
2627 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2628 if (cur_dl_b->bw != -1 &&
2629 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2630 ret = 0;
2631 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2632 rcu_read_unlock_sched();
2633 return ret;
2634}
2635
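/*
 * Return true if the -deadline bandwidth already allocated in @cpu's
 * root domain would overflow the currently available capacity.
 */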
2636bool dl_cpu_busy(unsigned int cpu)
2637{
2638 unsigned long flags;
2639 struct dl_bw *dl_b;
2640 bool overflow;
2641 int cpus;
2642
2643 rcu_read_lock_sched();
2644 dl_b = dl_bw_of(cpu);
2645 raw_spin_lock_irqsave(&dl_b->lock, flags);
2646 cpus = dl_bw_cpus(cpu);
2647 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2648 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2649 rcu_read_unlock_sched();
2650 return overflow;
2651}
2652#endif
2653
acb32132
WL
2654#ifdef CONFIG_SCHED_DEBUG
2655extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2656
2657void print_dl_stats(struct seq_file *m, int cpu)
2658{
2659 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2660}
2661#endif /* CONFIG_SCHED_DEBUG */