/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>
#include <uapi/linux/sched/types.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

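/*
 * dl_se->rb_node is kept cleared (via RB_CLEAR_NODE(), see
 * __dequeue_dl_entity()) whenever the entity is not queued, so a
 * non-empty node is a reliable "on the dl_rq rb-tree" test.
 */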
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif

static inline
void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
}

static inline
void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	add_rq_bw(new_bw, &rq->dl);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |           +----+------+------+
 *          | sub_running_bw |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
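/*
 * Worked example with illustrative numbers (not taken from the code
 * below): a task with dl_runtime = 10ms and dl_period = 100ms that
 * blocks with runtime = 4ms left has
 *
 *	0-lag time = deadline - runtime * dl_period / dl_runtime
 *		   = deadline - 4ms * 100ms / 10ms = deadline - 40ms.
 *
 * If less than 40ms remain to the absolute deadline, the 0-lag time is
 * already in the past and running_bw is decreased immediately;
 * otherwise the inactive timer is armed to fire at that instant.
 */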
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer
	 */
	if (zerolag_time < 0) {
		if (dl_task(p))
			sub_running_bw(dl_se->dl_bw, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(p->dl.dl_bw, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se->dl_bw, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * the active utilization (either when the task blocked
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se->dl_bw, dl_rq);
	}
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

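/*
 * To illustrate the replenishment loop above with made-up numbers:
 * with dl_runtime = 10ms and dl_period = 100ms, an entity that ends up
 * at runtime = -25ms needs three iterations, after which runtime = 5ms
 * and the deadline has been pushed 300ms into the future, preserving
 * the 10% bandwidth cap over the long run.
 */
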
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

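/*
 * Spelling out the cross-multiplication used above: the check
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * is equivalent (all quantities being non-negative) to
 *
 *   dl_deadline * runtime > (deadline - t) * dl_runtime,
 *
 * i.e. left > right in the code, with both sides pre-shifted by
 * DL_SCALE to keep the products within 64 bits.
 */
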
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so that the task does not overrun
 * its density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * This way, the runtime is the maximum the task can use without
 * breaking the density rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}

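/*
 * Numerical sketch (illustrative values): with dl_runtime = 10ms and
 * dl_deadline = 50ms, dl_density encodes 10/50 = 0.2 as a fixed-point
 * fraction scaled by 2^BW_SHIFT. If the task wakes up 20ms before its
 * absolute deadline (laxity = 20ms), the rule above trims the runtime
 * to 0.2 * 20ms = 4ms instead of granting a full replenishment.
 */
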
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function
 * to find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);
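	/*
	 * In other words, the expiry is translated from the rq_clock()
	 * domain into the hrtimer time base: the target instant is the
	 * next period start plus the current offset between the two
	 * clocks, so the timer fires when rq_clock() reaches that
	 * instant even though the bases differ.
	 */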

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path, it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than
	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
	 * leading to wrong results)
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}

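/*
 * Worked example with illustrative numbers: take Umax = 0.5 (so
 * bw_ratio encodes 2), a task with u = 0.25, u_inact = 0.1 and
 * extra_bw = 0.2, all scaled by 2^BW_SHIFT. Then u_act_min = 0.5 and
 * 1 - u_inact - extra_bw = 0.7, so the else branch above picks
 * u_act = 0.7 and a tick of delta runtime is charged as 0.7 * delta:
 * the task burns its budget more slowly, reclaiming unused bandwidth.
 */
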
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, SCHED_CPUFREQ_DL);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
		delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
	dl_se->runtime -= delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there are a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	if (!dl_task(p) || p->state == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	sub_running_bw(dl_se->dl_bw, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &rf);
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = inactive_task_timer;
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
	} else {
		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

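/*
 * Because the rb-tree above is kept ordered by absolute deadline and
 * caches its leftmost node, the EDF decision in pick_next_dl_entity()
 * reduces to an O(1) read of rb_first_cached(), while insertion and
 * removal stay O(log n).
 */
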
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se, pi_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se, pi_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter task if:
	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
	 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
	 *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
	 *   boosted due to a SCHED_DEADLINE pi-waiter).
	 * Otherwise we keep our runtime and deadline.
	 */
	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return back to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
		add_rq_bw(p->dl.dl_bw, &rq->dl);
		add_running_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(&p->dl, flags);

		return;
	}

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		sub_rq_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * This check allows us to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive")
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(p);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void migrate_task_rq_dl(struct task_struct *p)
{
	struct rq *rq;

	if (p->state != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it.
	 */
	raw_spin_lock(&rq->lock);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	raw_spin_unlock(&rq->lock);
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL))
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = rb_first_cached(&dl_rq->root);

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

static struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	queue_push_tasks(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork()
	 */
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_allowed))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}

1782 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1783
1784 static int find_later_rq(struct task_struct *task)
1785 {
1786 struct sched_domain *sd;
1787 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1788 int this_cpu = smp_processor_id();
1789 int cpu = task_cpu(task);
1790
1791 /* Make sure the mask is initialized first */
1792 if (unlikely(!later_mask))
1793 return -1;
1794
1795 if (task->nr_cpus_allowed == 1)
1796 return -1;
1797
1798 /*
1799 * We have to consider system topology and task affinity
1800 * first, then we can look for a suitable cpu.
1801 */
1802 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1803 return -1;
1804
1805 /*
1806 * If we are here, some targets have been found; the most
1807 * suitable of them is, among the runqueues whose current
1808 * tasks have a later deadline than our task's, the rq
1809 * with the latest such deadline.
1810 *
1811 * Now we check how well this matches the task's
1812 * affinity and the system topology.
1813 *
1814 * The last CPU where the task ran is our first
1815 * guess, since it is most likely cache-hot there.
1816 */
1817 if (cpumask_test_cpu(cpu, later_mask))
1818 return cpu;
1819 /*
1820 * Check if this_cpu is to be skipped (i.e., it is
1821 * not in the mask) or not.
1822 */
1823 if (!cpumask_test_cpu(this_cpu, later_mask))
1824 this_cpu = -1;
1825
1826 rcu_read_lock();
1827 for_each_domain(cpu, sd) {
1828 if (sd->flags & SD_WAKE_AFFINE) {
1829 int best_cpu;
1830
1831 /*
1832 * If possible, preempting this_cpu is
1833 * cheaper than migrating.
1834 */
1835 if (this_cpu != -1 &&
1836 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1837 rcu_read_unlock();
1838 return this_cpu;
1839 }
1840
1841 best_cpu = cpumask_first_and(later_mask,
1842 sched_domain_span(sd));
1843 /*
1844 * Last chance: if a CPU that is in both later_mask and
1845 * the current sd span is valid, that becomes our
1846 * choice. Of course, the latest possible CPU is
1847 * already under consideration through later_mask.
1848 */
1849 if (best_cpu < nr_cpu_ids) {
1850 rcu_read_unlock();
1851 return best_cpu;
1852 }
1853 }
1854 }
1855 rcu_read_unlock();
1856
1857 /*
1858 * At this point, all our guesses have failed; we just return
1859 * 'something' and let the caller sort things out.
1860 */
1861 if (this_cpu != -1)
1862 return this_cpu;
1863
1864 cpu = cpumask_any(later_mask);
1865 if (cpu < nr_cpu_ids)
1866 return cpu;
1867
1868 return -1;
1869 }
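
The search above boils down to a fixed preference order over the candidate
CPUs: the task's last CPU (likely cache-hot), then this_cpu when it shares a
domain with the task (preemption beats migration), then the first candidate
inside a shared domain, then anything left in the mask. A minimal user-space
sketch of that ordering, with boolean arrays standing in for cpumasks and
sched_domain spans -- everything below is illustrative, not kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	#define NCPUS 8

	/* Mirror find_later_rq()'s preference order over later_mask. */
	static int pick_preferred(const bool later_mask[NCPUS],
				  const bool domain_span[NCPUS],
				  int task_cpu, int this_cpu)
	{
		int cpu;

		/* 1) Last CPU the task ran on: most likely cache-hot. */
		if (later_mask[task_cpu])
			return task_cpu;

		/* 2) The local CPU, if it is a candidate and shares a domain. */
		if (later_mask[this_cpu] && domain_span[this_cpu])
			return this_cpu;

		/* 3) First candidate inside the shared domain span. */
		for (cpu = 0; cpu < NCPUS; cpu++)
			if (later_mask[cpu] && domain_span[cpu])
				return cpu;

		/* 4) Fall back to any candidate at all. */
		for (cpu = 0; cpu < NCPUS; cpu++)
			if (later_mask[cpu])
				return cpu;

		return -1; /* no suitable CPU */
	}

	int main(void)
	{
		bool later_mask[NCPUS] = { [2] = true, [5] = true };
		bool domain_span[NCPUS] = { [0] = true, [1] = true, [2] = true };

		/* Task last ran on CPU 3 (not a candidate); we are on CPU 0. */
		printf("picked CPU %d\n",
		       pick_preferred(later_mask, domain_span, 3, 0)); /* -> 2 */
		return 0;
	}
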
1870
1871 /* Locks the rq it finds */
1872 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1873 {
1874 struct rq *later_rq = NULL;
1875 int tries;
1876 int cpu;
1877
1878 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1879 cpu = find_later_rq(task);
1880
1881 if ((cpu == -1) || (cpu == rq->cpu))
1882 break;
1883
1884 later_rq = cpu_rq(cpu);
1885
1886 if (later_rq->dl.dl_nr_running &&
1887 !dl_time_before(task->dl.deadline,
1888 later_rq->dl.earliest_dl.curr)) {
1889 /*
1890 * Target rq has tasks of equal or earlier deadline,
1891 * retrying does not release any lock and is unlikely
1892 * to yield a different result.
1893 */
1894 later_rq = NULL;
1895 break;
1896 }
1897
1898 /* Retry if something changed. */
1899 if (double_lock_balance(rq, later_rq)) {
1900 if (unlikely(task_rq(task) != rq ||
1901 !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
1902 task_running(rq, task) ||
1903 !dl_task(task) ||
1904 !task_on_rq_queued(task))) {
1905 double_unlock_balance(rq, later_rq);
1906 later_rq = NULL;
1907 break;
1908 }
1909 }
1910
1911 /*
1912 * If the rq we found has no -deadline task, or
1913 * its earliest one has a later deadline than our
1914 * task, the rq is a good one.
1915 */
1916 if (!later_rq->dl.dl_nr_running ||
1917 dl_time_before(task->dl.deadline,
1918 later_rq->dl.earliest_dl.curr))
1919 break;
1920
1921 /* Otherwise we try again. */
1922 double_unlock_balance(rq, later_rq);
1923 later_rq = NULL;
1924 }
1925
1926 return later_rq;
1927 }
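
The retry-laden structure above is forced by double_lock_balance(): to take a
second rq->lock without risking ABBA deadlock, it may drop the lock we already
hold and re-acquire both in a global order, and once the lock has been dropped
every assumption about the task must be re-checked. A toy pthread rendition of
that locking pattern (illustrative only; the kernel orders the two rq locks
the same way, by pointer):

	#include <pthread.h>

	/*
	 * Acquire 'wanted' while already holding 'held'. Returns nonzero
	 * if 'held' was dropped along the way, in which case the caller
	 * must revalidate whatever the lock protected -- exactly the
	 * unlikely(...) re-check block in find_lock_later_rq() above.
	 */
	static int double_lock(pthread_mutex_t *held, pthread_mutex_t *wanted)
	{
		if (pthread_mutex_trylock(wanted) == 0)
			return 0;	/* fast path: nothing dropped */

		/* Contended: drop 'held', then take both in address order. */
		pthread_mutex_unlock(held);
		if (held < wanted) {
			pthread_mutex_lock(held);
			pthread_mutex_lock(wanted);
		} else {
			pthread_mutex_lock(wanted);
			pthread_mutex_lock(held);
		}
		return 1;
	}
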
1928
1929 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1930 {
1931 struct task_struct *p;
1932
1933 if (!has_pushable_dl_tasks(rq))
1934 return NULL;
1935
1936 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
1937 struct task_struct, pushable_dl_tasks);
1938
1939 BUG_ON(rq->cpu != task_cpu(p));
1940 BUG_ON(task_current(rq, p));
1941 BUG_ON(p->nr_cpus_allowed <= 1);
1942
1943 BUG_ON(!task_on_rq_queued(p));
1944 BUG_ON(!dl_task(p));
1945
1946 return p;
1947 }
1948
1949 /*
1950 * See if the non-running -deadline tasks on this rq
1951 * can be sent to some other CPU where they can preempt
1952 * and start executing.
1953 */
1954 static int push_dl_task(struct rq *rq)
1955 {
1956 struct task_struct *next_task;
1957 struct rq *later_rq;
1958 int ret = 0;
1959
1960 if (!rq->dl.overloaded)
1961 return 0;
1962
1963 next_task = pick_next_pushable_dl_task(rq);
1964 if (!next_task)
1965 return 0;
1966
1967 retry:
1968 if (unlikely(next_task == rq->curr)) {
1969 WARN_ON(1);
1970 return 0;
1971 }
1972
1973 /*
1974 * If next_task preempts rq->curr, and rq->curr
1975 * can move away, it makes sense to just reschedule
1976 * without going further in pushing next_task.
1977 */
1978 if (dl_task(rq->curr) &&
1979 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1980 rq->curr->nr_cpus_allowed > 1) {
1981 resched_curr(rq);
1982 return 0;
1983 }
1984
1985 /* We might release rq lock */
1986 get_task_struct(next_task);
1987
1988 /* Will lock the rq it'll find */
1989 later_rq = find_lock_later_rq(next_task, rq);
1990 if (!later_rq) {
1991 struct task_struct *task;
1992
1993 /*
1994 * We must check all this again, since
1995 * find_lock_later_rq releases rq->lock and it is
1996 * then possible that next_task has migrated.
1997 */
1998 task = pick_next_pushable_dl_task(rq);
1999 if (task == next_task) {
2000 /*
2001 * The task is still there. We don't try
2002 * again; some other CPU will pull it when ready.
2003 */
2004 goto out;
2005 }
2006
2007 if (!task)
2008 /* No more tasks */
2009 goto out;
2010
2011 put_task_struct(next_task);
2012 next_task = task;
2013 goto retry;
2014 }
2015
2016 deactivate_task(rq, next_task, 0);
2017 sub_running_bw(next_task->dl.dl_bw, &rq->dl);
2018 sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
2019 set_task_cpu(next_task, later_rq->cpu);
2020 add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
2021 add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
2022 activate_task(later_rq, next_task, 0);
2023 ret = 1;
2024
2025 resched_curr(later_rq);
2026
2027 double_unlock_balance(rq, later_rq);
2028
2029 out:
2030 put_task_struct(next_task);
2031
2032 return ret;
2033 }
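
Note the strict order of the migration sequence just above: the task is
dequeued and its bandwidth removed from the source rq, its CPU is switched,
and only then is its bandwidth added and the task enqueued on the destination,
so the per-rq this_bw/running_bw sums always describe exactly the tasks
currently accounted to that rq. A stripped-down sketch of the bookkeeping
(toy types, not kernel code):

	struct toy_rq {
		unsigned long long running_bw;	/* bw of tasks contending for the CPU */
		unsigned long long this_bw;	/* bw of all tasks on this rq */
	};

	static void toy_migrate(struct toy_rq *src, struct toy_rq *dst,
				unsigned long long dl_bw)
	{
		src->running_bw -= dl_bw;	/* sub_running_bw() */
		src->this_bw    -= dl_bw;	/* sub_rq_bw() */
		/* set_task_cpu() happens at this point in push_dl_task() */
		dst->this_bw    += dl_bw;	/* add_rq_bw() */
		dst->running_bw += dl_bw;	/* add_running_bw() */
	}
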
2034
2035 static void push_dl_tasks(struct rq *rq)
2036 {
2037 /* push_dl_task() will return true if it moved a -deadline task */
2038 while (push_dl_task(rq))
2039 ;
2040 }
2041
2042 static void pull_dl_task(struct rq *this_rq)
2043 {
2044 int this_cpu = this_rq->cpu, cpu;
2045 struct task_struct *p;
2046 bool resched = false;
2047 struct rq *src_rq;
2048 u64 dmin = LONG_MAX;
2049
2050 if (likely(!dl_overloaded(this_rq)))
2051 return;
2052
2053 /*
2054 * Match the barrier from dl_set_overload(); this guarantees that if we
2055 * see overloaded we must also see the dlo_mask bit.
2056 */
2057 smp_rmb();
2058
2059 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2060 if (this_cpu == cpu)
2061 continue;
2062
2063 src_rq = cpu_rq(cpu);
2064
2065 /*
2066 * It looks racy, and it is! However, as in sched_rt.c,
2067 * we are fine with this.
2068 */
2069 if (this_rq->dl.dl_nr_running &&
2070 dl_time_before(this_rq->dl.earliest_dl.curr,
2071 src_rq->dl.earliest_dl.next))
2072 continue;
2073
2074 /* Might drop this_rq->lock */
2075 double_lock_balance(this_rq, src_rq);
2076
2077 /*
2078 * If there are no more pullable tasks on the
2079 * rq, we're done with it.
2080 */
2081 if (src_rq->dl.dl_nr_running <= 1)
2082 goto skip;
2083
2084 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2085
2086 /*
2087 * We found a task to be pulled if:
2088 * - it preempts our current (if there's one),
2089 * - it will preempt the last one we pulled (if any).
2090 */
2091 if (p && dl_time_before(p->dl.deadline, dmin) &&
2092 (!this_rq->dl.dl_nr_running ||
2093 dl_time_before(p->dl.deadline,
2094 this_rq->dl.earliest_dl.curr))) {
2095 WARN_ON(p == src_rq->curr);
2096 WARN_ON(!task_on_rq_queued(p));
2097
2098 /*
2099 * Then we pull only if p's deadline is not earlier than that
2100 * of src_rq's current task: otherwise p will run there anyway.
2101 */
2102 if (dl_time_before(p->dl.deadline,
2103 src_rq->curr->dl.deadline))
2104 goto skip;
2105
2106 resched = true;
2107
2108 deactivate_task(src_rq, p, 0);
2109 sub_running_bw(p->dl.dl_bw, &src_rq->dl);
2110 sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
2111 set_task_cpu(p, this_cpu);
2112 add_rq_bw(p->dl.dl_bw, &this_rq->dl);
2113 add_running_bw(p->dl.dl_bw, &this_rq->dl);
2114 activate_task(this_rq, p, 0);
2115 dmin = p->dl.deadline;
2116
2117 /* Is there any other task even earlier? */
2118 }
2119 skip:
2120 double_unlock_balance(this_rq, src_rq);
2121 }
2122
2123 if (resched)
2124 resched_curr(this_rq);
2125 }
2126
2127 /*
2128 * Since the task is not running and a reschedule is not going to happen
2129 * anytime soon on its runqueue, we try pushing it away now.
2130 */
2131 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2132 {
2133 if (!task_running(rq, p) &&
2134 !test_tsk_need_resched(rq->curr) &&
2135 p->nr_cpus_allowed > 1 &&
2136 dl_task(rq->curr) &&
2137 (rq->curr->nr_cpus_allowed < 2 ||
2138 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2139 push_dl_tasks(rq);
2140 }
2141 }
2142
2143 static void set_cpus_allowed_dl(struct task_struct *p,
2144 const struct cpumask *new_mask)
2145 {
2146 struct root_domain *src_rd;
2147 struct rq *rq;
2148
2149 BUG_ON(!dl_task(p));
2150
2151 rq = task_rq(p);
2152 src_rd = rq->rd;
2153 /*
2154 * Migrating a SCHED_DEADLINE task between exclusive
2155 * cpusets (different root_domains) entails a bandwidth
2156 * update. We already made space for us in the destination
2157 * domain (see cpuset_can_attach()).
2158 */
2159 if (!cpumask_intersects(src_rd->span, new_mask)) {
2160 struct dl_bw *src_dl_b;
2161
2162 src_dl_b = dl_bw_of(cpu_of(rq));
2163 /*
2164 * We now free resources of the root_domain we are migrating
2165 * off. In the worst case, sched_setattr() may temporarily fail
2166 * until we complete the update.
2167 */
2168 raw_spin_lock(&src_dl_b->lock);
2169 __dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2170 raw_spin_unlock(&src_dl_b->lock);
2171 }
2172
2173 set_cpus_allowed_common(p, new_mask);
2174 }
2175
2176 /* Assumes rq->lock is held */
2177 static void rq_online_dl(struct rq *rq)
2178 {
2179 if (rq->dl.overloaded)
2180 dl_set_overload(rq);
2181
2182 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2183 if (rq->dl.dl_nr_running > 0)
2184 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2185 }
2186
2187 /* Assumes rq->lock is held */
2188 static void rq_offline_dl(struct rq *rq)
2189 {
2190 if (rq->dl.overloaded)
2191 dl_clear_overload(rq);
2192
2193 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2194 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2195 }
2196
2197 void __init init_sched_dl_class(void)
2198 {
2199 unsigned int i;
2200
2201 for_each_possible_cpu(i)
2202 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2203 GFP_KERNEL, cpu_to_node(i));
2204 }
2205
2206 #endif /* CONFIG_SMP */
2207
2208 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2209 {
2210 /*
2211 * task_non_contending() can start the "inactive timer" (if the 0-lag
2212 * time is in the future). If the task switches back to dl before
2213 * the "inactive timer" fires, it can continue to consume its current
2214 * runtime using its current deadline. If it stays outside of
2215 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2216 * will reset the task parameters.
2217 */
2218 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2219 task_non_contending(p);
2220
2221 if (!task_on_rq_queued(p))
2222 sub_rq_bw(p->dl.dl_bw, &rq->dl);
2223
2224 /*
2225 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2226 * at the 0-lag time, because the task could have been migrated
2227 * while it was SCHED_OTHER in the meantime.
2228 */
2229 if (p->dl.dl_non_contending)
2230 p->dl.dl_non_contending = 0;
2231
2232 /*
2233 * Since this might be the only -deadline task on the rq,
2234 * this is the right place to try to pull some other one
2235 * from an overloaded cpu, if any.
2236 */
2237 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2238 return;
2239
2240 queue_pull_task(rq);
2241 }
2242
2243 /*
2244 * When switching to -deadline, we may overload the rq, then
2245 * we try to push someone off, if possible.
2246 */
2247 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2248 {
2249 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2250 put_task_struct(p);
2251
2252 /* If p is not queued we will update its parameters at next wakeup. */
2253 if (!task_on_rq_queued(p)) {
2254 add_rq_bw(p->dl.dl_bw, &rq->dl);
2255
2256 return;
2257 }
2258 /*
2259 * If p is boosted we already updated its params in
2260 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
2261 * so p's deadline is now already after rq_clock(rq).
2262 */
2263 if (dl_time_before(p->dl.deadline, rq_clock(rq)))
2264 setup_new_dl_entity(&p->dl);
2265
2266 if (rq->curr != p) {
2267 #ifdef CONFIG_SMP
2268 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2269 queue_push_tasks(rq);
2270 #endif
2271 if (dl_task(rq->curr))
2272 check_preempt_curr_dl(rq, p, 0);
2273 else
2274 resched_curr(rq);
2275 }
2276 }
2277
2278 /*
2279 * If the scheduling parameters of a -deadline task changed,
2280 * a push or pull operation might be needed.
2281 */
2282 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2283 int oldprio)
2284 {
2285 if (task_on_rq_queued(p) || rq->curr == p) {
2286 #ifdef CONFIG_SMP
2287 /*
2288 * This might be too much, but unfortunately
2289 * we don't have the old deadline value, and
2290 * we can't tell whether the task is raising
2291 * or lowering its priority, so...
2292 */
2293 if (!rq->dl.overloaded)
2294 queue_pull_task(rq);
2295
2296 /*
2297 * If we now have an earlier deadline task than p,
2298 * then reschedule, provided p is still on this
2299 * runqueue.
2300 */
2301 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2302 resched_curr(rq);
2303 #else
2304 /*
2305 * Again, we don't know if p has an earlier
2306 * or later deadline, so let's blindly set a
2307 * (maybe not needed) rescheduling point.
2308 */
2309 resched_curr(rq);
2310 #endif /* CONFIG_SMP */
2311 }
2312 }
2313
2314 const struct sched_class dl_sched_class = {
2315 .next = &rt_sched_class,
2316 .enqueue_task = enqueue_task_dl,
2317 .dequeue_task = dequeue_task_dl,
2318 .yield_task = yield_task_dl,
2319
2320 .check_preempt_curr = check_preempt_curr_dl,
2321
2322 .pick_next_task = pick_next_task_dl,
2323 .put_prev_task = put_prev_task_dl,
2324
2325 #ifdef CONFIG_SMP
2326 .select_task_rq = select_task_rq_dl,
2327 .migrate_task_rq = migrate_task_rq_dl,
2328 .set_cpus_allowed = set_cpus_allowed_dl,
2329 .rq_online = rq_online_dl,
2330 .rq_offline = rq_offline_dl,
2331 .task_woken = task_woken_dl,
2332 #endif
2333
2334 .set_curr_task = set_curr_task_dl,
2335 .task_tick = task_tick_dl,
2336 .task_fork = task_fork_dl,
2337
2338 .prio_changed = prio_changed_dl,
2339 .switched_from = switched_from_dl,
2340 .switched_to = switched_to_dl,
2341
2342 .update_curr = update_curr_dl,
2343 };
2344
2345 int sched_dl_global_validate(void)
2346 {
2347 u64 runtime = global_rt_runtime();
2348 u64 period = global_rt_period();
2349 u64 new_bw = to_ratio(period, runtime);
2350 struct dl_bw *dl_b;
2351 int cpu, ret = 0;
2352 unsigned long flags;
2353
2354 /*
2355 * Here we want to check that the bandwidth is not being set to a
2356 * value smaller than the currently allocated bandwidth in
2357 * any of the root_domains.
2358 *
2359 * FIXME: Cycling over all the CPUs is overkill, but simpler than
2360 * cycling on root_domains... Discussion on different/better
2361 * solutions is welcome!
2362 */
2363 for_each_possible_cpu(cpu) {
2364 rcu_read_lock_sched();
2365 dl_b = dl_bw_of(cpu);
2366
2367 raw_spin_lock_irqsave(&dl_b->lock, flags);
2368 if (new_bw < dl_b->total_bw)
2369 ret = -EBUSY;
2370 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2371
2372 rcu_read_unlock_sched();
2373
2374 if (ret)
2375 break;
2376 }
2377
2378 return ret;
2379 }
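
The bound being compared against is the usual fixed-point utilization:
to_ratio(period, runtime) is runtime/period scaled by BW_UNIT. With the
default sysctls (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000)
the per-CPU cap is about 0.95 * BW_UNIT, so a root domain may not carry
-deadline tasks whose bandwidths sum beyond 95% per CPU. A quick user-space
check of that arithmetic, with a stand-in for the kernel's to_ratio()
(BW_SHIFT is 20, as in sched.h; the RUNTIME_INF special case is omitted):

	#include <stdio.h>

	#define BW_SHIFT 20
	#define BW_UNIT  (1ULL << BW_SHIFT)

	static unsigned long long to_ratio(unsigned long long period,
					   unsigned long long runtime)
	{
		if (period == 0)
			return 0;
		return (runtime << BW_SHIFT) / period;
	}

	int main(void)
	{
		/* Defaults: 950 ms of runtime every 1 s, in nanoseconds. */
		unsigned long long cap = to_ratio(1000000000ULL, 950000000ULL);

		printf("cap = %llu of %llu (~%.1f%%)\n",
		       cap, BW_UNIT, 100.0 * cap / BW_UNIT);	/* ~95.0% */
		return 0;
	}
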
2380
2381 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2382 {
2383 if (global_rt_runtime() == RUNTIME_INF) {
2384 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2385 dl_rq->extra_bw = 1 << BW_SHIFT;
2386 } else {
2387 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2388 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2389 dl_rq->extra_bw = to_ratio(global_rt_period(),
2390 global_rt_runtime());
2391 }
2392 }
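
For concreteness, with the default 95% global limit these come out to
bw_ratio ~= 269 (about 1/0.95 in RATIO_SHIFT = 8 fixed point, i.e.
to_ratio(950ms, 1s) shifted down by BW_SHIFT - RATIO_SHIFT = 12 bits) and
extra_bw ~= 996147 (about 0.95 in BW_SHIFT = 20 fixed point). The GRUB
reclaiming path uses these two values to bound how much of the bandwidth
left unused by other tasks a running task may reclaim.
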
2393
2394 void sched_dl_do_global(void)
2395 {
2396 u64 new_bw = -1;
2397 struct dl_bw *dl_b;
2398 int cpu;
2399 unsigned long flags;
2400
2401 def_dl_bandwidth.dl_period = global_rt_period();
2402 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2403
2404 if (global_rt_runtime() != RUNTIME_INF)
2405 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2406
2407 /*
2408 * FIXME: As above...
2409 */
2410 for_each_possible_cpu(cpu) {
2411 rcu_read_lock_sched();
2412 dl_b = dl_bw_of(cpu);
2413
2414 raw_spin_lock_irqsave(&dl_b->lock, flags);
2415 dl_b->bw = new_bw;
2416 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2417
2418 rcu_read_unlock_sched();
2419 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2420 }
2421 }
2422
2423 /*
2424 * We must be sure that accepting a new task (or allowing changing the
2425 * parameters of an existing one) is consistent with the bandwidth
2426 * constraints. If so, this function also updates the currently
2427 * allocated bandwidth to reflect the new situation.
2428 *
2429 * This function is called while holding p's rq->lock.
2430 */
2431 int sched_dl_overflow(struct task_struct *p, int policy,
2432 const struct sched_attr *attr)
2433 {
2434 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2435 u64 period = attr->sched_period ?: attr->sched_deadline;
2436 u64 runtime = attr->sched_runtime;
2437 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2438 int cpus, err = -1;
2439
2440 /* !deadline task may carry old deadline bandwidth */
2441 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2442 return 0;
2443
2444 /*
2445 * Whether a task enters, leaves, or stays -deadline but changes
2446 * its parameters, we may need to update the total allocated
2447 * bandwidth of the container accordingly.
2448 */
2449 raw_spin_lock(&dl_b->lock);
2450 cpus = dl_bw_cpus(task_cpu(p));
2451 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2452 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2453 if (hrtimer_active(&p->dl.inactive_timer))
2454 __dl_clear(dl_b, p->dl.dl_bw, cpus);
2455 __dl_add(dl_b, new_bw, cpus);
2456 err = 0;
2457 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2458 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2459 /*
2460 * XXX this is slightly incorrect: when the task
2461 * utilization decreases, we should delay the total
2462 * utilization change until the task's 0-lag point.
2463 * But this would require setting the task's "inactive
2464 * timer" while the task is still active.
2465 */
2466 __dl_clear(dl_b, p->dl.dl_bw, cpus);
2467 __dl_add(dl_b, new_bw, cpus);
2468 dl_change_utilization(p, new_bw);
2469 err = 0;
2470 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2471 /*
2472 * Do not decrease the total deadline utilization here,
2473 * switched_from_dl() will take care to do it at the correct
2474 * (0-lag) time.
2475 */
2476 err = 0;
2477 }
2478 raw_spin_unlock(&dl_b->lock);
2479
2480 return err;
2481 }
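
In other words, the admission test rejects a change whenever the root
domain's total utilization, with the task's old bandwidth swapped for the
new one, would exceed cpus * cap. A hedged sketch of the predicate behind
__dl_overflow() (toy parameters, all in BW_SHIFT fixed point):

	/*
	 * Return nonzero (reject) if replacing old_bw with new_bw would
	 * push the domain's total beyond cap * cpus; cap == -1 means
	 * "no limit" (RUNTIME_INF).
	 */
	static int toy_dl_overflow(unsigned long long total_bw,
				   unsigned long long cap, int cpus,
				   unsigned long long old_bw,
				   unsigned long long new_bw)
	{
		return cap != (unsigned long long)-1 &&
		       cap * cpus < total_bw - old_bw + new_bw;
	}

For example, a task asking for 30 ms every 100 ms contributes about
0.3 * BW_UNIT; six such tasks fit on a two-CPU domain under the default
95% cap (1.8 < 1.9), while a seventh would be refused with -EBUSY
(2.1 > 1.9).
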
2482
2483 /*
2484 * This function initializes the sched_dl_entity of a task that is
2485 * becoming SCHED_DEADLINE.
2486 *
2487 * Only the static values are considered here, the actual runtime and the
2488 * absolute deadline will be properly calculated when the task is enqueued
2489 * for the first time with its new policy.
2490 */
2491 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2492 {
2493 struct sched_dl_entity *dl_se = &p->dl;
2494
2495 dl_se->dl_runtime = attr->sched_runtime;
2496 dl_se->dl_deadline = attr->sched_deadline;
2497 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2498 dl_se->flags = attr->sched_flags;
2499 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2500 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2501 }
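
As a worked example: a task with sched_runtime = 10 ms, sched_deadline =
30 ms and sched_period = 100 ms gets dl_bw ~= 0.10 * BW_UNIT (runtime over
period, the figure admission control charges) and dl_density ~= 0.33 *
BW_UNIT (runtime over deadline, the stricter figure used by the wakeup
overflow check for constrained-deadline tasks).
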
2502
2503 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2504 {
2505 struct sched_dl_entity *dl_se = &p->dl;
2506
2507 attr->sched_priority = p->rt_priority;
2508 attr->sched_runtime = dl_se->dl_runtime;
2509 attr->sched_deadline = dl_se->dl_deadline;
2510 attr->sched_period = dl_se->dl_period;
2511 attr->sched_flags = dl_se->flags;
2512 }
2513
2514 /*
2515 * This function validates the new parameters of a -deadline task.
2516 * We ask for the deadline to be nonzero and greater than or equal
2517 * to the runtime, and for the period to be either zero or greater
2518 * than or equal to the deadline. Furthermore, we have to be sure that
2519 * user parameters are above the internal resolution of 1us (we
2520 * check sched_runtime only since it is always the smaller one) and
2521 * below 2^63 ns (we have to check both sched_deadline and
2522 * sched_period, as the latter can be zero).
2523 */
2524 bool __checkparam_dl(const struct sched_attr *attr)
2525 {
2526 /* deadline != 0 */
2527 if (attr->sched_deadline == 0)
2528 return false;
2529
2530 /*
2531 * Since we truncate DL_SCALE bits, make sure we're at least
2532 * that big.
2533 */
2534 if (attr->sched_runtime < (1ULL << DL_SCALE))
2535 return false;
2536
2537 /*
2538 * Since we use the MSB for wrap-around and sign issues, make
2539 * sure it's not set (mind that period can be equal to zero).
2540 */
2541 if (attr->sched_deadline & (1ULL << 63) ||
2542 attr->sched_period & (1ULL << 63))
2543 return false;
2544
2545 /* runtime <= deadline <= period (if period != 0) */
2546 if ((attr->sched_period != 0 &&
2547 attr->sched_period < attr->sched_deadline) ||
2548 attr->sched_deadline < attr->sched_runtime)
2549 return false;
2550
2551 return true;
2552 }
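
Since the predicate is pure arithmetic, it is easy to exercise outside the
kernel. The sketch below transcribes the checks into user space; toy_attr
stands in for the runtime/deadline/period triple of struct sched_attr, in
nanoseconds, and DL_SCALE mirrors the kernel's value of 10
(1 << 10 ns ~= 1 us):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DL_SCALE 10

	struct toy_attr {
		uint64_t runtime, deadline, period;
	};

	static bool toy_checkparam(const struct toy_attr *a)
	{
		if (a->deadline == 0)
			return false;		/* deadline != 0 */
		if (a->runtime < (1ULL << DL_SCALE))
			return false;		/* >= 1us resolution */
		if ((a->deadline | a->period) & (1ULL << 63))
			return false;		/* keep the MSB clear */
		if ((a->period != 0 && a->period < a->deadline) ||
		    a->deadline < a->runtime)
			return false;		/* runtime <= deadline <= period */
		return true;
	}

	int main(void)
	{
		struct toy_attr ok  = { 10000000, 30000000, 100000000 };
		struct toy_attr bad = { 10000000,  5000000, 100000000 };

		printf("%d %d\n", toy_checkparam(&ok),
		       toy_checkparam(&bad));	/* prints: 1 0 */
		return 0;
	}
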
2553
2554 /*
2555 * This function clears the sched_dl_entity static params.
2556 */
2557 void __dl_clear_params(struct task_struct *p)
2558 {
2559 struct sched_dl_entity *dl_se = &p->dl;
2560
2561 dl_se->dl_runtime = 0;
2562 dl_se->dl_deadline = 0;
2563 dl_se->dl_period = 0;
2564 dl_se->flags = 0;
2565 dl_se->dl_bw = 0;
2566 dl_se->dl_density = 0;
2567
2568 dl_se->dl_throttled = 0;
2569 dl_se->dl_yielded = 0;
2570 dl_se->dl_non_contending = 0;
2571 }
2572
2573 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2574 {
2575 struct sched_dl_entity *dl_se = &p->dl;
2576
2577 if (dl_se->dl_runtime != attr->sched_runtime ||
2578 dl_se->dl_deadline != attr->sched_deadline ||
2579 dl_se->dl_period != attr->sched_period ||
2580 dl_se->flags != attr->sched_flags)
2581 return true;
2582
2583 return false;
2584 }
2585
2586 #ifdef CONFIG_SMP
2587 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2588 {
2589 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
2590 cs_cpus_allowed);
2591 struct dl_bw *dl_b;
2592 bool overflow;
2593 int cpus, ret;
2594 unsigned long flags;
2595
2596 rcu_read_lock_sched();
2597 dl_b = dl_bw_of(dest_cpu);
2598 raw_spin_lock_irqsave(&dl_b->lock, flags);
2599 cpus = dl_bw_cpus(dest_cpu);
2600 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2601 if (overflow)
2602 ret = -EBUSY;
2603 else {
2604 /*
2605 * We reserve space for this task in the destination
2606 * root_domain, as we can't fail after this point.
2607 * We will free resources in the source root_domain
2608 * later on (see set_cpus_allowed_dl()).
2609 */
2610 __dl_add(dl_b, p->dl.dl_bw, cpus);
2611 ret = 0;
2612 }
2613 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2614 rcu_read_unlock_sched();
2615 return ret;
2616 }
2617
2618 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2619 const struct cpumask *trial)
2620 {
2621 int ret = 1, trial_cpus;
2622 struct dl_bw *cur_dl_b;
2623 unsigned long flags;
2624
2625 rcu_read_lock_sched();
2626 cur_dl_b = dl_bw_of(cpumask_any(cur));
2627 trial_cpus = cpumask_weight(trial);
2628
2629 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2630 if (cur_dl_b->bw != -1 &&
2631 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2632 ret = 0;
2633 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2634 rcu_read_unlock_sched();
2635 return ret;
2636 }
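
The comparison is a straight capacity check, kept in BW_SHIFT fixed point so
no division is needed: with the default 95% per-CPU cap, a cpuset whose
-deadline tasks sum to 1.5 * BW_UNIT may shrink to two CPUs
(2 * 0.95 = 1.9 >= 1.5) but not to one (0.95 < 1.5), in which case the
function returns 0 and the cpuset change is rejected.
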
2637
2638 bool dl_cpu_busy(unsigned int cpu)
2639 {
2640 unsigned long flags;
2641 struct dl_bw *dl_b;
2642 bool overflow;
2643 int cpus;
2644
2645 rcu_read_lock_sched();
2646 dl_b = dl_bw_of(cpu);
2647 raw_spin_lock_irqsave(&dl_b->lock, flags);
2648 cpus = dl_bw_cpus(cpu);
2649 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2650 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2651 rcu_read_unlock_sched();
2652 return overflow;
2653 }
2654 #endif
2655
2656 #ifdef CONFIG_SCHED_DEBUG
2657 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2658
2659 void print_dl_stats(struct seq_file *m, int cpu)
2660 {
2661 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2662 }
2663 #endif /* CONFIG_SCHED_DEBUG */