/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;
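
/*
 * Illustrative example (not part of the original file): on kernels where
 * the sched_setattr() syscall is available, a userspace task would declare
 * a reservation of 10ms of runtime every 100ms period roughly as follows.
 * All values are in nanoseconds and runtime <= deadline <= period must hold:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 */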

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

extern unsigned long to_ratio(u64 period, u64 runtime);

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);
	dl_rq = &rq_of_dl_rq(dl_rq)->dl;

	dl_rq->dl_nr_total++;
	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);
	dl_rq = &rq_of_dl_rq(dl_rq)->dl;

	dl_rq->dl_nr_total--;
	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setscheduler_ex().
 */
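
/*
 * Worked example (illustrative, not from the original source): with
 * dl_runtime = 10ms and dl_period = 100ms, an entity that overran its
 * budget by 3ms reaches the replenishment loop below with runtime == -3ms;
 * a single iteration leaves runtime = 7ms and pushes the absolute deadline
 * one full period (100ms) into the future, so the overrun is paid back out
 * of the entity's own future bandwidth rather than everybody else's.
 */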
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		static bool lag_once = false;

		if (!lag_once) {
			lag_once = true;
			printk_sched("sched: DL replenish lagged too much\n");
		}
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks.
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
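
/*
 * Worked example (illustrative, not from the original source): a task with
 * dl_runtime = 10ms and dl_period = 100ms (bandwidth 0.1) wakes up with 2ms
 * of runtime left and 30ms to its old absolute deadline; 2/30 < 0.1, so the
 * old deadline and budget can safely be kept. Had it woken up with 8ms of
 * runtime left, 8/30 > 0.1 and the parameters must be refreshed instead.
 */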
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
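
/*
 * Illustrative example (not from the original source): a task whose
 * absolute deadline is 70ms away exhausts its runtime now; the hrtimer
 * below is armed to fire at that deadline, so the task stays throttled
 * for the remaining 70ms and only gets fresh runtime when its next
 * instance may start.
 */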
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (i.e., it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	raw_spin_lock(&rq->lock);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setscheduler()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	dl_se->dl_throttled = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there are a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_time += delta_exec;
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline -- see above.
		 */
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}
static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
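
/*
 * Illustrative usage (not from the original source): a periodic
 * SCHED_DEADLINE task that finishes its per-instance work early can call
 * sched_yield() to give up whatever budget is left; the code below then
 * zeroes its runtime, so the task is throttled until its current deadline
 * instead of burning the remainder of its reservation.
 */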
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_new=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_new = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	s64 delta = p->dl.dl_runtime - p->dl.runtime;

	if (delta > 10000)
		hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}
struct task_struct *pick_next_task_dl(struct rq *rq)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

#ifdef CONFIG_SMP
	rq->post_schedule = has_pushable_dl_tasks(rq);
#endif /* CONFIG_SMP */

	return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}
static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}
, local_cpu_mask_dl
);
1114 static int find_later_rq(struct task_struct
*task
)
1116 struct sched_domain
*sd
;
1117 struct cpumask
*later_mask
= __get_cpu_var(local_cpu_mask_dl
);
1118 int this_cpu
= smp_processor_id();
1119 int best_cpu
, cpu
= task_cpu(task
);
1121 /* Make sure the mask is initialized first */
1122 if (unlikely(!later_mask
))
1125 if (task
->nr_cpus_allowed
== 1)
1128 best_cpu
= cpudl_find(&task_rq(task
)->rd
->cpudl
,
1134 * If we are here, some target has been found,
1135 * the most suitable of which is cached in best_cpu.
1136 * This is, among the runqueues where the current tasks
1137 * have later deadlines than the task's one, the rq
1138 * with the latest possible one.
1140 * Now we check how well this matches with task's
1141 * affinity and system topology.
1143 * The last cpu where the task run is our first
1144 * guess, since it is most likely cache-hot there.
1146 if (cpumask_test_cpu(cpu
, later_mask
))
1149 * Check if this_cpu is to be skipped (i.e., it is
1150 * not in the mask) or not.
1152 if (!cpumask_test_cpu(this_cpu
, later_mask
))
1156 for_each_domain(cpu
, sd
) {
1157 if (sd
->flags
& SD_WAKE_AFFINE
) {
1160 * If possible, preempting this_cpu is
1161 * cheaper than migrating.
1163 if (this_cpu
!= -1 &&
1164 cpumask_test_cpu(this_cpu
, sched_domain_span(sd
))) {
1170 * Last chance: if best_cpu is valid and is
1171 * in the mask, that becomes our choice.
1173 if (best_cpu
< nr_cpu_ids
&&
1174 cpumask_test_cpu(best_cpu
, sched_domain_span(sd
))) {
1183 * At this point, all our guesses failed, we just return
1184 * 'something', and let the caller sort the things out.
1189 cpu
= cpumask_any(later_mask
);
1190 if (cpu
< nr_cpu_ids
)
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) || !task->on_rq)) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}
static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}
static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull other tasks here */
	pull_dl_task(rq);
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}
static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}
void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime will get replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (p->on_rq || rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}
const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.pre_schedule		= pre_schedule_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};