/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
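/*
 * Illustrative example (not part of the original file): such a reservation
 * is requested from userspace through the sched_setattr() syscall, roughly:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	-- 10ms worst case
 *		.sched_deadline	= 100 * 1000 * 1000,	-- relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	-- 100ms period
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * This admits a 10% reservation: as long as each instance needs at most
 * 10ms per 100ms window, EDF guarantees every deadline is met; an instance
 * trying to run longer is throttled and resumes with a fresh budget in the
 * next period. See Documentation/scheduler/sched-deadline.txt for details.
 */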
#include "sched.h"

#include <linux/slab.h>
#include <uapi/linux/sched/types.h>

struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif
static inline
void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
}

static inline
void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
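
/*
 * Worked example (illustrative, not from the original file): with two 25%
 * tasks (dl_bw roughly 0.25 << BW_SHIFT each) on a runqueue, this_bw is 0.5.
 * If one of them blocks past its "0-lag time", running_bw drops to 0.25
 * while this_bw stays at 0.5 until the task's bandwidth is moved or
 * released; running_bw can therefore never validly exceed this_bw, which
 * is exactly the invariant the SCHED_WARN_ON() checks above enforce.
 */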
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	add_rq_bw(new_bw, &rq->dl);
}
/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                        |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |                 blocks |      |
 *          |             t < 0-lag  v      |
 *          |                   +----+------+------+
 *          |  sub_running_bw   |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
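
/*
 * Worked example (illustrative): a task with dl_runtime = 10ms and
 * dl_period = 100ms blocks with runtime = 4ms left. Its "0-lag time" is
 *
 *	deadline - runtime * dl_period / dl_runtime = deadline - 40ms.
 *
 * If the absolute deadline is 60ms away, the inactive timer is armed 20ms
 * in the future; if it is only 30ms away, the 0-lag time already passed
 * and running_bw is decreased immediately.
 */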
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer
	 */
	if (zerolag_time < 0) {
		if (dl_task(p))
			sub_running_bw(dl_se->dl_bw, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(p->dl.dl_bw, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se->dl_bw, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked or
		 * when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se->dl_bw, dl_rq);
	}
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Fail to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep the things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
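
/*
 * Worked example (illustrative): with pi_se->dl_runtime = 10ms and
 * pi_se->dl_period = 100ms, an entity that overran to runtime = -3ms gets
 * one pass through the loop above: deadline += 100ms and runtime = 7ms.
 * A larger overrun of -25ms needs three passes, ending with
 * deadline += 300ms and runtime = 5ms, so the consumed bandwidth stays at
 * 10ms per 100ms over the long run.
 */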
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * a task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
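
/*
 * Worked example (illustrative): a task with dl_runtime = 10ms and
 * dl_deadline = 100ms wakes up with runtime = 5ms left and 20ms to its
 * absolute deadline. Ignoring the DL_SCALE shifts:
 *
 *	left  = dl_deadline * runtime       = 100ms * 5ms
 *	right = (deadline - t) * dl_runtime =  20ms * 10ms
 *
 * right < left, i.e. 5ms/20ms = 0.25 > 0.1 = 10ms/100ms, so the check
 * returns true and the wakeup rules assign new parameters instead of
 * recycling the old ones.
 */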
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so that the task does not overrun its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In such a way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
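
/*
 * Worked example (illustrative): continuing the case above (dl_runtime =
 * 10ms, dl_deadline = 100ms, so dl_density is 0.1 in BW_SHIFT fixed point),
 * a laxity of 20ms yields runtime = 0.1 * 20ms = 2ms: the task may run 2ms
 * before its current deadline, exactly the maximum that keeps its density
 * bounded by dl_runtime / dl_deadline.
 */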
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}
/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
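
/*
 * Example (illustrative): a constrained task with dl_deadline = 10ms and
 * dl_period = 100ms whose current absolute deadline is at t = 110ms started
 * the current period at t = 100ms; the next period (and the next
 * replenishment instant) is at 110 - 10 + 100 = 200ms.
 */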
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}
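
/*
 * Example (illustrative) of the clock-base adjustment above: if rq_clock(rq)
 * reads 1000ms while hrtimer_cb_get_time() reads 1003ms, delta = 3ms; a
 * replenishment instant of 1200ms in rq->clock terms is programmed as
 * 1203ms in the hrtimer base, so the timer fires at the intended point of
 * the rq->clock timeline.
 */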
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path, it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock before doing the push.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}
/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}
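
/*
 * Worked example (illustrative): a task admitted with runtime = 5ms,
 * deadline = 10ms, period = 100ms uses 5% of the CPU. If it wakes 50ms into
 * its period (after its deadline, before the next period) and were
 * replenished immediately, it could consume 5ms every 10ms -- 50% of the
 * CPU -- until resynchronized. Throttling it until the next period keeps
 * it at the admitted 5ms / 100ms.
 */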
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than
	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
	 * leading to wrong results)
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}
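
/*
 * Worked example (illustrative): treat BW_UNIT as 1.0. On a runqueue with
 * this_bw = 0.5, running_bw = 0.3 and extra_bw = 0.2, u_inact = 0.2, so for
 * a task with utilization 0.1 (and Umax = 1, hence u_act_min = 0.1) we get
 * u_act = 1 - 0.2 - 0.2 = 0.6: a 10ms tick depletes only 6ms of runtime.
 * u_act never drops below u_act_min, so a task cannot reclaim more than
 * its share of the spare bandwidth.
 */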
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, SCHED_CPUFREQ_DL);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
		delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
	dl_se->runtime -= delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	if (!dl_task(p) || p->state == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	sub_running_bw(dl_se->dl_bw, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &rf);
	put_task_struct(p);

	return HRTIMER_NORESTART;
}
void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = inactive_task_timer;
}
#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
	} else {
		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se, pi_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se, pi_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter task if:
	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
	 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
	 *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
	 *   boosted due to a SCHED_DEADLINE pi-waiter).
	 * Otherwise we keep our runtime and deadline.
	 */
	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return back to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
		add_rq_bw(p->dl.dl_bw, &rq->dl);
		add_running_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(&p->dl, flags);

		return;
	}

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		sub_rq_bw(p->dl.dl_bw, &rq->dl);
	}

	/*
	 * This check allows us to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive")
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(p);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}
#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
				(dl_time_before(p->dl.deadline,
					cpu_rq(target)->dl.earliest_dl.curr) ||
				(cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p)
{
	struct rq *rq;

	if (p->state != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it
	 */
	raw_spin_lock(&rq->lock);
	if (p->dl.dl_non_contending) {
		sub_running_bw(p->dl.dl_bw, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(p->dl.dl_bw, &rq->dl);
	raw_spin_unlock(&rq->lock);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL))
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = rb_first_cached(&dl_rq->root);

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}
static struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	queue_push_tasks(rq);

	return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}
static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_allowed))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
		return -1;

	/*
	 * If we are here, some targets have been found, including
	 * the most suitable which is, among the runqueues where the
	 * current tasks have later deadlines than the task's one, the
	 * rq with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(later_mask,
						     sched_domain_span(sd));
			/*
			 * Last chance: if a cpu being in both later_mask
			 * and current sd span is valid, that becomes our
			 * choice. Of course, the latest possible cpu is
			 * already under consideration through later_mask.
			 */
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}
/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	sub_running_bw(next_task->dl.dl_bw, &rq->dl);
	sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
	set_task_cpu(next_task, later_rq->cpu);
	add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
	add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}
static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overloaded; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			sub_running_bw(p->dl.dl_bw, &src_rq->dl);
			sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
			set_task_cpu(p, this_cpu);
			add_rq_bw(p->dl.dl_bw, &this_rq->dl);
			add_running_bw(p->dl.dl_bw, &this_rq->dl);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}
static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline. If it stays outside of
	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
	 * will reset the task parameters.
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(p);

	if (!task_on_rq_queued(p))
		sub_rq_bw(p->dl.dl_bw, &rq->dl);

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * while SCHED_OTHER in the meanwhile.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	queue_pull_task(rq);
}
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(p->dl.dl_bw, &rq->dl);

		return;
	}
	/*
	 * If p is boosted we already updated its params in
	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
	 * p's deadline being now already after rq_clock(rq).
	 */
	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
		setup_new_dl_entity(&p->dl);

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
#endif
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	}
}
const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.migrate_task_rq	= migrate_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};
int sched_dl_global_validate(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	struct dl_bw *dl_b;
	int cpu, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check the bandwidth not being set to some
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 *
	 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
	 * cycling on root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();
		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}
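
/*
 * Illustrative sketch (not part of this file): how new_bw above relates to
 * the sched_rt_{runtime,period}_us sysctls. Assuming the common defaults of
 * runtime = 950000us and period = 1000000us (an assumption; systems may be
 * tuned differently), to_ratio() yields ~0.95 in BW_SHIFT fixed point; any
 * root_domain already using more total bandwidth makes validation fail:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define BW_SHIFT 20	// mirrors the kernel's fixed-point scale
 *
 *	static uint64_t to_ratio(uint64_t period, uint64_t runtime)
 *	{
 *		return (runtime << BW_SHIFT) / period;	// runtime/period
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t new_bw = to_ratio(1000000ULL, 950000ULL);
 *
 *		printf("new_bw = %llu (~0.95 * 2^20 = 996147)\n",
 *		       (unsigned long long)new_bw);
 *		return 0;
 *	}
 */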
void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
		dl_rq->extra_bw = 1 << BW_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
		dl_rq->extra_bw = to_ratio(global_rt_period(),
						    global_rt_runtime());
	}
}
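
/*
 * Worked numbers (illustrative, same assumed sysctl defaults as above):
 * with runtime = 950000us and period = 1000000us,
 * bw_ratio = to_ratio(950000, 1000000) >> (BW_SHIFT - RATIO_SHIFT)
 *          = 1103764 >> 12 = 269, i.e. ~1/0.95 in RATIO_SHIFT fixed point,
 * while extra_bw = to_ratio(1000000, 950000) = 996147, i.e. ~0.95 in
 * BW_SHIFT fixed point.
 */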
void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	/*
	 * FIXME: As above...
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();
		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
	}
}
/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
int sched_dl_overflow(struct task_struct *p, int policy,
		      const struct sched_attr *attr)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1;

	/* !deadline task may carry old deadline bandwidth */
	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
		return 0;

	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update accordingly the total
	 * allocated bandwidth of the container.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(task_cpu(p));
	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
		if (hrtimer_active(&p->dl.inactive_timer))
			__dl_clear(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
		/*
		 * XXX this is slightly incorrect: when the task
		 * utilization decreases, we should delay the total
		 * utilization change until the task's 0-lag point.
		 * But this would require to set the task's "inactive
		 * timer" when the task is not inactive.
		 */
		__dl_clear(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		dl_change_utilization(p, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		/*
		 * Do not decrease the total deadline utilization here,
		 * switched_from_dl() will take care to do it at the correct
		 * (0-lag) time.
		 */
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
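
/*
 * A minimal sketch (not kernel code) of the admission arithmetic behind
 * __dl_overflow() as used above: a change from old_bw to new_bw is rejected
 * when total_bw - old_bw + new_bw would exceed cap * cpus. The helper name
 * and the numbers below are made up for illustration:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool would_overflow(int64_t cap, int cpus, uint64_t total_bw,
 *				   uint64_t old_bw, uint64_t new_bw)
 *	{
 *		// cap == -1 means "no limit", like dl_b->bw
 *		return cap != -1 &&
 *		       (uint64_t)cap * cpus < total_bw - old_bw + new_bw;
 *	}
 *
 * E.g. with cap ~0.95 * 2^20 on 4 CPUs and total_bw already at 3.5 * 2^20,
 * admitting a new task of 0.4 * 2^20 overflows (3.9 > 3.8), so
 * sched_dl_overflow() would leave err == -1.
 */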
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
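
/*
 * Worked numbers (illustrative): with sched_runtime = 10ms,
 * sched_deadline = 30ms and sched_period = 100ms, the two ratios above are
 * dl_bw = (10 << BW_SHIFT) / 100 = ~0.10 * 2^20 = 104857 and
 * dl_density = (10 << BW_SHIFT) / 30 = ~0.33 * 2^20 = 349525.
 */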
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	attr->sched_priority = p->rt_priority;
	attr->sched_runtime = dl_se->dl_runtime;
	attr->sched_deadline = dl_se->dl_deadline;
	attr->sched_period = dl_se->dl_period;
	attr->sched_flags = dl_se->flags;
}
/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline to be nonzero and greater than or equal to
 * the runtime, and for the period to be zero or greater than or equal
 * to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
bool __checkparam_dl(const struct sched_attr *attr)
{
	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
	if (attr->sched_deadline & (1ULL << 63) ||
	    attr->sched_period & (1ULL << 63))
		return false;

	/* runtime <= deadline <= period (if period != 0) */
	if ((attr->sched_period != 0 &&
	     attr->sched_period < attr->sched_deadline) ||
	    attr->sched_deadline < attr->sched_runtime)
		return false;

	return true;
}
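
/*
 * Illustrative userspace sketch of parameters that satisfy the checks
 * above (runtime <= deadline <= period, all >= 1us). glibc has no wrapper
 * for sched_setattr(), so the raw syscall is used; the struct below mirrors
 * the uapi layout and SCHED_DEADLINE is defined defensively:
 *
 *	#define _GNU_SOURCE
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	#ifndef SCHED_DEADLINE
 *	#define SCHED_DEADLINE	6
 *	#endif
 *
 *	struct sched_attr {
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.sched_policy = SCHED_DEADLINE;
 *		attr.sched_runtime  =  10 * 1000 * 1000;	// 10ms
 *		attr.sched_deadline =  30 * 1000 * 1000;	// 30ms
 *		attr.sched_period   = 100 * 1000 * 1000;	// 100ms
 *
 *		if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr");	// e.g. EBUSY on overflow
 *		return 0;
 *	}
 *
 * The kernel path runs __checkparam_dl() first and then the
 * sched_dl_overflow() admission test.
 */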
/*
 * This function clears the sched_dl_entity static params.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
	dl_se->dl_density = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	dl_se->dl_non_contending = 0;
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != attr->sched_flags)
		return true;

	return false;
}
#ifdef CONFIG_SMP
int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
{
	unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
						cs_cpus_allowed);
	struct dl_bw *dl_b;
	bool overflow;
	int cpus, ret;
	unsigned long flags;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(dest_cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cpus = dl_bw_cpus(dest_cpu);
	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
	if (overflow) {
		ret = -EBUSY;
	} else {
		/*
		 * We reserve space for this task in the destination
		 * root_domain, as we can't fail after this point.
		 * We will free resources in the source root_domain
		 * later on (see set_cpus_allowed_dl()).
		 */
		__dl_add(dl_b, p->dl.dl_bw, cpus);
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	int ret = 1, trial_cpus;
	struct dl_bw *cur_dl_b;
	unsigned long flags;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	trial_cpus = cpumask_weight(trial);

	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	if (cur_dl_b->bw != -1 &&
	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
bool dl_cpu_busy(unsigned int cpu)
{
	unsigned long flags;
	struct dl_bw *dl_b;
	bool overflow;
	int cpus;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cpus = dl_bw_cpus(cpu);
	overflow = __dl_overflow(dl_b, cpus, 0, 0);
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */