// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;
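/*
 * Illustrative sketch (not part of the scheduler itself): from userspace,
 * the reservation described above is requested with sched_setattr(2).
 * For example, a task asking for 10ms of runtime every 100ms, with an
 * implicit deadline (deadline == period), would do roughly:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	(10ms, in ns)
 *		.sched_deadline	= 100 * 1000 * 1000,	(100ms, in ns)
 *		.sched_period	= 100 * 1000 * 1000,	(100ms, in ns)
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * Admission control then reserves runtime/period = 10% of one CPU for it.
 * The numbers above are purely illustrative.
 */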
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        if (cpumask_subset(rd->span, cpu_active_mask))
                return cpumask_weight(rd->span);

        cpus = 0;

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;

        return cpus;
}

static inline unsigned long __dl_bw_capacity(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        unsigned long cap = 0;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cap += capacity_orig_of(i);

        return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than the rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
        if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
            capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
                return __dl_bw_capacity(i);
        }
}
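/*
 * Example (illustrative numbers): on a 4-CPU system where every CPU has
 * capacity_orig_of() == SCHED_CAPACITY_SCALE (1024), the fast path above
 * returns 4 << SCHED_CAPACITY_SHIFT == 4096. On an asymmetric system,
 * say two CPUs of capacity 1024 and two of capacity 512,
 * __dl_bw_capacity() sums the per-CPU capacities instead: 3072.
 */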
static inline bool dl_bw_visited(int cpu, u64 gen)
{
        struct root_domain *rd = cpu_rq(cpu)->rd;

        if (rd->visit_gen == gen)
                return true;

        rd->visit_gen = gen;
        return false;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
        return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
        return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
        return false;
}
#endif
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}
static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}
static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
        if (dl_rq->this_bw > old)
                dl_rq->this_bw = 0;
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_running_bw(dl_se->dl_bw, dl_rq);
}
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        struct rq *rq;

        BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

        if (task_on_rq_queued(p))
                return;

        rq = task_rq(p);
        if (p->dl.dl_non_contending) {
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_not_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
        __add_rq_bw(new_bw, &rq->dl);
}
/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |      ACTIVE      |
 *          +------------------>+    contending    |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        v      |
 *          |                   +----+------+------+
 *          |  sub_running_bw   |      ACTIVE      |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *                fired         +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
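/*
 * Worked example (illustrative numbers): a task with dl_runtime = 10ms
 * and dl_period = 100ms blocks with 3ms of runtime left and its absolute
 * deadline 50ms away. Its "0-lag time" is
 *
 *	deadline - (runtime * dl_period) / dl_runtime
 *	= deadline - (3ms * 100ms) / 10ms = deadline - 30ms,
 *
 * i.e. 20ms from now: task_non_contending() below arms the inactive timer
 * for 20ms, and only when it fires is the task's bandwidth removed from
 * running_bw. Had the task blocked with its deadline only 10ms away, the
 * 0-lag time would already be in the past and running_bw would be
 * decreased immediately.
 */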
static void task_non_contending(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                 div64_long((dl_se->runtime * dl_se->dl_period),
                        dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows to simplify the code
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                        if (p->state == TASK_DEAD)
                                sub_rq_bw(&p->dl, &rq->dl);
                        raw_spin_lock(&dl_b->lock);
                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                        __dl_clear_params(p);
                        raw_spin_unlock(&dl_b->lock);
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (flags & ENQUEUE_MIGRATED)
                add_rq_bw(dl_se, dl_rq);

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_not_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
                        put_task_struct(dl_task_of(dl_se));
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * active utilization (either when the task blocked,
                 * or when the "inactive timer" fired), so add it back.
                 */
                add_running_bw(dl_se, dl_rq);
        }
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        dl_rq->this_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        bool leftmost = true;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        if (leftmost)
                dl_rq->earliest_dl.next = p->dl.deadline;

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color_cached(&p->pushable_dl_tasks,
                               &dl_rq->pushable_dl_tasks_root, leftmost);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                if (next_node) {
                        dl_rq->earliest_dl.next = rb_entry(next_node,
                                struct task_struct, pushable_dl_tasks)->dl.deadline;
                }
        }

        rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}
static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        struct dl_bw *dl_b;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
                         * The task will never come back!
                         */
                        BUG_ON(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
                /*
                 * Inactive timer is armed (or callback is running, but
                 * waiting for us to release rq locks). In any case, when it
                 * will fire (or continue), it will see running_bw of this
                 * task migrated to later_rq (and correctly handle it).
                 */
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);

                add_rq_bw(&p->dl, &later_rq->dl);
                add_running_bw(&p->dl, &later_rq->dl);
        } else {
                sub_rq_bw(&p->dl, &rq->dl);
                add_rq_bw(&p->dl, &later_rq->dl);
        }

        /*
         * And we finally need to fixup root_domain(s) bandwidth accounting,
         * since p is still hanging out in the old (now moved to default) root
         * domain.
         */
        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        dl_b = &later_rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(dl_se->dl_boosted);
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
        dl_se->runtime = dl_se->dl_runtime;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_se->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_se->dl_period;
                dl_se->runtime += pi_se->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep the things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}
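/*
 * Worked example (illustrative numbers): an entity with dl_runtime = 10ms
 * and dl_period = 100ms reaches replenishment with runtime = -4ms (it
 * overran by 4ms before being stopped). One pass of the loop above gives
 * runtime = 6ms and pushes the absolute deadline 100ms further away, so
 * the overrun is paid back out of the entity's own future bandwidth rather
 * than out of anybody else's.
 */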
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * a task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
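/*
 * Worked example (illustrative numbers): dl_runtime = 10ms,
 * dl_deadline = 100ms, and the entity wakes at time t with 4ms of runtime
 * left and (deadline - t) = 30ms. Ignoring the DL_SCALE shifts:
 *
 *	left  = dl_deadline * runtime       = 100 * 4  = 400
 *	right = (deadline - t) * dl_runtime =  30 * 10 = 300
 *
 * right < left, so the function returns true: 4ms over 30ms (~13%) exceeds
 * the reserved 10ms / 100ms (10%), and the current parameters cannot be
 * recycled. Had the wakeup happened with (deadline - t) = 50ms, right
 * would be 500 > 400 and the old deadline and runtime could be kept.
 */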
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to keep the task from overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In such a way that runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 laxity = dl_se->deadline - rq_clock(rq);

        /*
         * If the task has deadline < period, and the deadline is in the past,
         * it should already be throttled before this check.
         *
         * See update_dl_entity() comments for further details.
         */
        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
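/*
 * Worked example (illustrative numbers): with dl_runtime = 10ms and
 * dl_deadline = 100ms, dl_density is 0.1 (stored as 0.1 << BW_SHIFT).
 * If the task wakes up 40ms before its current absolute deadline
 * (laxity = 40ms), its runtime is clamped to 0.1 * 40ms = 4ms, i.e. the
 * most it may run before the deadline without exceeding its density.
 */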
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline == dl_se->dl_period;
}
/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
                             struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

                if (unlikely(!dl_is_implicit(dl_se) &&
                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                             !dl_se->dl_boosted)) {
                        update_dl_revised_wakeup(dl_se, rq);
                        return;
                }

                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_held(&rq->lock);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
        }

        return 1;
}
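/*
 * Example of the clock adjustment above (illustrative numbers): the
 * replenishment instant is 150ms in rq_clock() terms, rq_clock() was last
 * sampled at 100ms, and the hrtimer base already reads 100.2ms. Then
 * delta = 0.2ms and the timer is armed for 150.2ms of hrtimer time, which
 * corresponds to 150ms of rq_clock() time.
 */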
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on the fact the task is active,
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
        if (dl_se->dl_boosted)
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *	  schedule()
         *	    deactivate_task()
         *	      dequeue_task_dl()
         *	        update_curr_dl()
         *	          start_dl_timer()
         *	        __dequeue_task_dl()
         *	    prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se, dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock before this.
                 */
                rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
                rq_repin_lock(rq, &rf);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = dl_task_timer;
}
/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
                        return;
                dl_se->dl_throttled = 1;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
}
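/*
 * Worked example (illustrative numbers): a constrained task with
 * dl_runtime = 5ms, dl_deadline = 20ms, dl_period = 100ms started an
 * instance at t = 0, so its absolute deadline is 20ms and its next period
 * begins at dl_next_period() = 100ms. If it wakes up at t = 30ms (after
 * the deadline but before the next period), it is throttled here with
 * runtime forced to 0 and the replenishment timer set to t = 100ms,
 * instead of being replenished immediately.
 */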
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
        u64 u_act;
        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

        /*
         * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
         * we compare u_inact + rq->dl.extra_bw with
         * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
         * u_inact + rq->dl.extra_bw can be larger than
         * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative
         * leading to wrong results)
         */
        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
                u_act = u_act_min;
        else
                u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

        return (delta * u_act) >> BW_SHIFT;
}
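/*
 * Worked example (illustrative numbers, with BW_UNIT read as 1.0): the
 * running task has u = dl_bw = 0.25 and Umax ~= 0.95, so
 * u_act_min = u / Umax ~= 0.26; the runqueue has this_bw = 0.5,
 * running_bw = 0.25 (so u_inact = 0.25) and extra_bw = 0.3. Since
 * u_inact + extra_bw = 0.55 is not larger than 1 - u_act_min ~= 0.74,
 * u_act = 1 - 0.25 - 0.3 = 0.45, and 1ms of execution consumes only
 * 0.45ms of the task's runtime instead of the full 1ms.
 */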
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec, scaled_delta_exec;
        int cpu = cpu_of(rq);
        u64 now;

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);

        if (dl_entity_is_special(dl_se))
                return;

        /*
         * For tasks that participate in GRUB, we implement GRUB-PA: the
         * spare reclaimed bandwidth is used to clock down frequency.
         *
         * For the others, we still need to scale reservation parameters
         * according to current frequency and CPU maximum capacity.
         */
        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
                scaled_delta_exec = grub_reclaim(delta_exec,
                                                 rq,
                                                 &curr->dl);
        } else {
                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
                unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
        }

        dl_se->runtime -= scaled_delta_exec;

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;

                /* If requested, inform the user about runtime overruns. */
                if (dl_runtime_exceeded(dl_se) &&
                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
                        dl_se->dl_overrun = 1;

                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there are a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}
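/*
 * Worked example of the non-GRUB scaling above (illustrative numbers):
 * on a CPU currently running at half of its maximum frequency
 * (scale_freq = 512) whose maximum capacity is 768 out of 1024
 * (scale_cpu = 768), 1ms of observed execution is charged as
 * 1ms * 512/1024 * 768/1024 ~= 0.375ms of runtime, reflecting the work
 * that would have been done at full frequency on the biggest CPU.
 */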
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     inactive_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        sched_clock_tick();
        update_rq_clock(rq);

        if (!dl_task(p) || p->state == TASK_DEAD) {
                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
                        dl_se->dl_non_contending = 0;
                }

                raw_spin_lock(&dl_b->lock);
                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&dl_b->lock);
                __dl_clear_params(p);

                goto unlock;
        }
        if (dl_se->dl_non_contending == 0)
                goto unlock;

        sub_running_bw(dl_se, &rq->dl);
        dl_se->dl_non_contending = 0;
unlock:
        task_rq_unlock(rq, p, &rf);
        put_task_struct(p);

        return HRTIMER_NORESTART;
}
void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = inactive_task_timer;
}
#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                if (dl_rq->earliest_dl.curr == 0)
                        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_clear(&rq->rd->cpudl, rq->cpu);
                cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
        } else {
                struct rb_node *leftmost = dl_rq->root.rb_leftmost;
                struct sched_dl_entity *entry;

                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rb_node **link = &dl_rq->root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_dl_entity *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                if (dl_time_before(dl_se->deadline, entry->deadline))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        rb_link_node(&dl_se->rb_node, parent, link);
        rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

        inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        if (RB_EMPTY_NODE(&dl_se->rb_node))
                return;

        rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
        RB_CLEAR_NODE(&dl_se->rb_node);

        dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
                  struct sched_dl_entity *pi_se, int flags)
{
        BUG_ON(on_dl_rq(dl_se));

        /*
         * If this is a wakeup or a new instance, the scheduling
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
        if (flags & ENQUEUE_WAKEUP) {
                task_contending(dl_se, flags);
                update_dl_entity(dl_se, pi_se);
        } else if (flags & ENQUEUE_REPLENISH) {
                replenish_dl_entity(dl_se, pi_se);
        } else if ((flags & ENQUEUE_RESTORE) &&
                  dl_time_before(dl_se->deadline,
                                 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
                setup_new_dl_entity(dl_se);
        }

        __enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        __dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
        struct sched_dl_entity *pi_se = &p->dl;

        /*
         * Use the scheduling parameters of the top pi-waiter task if:
         * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
         * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
         *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
         *   boosted due to a SCHED_DEADLINE pi-waiter).
         * Otherwise we keep our runtime and deadline.
         */
        if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
                pi_se = &pi_task->dl;
                /*
                 * Because of delays in the detection of the overrun of a
                 * thread's runtime, it might be the case that a thread
                 * goes to sleep in a rt mutex with negative runtime. As
                 * a consequence, the thread will be throttled.
                 *
                 * While waiting for the mutex, this thread can also be
                 * boosted via PI, resulting in a thread that is throttled
                 * and boosted at the same time.
                 *
                 * In this case, the boost overrides the throttle.
                 */
                if (p->dl.dl_throttled) {
                        /*
                         * The replenish timer needs to be canceled. No
                         * problem if it fires concurrently: boosted threads
                         * are ignored in dl_task_timer().
                         */
                        hrtimer_try_to_cancel(&p->dl.dl_timer);
                        p->dl.dl_throttled = 0;
                }
        } else if (!dl_prio(p->normal_prio)) {
                /*
                 * Special case in which we have a !SCHED_DEADLINE task that is going
                 * to be deboosted, but exceeds its runtime while doing so. No point in
                 * replenishing it, as it's going to return back to its original
                 * scheduling class after this. If it has been throttled, we need to
                 * clear the flag, otherwise the task may wake up as throttled after
                 * being boosted again with no means to replenish the runtime and clear
                 * the throttle.
                 */
                p->dl.dl_throttled = 0;
                BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
                return;
        }

        /*
         * Check if a constrained deadline task was activated
         * after the deadline but before the next period.
         * If that is the case, the task will be throttled and
         * the replenishment timer will be set to the next period.
         */
        if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
                dl_check_constrained_dl(&p->dl);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
                add_rq_bw(&p->dl, &rq->dl);
                add_running_bw(&p->dl, &rq->dl);
        }

        /*
         * If p is throttled, we do not enqueue it. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         * However, the active utilization does not depend on the fact
         * that the task is on the runqueue or not (but depends on the
         * task's state - in GRUB parlance, "inactive" vs "active contending").
         * In other words, even if a task is throttled its utilization must
         * be counted in the active utilization; hence, we need to call
         * add_running_bw().
         */
        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
                if (flags & ENQUEUE_WAKEUP)
                        task_contending(&p->dl, flags);

                return;
        }

        enqueue_dl_entity(&p->dl, pi_se, flags);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        dequeue_dl_entity(&p->dl);
        dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        update_curr_dl(rq);
        __dequeue_task_dl(rq, p, flags);

        if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);
        }

        /*
         * This check allows to start the inactive timer (or to immediately
         * decrease the active utilization, if needed) in two cases:
         * when the task blocks and when it is terminating
         * (p->state == TASK_DEAD). We can handle the two cases in the same
         * way, because from GRUB's point of view the same thing is happening
         * (the task moves from "active contending" to "active non contending"
         * or "inactive").
         */
        if (flags & DEQUEUE_SLEEP)
                task_non_contending(p);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
        /*
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
        rq->curr->dl.dl_yielded = 1;

        update_rq_clock(rq);
        update_curr_dl(rq);
        /*
         * Tell update_rq_clock() that we've just updated,
         * so we don't do microscopic update in schedule()
         * and double the fastpath cost.
         */
        rq_clock_skip_update(rq);
}
#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        struct task_struct *curr;
        bool select_rq;
        struct rq *rq;

        if (sd_flag != SD_BALANCE_WAKE)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = READ_ONCE(rq->curr); /* unlocked access */

        /*
         * If we are dealing with a -deadline task, we must
         * decide where to wake it up.
         * If it has a later deadline and the current task
         * on this rq can't move (provided the waking task
         * can!) we prefer to send it somewhere else. On the
         * other hand, if it has a shorter deadline, we
         * try to make it stay here, it might be important.
         */
        select_rq = unlikely(dl_task(curr)) &&
                    (curr->nr_cpus_allowed < 2 ||
                     !dl_entity_preempt(&p->dl, &curr->dl)) &&
                    p->nr_cpus_allowed > 1;

        /*
         * Take the capacity of the CPU into account to
         * ensure it fits the requirement of the task.
         */
        if (static_branch_unlikely(&sched_asym_cpucapacity))
                select_rq |= !dl_task_fits_capacity(p, cpu);

        if (select_rq) {
                int target = find_later_rq(p);

                if (target != -1 &&
                    (dl_time_before(p->dl.deadline,
                                    cpu_rq(target)->dl.earliest_dl.curr) ||
                     (cpu_rq(target)->dl.dl_nr_running == 0)))
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
        struct rq *rq;

        if (p->state != TASK_WAKING)
                return;

        rq = task_rq(p);
        /*
         * Since p->state == TASK_WAKING, set_task_cpu() has been called
         * from try_to_wake_up(). Hence, p->pi_lock is locked, but
         * rq->lock is not... So, lock it
         */
        raw_spin_lock(&rq->lock);
        if (p->dl.dl_non_contending) {
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_not_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        sub_rq_bw(&p->dl, &rq->dl);
        raw_spin_unlock(&rq->lock);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
            !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
                return;

        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
            cpudl_find(&rq->rd->cpudl, p, NULL))
                return;

        resched_curr(rq);
}
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
        if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
                /*
                 * This is OK, because current is on_cpu, which avoids it being
                 * picked for load-balance and preemption/IRQs are still
                 * disabled avoiding further scheduler activity on it and we've
                 * not yet started the picking loop.
                 */
                rq_unpin_lock(rq, rf);
                pull_dl_task(rq);
                rq_repin_lock(rq, rf);
        }

        return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags)
{
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
                resched_curr(rq);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * In the unlikely case current and p have the same deadline
         * let us try to decide what's the best thing to do...
         */
        if ((p->dl.deadline == rq->curr->dl.deadline) &&
            !test_tsk_need_resched(rq->curr))
                check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
        hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
        p->se.exec_start = rq_clock_task(rq);

        /* You can't push away the running task */
        dequeue_pushable_dl_task(rq, p);

        if (!first)
                return;

        if (hrtick_enabled(rq))
                start_hrtick_dl(rq, p);

        if (rq->curr->sched_class != &dl_sched_class)
                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);

        deadline_queue_push_tasks(rq);
}
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
                                                   struct dl_rq *dl_rq)
{
        struct rb_node *left = rb_first_cached(&dl_rq->root);

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_dl_entity, rb_node);
}

static struct task_struct *pick_next_task_dl(struct rq *rq)
{
        struct sched_dl_entity *dl_se;
        struct dl_rq *dl_rq = &rq->dl;
        struct task_struct *p;

        if (!sched_dl_runnable(rq))
                return NULL;

        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);
        p = dl_task_of(dl_se);
        set_next_task_dl(rq, p, true);
        return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
        update_curr_dl(rq);

        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_dl(rq);

        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        /*
         * Even when we have runtime, update_curr_dl() might have resulted in us
         * not being the leftmost task anymore. In that case NEED_RESCHED will
         * be set and schedule() will start a new hrtick for the next task.
         */
        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
            is_leftmost(p, &rq->dl))
                start_hrtick_dl(rq, p);
}
static void task_fork_dl(struct task_struct *p)
{
        /*
         * SCHED_DEADLINE tasks cannot fork and this is achieved through
         * sched_fork()
         */
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            cpumask_test_cpu(cpu, p->cpus_ptr))
                return 1;
        return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
        struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
        struct task_struct *p = NULL;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

next_node:
        if (next_node) {
                p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

                if (pick_dl_task(rq, p, cpu))
                        return p;

                next_node = rb_next(next_node);
                goto next_node;
        }

        return NULL;
}
1965 static DEFINE_PER_CPU(cpumask_var_t
, local_cpu_mask_dl
);
static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable CPU.
	 */
	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
		return -1;

	/*
	 * If we are here, some targets have been found, including
	 * the most suitable which is, among the runqueues where the
	 * current tasks have later deadlines than the task's one, the
	 * rq with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last CPU where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(later_mask,
						     sched_domain_span(sd));
			/*
			 * Last chance: if a CPU in both later_mask and the
			 * current sd span is valid, that becomes our
			 * choice. Of course, the latest possible CPU is
			 * already under consideration through later_mask.
			 */
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
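/*
 * Illustrative walk-through (not part of the original source): suppose the
 * task last ran on CPU2, cpudl_find() reports {CPU1, CPU2, CPU5} as
 * later-deadline (or free) runqueues, and we are waking on CPU1. The checks
 * above then prefer, in order: CPU2 (cache-hot last CPU, if in the mask),
 * CPU1 (this_cpu within an SD_WAKE_AFFINE domain, since preempting locally
 * beats migrating), the first mask CPU sharing a domain with CPU2, and
 * finally any CPU left in later_mask.
 */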
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}
/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (WARN_ON(next_task == rq->curr))
		return 0;

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other CPU will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);

	/*
	 * Update the later_rq clock here, because the clock is used
	 * by the cpufreq_update_util() inside __add_running_bw().
	 */
	update_rq_clock(later_rq);
	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}
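/*
 * Illustrative scenario (not part of the original source): CPU0 is
 * overloaded with two queued -deadline tasks, so the pushable-tasks tree
 * yields the earliest-deadline task that is not currently running.
 * find_lock_later_rq() then looks for a runqueue whose earliest deadline is
 * later (or that runs no -deadline task at all); if one is found, the task
 * is deactivated here, migrated with set_task_cpu(), activated there, and
 * the target CPU is rescheduled so it picks the pushed task up at once.
 */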
static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}
static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overloaded; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}
static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}
void dl_add_task_root_domain(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	struct dl_bw *dl_b;

	rq = task_rq_lock(p, &rf);

	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);

	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));

	raw_spin_unlock(&dl_b->lock);

	task_rq_unlock(rq, p, &rf);
}
void dl_clear_root_domain(struct root_domain *rd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
	rd->dl_bw.total_bw = 0;
	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}
#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline. If it stays outside of
	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
	 * will reset the task parameters.
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(p);

	if (!task_on_rq_queued(p)) {
		/*
		 * Inactive timer is armed. However, p is leaving DEADLINE and
		 * might migrate away from this rq while continuing to run on
		 * some other class. We need to remove its contribution from
		 * this rq running_bw now, or sub_rq_bw (below) will complain.
		 */
		if (p->dl.dl_non_contending)
			sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * while SCHED_OTHER in the meanwhile.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded CPU, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	deadline_queue_pull_task(rq);
}
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(&p->dl, &rq->dl);

		return;
	}

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			deadline_queue_push_tasks(rq);
#endif
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			deadline_queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	}
}
const struct sched_class dl_sched_class
	__section("__dl_sched_class") = {
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,
	.set_next_task		= set_next_task_dl,

#ifdef CONFIG_SMP
	.balance		= balance_dl,
	.select_task_rq		= select_task_rq_dl,
	.migrate_task_rq	= migrate_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
#endif

	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};
/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
static u64 dl_generation;
int sched_dl_global_validate(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu, cpus, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check the bandwidth not being set to some
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen))
			goto next;

		dl_b = dl_bw_of(cpu);
		cpus = dl_bw_cpus(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw * cpus < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

next:
		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}
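/*
 * Worked example (not from the original source), with BW_SHIFT = 20 so that
 * to_ratio() expresses bandwidth in units of 1 << 20:
 *
 *   sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000
 *     => new_bw = to_ratio(1000000us, 950000us) = (950000 << 20) / 1000000
 *               ~= 996147, i.e. 95% of one CPU.
 *
 * On a 4-CPU root domain the new setting is rejected with -EBUSY only if
 * the already reserved dl_b->total_bw exceeds new_bw * 4 ~= 3984588.
 */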
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
	if (global_rt_runtime() == RUNTIME_INF) {
		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
		dl_rq->extra_bw = 1 << BW_SHIFT;
	} else {
		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
		dl_rq->extra_bw = to_ratio(global_rt_period(),
						    global_rt_runtime());
	}
}
void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	u64 gen = ++dl_generation;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();

		if (dl_bw_visited(cpu, gen)) {
			rcu_read_unlock_sched();
			continue;
		}

		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
	}
}
/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
int sched_dl_overflow(struct task_struct *p, int policy,
		      const struct sched_attr *attr)
{
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1, cpu = task_cpu(p);
	struct dl_bw *dl_b = dl_bw_of(cpu);
	unsigned long cap;

	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return 0;

	/* !deadline task may carry old deadline bandwidth */
	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
		return 0;

	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update the total allocated
	 * bandwidth of the container accordingly.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(cpu);
	cap = dl_bw_capacity(cpu);

	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
		if (hrtimer_active(&p->dl.inactive_timer))
			__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
		/*
		 * XXX this is slightly incorrect: when the task
		 * utilization decreases, we should delay the total
		 * utilization change until the task's 0-lag point.
		 * But this would require to set the task's "inactive
		 * timer" when the task is not inactive.
		 */
		__dl_sub(dl_b, p->dl.dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);
		dl_change_utilization(p, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		/*
		 * Do not decrease the total deadline utilization here,
		 * switched_from_dl() will take care to do it at the correct
		 * time.
		 */
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
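/*
 * Worked example (not from the original source): admitting a new
 * SCHED_DEADLINE task with sched_runtime = 10ms and sched_period = 100ms
 * gives new_bw = to_ratio(100ms, 10ms) = (10^7 << 20) / 10^8 ~= 104857,
 * i.e. about 10% of the per-CPU unit (1 << 20). The first branch above
 * admits it only if reserving that amount on top of dl_b->total_bw still
 * passes the capacity-scaled check in __dl_overflow(), and then accounts
 * it with __dl_add().
 */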
/*
 * This function initializes the sched_dl_entity of a task that is becoming
 * a SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
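/*
 * Illustrative numbers (not from the original source), with BW_SHIFT = 20:
 * for sched_runtime = 5ms, sched_deadline = 10ms, sched_period = 20ms the
 * two ratios above become
 *   dl_bw      = to_ratio(20ms, 5ms) = 0.25 << 20 = 262144  (runtime/period)
 *   dl_density = to_ratio(10ms, 5ms) = 0.50 << 20 = 524288  (runtime/deadline)
 * dl_bw is what admission control accounts per root domain, while dl_density
 * is used by the runtime/deadline update rules for constrained tasks.
 */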
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	attr->sched_priority = p->rt_priority;
	attr->sched_runtime = dl_se->dl_runtime;
	attr->sched_deadline = dl_se->dl_deadline;
	attr->sched_period = dl_se->dl_period;
	attr->sched_flags = dl_se->flags;
}
/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom end
 * we guard against timer DoS.
 */
unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
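/*
 * Worked numbers (not from the original source): the defaults above are in
 * microseconds and are converted with NSEC_PER_USEC in __checkparam_dl(),
 * so the accepted period range is 100us <= period <= (1 << 22)us, i.e.
 * 4194304us ~= 4.19s.
 */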
/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline to be non-zero and greater than or equal to
 * the runtime, and for the period to be either zero or greater than or
 * equal to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
bool __checkparam_dl(const struct sched_attr *attr)
{
	u64 period, max, min;

	/* special dl tasks don't actually use any parameter */
	if (attr->sched_flags & SCHED_FLAG_SUGOV)
		return true;

	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
	if (attr->sched_deadline & (1ULL << 63) ||
	    attr->sched_period & (1ULL << 63))
		return false;

	period = attr->sched_period;
	if (!period)
		period = attr->sched_deadline;

	/* runtime <= deadline <= period (if period != 0) */
	if (period < attr->sched_deadline ||
	    attr->sched_deadline < attr->sched_runtime)
		return false;

	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;

	if (period < min || period > max)
		return false;

	return true;
}
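/*
 * Minimal userspace sketch (not part of this file) of parameters that pass
 * the checks above, assuming a libc that exposes __NR_sched_setattr; the
 * local struct mirrors the UAPI sched_attr layout and all times are in
 * nanoseconds:
 *
 *	#define _GNU_SOURCE
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/sched.h>	// SCHED_DEADLINE
 *
 *	struct sched_attr {
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_policy	= SCHED_DEADLINE,
 *			.sched_runtime	=  10 * 1000 * 1000,	// 10ms
 *			.sched_deadline	=  30 * 1000 * 1000,	// 30ms
 *			.sched_period	= 100 * 1000 * 1000,	// 100ms
 *		};
 *
 *		// runtime <= deadline <= period, period within [100us, ~4.19s]
 *		return syscall(__NR_sched_setattr, 0, &attr, 0);
 *	}
 */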
/*
 * This function clears the sched_dl_entity static params.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime		= 0;
	dl_se->dl_deadline		= 0;
	dl_se->dl_period		= 0;
	dl_se->flags			= 0;
	dl_se->dl_bw			= 0;
	dl_se->dl_density		= 0;

	dl_se->dl_boosted		= 0;
	dl_se->dl_throttled		= 0;
	dl_se->dl_yielded		= 0;
	dl_se->dl_non_contending	= 0;
	dl_se->dl_overrun		= 0;
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != attr->sched_flags)
		return true;

	return false;
}
#ifdef CONFIG_SMP
int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
{
	unsigned long flags, cap;
	unsigned int dest_cpu;
	struct dl_bw *dl_b;
	bool overflow;
	int ret;

	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);

	rcu_read_lock_sched();
	dl_b = dl_bw_of(dest_cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cap = dl_bw_capacity(dest_cpu);
	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
	if (overflow) {
		ret = -EBUSY;
	} else {
		/*
		 * We reserve space for this task in the destination
		 * root_domain, as we can't fail after this point.
		 * We will free resources in the source root_domain
		 * later on (see set_cpus_allowed_dl()).
		 */
		int cpus = dl_bw_cpus(dest_cpu);

		__dl_add(dl_b, p->dl.dl_bw, cpus);
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	int ret = 1, trial_cpus;
	struct dl_bw *cur_dl_b;
	unsigned long flags;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	trial_cpus = cpumask_weight(trial);

	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	if (cur_dl_b->bw != -1 &&
	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}
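/*
 * Worked example (not from the original source): shrinking a cpuset from 4
 * CPUs to 2 with dl_bw.bw = 996147 (95% per CPU) is allowed only while the
 * reserved dl_bw.total_bw stays at or below 996147 * 2 = 1992294; with, say,
 * three admitted tasks of bandwidth 0.5 << 20 = 524288 each (total 1572864)
 * the shrink succeeds, while with four such tasks (2097152) it is refused.
 */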
bool dl_cpu_busy(unsigned int cpu)
{
	unsigned long flags, cap;
	struct dl_bw *dl_b;
	bool overflow;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cap = dl_bw_capacity(cpu);
	overflow = __dl_overflow(dl_b, cap, 0, 0);
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */