kernel/sched/deadline.c (mirror_ubuntu-artful-kernel.git: "sched/deadline: Fix migration of SCHED_DEADLINE tasks")
1 /*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
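/*
 * For instance, a task asking for sched_runtime = 10ms every
 * sched_period = 100ms (with sched_deadline = 100ms) reserves 10% of a
 * CPU: as long as each instance runs for at most 10ms, EDF keeps it
 * meeting its deadlines; if it runs longer, the CBS throttles it until
 * its next replenishment instead of letting it disturb other tasks.
 * (Numbers are illustrative only.)
 */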
17 #include "sched.h"
18
19 #include <linux/slab.h>
20
21 struct dl_bandwidth def_dl_bandwidth;
22
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 return container_of(dl_se, struct task_struct, dl);
26 }
27
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 return container_of(dl_rq, struct rq, dl);
31 }
32
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39 }
40
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47 {
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51 }
52
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54 {
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58 }
59
60 void init_dl_bw(struct dl_bw *dl_b)
61 {
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70 }
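/*
 * For example, with the default rt_runtime/rt_period of 950000us over
 * 1000000us, dl_b->bw caps the total admitted -deadline utilization at
 * 95% of a CPU; bw == -1 (RUNTIME_INF) means the admission-control
 * check is effectively disabled.
 */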
71
72 void init_dl_rq(struct dl_rq *dl_rq)
73 {
74 dl_rq->rb_root = RB_ROOT;
75
76 #ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 init_dl_bw(&dl_rq->dl_bw);
85 #endif
86 }
87
88 #ifdef CONFIG_SMP
89
90 static inline int dl_overloaded(struct rq *rq)
91 {
92 return atomic_read(&rq->rd->dlo_count);
93 }
94
95 static inline void dl_set_overload(struct rq *rq)
96 {
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 /*
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
104 *
105 * Matched by the barrier in pull_dl_task().
106 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109 }
110
111 static inline void dl_clear_overload(struct rq *rq)
112 {
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118 }
119
120 static void update_dl_migration(struct dl_rq *dl_rq)
121 {
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131 }
132
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134 {
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141 }
142
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 {
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151 }
152
153 /*
154 * The list of pushable -deadline tasks is not a plist, like in
155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
156 */
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158 {
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost)
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181
182 rb_link_node(&p->pushable_dl_tasks, parent, link);
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184 }
185
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187 {
188 struct dl_rq *dl_rq = &rq->dl;
189
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 return;
192
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 struct rb_node *next_node;
195
196 next_node = rb_next(&p->pushable_dl_tasks);
197 dl_rq->pushable_dl_tasks_leftmost = next_node;
198 }
199
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 RB_CLEAR_NODE(&p->pushable_dl_tasks);
202 }
203
204 static inline int has_pushable_dl_tasks(struct rq *rq)
205 {
206 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207 }
208
209 static int push_dl_task(struct rq *rq);
210
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212 {
213 return dl_task(prev);
214 }
215
216 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
217 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
218
219 static void push_dl_tasks(struct rq *);
220 static void pull_dl_task(struct rq *);
221
222 static inline void queue_push_tasks(struct rq *rq)
223 {
224 if (!has_pushable_dl_tasks(rq))
225 return;
226
227 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
228 }
229
230 static inline void queue_pull_task(struct rq *rq)
231 {
232 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
233 }
234
235 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
236
237 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
238 {
239 struct rq *later_rq = NULL;
240 bool fallback = false;
241
242 later_rq = find_lock_later_rq(p, rq);
243
244 if (!later_rq) {
245 int cpu;
246
247 /*
248 * If we cannot preempt any rq, fall back to pick any
249 * online cpu.
250 */
251 fallback = true;
252 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
253 if (cpu >= nr_cpu_ids) {
254 /*
255 * Failed to find any suitable cpu.
256 * The task will never come back!
257 */
258 BUG_ON(dl_bandwidth_enabled());
259
260 /*
261 * If admission control is disabled we
262 * try a little harder to let the task
263 * run.
264 */
265 cpu = cpumask_any(cpu_active_mask);
266 }
267 later_rq = cpu_rq(cpu);
268 double_lock_balance(rq, later_rq);
269 }
270
271 /*
272 * By now the task is replenished and enqueued; migrate it.
273 */
274 deactivate_task(rq, p, 0);
275 set_task_cpu(p, later_rq->cpu);
276 activate_task(later_rq, p, 0);
277
278 if (!fallback)
279 resched_curr(later_rq);
280
281 double_unlock_balance(later_rq, rq);
282
283 return later_rq;
284 }
285
286 #else
287
288 static inline
289 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
290 {
291 }
292
293 static inline
294 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
295 {
296 }
297
298 static inline
299 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
300 {
301 }
302
303 static inline
304 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
305 {
306 }
307
308 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
309 {
310 return false;
311 }
312
313 static inline void pull_dl_task(struct rq *rq)
314 {
315 }
316
317 static inline void queue_push_tasks(struct rq *rq)
318 {
319 }
320
321 static inline void queue_pull_task(struct rq *rq)
322 {
323 }
324 #endif /* CONFIG_SMP */
325
326 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
327 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
328 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
329 int flags);
330
331 /*
332 * We are being explicitly informed that a new instance is starting,
333 * and this means that:
334 * - the absolute deadline of the entity has to be placed at
335 * current time + relative deadline;
336 * - the runtime of the entity has to be set to the maximum value.
337 *
338 * The capability of specifying such an event is useful whenever a -deadline
339 * entity wants to (try to!) synchronize its behaviour with that of the
340 * scheduler, and to (try to!) reconcile itself with its own scheduling
341 * parameters.
342 */
343 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
344 struct sched_dl_entity *pi_se)
345 {
346 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
347 struct rq *rq = rq_of_dl_rq(dl_rq);
348
349 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
350
351 /*
352 * We use the regular wall clock time to set deadlines in the
353 * future; in fact, we must consider execution overheads (time
354 * spent on hardirq context, etc.).
355 */
356 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
357 dl_se->runtime = pi_se->dl_runtime;
358 dl_se->dl_new = 0;
359 }
360
361 /*
362 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
363 * possibility of an entity lasting more than what it declared, and thus
364 * exhausting its runtime.
365 *
366 * Here we are interested in making runtime overrun possible, but we do
367 * not want an entity which is misbehaving to affect the scheduling of all
368 * other entities.
369 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
370 * is used, in order to confine each entity within its own bandwidth.
371 *
372 * This function deals exactly with that, and ensures that when the runtime
373 * of an entity is replenished, its deadline is also postponed. That ensures
374 * the overrunning entity can't interfere with other entities in the system and
375 * can't make them miss their deadlines. Reasons why this kind of overrun
376 * could happen are, typically, an entity voluntarily trying to exceed its
377 * runtime, or an underestimation of it during sched_setattr().
378 */
379 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
380 struct sched_dl_entity *pi_se)
381 {
382 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
383 struct rq *rq = rq_of_dl_rq(dl_rq);
384
385 BUG_ON(pi_se->dl_runtime <= 0);
386
387 /*
388 * This could be the case for a !-dl task that is boosted.
389 * Just go with full inherited parameters.
390 */
391 if (dl_se->dl_deadline == 0) {
392 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
393 dl_se->runtime = pi_se->dl_runtime;
394 }
395
396 /*
397 * We keep moving the deadline away until we get some
398 * available runtime for the entity. This ensures correct
399 * handling of situations where the runtime overrun is
400 * arbitrarily large.
401 */
402 while (dl_se->runtime <= 0) {
403 dl_se->deadline += pi_se->dl_period;
404 dl_se->runtime += pi_se->dl_runtime;
405 }
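	/*
	 * E.g. an entity with dl_runtime = 10ms and dl_period = 100ms
	 * that comes here with runtime = -25ms gets three replenishments:
	 * it ends up with runtime = 5ms and a deadline pushed 300ms
	 * further away, paying its overrun back by delaying future
	 * instances rather than by stealing bandwidth from others.
	 */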
406
407 /*
408 * At this point, the deadline really should be "in
409 * the future" with respect to rq->clock. If it's
410 * not, we are, for some reason, lagging too much!
411 * Anyway, after having warned userspace about that,
412 * we still try to keep things running by
413 * resetting the deadline and the budget of the
414 * entity.
415 */
416 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
417 printk_deferred_once("sched: DL replenish lagged too much\n");
418 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
419 dl_se->runtime = pi_se->dl_runtime;
420 }
421
422 if (dl_se->dl_yielded)
423 dl_se->dl_yielded = 0;
424 if (dl_se->dl_throttled)
425 dl_se->dl_throttled = 0;
426 }
427
428 /*
429 * Here we check if --at time t-- an entity (which is probably being
430 * [re]activated or, in general, enqueued) can use its remaining runtime
431 * and its current deadline _without_ exceeding the bandwidth it is
432 * assigned (function returns true if it can't). We are in fact applying
433 * one of the CBS rules: when a task wakes up, if the residual runtime
434 * over residual deadline fits within the allocated bandwidth, then we
435 * can keep the current (absolute) deadline and residual budget without
436 * disrupting the schedulability of the system. Otherwise, we should
437 * refill the runtime and set the deadline a period in the future,
438 * because keeping the current (absolute) deadline of the task would
439 * result in breaking guarantees promised to other tasks (refer to
440 * Documentation/scheduler/sched-deadline.txt for more information).
441 *
442 * This function returns true if:
443 *
444 * runtime / (deadline - t) > dl_runtime / dl_period ,
445 *
446 * IOW we can't recycle current parameters.
447 *
448 * Notice that the bandwidth check is done against the period. For
449 * a task with deadline equal to period this is the same as using
450 * dl_deadline instead of dl_period in the equation above.
451 */
452 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
453 struct sched_dl_entity *pi_se, u64 t)
454 {
455 u64 left, right;
456
457 /*
458 * left and right are the two sides of the equation above,
459 * after a bit of shuffling to use multiplications instead
460 * of divisions.
461 *
462 * Note that none of the time values involved in the two
463 * multiplications are absolute: dl_deadline and dl_runtime
464 * are the relative deadline and the maximum runtime of each
465 * instance, runtime is the runtime left for the last instance
466 * and (deadline - t), since t is rq->clock, is the time left
467 * to the (absolute) deadline. Even if overflowing the u64 type
468 * is very unlikely to occur in both cases, here we scale down
469 * as we want to avoid that risk at all. Scaling down by 10
470 * means that we reduce granularity to 1us. We are fine with it,
471 * since this is only a true/false check and, anyway, thinking
472 * of anything below microseconds resolution is actually fiction
473 * (but still we want to give the user that illusion >;).
474 */
475 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
476 right = ((dl_se->deadline - t) >> DL_SCALE) *
477 (pi_se->dl_runtime >> DL_SCALE);
478
479 return dl_time_before(right, left);
480 }
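/*
 * Worked example for the check above: with dl_runtime = 10ms over a
 * dl_period = 100ms, an entity waking up with 4ms of runtime left and
 * 30ms to its current deadline would run at 4/30 ~= 13% bandwidth,
 * which exceeds its reserved 10%; left = 100 * 4 > right = 30 * 10, so
 * the function returns true and the caller refills the runtime and
 * pushes the deadline a full relative deadline away (see
 * update_dl_entity()).
 */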
481
482 /*
483 * When a -deadline entity is queued back on the runqueue, its runtime and
484 * deadline might need updating.
485 *
486 * The policy here is that we update the deadline of the entity only if:
487 * - the current deadline is in the past,
488 * - using the remaining runtime with the current deadline would make
489 * the entity exceed its bandwidth.
490 */
491 static void update_dl_entity(struct sched_dl_entity *dl_se,
492 struct sched_dl_entity *pi_se)
493 {
494 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
495 struct rq *rq = rq_of_dl_rq(dl_rq);
496
497 /*
498 * The arrival of a new instance needs special treatment, i.e.,
499 * the actual scheduling parameters have to be "renewed".
500 */
501 if (dl_se->dl_new) {
502 setup_new_dl_entity(dl_se, pi_se);
503 return;
504 }
505
506 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
507 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
508 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
509 dl_se->runtime = pi_se->dl_runtime;
510 }
511 }
512
513 /*
514 * If the entity depleted all its runtime, and if we want it to sleep
515 * while waiting for some new execution time to become available, we
516 * set the bandwidth enforcement timer to the replenishment instant
517 * and try to activate it.
518 *
519 * Notice that it is important for the caller to know if the timer
520 * actually started or not (i.e., the replenishment instant is in
521 * the future or in the past).
522 */
523 static int start_dl_timer(struct task_struct *p)
524 {
525 struct sched_dl_entity *dl_se = &p->dl;
526 struct hrtimer *timer = &dl_se->dl_timer;
527 struct rq *rq = task_rq(p);
528 ktime_t now, act;
529 s64 delta;
530
531 lockdep_assert_held(&rq->lock);
532
533 /*
534 * We want the timer to fire at the deadline, but considering
535 * that the deadline is actually expressed in rq->clock time and
536 * not in the hrtimer's time base.
537 */
538 act = ns_to_ktime(dl_se->deadline);
539 now = hrtimer_cb_get_time(timer);
540 delta = ktime_to_ns(now) - rq_clock(rq);
541 act = ktime_add_ns(act, delta);
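	/*
	 * E.g. if the deadline is at 100ms on the rq clock, rq_clock()
	 * currently reads 95ms and the hrtimer base reads 250ms, the
	 * timer is armed at 255ms: 5ms from now, matching the 5ms left
	 * until the deadline as seen by the rq clock.
	 */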
542
543 /*
544 * If the expiry time already passed, e.g., because the value
545 * chosen as the deadline is too small, don't even try to
546 * start the timer in the past!
547 */
548 if (ktime_us_delta(act, now) < 0)
549 return 0;
550
551 /*
552 * !enqueued will guarantee another callback; even if one is already in
553 * progress. This ensures a balanced {get,put}_task_struct().
554 *
555 * The race against __run_timer() clearing the enqueued state is
556 * harmless because we're holding task_rq()->lock, therefore the timer
557 * expiring after we've done the check will wait on its task_rq_lock()
558 * and observe our state.
559 */
560 if (!hrtimer_is_queued(timer)) {
561 get_task_struct(p);
562 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
563 }
564
565 return 1;
566 }
567
568 /*
569 * This is the bandwidth enforcement timer callback. If here, we know
570 * a task is not on its dl_rq, since the fact that the timer was running
571 * means the task is throttled and needs a runtime replenishment.
572 *
573 * However, what we actually do depends on the fact the task is active,
574 * (it is on its rq) or has been removed from there by a call to
575 * dequeue_task_dl(). In the former case we must issue the runtime
576 * replenishment and add the task back to the dl_rq; in the latter, we just
577 * do nothing but clearing dl_throttled, so that runtime and deadline
578 * updating (and the queueing back to dl_rq) will be done by the
579 * next call to enqueue_task_dl().
580 */
581 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
582 {
583 struct sched_dl_entity *dl_se = container_of(timer,
584 struct sched_dl_entity,
585 dl_timer);
586 struct task_struct *p = dl_task_of(dl_se);
587 unsigned long flags;
588 struct rq *rq;
589
590 rq = task_rq_lock(p, &flags);
591
592 /*
593 * The task might have changed its scheduling policy to something
594 * different from SCHED_DEADLINE (through switched_from_dl()).
595 */
596 if (!dl_task(p)) {
597 __dl_clear_params(p);
598 goto unlock;
599 }
600
601 /*
602 * This is possible if switched_from_dl() raced against a running
603 * callback that took the above !dl_task() path and we've since then
604 * switched back into SCHED_DEADLINE.
605 *
606 * There's nothing to do except drop our task reference.
607 */
608 if (dl_se->dl_new)
609 goto unlock;
610
611 /*
612 * The task might have been boosted by someone else and might be in the
613 * boosting/deboosting path; it's not throttled.
614 */
615 if (dl_se->dl_boosted)
616 goto unlock;
617
618 /*
619 * Spurious timer due to start_dl_timer() race; or we already received
620 * a replenishment from rt_mutex_setprio().
621 */
622 if (!dl_se->dl_throttled)
623 goto unlock;
624
625 sched_clock_tick();
626 update_rq_clock(rq);
627
628 /*
629 * If the throttle happened during sched-out, like:
630 *
631 * schedule()
632 * deactivate_task()
633 * dequeue_task_dl()
634 * update_curr_dl()
635 * start_dl_timer()
636 * __dequeue_task_dl()
637 * prev->on_rq = 0;
638 *
639 * We can be both throttled and !queued. Replenish the counter
640 * but do not enqueue -- wait for our wakeup to do that.
641 */
642 if (!task_on_rq_queued(p)) {
643 replenish_dl_entity(dl_se, dl_se);
644 goto unlock;
645 }
646
647 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
648 if (dl_task(rq->curr))
649 check_preempt_curr_dl(rq, p, 0);
650 else
651 resched_curr(rq);
652
653 #ifdef CONFIG_SMP
654 /*
655 * Perform balancing operations here; after the replenishments. We
656 * cannot drop rq->lock before this, otherwise the assertion in
657 * start_dl_timer() about not missing updates is not true.
658 *
659 * If we find that the rq the task was on is no longer available, we
660 * need to select a new rq.
661 *
662 * XXX figure out if select_task_rq_dl() deals with offline cpus.
663 */
664 if (unlikely(!rq->online))
665 rq = dl_task_offline_migration(rq, p);
666
667 /*
668 * Queueing this task back might have overloaded rq, check if we need
669 * to kick someone away.
670 */
671 if (has_pushable_dl_tasks(rq))
672 push_dl_task(rq);
673 #endif
674
675 unlock:
676 task_rq_unlock(rq, p, &flags);
677
678 /*
679 * This can free the task_struct, including this hrtimer, do not touch
680 * anything related to that after this.
681 */
682 put_task_struct(p);
683
684 return HRTIMER_NORESTART;
685 }
686
687 void init_dl_task_timer(struct sched_dl_entity *dl_se)
688 {
689 struct hrtimer *timer = &dl_se->dl_timer;
690
691 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
692 timer->function = dl_task_timer;
693 }
694
695 static
696 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
697 {
698 return (dl_se->runtime <= 0);
699 }
700
701 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
702
703 /*
704 * Update the current task's runtime statistics (provided it is still
705 * a -deadline task and has not been removed from the dl_rq).
706 */
707 static void update_curr_dl(struct rq *rq)
708 {
709 struct task_struct *curr = rq->curr;
710 struct sched_dl_entity *dl_se = &curr->dl;
711 u64 delta_exec;
712
713 if (!dl_task(curr) || !on_dl_rq(dl_se))
714 return;
715
716 /*
717 * Consumed budget is computed considering the time as
718 * observed by schedulable tasks (excluding time spent
719 * in hardirq context, etc.). Deadlines are instead
720 * computed using hard walltime. This seems to be the more
721 * natural solution, but the full ramifications of this
722 * approach need further study.
723 */
724 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
725 if (unlikely((s64)delta_exec <= 0))
726 return;
727
728 schedstat_set(curr->se.statistics.exec_max,
729 max(curr->se.statistics.exec_max, delta_exec));
730
731 curr->se.sum_exec_runtime += delta_exec;
732 account_group_exec_runtime(curr, delta_exec);
733
734 curr->se.exec_start = rq_clock_task(rq);
735 cpuacct_charge(curr, delta_exec);
736
737 sched_rt_avg_update(rq, delta_exec);
738
739 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
740 if (dl_runtime_exceeded(dl_se)) {
741 dl_se->dl_throttled = 1;
742 __dequeue_task_dl(rq, curr, 0);
743 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
744 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
745
746 if (!is_leftmost(curr, &rq->dl))
747 resched_curr(rq);
748 }
749
750 /*
751 * Because -- for now -- we share the rt bandwidth, we need to
752 * account our runtime there too, otherwise actual rt tasks
753 * would be able to exceed the shared quota.
754 *
755 * Account to the root rt group for now.
756 *
757 * The solution we're working towards is having the RT groups scheduled
758 * using deadline servers -- however there's a few nasties to figure
759 * out before that can happen.
760 */
761 if (rt_bandwidth_enabled()) {
762 struct rt_rq *rt_rq = &rq->rt;
763
764 raw_spin_lock(&rt_rq->rt_runtime_lock);
765 /*
766 * We'll let actual RT tasks worry about the overflow here, we
767 * have our own CBS to keep us in line; only account when RT
768 * bandwidth is relevant.
769 */
770 if (sched_rt_bandwidth_account(rt_rq))
771 rt_rq->rt_time += delta_exec;
772 raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 }
774 }
775
776 #ifdef CONFIG_SMP
777
778 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
779
780 static inline u64 next_deadline(struct rq *rq)
781 {
782 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
783
784 if (next && dl_prio(next->prio))
785 return next->dl.deadline;
786 else
787 return 0;
788 }
789
790 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
791 {
792 struct rq *rq = rq_of_dl_rq(dl_rq);
793
794 if (dl_rq->earliest_dl.curr == 0 ||
795 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
796 /*
797 * If the dl_rq had no -deadline tasks, or if the new task
798 * has a shorter deadline than the current one on dl_rq, we
799 * know that the previous earliest becomes our next earliest,
800 * as the new task becomes the earliest itself.
801 */
802 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
803 dl_rq->earliest_dl.curr = deadline;
804 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
805 } else if (dl_rq->earliest_dl.next == 0 ||
806 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
807 /*
808 * On the other hand, if the new -deadline task has
809 * a later deadline than the earliest one on dl_rq, but
810 * it is earlier than the next (if any), we must
811 * recompute the next-earliest.
812 */
813 dl_rq->earliest_dl.next = next_deadline(rq);
814 }
815 }
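/*
 * For example, with queued deadlines {10ms, 20ms} (curr = 10, next = 20),
 * enqueueing a task with deadline 5ms makes curr = 5 and next = 10, while
 * enqueueing one with deadline 15ms leaves curr = 10 and recomputes next
 * to 15 via next_deadline().
 */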
816
817 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
818 {
819 struct rq *rq = rq_of_dl_rq(dl_rq);
820
821 /*
822 * Since we may have removed our earliest (and/or next earliest)
823 * task we must recompute them.
824 */
825 if (!dl_rq->dl_nr_running) {
826 dl_rq->earliest_dl.curr = 0;
827 dl_rq->earliest_dl.next = 0;
828 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
829 } else {
830 struct rb_node *leftmost = dl_rq->rb_leftmost;
831 struct sched_dl_entity *entry;
832
833 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
834 dl_rq->earliest_dl.curr = entry->deadline;
835 dl_rq->earliest_dl.next = next_deadline(rq);
836 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
837 }
838 }
839
840 #else
841
842 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
843 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
844
845 #endif /* CONFIG_SMP */
846
847 static inline
848 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
849 {
850 int prio = dl_task_of(dl_se)->prio;
851 u64 deadline = dl_se->deadline;
852
853 WARN_ON(!dl_prio(prio));
854 dl_rq->dl_nr_running++;
855 add_nr_running(rq_of_dl_rq(dl_rq), 1);
856
857 inc_dl_deadline(dl_rq, deadline);
858 inc_dl_migration(dl_se, dl_rq);
859 }
860
861 static inline
862 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
863 {
864 int prio = dl_task_of(dl_se)->prio;
865
866 WARN_ON(!dl_prio(prio));
867 WARN_ON(!dl_rq->dl_nr_running);
868 dl_rq->dl_nr_running--;
869 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
870
871 dec_dl_deadline(dl_rq, dl_se->deadline);
872 dec_dl_migration(dl_se, dl_rq);
873 }
874
875 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
876 {
877 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
878 struct rb_node **link = &dl_rq->rb_root.rb_node;
879 struct rb_node *parent = NULL;
880 struct sched_dl_entity *entry;
881 int leftmost = 1;
882
883 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
884
885 while (*link) {
886 parent = *link;
887 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
888 if (dl_time_before(dl_se->deadline, entry->deadline))
889 link = &parent->rb_left;
890 else {
891 link = &parent->rb_right;
892 leftmost = 0;
893 }
894 }
895
896 if (leftmost)
897 dl_rq->rb_leftmost = &dl_se->rb_node;
898
899 rb_link_node(&dl_se->rb_node, parent, link);
900 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
901
902 inc_dl_tasks(dl_se, dl_rq);
903 }
904
905 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
906 {
907 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
908
909 if (RB_EMPTY_NODE(&dl_se->rb_node))
910 return;
911
912 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
913 struct rb_node *next_node;
914
915 next_node = rb_next(&dl_se->rb_node);
916 dl_rq->rb_leftmost = next_node;
917 }
918
919 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
920 RB_CLEAR_NODE(&dl_se->rb_node);
921
922 dec_dl_tasks(dl_se, dl_rq);
923 }
924
925 static void
926 enqueue_dl_entity(struct sched_dl_entity *dl_se,
927 struct sched_dl_entity *pi_se, int flags)
928 {
929 BUG_ON(on_dl_rq(dl_se));
930
931 /*
932 * If this is a wakeup or a new instance, the scheduling
933 * parameters of the task might need updating. Otherwise,
934 * we want a replenishment of its runtime.
935 */
936 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
937 update_dl_entity(dl_se, pi_se);
938 else if (flags & ENQUEUE_REPLENISH)
939 replenish_dl_entity(dl_se, pi_se);
940
941 __enqueue_dl_entity(dl_se);
942 }
943
944 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
945 {
946 __dequeue_dl_entity(dl_se);
947 }
948
949 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
950 {
951 struct task_struct *pi_task = rt_mutex_get_top_task(p);
952 struct sched_dl_entity *pi_se = &p->dl;
953
954 /*
955 * Use the scheduling parameters of the top pi-waiter
956 * task if we have one and its (absolute) deadline is
957 * smaller than ours; otherwise we keep our runtime and
958 * deadline.
959 */
960 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
961 pi_se = &pi_task->dl;
962 } else if (!dl_prio(p->normal_prio)) {
963 /*
964 * Special case in which we have a !SCHED_DEADLINE task
965 * that is going to be deboosted, but exceeds its
966 * runtime while doing so. No point in replenishing
967 * it, as it's going to return to its original
968 * scheduling class after this.
969 */
970 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
971 return;
972 }
973
974 /*
975 * If p is throttled, we do nothing. In fact, if it exhausted
976 * its budget it needs a replenishment and, since it now is on
977 * its rq, the bandwidth timer callback (which clearly has not
978 * run yet) will take care of this.
979 */
980 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
981 return;
982
983 enqueue_dl_entity(&p->dl, pi_se, flags);
984
985 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
986 enqueue_pushable_dl_task(rq, p);
987 }
988
989 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
990 {
991 dequeue_dl_entity(&p->dl);
992 dequeue_pushable_dl_task(rq, p);
993 }
994
995 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
996 {
997 update_curr_dl(rq);
998 __dequeue_task_dl(rq, p, flags);
999 }
1000
1001 /*
1002 * Yield task semantic for -deadline tasks is:
1003 *
1004 * get off the CPU until our next instance, with
1005 * a new runtime. This is of little use now, since we
1006 * don't have a bandwidth reclaiming mechanism. Anyway,
1007 * bandwidth reclaiming is planned for the future, and
1008 * yield_task_dl will indicate that some spare budget
1009 * is available for other task instances to use.
1010 */
1011 static void yield_task_dl(struct rq *rq)
1012 {
1013 struct task_struct *p = rq->curr;
1014
1015 /*
1016 * We make the task go to sleep until its current deadline by
1017 * forcing its runtime to zero. This way, update_curr_dl() stops
1018 * it and the bandwidth timer will wake it up and will give it
1019 * new scheduling parameters (thanks to dl_yielded=1).
1020 */
1021 if (p->dl.runtime > 0) {
1022 rq->curr->dl.dl_yielded = 1;
1023 p->dl.runtime = 0;
1024 }
1025 update_rq_clock(rq);
1026 update_curr_dl(rq);
1027 /*
1028 * Tell update_rq_clock() that we've just updated,
1029 * so we don't do microscopic update in schedule()
1030 * and double the fastpath cost.
1031 */
1032 rq_clock_skip_update(rq, true);
1033 }
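/*
 * So a SCHED_DEADLINE task calling sched_yield() gives up whatever is
 * left of its current runtime and is throttled until its current
 * deadline, when dl_task_timer() replenishes it for the next instance.
 */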
1034
1035 #ifdef CONFIG_SMP
1036
1037 static int find_later_rq(struct task_struct *task);
1038
1039 static int
1040 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1041 {
1042 struct task_struct *curr;
1043 struct rq *rq;
1044
1045 if (sd_flag != SD_BALANCE_WAKE)
1046 goto out;
1047
1048 rq = cpu_rq(cpu);
1049
1050 rcu_read_lock();
1051 curr = READ_ONCE(rq->curr); /* unlocked access */
1052
1053 /*
1054 * If we are dealing with a -deadline task, we must
1055 * decide where to wake it up.
1056 * If it has a later deadline and the current task
1057 * on this rq can't move (provided the waking task
1058 * can!) we prefer to send it somewhere else. On the
1059 * other hand, if it has a shorter deadline, we
1060 * try to make it stay here, it might be important.
1061 */
1062 if (unlikely(dl_task(curr)) &&
1063 (curr->nr_cpus_allowed < 2 ||
1064 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1065 (p->nr_cpus_allowed > 1)) {
1066 int target = find_later_rq(p);
1067
1068 if (target != -1 &&
1069 (dl_time_before(p->dl.deadline,
1070 cpu_rq(target)->dl.earliest_dl.curr) ||
1071 (cpu_rq(target)->dl.dl_nr_running == 0)))
1072 cpu = target;
1073 }
1074 rcu_read_unlock();
1075
1076 out:
1077 return cpu;
1078 }
1079
1080 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1081 {
1082 /*
1083 * Current can't be migrated, useless to reschedule,
1084 * let's hope p can move out.
1085 */
1086 if (rq->curr->nr_cpus_allowed == 1 ||
1087 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1088 return;
1089
1090 /*
1091 * p is migratable, so let's not schedule it and
1092 * see if it is pushed or pulled somewhere else.
1093 */
1094 if (p->nr_cpus_allowed != 1 &&
1095 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1096 return;
1097
1098 resched_curr(rq);
1099 }
1100
1101 #endif /* CONFIG_SMP */
1102
1103 /*
1104 * Only called when both the current and waking task are -deadline
1105 * tasks.
1106 */
1107 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1108 int flags)
1109 {
1110 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1111 resched_curr(rq);
1112 return;
1113 }
1114
1115 #ifdef CONFIG_SMP
1116 /*
1117 * In the unlikely case current and p have the same deadline
1118 * let us try to decide what's the best thing to do...
1119 */
1120 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1121 !test_tsk_need_resched(rq->curr))
1122 check_preempt_equal_dl(rq, p);
1123 #endif /* CONFIG_SMP */
1124 }
1125
1126 #ifdef CONFIG_SCHED_HRTICK
1127 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1128 {
1129 hrtick_start(rq, p->dl.runtime);
1130 }
1131 #else /* !CONFIG_SCHED_HRTICK */
1132 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1133 {
1134 }
1135 #endif
1136
1137 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1138 struct dl_rq *dl_rq)
1139 {
1140 struct rb_node *left = dl_rq->rb_leftmost;
1141
1142 if (!left)
1143 return NULL;
1144
1145 return rb_entry(left, struct sched_dl_entity, rb_node);
1146 }
1147
1148 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1149 {
1150 struct sched_dl_entity *dl_se;
1151 struct task_struct *p;
1152 struct dl_rq *dl_rq;
1153
1154 dl_rq = &rq->dl;
1155
1156 if (need_pull_dl_task(rq, prev)) {
1157 /*
1158 * This is OK, because current is on_cpu, which avoids it being
1159 * picked for load-balance and preemption/IRQs are still
1160 * disabled avoiding further scheduler activity on it and we're
1161 * being very careful to re-start the picking loop.
1162 */
1163 lockdep_unpin_lock(&rq->lock);
1164 pull_dl_task(rq);
1165 lockdep_pin_lock(&rq->lock);
1166 /*
1167 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1168 * means a stop task can slip in, in which case we need to
1169 * re-start task selection.
1170 */
1171 if (rq->stop && task_on_rq_queued(rq->stop))
1172 return RETRY_TASK;
1173 }
1174
1175 /*
1176 * When prev is DL, we may throttle it in put_prev_task().
1177 * So, we update time before we check for dl_nr_running.
1178 */
1179 if (prev->sched_class == &dl_sched_class)
1180 update_curr_dl(rq);
1181
1182 if (unlikely(!dl_rq->dl_nr_running))
1183 return NULL;
1184
1185 put_prev_task(rq, prev);
1186
1187 dl_se = pick_next_dl_entity(rq, dl_rq);
1188 BUG_ON(!dl_se);
1189
1190 p = dl_task_of(dl_se);
1191 p->se.exec_start = rq_clock_task(rq);
1192
1193 /* Running task will never be pushed. */
1194 dequeue_pushable_dl_task(rq, p);
1195
1196 if (hrtick_enabled(rq))
1197 start_hrtick_dl(rq, p);
1198
1199 queue_push_tasks(rq);
1200
1201 return p;
1202 }
1203
1204 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1205 {
1206 update_curr_dl(rq);
1207
1208 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1209 enqueue_pushable_dl_task(rq, p);
1210 }
1211
1212 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1213 {
1214 update_curr_dl(rq);
1215
1216 /*
1217 * Even when we have runtime, update_curr_dl() might have resulted in us
1218 * not being the leftmost task anymore. In that case NEED_RESCHED will
1219 * be set and schedule() will start a new hrtick for the next task.
1220 */
1221 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1222 is_leftmost(p, &rq->dl))
1223 start_hrtick_dl(rq, p);
1224 }
1225
1226 static void task_fork_dl(struct task_struct *p)
1227 {
1228 /*
1229 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1230 * sched_fork()
1231 */
1232 }
1233
1234 static void task_dead_dl(struct task_struct *p)
1235 {
1236 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1237
1238 /*
1239 * Since we are TASK_DEAD we won't slip out of the domain!
1240 */
1241 raw_spin_lock_irq(&dl_b->lock);
1242 /* XXX we should retain the bw until 0-lag */
1243 dl_b->total_bw -= p->dl.dl_bw;
1244 raw_spin_unlock_irq(&dl_b->lock);
1245 }
1246
1247 static void set_curr_task_dl(struct rq *rq)
1248 {
1249 struct task_struct *p = rq->curr;
1250
1251 p->se.exec_start = rq_clock_task(rq);
1252
1253 /* You can't push away the running task */
1254 dequeue_pushable_dl_task(rq, p);
1255 }
1256
1257 #ifdef CONFIG_SMP
1258
1259 /* Only try algorithms three times */
1260 #define DL_MAX_TRIES 3
1261
1262 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1263 {
1264 if (!task_running(rq, p) &&
1265 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1266 return 1;
1267 return 0;
1268 }
1269
1270 /* Returns the second earliest -deadline task, NULL otherwise */
1271 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1272 {
1273 struct rb_node *next_node = rq->dl.rb_leftmost;
1274 struct sched_dl_entity *dl_se;
1275 struct task_struct *p = NULL;
1276
1277 next_node:
1278 next_node = rb_next(next_node);
1279 if (next_node) {
1280 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1281 p = dl_task_of(dl_se);
1282
1283 if (pick_dl_task(rq, p, cpu))
1284 return p;
1285
1286 goto next_node;
1287 }
1288
1289 return NULL;
1290 }
1291
1292 /*
1293 * Return the earliest pushable task on this rq that is suitable to be
1294 * executed on the given CPU, or NULL if there is none:
1295 */
1296 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1297 {
1298 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1299 struct task_struct *p = NULL;
1300
1301 if (!has_pushable_dl_tasks(rq))
1302 return NULL;
1303
1304 next_node:
1305 if (next_node) {
1306 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1307
1308 if (pick_dl_task(rq, p, cpu))
1309 return p;
1310
1311 next_node = rb_next(next_node);
1312 goto next_node;
1313 }
1314
1315 return NULL;
1316 }
1317
1318 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1319
1320 static int find_later_rq(struct task_struct *task)
1321 {
1322 struct sched_domain *sd;
1323 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1324 int this_cpu = smp_processor_id();
1325 int best_cpu, cpu = task_cpu(task);
1326
1327 /* Make sure the mask is initialized first */
1328 if (unlikely(!later_mask))
1329 return -1;
1330
1331 if (task->nr_cpus_allowed == 1)
1332 return -1;
1333
1334 /*
1335 * We have to consider system topology and task affinity
1336 * first, then we can look for a suitable cpu.
1337 */
1338 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1339 task, later_mask);
1340 if (best_cpu == -1)
1341 return -1;
1342
1343 /*
1344 * If we are here, some target has been found,
1345 * the most suitable of which is cached in best_cpu.
1346 * This is, among the runqueues where the current tasks
1347 * have later deadlines than this task's, the rq
1348 * with the latest possible one.
1349 *
1350 * Now we check how well this matches with task's
1351 * affinity and system topology.
1352 *
1353 * The last cpu where the task ran is our first
1354 * guess, since it is most likely cache-hot there.
1355 */
1356 if (cpumask_test_cpu(cpu, later_mask))
1357 return cpu;
1358 /*
1359 * Check if this_cpu is to be skipped (i.e., it is
1360 * not in the mask) or not.
1361 */
1362 if (!cpumask_test_cpu(this_cpu, later_mask))
1363 this_cpu = -1;
1364
1365 rcu_read_lock();
1366 for_each_domain(cpu, sd) {
1367 if (sd->flags & SD_WAKE_AFFINE) {
1368
1369 /*
1370 * If possible, preempting this_cpu is
1371 * cheaper than migrating.
1372 */
1373 if (this_cpu != -1 &&
1374 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1375 rcu_read_unlock();
1376 return this_cpu;
1377 }
1378
1379 /*
1380 * Last chance: if best_cpu is valid and is
1381 * in the mask, that becomes our choice.
1382 */
1383 if (best_cpu < nr_cpu_ids &&
1384 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1385 rcu_read_unlock();
1386 return best_cpu;
1387 }
1388 }
1389 }
1390 rcu_read_unlock();
1391
1392 /*
1393 * At this point, all our guesses failed, we just return
1394 * 'something', and let the caller sort things out.
1395 */
1396 if (this_cpu != -1)
1397 return this_cpu;
1398
1399 cpu = cpumask_any(later_mask);
1400 if (cpu < nr_cpu_ids)
1401 return cpu;
1402
1403 return -1;
1404 }
1405
1406 /* Locks the rq it finds */
1407 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1408 {
1409 struct rq *later_rq = NULL;
1410 int tries;
1411 int cpu;
1412
1413 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1414 cpu = find_later_rq(task);
1415
1416 if ((cpu == -1) || (cpu == rq->cpu))
1417 break;
1418
1419 later_rq = cpu_rq(cpu);
1420
1421 if (later_rq->dl.dl_nr_running &&
1422 !dl_time_before(task->dl.deadline,
1423 later_rq->dl.earliest_dl.curr)) {
1424 /*
1425 * Target rq has tasks of equal or earlier deadline,
1426 * retrying does not release any lock and is unlikely
1427 * to yield a different result.
1428 */
1429 later_rq = NULL;
1430 break;
1431 }
1432
1433 /* Retry if something changed. */
1434 if (double_lock_balance(rq, later_rq)) {
1435 if (unlikely(task_rq(task) != rq ||
1436 !cpumask_test_cpu(later_rq->cpu,
1437 &task->cpus_allowed) ||
1438 task_running(rq, task) ||
1439 !task_on_rq_queued(task))) {
1440 double_unlock_balance(rq, later_rq);
1441 later_rq = NULL;
1442 break;
1443 }
1444 }
1445
1446 /*
1447 * If the rq we found has no -deadline task, or
1448 * its earliest one has a later deadline than our
1449 * task, the rq is a good one.
1450 */
1451 if (!later_rq->dl.dl_nr_running ||
1452 dl_time_before(task->dl.deadline,
1453 later_rq->dl.earliest_dl.curr))
1454 break;
1455
1456 /* Otherwise we try again. */
1457 double_unlock_balance(rq, later_rq);
1458 later_rq = NULL;
1459 }
1460
1461 return later_rq;
1462 }
1463
1464 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1465 {
1466 struct task_struct *p;
1467
1468 if (!has_pushable_dl_tasks(rq))
1469 return NULL;
1470
1471 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1472 struct task_struct, pushable_dl_tasks);
1473
1474 BUG_ON(rq->cpu != task_cpu(p));
1475 BUG_ON(task_current(rq, p));
1476 BUG_ON(p->nr_cpus_allowed <= 1);
1477
1478 BUG_ON(!task_on_rq_queued(p));
1479 BUG_ON(!dl_task(p));
1480
1481 return p;
1482 }
1483
1484 /*
1485 * See if the non-running -deadline tasks on this rq
1486 * can be sent to some other CPU where they can preempt
1487 * and start executing.
1488 */
1489 static int push_dl_task(struct rq *rq)
1490 {
1491 struct task_struct *next_task;
1492 struct rq *later_rq;
1493 int ret = 0;
1494
1495 if (!rq->dl.overloaded)
1496 return 0;
1497
1498 next_task = pick_next_pushable_dl_task(rq);
1499 if (!next_task)
1500 return 0;
1501
1502 retry:
1503 if (unlikely(next_task == rq->curr)) {
1504 WARN_ON(1);
1505 return 0;
1506 }
1507
1508 /*
1509 * If next_task preempts rq->curr, and rq->curr
1510 * can move away, it makes sense to just reschedule
1511 * without going further in pushing next_task.
1512 */
1513 if (dl_task(rq->curr) &&
1514 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1515 rq->curr->nr_cpus_allowed > 1) {
1516 resched_curr(rq);
1517 return 0;
1518 }
1519
1520 /* We might release rq lock */
1521 get_task_struct(next_task);
1522
1523 /* Will lock the rq it'll find */
1524 later_rq = find_lock_later_rq(next_task, rq);
1525 if (!later_rq) {
1526 struct task_struct *task;
1527
1528 /*
1529 * We must check all this again, since
1530 * find_lock_later_rq releases rq->lock and it is
1531 * then possible that next_task has migrated.
1532 */
1533 task = pick_next_pushable_dl_task(rq);
1534 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1535 /*
1536 * The task is still there. We don't try
1537 * again, some other cpu will pull it when ready.
1538 */
1539 goto out;
1540 }
1541
1542 if (!task)
1543 /* No more tasks */
1544 goto out;
1545
1546 put_task_struct(next_task);
1547 next_task = task;
1548 goto retry;
1549 }
1550
1551 deactivate_task(rq, next_task, 0);
1552 set_task_cpu(next_task, later_rq->cpu);
1553 activate_task(later_rq, next_task, 0);
1554 ret = 1;
1555
1556 resched_curr(later_rq);
1557
1558 double_unlock_balance(rq, later_rq);
1559
1560 out:
1561 put_task_struct(next_task);
1562
1563 return ret;
1564 }
1565
1566 static void push_dl_tasks(struct rq *rq)
1567 {
1568 /* push_dl_task() will return true if it moved a -deadline task */
1569 while (push_dl_task(rq))
1570 ;
1571 }
1572
1573 static void pull_dl_task(struct rq *this_rq)
1574 {
1575 int this_cpu = this_rq->cpu, cpu;
1576 struct task_struct *p;
1577 bool resched = false;
1578 struct rq *src_rq;
1579 u64 dmin = LONG_MAX;
1580
1581 if (likely(!dl_overloaded(this_rq)))
1582 return;
1583
1584 /*
1585 * Match the barrier from dl_set_overload(); this guarantees that if we
1586 * see overloaded we must also see the dlo_mask bit.
1587 */
1588 smp_rmb();
1589
1590 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1591 if (this_cpu == cpu)
1592 continue;
1593
1594 src_rq = cpu_rq(cpu);
1595
1596 /*
1597 * It looks racy, and it is! However, as in sched_rt.c,
1598 * we are fine with this.
1599 */
1600 if (this_rq->dl.dl_nr_running &&
1601 dl_time_before(this_rq->dl.earliest_dl.curr,
1602 src_rq->dl.earliest_dl.next))
1603 continue;
1604
1605 /* Might drop this_rq->lock */
1606 double_lock_balance(this_rq, src_rq);
1607
1608 /*
1609 * If there are no more pullable tasks on the
1610 * rq, we're done with it.
1611 */
1612 if (src_rq->dl.dl_nr_running <= 1)
1613 goto skip;
1614
1615 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1616
1617 /*
1618 * We found a task to be pulled if:
1619 * - it preempts our current (if there's one),
1620 * - it will preempt the last one we pulled (if any).
1621 */
1622 if (p && dl_time_before(p->dl.deadline, dmin) &&
1623 (!this_rq->dl.dl_nr_running ||
1624 dl_time_before(p->dl.deadline,
1625 this_rq->dl.earliest_dl.curr))) {
1626 WARN_ON(p == src_rq->curr);
1627 WARN_ON(!task_on_rq_queued(p));
1628
1629 /*
1630 * Then we pull iff p has actually an earlier
1631 * deadline than the current task of its runqueue.
1632 */
1633 if (dl_time_before(p->dl.deadline,
1634 src_rq->curr->dl.deadline))
1635 goto skip;
1636
1637 resched = true;
1638
1639 deactivate_task(src_rq, p, 0);
1640 set_task_cpu(p, this_cpu);
1641 activate_task(this_rq, p, 0);
1642 dmin = p->dl.deadline;
1643
1644 /* Is there any other task even earlier? */
1645 }
1646 skip:
1647 double_unlock_balance(this_rq, src_rq);
1648 }
1649
1650 if (resched)
1651 resched_curr(this_rq);
1652 }
1653
1654 /*
1655 * Since the task is not running and a reschedule is not going to happen
1656 * anytime soon on its runqueue, we try pushing it away now.
1657 */
1658 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1659 {
1660 if (!task_running(rq, p) &&
1661 !test_tsk_need_resched(rq->curr) &&
1662 p->nr_cpus_allowed > 1 &&
1663 dl_task(rq->curr) &&
1664 (rq->curr->nr_cpus_allowed < 2 ||
1665 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1666 push_dl_tasks(rq);
1667 }
1668 }
1669
1670 static void set_cpus_allowed_dl(struct task_struct *p,
1671 const struct cpumask *new_mask)
1672 {
1673 struct root_domain *src_rd;
1674 struct rq *rq;
1675
1676 BUG_ON(!dl_task(p));
1677
1678 rq = task_rq(p);
1679 src_rd = rq->rd;
1680 /*
1681 * Migrating a SCHED_DEADLINE task between exclusive
1682 * cpusets (different root_domains) entails a bandwidth
1683 * update. We already made space for us in the destination
1684 * domain (see cpuset_can_attach()).
1685 */
1686 if (!cpumask_intersects(src_rd->span, new_mask)) {
1687 struct dl_bw *src_dl_b;
1688
1689 src_dl_b = dl_bw_of(cpu_of(rq));
1690 /*
1691 * We now free resources of the root_domain we are migrating
1692 * off. In the worst case, sched_setattr() may temporarily fail
1693 * until we complete the update.
1694 */
1695 raw_spin_lock(&src_dl_b->lock);
1696 __dl_clear(src_dl_b, p->dl.dl_bw);
1697 raw_spin_unlock(&src_dl_b->lock);
1698 }
1699
1700 set_cpus_allowed_common(p, new_mask);
1701 }
1702
1703 /* Assumes rq->lock is held */
1704 static void rq_online_dl(struct rq *rq)
1705 {
1706 if (rq->dl.overloaded)
1707 dl_set_overload(rq);
1708
1709 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1710 if (rq->dl.dl_nr_running > 0)
1711 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1712 }
1713
1714 /* Assumes rq->lock is held */
1715 static void rq_offline_dl(struct rq *rq)
1716 {
1717 if (rq->dl.overloaded)
1718 dl_clear_overload(rq);
1719
1720 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1721 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1722 }
1723
1724 void __init init_sched_dl_class(void)
1725 {
1726 unsigned int i;
1727
1728 for_each_possible_cpu(i)
1729 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1730 GFP_KERNEL, cpu_to_node(i));
1731 }
1732
1733 #endif /* CONFIG_SMP */
1734
1735 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1736 {
1737 /*
1738 * Start the deadline timer; if we switch back to dl before this we'll
1739 * continue consuming our current CBS slice. If we stay outside of
1740 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1741 * task.
1742 */
1743 if (!start_dl_timer(p))
1744 __dl_clear_params(p);
1745
1746 /*
1747 * Since this might be the only -deadline task on the rq,
1748 * this is the right place to try to pull some other one
1749 * from an overloaded cpu, if any.
1750 */
1751 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1752 return;
1753
1754 queue_pull_task(rq);
1755 }
1756
1757 /*
1758 * When switching to -deadline, we may overload the rq, then
1759 * we try to push someone off, if possible.
1760 */
1761 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1762 {
1763 if (task_on_rq_queued(p) && rq->curr != p) {
1764 #ifdef CONFIG_SMP
1765 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1766 queue_push_tasks(rq);
1767 #else
1768 if (dl_task(rq->curr))
1769 check_preempt_curr_dl(rq, p, 0);
1770 else
1771 resched_curr(rq);
1772 #endif
1773 }
1774 }
1775
1776 /*
1777 * If the scheduling parameters of a -deadline task changed,
1778 * a push or pull operation might be needed.
1779 */
1780 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1781 int oldprio)
1782 {
1783 if (task_on_rq_queued(p) || rq->curr == p) {
1784 #ifdef CONFIG_SMP
1785 /*
1786 * This might be too much, but unfortunately
1787 * we don't have the old deadline value, and
1788 * we can't tell whether the task is increasing
1789 * or lowering its prio, so...
1790 */
1791 if (!rq->dl.overloaded)
1792 queue_pull_task(rq);
1793
1794 /*
1795 * If we now have an earlier deadline task than p,
1796 * then reschedule, provided p is still on this
1797 * runqueue.
1798 */
1799 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1800 resched_curr(rq);
1801 #else
1802 /*
1803 * Again, we don't know if p has an earlier
1804 * or later deadline, so let's blindly set a
1805 * (maybe not needed) rescheduling point.
1806 */
1807 resched_curr(rq);
1808 #endif /* CONFIG_SMP */
1809 } else
1810 switched_to_dl(rq, p);
1811 }
1812
1813 const struct sched_class dl_sched_class = {
1814 .next = &rt_sched_class,
1815 .enqueue_task = enqueue_task_dl,
1816 .dequeue_task = dequeue_task_dl,
1817 .yield_task = yield_task_dl,
1818
1819 .check_preempt_curr = check_preempt_curr_dl,
1820
1821 .pick_next_task = pick_next_task_dl,
1822 .put_prev_task = put_prev_task_dl,
1823
1824 #ifdef CONFIG_SMP
1825 .select_task_rq = select_task_rq_dl,
1826 .set_cpus_allowed = set_cpus_allowed_dl,
1827 .rq_online = rq_online_dl,
1828 .rq_offline = rq_offline_dl,
1829 .task_woken = task_woken_dl,
1830 #endif
1831
1832 .set_curr_task = set_curr_task_dl,
1833 .task_tick = task_tick_dl,
1834 .task_fork = task_fork_dl,
1835 .task_dead = task_dead_dl,
1836
1837 .prio_changed = prio_changed_dl,
1838 .switched_from = switched_from_dl,
1839 .switched_to = switched_to_dl,
1840
1841 .update_curr = update_curr_dl,
1842 };
1843
1844 #ifdef CONFIG_SCHED_DEBUG
1845 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1846
1847 void print_dl_stats(struct seq_file *m, int cpu)
1848 {
1849 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1850 }
1851 #endif /* CONFIG_SCHED_DEBUG */