kernel/sched/rt.c
1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
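/*
 * Default SCHED_RR timeslice, in jiffies (RR_TIMESLICE is nominally 100ms
 * worth of ticks); adjustable at runtime via the sched_rr_timeslice_ms
 * sysctl.
 */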
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
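/*
 * System-wide default RT bandwidth: period and runtime come from the
 * sched_rt_period_us and sched_rt_runtime_us sysctls (nominally 0.95s of
 * RT runtime per 1s period); when CONFIG_RT_GROUP_SCHED is off this is the
 * only bandwidth pool in use.
 */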
14 struct rt_bandwidth def_rt_bandwidth;
15
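/*
 * Per-rt_bandwidth period timer: each expiry forwards the timer by one
 * rt_period and lets do_sched_rt_period_timer() refill/unthrottle the
 * runqueues covered by this bandwidth; the timer is stopped once they are
 * all idle.
 */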
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18 struct rt_bandwidth *rt_b =
19 container_of(timer, struct rt_bandwidth, rt_period_timer);
20 ktime_t now;
21 int overrun;
22 int idle = 0;
23
24 for (;;) {
25 now = hrtimer_cb_get_time(timer);
26 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28 if (!overrun)
29 break;
30
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 }
33
34 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39 rt_b->rt_period = ns_to_ktime(period);
40 rt_b->rt_runtime = runtime;
41
42 raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44 hrtimer_init(&rt_b->rt_period_timer,
45 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46 rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 return;
53
54 if (hrtimer_active(&rt_b->rt_period_timer))
55 return;
56
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59 raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64 struct rt_prio_array *array;
65 int i;
66
67 array = &rt_rq->active;
68 for (i = 0; i < MAX_RT_PRIO; i++) {
69 INIT_LIST_HEAD(array->queue + i);
70 __clear_bit(i, array->bitmap);
71 }
72 /* delimiter for bitsearch: */
73 __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76 rt_rq->highest_prio.curr = MAX_RT_PRIO;
77 rt_rq->highest_prio.next = MAX_RT_PRIO;
78 rt_rq->rt_nr_migratory = 0;
79 rt_rq->overloaded = 0;
80 plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82
83 rt_rq->rt_time = 0;
84 rt_rq->rt_throttled = 0;
85 rt_rq->rt_runtime = 0;
86 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
87 }
88
89 #ifdef CONFIG_RT_GROUP_SCHED
90 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91 {
92 hrtimer_cancel(&rt_b->rt_period_timer);
93 }
94
95 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
97 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98 {
99 #ifdef CONFIG_SCHED_DEBUG
100 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101 #endif
102 return container_of(rt_se, struct task_struct, rt);
103 }
104
105 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106 {
107 return rt_rq->rq;
108 }
109
110 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111 {
112 return rt_se->rt_rq;
113 }
114
115 void free_rt_sched_group(struct task_group *tg)
116 {
117 int i;
118
119 if (tg->rt_se)
120 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122 for_each_possible_cpu(i) {
123 if (tg->rt_rq)
124 kfree(tg->rt_rq[i]);
125 if (tg->rt_se)
126 kfree(tg->rt_se[i]);
127 }
128
129 kfree(tg->rt_rq);
130 kfree(tg->rt_se);
131 }
132
133 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134 struct sched_rt_entity *rt_se, int cpu,
135 struct sched_rt_entity *parent)
136 {
137 struct rq *rq = cpu_rq(cpu);
138
139 rt_rq->highest_prio.curr = MAX_RT_PRIO;
140 rt_rq->rt_nr_boosted = 0;
141 rt_rq->rq = rq;
142 rt_rq->tg = tg;
143
144 tg->rt_rq[cpu] = rt_rq;
145 tg->rt_se[cpu] = rt_se;
146
147 if (!rt_se)
148 return;
149
150 if (!parent)
151 rt_se->rt_rq = &rq->rt;
152 else
153 rt_se->rt_rq = parent->my_q;
154
155 rt_se->my_q = rt_rq;
156 rt_se->parent = parent;
157 INIT_LIST_HEAD(&rt_se->run_list);
158 }
159
160 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161 {
162 struct rt_rq *rt_rq;
163 struct sched_rt_entity *rt_se;
164 int i;
165
166 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 if (!tg->rt_rq)
168 goto err;
169 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170 if (!tg->rt_se)
171 goto err;
172
173 init_rt_bandwidth(&tg->rt_bandwidth,
174 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176 for_each_possible_cpu(i) {
177 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178 GFP_KERNEL, cpu_to_node(i));
179 if (!rt_rq)
180 goto err;
181
182 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183 GFP_KERNEL, cpu_to_node(i));
184 if (!rt_se)
185 goto err_free_rq;
186
187 init_rt_rq(rt_rq, cpu_rq(i));
188 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190 }
191
192 return 1;
193
194 err_free_rq:
195 kfree(rt_rq);
196 err:
197 return 0;
198 }
199
200 #else /* CONFIG_RT_GROUP_SCHED */
201
202 #define rt_entity_is_task(rt_se) (1)
203
204 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205 {
206 return container_of(rt_se, struct task_struct, rt);
207 }
208
209 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210 {
211 return container_of(rt_rq, struct rq, rt);
212 }
213
214 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215 {
216 struct task_struct *p = rt_task_of(rt_se);
217 struct rq *rq = task_rq(p);
218
219 return &rq->rt;
220 }
221
222 void free_rt_sched_group(struct task_group *tg) { }
223
224 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225 {
226 return 1;
227 }
228 #endif /* CONFIG_RT_GROUP_SCHED */
229
230 #ifdef CONFIG_SMP
231
232 static int pull_rt_task(struct rq *this_rq);
233
234 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
235 {
236 /* Try to pull RT tasks here if we lower this rq's prio */
237 return rq->rt.highest_prio.curr > prev->prio;
238 }
239
240 static inline int rt_overloaded(struct rq *rq)
241 {
242 return atomic_read(&rq->rd->rto_count);
243 }
244
245 static inline void rt_set_overload(struct rq *rq)
246 {
247 if (!rq->online)
248 return;
249
250 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
251 /*
252 * Make sure the mask is visible before we set
253 * the overload count. That is checked to determine
254 * if we should look at the mask. It would be a shame
255 * if we looked at the mask, but the mask was not
256 * updated yet.
257 *
258 * Matched by the barrier in pull_rt_task().
259 */
260 smp_wmb();
261 atomic_inc(&rq->rd->rto_count);
262 }
263
264 static inline void rt_clear_overload(struct rq *rq)
265 {
266 if (!rq->online)
267 return;
268
269 /* the order here really doesn't matter */
270 atomic_dec(&rq->rd->rto_count);
271 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
272 }
273
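/*
 * Maintain the RT overload state of this runqueue: it is flagged as
 * overloaded in the root domain (rto_mask + rto_count) when more than one
 * RT task is queued and at least one of them is allowed to migrate.
 */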
274 static void update_rt_migration(struct rt_rq *rt_rq)
275 {
276 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
277 if (!rt_rq->overloaded) {
278 rt_set_overload(rq_of_rt_rq(rt_rq));
279 rt_rq->overloaded = 1;
280 }
281 } else if (rt_rq->overloaded) {
282 rt_clear_overload(rq_of_rt_rq(rt_rq));
283 rt_rq->overloaded = 0;
284 }
285 }
286
287 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
288 {
289 struct task_struct *p;
290
291 if (!rt_entity_is_task(rt_se))
292 return;
293
294 p = rt_task_of(rt_se);
295 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
296
297 rt_rq->rt_nr_total++;
298 if (p->nr_cpus_allowed > 1)
299 rt_rq->rt_nr_migratory++;
300
301 update_rt_migration(rt_rq);
302 }
303
304 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
305 {
306 struct task_struct *p;
307
308 if (!rt_entity_is_task(rt_se))
309 return;
310
311 p = rt_task_of(rt_se);
312 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
313
314 rt_rq->rt_nr_total--;
315 if (p->nr_cpus_allowed > 1)
316 rt_rq->rt_nr_migratory--;
317
318 update_rt_migration(rt_rq);
319 }
320
321 static inline int has_pushable_tasks(struct rq *rq)
322 {
323 return !plist_head_empty(&rq->rt.pushable_tasks);
324 }
325
326 static inline void set_post_schedule(struct rq *rq)
327 {
328 /*
329 * We detect this state here so that we can avoid taking the RQ
330 * lock again later if there is no need to push
331 */
332 rq->post_schedule = has_pushable_tasks(rq);
333 }
334
335 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
336 {
337 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
338 plist_node_init(&p->pushable_tasks, p->prio);
339 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
340
341 /* Update the highest prio pushable task */
342 if (p->prio < rq->rt.highest_prio.next)
343 rq->rt.highest_prio.next = p->prio;
344 }
345
346 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347 {
348 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
349
350 /* Update the new highest prio pushable task */
351 if (has_pushable_tasks(rq)) {
352 p = plist_first_entry(&rq->rt.pushable_tasks,
353 struct task_struct, pushable_tasks);
354 rq->rt.highest_prio.next = p->prio;
355 } else
356 rq->rt.highest_prio.next = MAX_RT_PRIO;
357 }
358
359 #else
360
361 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
362 {
363 }
364
365 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
366 {
367 }
368
369 static inline
370 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
371 {
372 }
373
374 static inline
375 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
376 {
377 }
378
379 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
380 {
381 return false;
382 }
383
384 static inline int pull_rt_task(struct rq *this_rq)
385 {
386 return 0;
387 }
388
389 static inline void set_post_schedule(struct rq *rq)
390 {
391 }
392 #endif /* CONFIG_SMP */
393
394 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
395 {
396 return !list_empty(&rt_se->run_list);
397 }
398
399 #ifdef CONFIG_RT_GROUP_SCHED
400
401 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
402 {
403 if (!rt_rq->tg)
404 return RUNTIME_INF;
405
406 return rt_rq->rt_runtime;
407 }
408
409 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
410 {
411 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
412 }
413
414 typedef struct task_group *rt_rq_iter_t;
415
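/*
 * Walk to the next entry on the RCU-protected task_groups list, skipping
 * autogroups; returns NULL once the end of the list is reached.
 */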
416 static inline struct task_group *next_task_group(struct task_group *tg)
417 {
418 do {
419 tg = list_entry_rcu(tg->list.next,
420 typeof(struct task_group), list);
421 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
422
423 if (&tg->list == &task_groups)
424 tg = NULL;
425
426 return tg;
427 }
428
429 #define for_each_rt_rq(rt_rq, iter, rq) \
430 for (iter = container_of(&task_groups, typeof(*iter), list); \
431 (iter = next_task_group(iter)) && \
432 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
433
434 #define for_each_sched_rt_entity(rt_se) \
435 for (; rt_se; rt_se = rt_se->parent)
436
437 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
438 {
439 return rt_se->my_q;
440 }
441
442 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
443 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
444
445 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
446 {
447 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
448 struct sched_rt_entity *rt_se;
449
450 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
451
452 rt_se = rt_rq->tg->rt_se[cpu];
453
454 if (rt_rq->rt_nr_running) {
455 if (rt_se && !on_rt_rq(rt_se))
456 enqueue_rt_entity(rt_se, false);
457 if (rt_rq->highest_prio.curr < curr->prio)
458 resched_task(curr);
459 }
460 }
461
462 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
463 {
464 struct sched_rt_entity *rt_se;
465 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
466
467 rt_se = rt_rq->tg->rt_se[cpu];
468
469 if (rt_se && on_rt_rq(rt_se))
470 dequeue_rt_entity(rt_se);
471 }
472
473 static int rt_se_boosted(struct sched_rt_entity *rt_se)
474 {
475 struct rt_rq *rt_rq = group_rt_rq(rt_se);
476 struct task_struct *p;
477
478 if (rt_rq)
479 return !!rt_rq->rt_nr_boosted;
480
481 p = rt_task_of(rt_se);
482 return p->prio != p->normal_prio;
483 }
484
485 #ifdef CONFIG_SMP
486 static inline const struct cpumask *sched_rt_period_mask(void)
487 {
488 return this_rq()->rd->span;
489 }
490 #else
491 static inline const struct cpumask *sched_rt_period_mask(void)
492 {
493 return cpu_online_mask;
494 }
495 #endif
496
497 static inline
498 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
499 {
500 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
501 }
502
503 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
504 {
505 return &rt_rq->tg->rt_bandwidth;
506 }
507
508 #else /* !CONFIG_RT_GROUP_SCHED */
509
510 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
511 {
512 return rt_rq->rt_runtime;
513 }
514
515 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
516 {
517 return ktime_to_ns(def_rt_bandwidth.rt_period);
518 }
519
520 typedef struct rt_rq *rt_rq_iter_t;
521
522 #define for_each_rt_rq(rt_rq, iter, rq) \
523 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
524
525 #define for_each_sched_rt_entity(rt_se) \
526 for (; rt_se; rt_se = NULL)
527
528 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
529 {
530 return NULL;
531 }
532
533 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
534 {
535 if (rt_rq->rt_nr_running)
536 resched_task(rq_of_rt_rq(rt_rq)->curr);
537 }
538
539 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
540 {
541 }
542
543 static inline const struct cpumask *sched_rt_period_mask(void)
544 {
545 return cpu_online_mask;
546 }
547
548 static inline
549 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
550 {
551 return &cpu_rq(cpu)->rt;
552 }
553
554 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
555 {
556 return &def_rt_bandwidth;
557 }
558
559 #endif /* CONFIG_RT_GROUP_SCHED */
560
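/*
 * Report whether RT bandwidth is still being accounted on this rt_rq:
 * true while the period timer is active or the queue has not yet used up
 * its runtime budget.
 */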
561 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
562 {
563 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
564
565 return (hrtimer_active(&rt_b->rt_period_timer) ||
566 rt_rq->rt_time < rt_b->rt_runtime);
567 }
568
569 #ifdef CONFIG_SMP
570 /*
571 * We ran out of runtime, see if we can borrow some from our neighbours.
572 */
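/*
 * Illustrative example (not taken from the code): with a 1s period, a 0.95s
 * per-CPU budget and four CPUs in the root domain, a starved rt_rq may take
 * up to 1/4 of each sibling's unused budget per pass, and never ends up with
 * more than one period's worth of runtime.
 */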
573 static int do_balance_runtime(struct rt_rq *rt_rq)
574 {
575 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
576 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
577 int i, weight, more = 0;
578 u64 rt_period;
579
580 weight = cpumask_weight(rd->span);
581
582 raw_spin_lock(&rt_b->rt_runtime_lock);
583 rt_period = ktime_to_ns(rt_b->rt_period);
584 for_each_cpu(i, rd->span) {
585 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
586 s64 diff;
587
588 if (iter == rt_rq)
589 continue;
590
591 raw_spin_lock(&iter->rt_runtime_lock);
592 /*
593 * Either all rqs have inf runtime and there's nothing to steal
594 * or __disable_runtime() below sets a specific rq to inf to
595 * indicate it's been disabled and disallow stealing.
596 */
597 if (iter->rt_runtime == RUNTIME_INF)
598 goto next;
599
600 /*
601 * From runqueues with spare time, take 1/n part of their
602 * spare time, but no more than our period.
603 */
604 diff = iter->rt_runtime - iter->rt_time;
605 if (diff > 0) {
606 diff = div_u64((u64)diff, weight);
607 if (rt_rq->rt_runtime + diff > rt_period)
608 diff = rt_period - rt_rq->rt_runtime;
609 iter->rt_runtime -= diff;
610 rt_rq->rt_runtime += diff;
611 more = 1;
612 if (rt_rq->rt_runtime == rt_period) {
613 raw_spin_unlock(&iter->rt_runtime_lock);
614 break;
615 }
616 }
617 next:
618 raw_spin_unlock(&iter->rt_runtime_lock);
619 }
620 raw_spin_unlock(&rt_b->rt_runtime_lock);
621
622 return more;
623 }
624
625 /*
626 * Ensure this RQ takes back all the runtime it lent to its neighbours.
627 */
628 static void __disable_runtime(struct rq *rq)
629 {
630 struct root_domain *rd = rq->rd;
631 rt_rq_iter_t iter;
632 struct rt_rq *rt_rq;
633
634 if (unlikely(!scheduler_running))
635 return;
636
637 for_each_rt_rq(rt_rq, iter, rq) {
638 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
639 s64 want;
640 int i;
641
642 raw_spin_lock(&rt_b->rt_runtime_lock);
643 raw_spin_lock(&rt_rq->rt_runtime_lock);
644 /*
645 * Either we're all inf and nobody needs to borrow, or we're
646 * already disabled and thus have nothing to do, or we have
647 * exactly the right amount of runtime to take out.
648 */
649 if (rt_rq->rt_runtime == RUNTIME_INF ||
650 rt_rq->rt_runtime == rt_b->rt_runtime)
651 goto balanced;
652 raw_spin_unlock(&rt_rq->rt_runtime_lock);
653
654 /*
655 * Calculate the difference between what we started out with
656 * and what we currently have; that's the amount of runtime
657 * we lent out and now have to reclaim.
658 */
659 want = rt_b->rt_runtime - rt_rq->rt_runtime;
660
661 /*
662 * Greedy reclaim, take back as much as we can.
663 */
664 for_each_cpu(i, rd->span) {
665 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
666 s64 diff;
667
668 /*
669 * Can't reclaim from ourselves or disabled runqueues.
670 */
671 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
672 continue;
673
674 raw_spin_lock(&iter->rt_runtime_lock);
675 if (want > 0) {
676 diff = min_t(s64, iter->rt_runtime, want);
677 iter->rt_runtime -= diff;
678 want -= diff;
679 } else {
680 iter->rt_runtime -= want;
681 want -= want;
682 }
683 raw_spin_unlock(&iter->rt_runtime_lock);
684
685 if (!want)
686 break;
687 }
688
689 raw_spin_lock(&rt_rq->rt_runtime_lock);
690 /*
691 * We cannot be left wanting - that would mean some runtime
692 * leaked out of the system.
693 */
694 BUG_ON(want);
695 balanced:
696 /*
697 * Disable all the borrow logic by pretending we have inf
698 * runtime - in which case borrowing doesn't make sense.
699 */
700 rt_rq->rt_runtime = RUNTIME_INF;
701 rt_rq->rt_throttled = 0;
702 raw_spin_unlock(&rt_rq->rt_runtime_lock);
703 raw_spin_unlock(&rt_b->rt_runtime_lock);
704 }
705 }
706
707 static void __enable_runtime(struct rq *rq)
708 {
709 rt_rq_iter_t iter;
710 struct rt_rq *rt_rq;
711
712 if (unlikely(!scheduler_running))
713 return;
714
715 /*
716 * Reset each runqueue's bandwidth settings
717 */
718 for_each_rt_rq(rt_rq, iter, rq) {
719 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
720
721 raw_spin_lock(&rt_b->rt_runtime_lock);
722 raw_spin_lock(&rt_rq->rt_runtime_lock);
723 rt_rq->rt_runtime = rt_b->rt_runtime;
724 rt_rq->rt_time = 0;
725 rt_rq->rt_throttled = 0;
726 raw_spin_unlock(&rt_rq->rt_runtime_lock);
727 raw_spin_unlock(&rt_b->rt_runtime_lock);
728 }
729 }
730
731 static int balance_runtime(struct rt_rq *rt_rq)
732 {
733 int more = 0;
734
735 if (!sched_feat(RT_RUNTIME_SHARE))
736 return more;
737
738 if (rt_rq->rt_time > rt_rq->rt_runtime) {
739 raw_spin_unlock(&rt_rq->rt_runtime_lock);
740 more = do_balance_runtime(rt_rq);
741 raw_spin_lock(&rt_rq->rt_runtime_lock);
742 }
743
744 return more;
745 }
746 #else /* !CONFIG_SMP */
747 static inline int balance_runtime(struct rt_rq *rt_rq)
748 {
749 return 0;
750 }
751 #endif /* CONFIG_SMP */
752
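/*
 * Period-timer work proper: for each runqueue covered by @rt_b, pay back up
 * to @overrun periods worth of accrued rt_time, unthrottle queues that have
 * dropped back under their budget, and report whether the timer may go idle.
 */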
753 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
754 {
755 int i, idle = 1, throttled = 0;
756 const struct cpumask *span;
757
758 span = sched_rt_period_mask();
759 #ifdef CONFIG_RT_GROUP_SCHED
760 /*
761 * FIXME: isolated CPUs should really leave the root task group,
762 * whether they are isolcpus or were isolated via cpusets, lest
763 * the timer run on a CPU which does not service all runqueues,
764 * potentially leaving other CPUs indefinitely throttled. If
765 * isolation is really required, the user will turn the throttle
766 * off to kill the perturbations it causes anyway. Meanwhile,
767 * this maintains functionality for boot and/or troubleshooting.
768 */
769 if (rt_b == &root_task_group.rt_bandwidth)
770 span = cpu_online_mask;
771 #endif
772 for_each_cpu(i, span) {
773 int enqueue = 0;
774 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
775 struct rq *rq = rq_of_rt_rq(rt_rq);
776
777 raw_spin_lock(&rq->lock);
778 if (rt_rq->rt_time) {
779 u64 runtime;
780
781 raw_spin_lock(&rt_rq->rt_runtime_lock);
782 if (rt_rq->rt_throttled)
783 balance_runtime(rt_rq);
784 runtime = rt_rq->rt_runtime;
785 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
786 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
787 rt_rq->rt_throttled = 0;
788 enqueue = 1;
789
790 /*
791 * Force a clock update if the CPU was idle,
792 * lest wakeup -> unthrottle time accumulate.
793 */
794 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
795 rq->skip_clock_update = -1;
796 }
797 if (rt_rq->rt_time || rt_rq->rt_nr_running)
798 idle = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 } else if (rt_rq->rt_nr_running) {
801 idle = 0;
802 if (!rt_rq_throttled(rt_rq))
803 enqueue = 1;
804 }
805 if (rt_rq->rt_throttled)
806 throttled = 1;
807
808 if (enqueue)
809 sched_rt_rq_enqueue(rt_rq);
810 raw_spin_unlock(&rq->lock);
811 }
812
813 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
814 return 1;
815
816 return idle;
817 }
818
819 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
820 {
821 #ifdef CONFIG_RT_GROUP_SCHED
822 struct rt_rq *rt_rq = group_rt_rq(rt_se);
823
824 if (rt_rq)
825 return rt_rq->highest_prio.curr;
826 #endif
827
828 return rt_task_of(rt_se)->prio;
829 }
830
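/*
 * Check, after charging runtime, whether this rt_rq has exceeded its budget.
 * Try to borrow from siblings first; if the queue is still over budget,
 * throttle it (dequeue it from its parent) and return 1 so the caller can
 * reschedule.
 */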
831 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
832 {
833 u64 runtime = sched_rt_runtime(rt_rq);
834
835 if (rt_rq->rt_throttled)
836 return rt_rq_throttled(rt_rq);
837
838 if (runtime >= sched_rt_period(rt_rq))
839 return 0;
840
841 balance_runtime(rt_rq);
842 runtime = sched_rt_runtime(rt_rq);
843 if (runtime == RUNTIME_INF)
844 return 0;
845
846 if (rt_rq->rt_time > runtime) {
847 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
848
849 /*
850 * Don't actually throttle groups that have no runtime assigned
851 * but accrue some time due to boosting.
852 */
853 if (likely(rt_b->rt_runtime)) {
854 static bool once = false;
855
856 rt_rq->rt_throttled = 1;
857
858 if (!once) {
859 once = true;
860 printk_sched("sched: RT throttling activated\n");
861 }
862 } else {
863 /*
864 * In case we did anyway, make it go away;
865 * replenishment is a joke, since it will replenish us
866 * with exactly 0 ns.
867 */
868 rt_rq->rt_time = 0;
869 }
870
871 if (rt_rq_throttled(rt_rq)) {
872 sched_rt_rq_dequeue(rt_rq);
873 return 1;
874 }
875 }
876
877 return 0;
878 }
879
880 /*
881 * Update the current task's runtime statistics. Skip current tasks that
882 * are not in our scheduling class.
883 */
884 static void update_curr_rt(struct rq *rq)
885 {
886 struct task_struct *curr = rq->curr;
887 struct sched_rt_entity *rt_se = &curr->rt;
888 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
889 u64 delta_exec;
890
891 if (curr->sched_class != &rt_sched_class)
892 return;
893
894 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
895 if (unlikely((s64)delta_exec <= 0))
896 return;
897
898 schedstat_set(curr->se.statistics.exec_max,
899 max(curr->se.statistics.exec_max, delta_exec));
900
901 curr->se.sum_exec_runtime += delta_exec;
902 account_group_exec_runtime(curr, delta_exec);
903
904 curr->se.exec_start = rq_clock_task(rq);
905 cpuacct_charge(curr, delta_exec);
906
907 sched_rt_avg_update(rq, delta_exec);
908
909 if (!rt_bandwidth_enabled())
910 return;
911
912 for_each_sched_rt_entity(rt_se) {
913 rt_rq = rt_rq_of_se(rt_se);
914
915 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
916 raw_spin_lock(&rt_rq->rt_runtime_lock);
917 rt_rq->rt_time += delta_exec;
918 if (sched_rt_runtime_exceeded(rt_rq))
919 resched_task(curr);
920 raw_spin_unlock(&rt_rq->rt_runtime_lock);
921 }
922 }
923 }
924
925 #if defined CONFIG_SMP
926
927 static void
928 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
929 {
930 struct rq *rq = rq_of_rt_rq(rt_rq);
931
932 #ifdef CONFIG_RT_GROUP_SCHED
933 /*
934 * Change rq's cpupri only if rt_rq is the top queue.
935 */
936 if (&rq->rt != rt_rq)
937 return;
938 #endif
939 if (rq->online && prio < prev_prio)
940 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
941 }
942
943 static void
944 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
945 {
946 struct rq *rq = rq_of_rt_rq(rt_rq);
947
948 #ifdef CONFIG_RT_GROUP_SCHED
949 /*
950 * Change rq's cpupri only if rt_rq is the top queue.
951 */
952 if (&rq->rt != rt_rq)
953 return;
954 #endif
955 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
956 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
957 }
958
959 #else /* CONFIG_SMP */
960
961 static inline
962 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
963 static inline
964 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
965
966 #endif /* CONFIG_SMP */
967
968 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
969 static void
970 inc_rt_prio(struct rt_rq *rt_rq, int prio)
971 {
972 int prev_prio = rt_rq->highest_prio.curr;
973
974 if (prio < prev_prio)
975 rt_rq->highest_prio.curr = prio;
976
977 inc_rt_prio_smp(rt_rq, prio, prev_prio);
978 }
979
980 static void
981 dec_rt_prio(struct rt_rq *rt_rq, int prio)
982 {
983 int prev_prio = rt_rq->highest_prio.curr;
984
985 if (rt_rq->rt_nr_running) {
986
987 WARN_ON(prio < prev_prio);
988
989 /*
990 * This may have been our highest task, and therefore
991 * we may have some recomputation to do
992 */
993 if (prio == prev_prio) {
994 struct rt_prio_array *array = &rt_rq->active;
995
996 rt_rq->highest_prio.curr =
997 sched_find_first_bit(array->bitmap);
998 }
999
1000 } else
1001 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1002
1003 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1004 }
1005
1006 #else
1007
1008 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1009 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1010
1011 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1012
1013 #ifdef CONFIG_RT_GROUP_SCHED
1014
1015 static void
1016 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1017 {
1018 if (rt_se_boosted(rt_se))
1019 rt_rq->rt_nr_boosted++;
1020
1021 if (rt_rq->tg)
1022 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1023 }
1024
1025 static void
1026 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1027 {
1028 if (rt_se_boosted(rt_se))
1029 rt_rq->rt_nr_boosted--;
1030
1031 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1032 }
1033
1034 #else /* CONFIG_RT_GROUP_SCHED */
1035
1036 static void
1037 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1038 {
1039 start_rt_bandwidth(&def_rt_bandwidth);
1040 }
1041
1042 static inline
1043 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1044
1045 #endif /* CONFIG_RT_GROUP_SCHED */
1046
1047 static inline
1048 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1049 {
1050 int prio = rt_se_prio(rt_se);
1051
1052 WARN_ON(!rt_prio(prio));
1053 rt_rq->rt_nr_running++;
1054
1055 inc_rt_prio(rt_rq, prio);
1056 inc_rt_migration(rt_se, rt_rq);
1057 inc_rt_group(rt_se, rt_rq);
1058 }
1059
1060 static inline
1061 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1062 {
1063 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1064 WARN_ON(!rt_rq->rt_nr_running);
1065 rt_rq->rt_nr_running--;
1066
1067 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1068 dec_rt_migration(rt_se, rt_rq);
1069 dec_rt_group(rt_se, rt_rq);
1070 }
1071
1072 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1073 {
1074 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1075 struct rt_prio_array *array = &rt_rq->active;
1076 struct rt_rq *group_rq = group_rt_rq(rt_se);
1077 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1078
1079 /*
1080 * Don't enqueue the group if it's throttled, or when empty.
1081 * The latter is a consequence of the former when a child group
1082 * gets throttled and the current group doesn't have any other
1083 * active members.
1084 */
1085 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1086 return;
1087
1088 if (head)
1089 list_add(&rt_se->run_list, queue);
1090 else
1091 list_add_tail(&rt_se->run_list, queue);
1092 __set_bit(rt_se_prio(rt_se), array->bitmap);
1093
1094 inc_rt_tasks(rt_se, rt_rq);
1095 }
1096
1097 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1098 {
1099 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1100 struct rt_prio_array *array = &rt_rq->active;
1101
1102 list_del_init(&rt_se->run_list);
1103 if (list_empty(array->queue + rt_se_prio(rt_se)))
1104 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1105
1106 dec_rt_tasks(rt_se, rt_rq);
1107 }
1108
1109 /*
1110 * Because the prio of an upper entry depends on the lower
1111 * entries, we must remove entries top-down.
1112 */
1113 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1114 {
1115 struct sched_rt_entity *back = NULL;
1116
1117 for_each_sched_rt_entity(rt_se) {
1118 rt_se->back = back;
1119 back = rt_se;
1120 }
1121
1122 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1123 if (on_rt_rq(rt_se))
1124 __dequeue_rt_entity(rt_se);
1125 }
1126 }
1127
1128 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1129 {
1130 dequeue_rt_stack(rt_se);
1131 for_each_sched_rt_entity(rt_se)
1132 __enqueue_rt_entity(rt_se, head);
1133 }
1134
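/*
 * Dequeue rt_se along with all of its ancestors, then re-enqueue any group
 * entity that still has runnable children, so that removing rt_se does not
 * empty out its parents.
 */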
1135 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1136 {
1137 dequeue_rt_stack(rt_se);
1138
1139 for_each_sched_rt_entity(rt_se) {
1140 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1141
1142 if (rt_rq && rt_rq->rt_nr_running)
1143 __enqueue_rt_entity(rt_se, false);
1144 }
1145 }
1146
1147 /*
1148 * Adding/removing a task to/from a priority array:
1149 */
1150 static void
1151 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1152 {
1153 struct sched_rt_entity *rt_se = &p->rt;
1154
1155 if (flags & ENQUEUE_WAKEUP)
1156 rt_se->timeout = 0;
1157
1158 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1159
1160 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1161 enqueue_pushable_task(rq, p);
1162
1163 inc_nr_running(rq);
1164 }
1165
1166 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1167 {
1168 struct sched_rt_entity *rt_se = &p->rt;
1169
1170 update_curr_rt(rq);
1171 dequeue_rt_entity(rt_se);
1172
1173 dequeue_pushable_task(rq, p);
1174
1175 dec_nr_running(rq);
1176 }
1177
1178 /*
1179 * Put the task at the head or the end of the run list without the overhead
1180 * of a dequeue followed by an enqueue.
1181 */
1182 static void
1183 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1184 {
1185 if (on_rt_rq(rt_se)) {
1186 struct rt_prio_array *array = &rt_rq->active;
1187 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1188
1189 if (head)
1190 list_move(&rt_se->run_list, queue);
1191 else
1192 list_move_tail(&rt_se->run_list, queue);
1193 }
1194 }
1195
1196 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1197 {
1198 struct sched_rt_entity *rt_se = &p->rt;
1199 struct rt_rq *rt_rq;
1200
1201 for_each_sched_rt_entity(rt_se) {
1202 rt_rq = rt_rq_of_se(rt_se);
1203 requeue_rt_entity(rt_rq, rt_se, head);
1204 }
1205 }
1206
1207 static void yield_task_rt(struct rq *rq)
1208 {
1209 requeue_task_rt(rq, rq->curr, 0);
1210 }
1211
1212 #ifdef CONFIG_SMP
1213 static int find_lowest_rq(struct task_struct *task);
1214
1215 static int
1216 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1217 {
1218 struct task_struct *curr;
1219 struct rq *rq;
1220
1221 if (p->nr_cpus_allowed == 1)
1222 goto out;
1223
1224 /* For anything but wake ups, just return the task_cpu */
1225 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1226 goto out;
1227
1228 rq = cpu_rq(cpu);
1229
1230 rcu_read_lock();
1231 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1232
1233 /*
1234 * If the current task on @p's runqueue is an RT task, then
1235 * try to see if we can wake this RT task up on another
1236 * runqueue. Otherwise simply start this RT task
1237 * on its current runqueue.
1238 *
1239 * We want to avoid overloading runqueues. If the woken
1240 * task is of higher priority, then it will stay on this CPU
1241 * and the lower prio task should be moved to another CPU.
1242 * Even though this will probably make the lower prio task
1243 * lose its cache, we do not want to bounce a higher task
1244 * around just because it gave up its CPU, perhaps for a
1245 * lock?
1246 *
1247 * For equal prio tasks, we just let the scheduler sort it out.
1248 *
1249 * Otherwise, just let it ride on the affined RQ and the
1250 * post-schedule router will push the preempted task away
1251 *
1252 * This test is optimistic, if we get it wrong the load-balancer
1253 * will have to sort it out.
1254 */
1255 if (curr && unlikely(rt_task(curr)) &&
1256 (curr->nr_cpus_allowed < 2 ||
1257 curr->prio <= p->prio)) {
1258 int target = find_lowest_rq(p);
1259
1260 if (target != -1)
1261 cpu = target;
1262 }
1263 rcu_read_unlock();
1264
1265 out:
1266 return cpu;
1267 }
1268
1269 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1270 {
1271 if (rq->curr->nr_cpus_allowed == 1)
1272 return;
1273
1274 if (p->nr_cpus_allowed != 1
1275 && cpupri_find(&rq->rd->cpupri, p, NULL))
1276 return;
1277
1278 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1279 return;
1280
1281 /*
1282 * There appear to be other CPUs that can accept
1283 * current and none to run 'p', so let's reschedule
1284 * to try and push current away:
1285 */
1286 requeue_task_rt(rq, p, 1);
1287 resched_task(rq->curr);
1288 }
1289
1290 #endif /* CONFIG_SMP */
1291
1292 /*
1293 * Preempt the current task with a newly woken task if needed:
1294 */
1295 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1296 {
1297 if (p->prio < rq->curr->prio) {
1298 resched_task(rq->curr);
1299 return;
1300 }
1301
1302 #ifdef CONFIG_SMP
1303 /*
1304 * If:
1305 *
1306 * - the newly woken task is of equal priority to the current task
1307 * - the newly woken task is non-migratable while current is migratable
1308 * - current will be preempted on the next reschedule
1309 *
1310 * we should check to see if current can readily move to a different
1311 * cpu. If so, we will reschedule to allow the push logic to try
1312 * to move current somewhere else, making room for our non-migratable
1313 * task.
1314 */
1315 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1316 check_preempt_equal_prio(rq, p);
1317 #endif
1318 }
1319
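/* Pick the first entity on the highest-priority non-empty queue of @rt_rq. */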
1320 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1321 struct rt_rq *rt_rq)
1322 {
1323 struct rt_prio_array *array = &rt_rq->active;
1324 struct sched_rt_entity *next = NULL;
1325 struct list_head *queue;
1326 int idx;
1327
1328 idx = sched_find_first_bit(array->bitmap);
1329 BUG_ON(idx >= MAX_RT_PRIO);
1330
1331 queue = array->queue + idx;
1332 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1333
1334 return next;
1335 }
1336
1337 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1338 {
1339 struct sched_rt_entity *rt_se;
1340 struct task_struct *p;
1341 struct rt_rq *rt_rq = &rq->rt;
1342
1343 do {
1344 rt_se = pick_next_rt_entity(rq, rt_rq);
1345 BUG_ON(!rt_se);
1346 rt_rq = group_rt_rq(rt_se);
1347 } while (rt_rq);
1348
1349 p = rt_task_of(rt_se);
1350 p->se.exec_start = rq_clock_task(rq);
1351
1352 return p;
1353 }
1354
1355 static struct task_struct *
1356 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1357 {
1358 struct task_struct *p;
1359 struct rt_rq *rt_rq = &rq->rt;
1360
1361 if (need_pull_rt_task(rq, prev)) {
1362 pull_rt_task(rq);
1363 /*
1364 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1365 * means a dl task can slip in, in which case we need to
1366 * re-start task selection.
1367 */
1368 if (unlikely(rq->dl.dl_nr_running))
1369 return RETRY_TASK;
1370 }
1371
1372 /*
1373 * We may dequeue prev's rt_rq in put_prev_task().
1374 * So, we update the time before the rt_nr_running check.
1375 */
1376 if (prev->sched_class == &rt_sched_class)
1377 update_curr_rt(rq);
1378
1379 if (!rt_rq->rt_nr_running)
1380 return NULL;
1381
1382 if (rt_rq_throttled(rt_rq))
1383 return NULL;
1384
1385 put_prev_task(rq, prev);
1386
1387 p = _pick_next_task_rt(rq);
1388
1389 /* The running task is never eligible for pushing */
1390 if (p)
1391 dequeue_pushable_task(rq, p);
1392
1393 set_post_schedule(rq);
1394
1395 return p;
1396 }
1397
1398 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1399 {
1400 update_curr_rt(rq);
1401
1402 /*
1403 * The previous task needs to be made eligible for pushing
1404 * if it is still active
1405 */
1406 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1407 enqueue_pushable_task(rq, p);
1408 }
1409
1410 #ifdef CONFIG_SMP
1411
1412 /* Only try algorithms three times */
1413 #define RT_MAX_TRIES 3
1414
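/*
 * A task is a candidate for pushing/pulling to @cpu if it is not currently
 * running and @cpu is in its allowed mask.
 */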
1415 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1416 {
1417 if (!task_running(rq, p) &&
1418 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1419 return 1;
1420 return 0;
1421 }
1422
1423 /*
1424 * Return the highest-priority pushable task on this rq which is suitable
1425 * to be executed on the given cpu, or NULL if there is none.
1426 */
1427 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1428 {
1429 struct plist_head *head = &rq->rt.pushable_tasks;
1430 struct task_struct *p;
1431
1432 if (!has_pushable_tasks(rq))
1433 return NULL;
1434
1435 plist_for_each_entry(p, head, pushable_tasks) {
1436 if (pick_rt_task(rq, p, cpu))
1437 return p;
1438 }
1439
1440 return NULL;
1441 }
1442
1443 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1444
1445 static int find_lowest_rq(struct task_struct *task)
1446 {
1447 struct sched_domain *sd;
1448 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1449 int this_cpu = smp_processor_id();
1450 int cpu = task_cpu(task);
1451
1452 /* Make sure the mask is initialized first */
1453 if (unlikely(!lowest_mask))
1454 return -1;
1455
1456 if (task->nr_cpus_allowed == 1)
1457 return -1; /* No other targets possible */
1458
1459 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1460 return -1; /* No targets found */
1461
1462 /*
1463 * At this point we have built a mask of cpus representing the
1464 * lowest priority tasks in the system. Now we want to elect
1465 * the best one based on our affinity and topology.
1466 *
1467 * We prioritize the last cpu that the task executed on since
1468 * it is most likely cache-hot in that location.
1469 */
1470 if (cpumask_test_cpu(cpu, lowest_mask))
1471 return cpu;
1472
1473 /*
1474 * Otherwise, we consult the sched_domains span maps to figure
1475 * out which cpu is logically closest to our hot cache data.
1476 */
1477 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1478 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1479
1480 rcu_read_lock();
1481 for_each_domain(cpu, sd) {
1482 if (sd->flags & SD_WAKE_AFFINE) {
1483 int best_cpu;
1484
1485 /*
1486 * "this_cpu" is cheaper to preempt than a
1487 * remote processor.
1488 */
1489 if (this_cpu != -1 &&
1490 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1491 rcu_read_unlock();
1492 return this_cpu;
1493 }
1494
1495 best_cpu = cpumask_first_and(lowest_mask,
1496 sched_domain_span(sd));
1497 if (best_cpu < nr_cpu_ids) {
1498 rcu_read_unlock();
1499 return best_cpu;
1500 }
1501 }
1502 }
1503 rcu_read_unlock();
1504
1505 /*
1506 * And finally, if there were no matches within the domains
1507 * just give the caller *something* to work with from the compatible
1508 * locations.
1509 */
1510 if (this_cpu != -1)
1511 return this_cpu;
1512
1513 cpu = cpumask_any(lowest_mask);
1514 if (cpu < nr_cpu_ids)
1515 return cpu;
1516 return -1;
1517 }
1518
1519 /* Will lock the rq it finds */
1520 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1521 {
1522 struct rq *lowest_rq = NULL;
1523 int tries;
1524 int cpu;
1525
1526 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1527 cpu = find_lowest_rq(task);
1528
1529 if ((cpu == -1) || (cpu == rq->cpu))
1530 break;
1531
1532 lowest_rq = cpu_rq(cpu);
1533
1534 /* if the prio of this runqueue changed, try again */
1535 if (double_lock_balance(rq, lowest_rq)) {
1536 /*
1537 * We had to unlock the run queue. In the
1538 * meantime, the task could have already
1539 * migrated or had its affinity changed.
1540 * Also make sure that it wasn't scheduled on its rq.
1541 */
1542 if (unlikely(task_rq(task) != rq ||
1543 !cpumask_test_cpu(lowest_rq->cpu,
1544 tsk_cpus_allowed(task)) ||
1545 task_running(rq, task) ||
1546 !task->on_rq)) {
1547
1548 double_unlock_balance(rq, lowest_rq);
1549 lowest_rq = NULL;
1550 break;
1551 }
1552 }
1553
1554 /* If this rq is still suitable use it. */
1555 if (lowest_rq->rt.highest_prio.curr > task->prio)
1556 break;
1557
1558 /* try again */
1559 double_unlock_balance(rq, lowest_rq);
1560 lowest_rq = NULL;
1561 }
1562
1563 return lowest_rq;
1564 }
1565
1566 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1567 {
1568 struct task_struct *p;
1569
1570 if (!has_pushable_tasks(rq))
1571 return NULL;
1572
1573 p = plist_first_entry(&rq->rt.pushable_tasks,
1574 struct task_struct, pushable_tasks);
1575
1576 BUG_ON(rq->cpu != task_cpu(p));
1577 BUG_ON(task_current(rq, p));
1578 BUG_ON(p->nr_cpus_allowed <= 1);
1579
1580 BUG_ON(!p->on_rq);
1581 BUG_ON(!rt_task(p));
1582
1583 return p;
1584 }
1585
1586 /*
1587 * If the current CPU has more than one RT task, see if the non-
1588 * running task can migrate over to a CPU that is running a task
1589 * of lesser priority.
1590 */
1591 static int push_rt_task(struct rq *rq)
1592 {
1593 struct task_struct *next_task;
1594 struct rq *lowest_rq;
1595 int ret = 0;
1596
1597 if (!rq->rt.overloaded)
1598 return 0;
1599
1600 next_task = pick_next_pushable_task(rq);
1601 if (!next_task)
1602 return 0;
1603
1604 retry:
1605 if (unlikely(next_task == rq->curr)) {
1606 WARN_ON(1);
1607 return 0;
1608 }
1609
1610 /*
1611 * It's possible that the next_task slipped in with a
1612 * higher priority than current. If that's the case,
1613 * just reschedule current.
1614 */
1615 if (unlikely(next_task->prio < rq->curr->prio)) {
1616 resched_task(rq->curr);
1617 return 0;
1618 }
1619
1620 /* We might release rq lock */
1621 get_task_struct(next_task);
1622
1623 /* find_lock_lowest_rq locks the rq if found */
1624 lowest_rq = find_lock_lowest_rq(next_task, rq);
1625 if (!lowest_rq) {
1626 struct task_struct *task;
1627 /*
1628 * find_lock_lowest_rq releases rq->lock
1629 * so it is possible that next_task has migrated.
1630 *
1631 * We need to make sure that the task is still on the same
1632 * run-queue and is also still the next task eligible for
1633 * pushing.
1634 */
1635 task = pick_next_pushable_task(rq);
1636 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1637 /*
1638 * The task hasn't migrated, and is still the next
1639 * eligible task, but we failed to find a run-queue
1640 * to push it to. Do not retry in this case, since
1641 * other cpus will pull from us when ready.
1642 */
1643 goto out;
1644 }
1645
1646 if (!task)
1647 /* No more tasks, just exit */
1648 goto out;
1649
1650 /*
1651 * Something has shifted, try again.
1652 */
1653 put_task_struct(next_task);
1654 next_task = task;
1655 goto retry;
1656 }
1657
1658 deactivate_task(rq, next_task, 0);
1659 set_task_cpu(next_task, lowest_rq->cpu);
1660 activate_task(lowest_rq, next_task, 0);
1661 ret = 1;
1662
1663 resched_task(lowest_rq->curr);
1664
1665 double_unlock_balance(rq, lowest_rq);
1666
1667 out:
1668 put_task_struct(next_task);
1669
1670 return ret;
1671 }
1672
1673 static void push_rt_tasks(struct rq *rq)
1674 {
1675 /* push_rt_task will return true if it moved an RT */
1676 while (push_rt_task(rq))
1677 ;
1678 }
1679
1680 static int pull_rt_task(struct rq *this_rq)
1681 {
1682 int this_cpu = this_rq->cpu, ret = 0, cpu;
1683 struct task_struct *p;
1684 struct rq *src_rq;
1685
1686 if (likely(!rt_overloaded(this_rq)))
1687 return 0;
1688
1689 /*
690 * Match the barrier from rt_set_overload(); this guarantees that if we
1691 * see overloaded we must also see the rto_mask bit.
1692 */
1693 smp_rmb();
1694
1695 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1696 if (this_cpu == cpu)
1697 continue;
1698
1699 src_rq = cpu_rq(cpu);
1700
1701 /*
1702 * Don't bother taking the src_rq->lock if the next highest
1703 * task is known to be lower-priority than our current task.
1704 * This may look racy, but if this value is about to go
1705 * logically higher, the src_rq will push this task away.
706 * And if it's going logically lower, we do not care.
1707 */
1708 if (src_rq->rt.highest_prio.next >=
1709 this_rq->rt.highest_prio.curr)
1710 continue;
1711
1712 /*
1713 * We can potentially drop this_rq's lock in
1714 * double_lock_balance, and another CPU could
1715 * alter this_rq
1716 */
1717 double_lock_balance(this_rq, src_rq);
1718
1719 /*
720 * We can only pull a task that is pushable
721 * on its rq, and no others.
1722 */
1723 p = pick_highest_pushable_task(src_rq, this_cpu);
1724
1725 /*
1726 * Do we have an RT task that preempts
1727 * the to-be-scheduled task?
1728 */
1729 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1730 WARN_ON(p == src_rq->curr);
1731 WARN_ON(!p->on_rq);
1732
1733 /*
1734 * There's a chance that p is higher in priority
1735 * than what's currently running on its cpu.
736 * This is just that p is waking up and hasn't
737 * had a chance to schedule. We only pull
738 * p if it is lower in priority than the
739 * current task on the run queue.
1740 */
1741 if (p->prio < src_rq->curr->prio)
1742 goto skip;
1743
1744 ret = 1;
1745
1746 deactivate_task(src_rq, p, 0);
1747 set_task_cpu(p, this_cpu);
1748 activate_task(this_rq, p, 0);
1749 /*
1750 * We continue with the search, just in
1751 * case there's an even higher prio task
1752 * in another runqueue. (low likelihood
1753 * but possible)
1754 */
1755 }
1756 skip:
1757 double_unlock_balance(this_rq, src_rq);
1758 }
1759
1760 return ret;
1761 }
1762
1763 static void post_schedule_rt(struct rq *rq)
1764 {
1765 push_rt_tasks(rq);
1766 }
1767
1768 /*
1769 * If we are not running and we are not going to reschedule soon, we should
1770 * try to push tasks away now
1771 */
1772 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1773 {
1774 if (!task_running(rq, p) &&
1775 !test_tsk_need_resched(rq->curr) &&
1776 has_pushable_tasks(rq) &&
1777 p->nr_cpus_allowed > 1 &&
1778 (dl_task(rq->curr) || rt_task(rq->curr)) &&
1779 (rq->curr->nr_cpus_allowed < 2 ||
1780 rq->curr->prio <= p->prio))
1781 push_rt_tasks(rq);
1782 }
1783
1784 static void set_cpus_allowed_rt(struct task_struct *p,
1785 const struct cpumask *new_mask)
1786 {
1787 struct rq *rq;
1788 int weight;
1789
1790 BUG_ON(!rt_task(p));
1791
1792 if (!p->on_rq)
1793 return;
1794
1795 weight = cpumask_weight(new_mask);
1796
1797 /*
798 * Only update if the task's migratability actually changed, i.e. it
799 * went from being able to migrate to being pinned, or vice versa.
1800 */
1801 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1802 return;
1803
1804 rq = task_rq(p);
1805
1806 /*
807 * The task used to be able to migrate, or it can migrate now.
1808 */
1809 if (weight <= 1) {
1810 if (!task_current(rq, p))
1811 dequeue_pushable_task(rq, p);
1812 BUG_ON(!rq->rt.rt_nr_migratory);
1813 rq->rt.rt_nr_migratory--;
1814 } else {
1815 if (!task_current(rq, p))
1816 enqueue_pushable_task(rq, p);
1817 rq->rt.rt_nr_migratory++;
1818 }
1819
1820 update_rt_migration(&rq->rt);
1821 }
1822
1823 /* Assumes rq->lock is held */
1824 static void rq_online_rt(struct rq *rq)
1825 {
1826 if (rq->rt.overloaded)
1827 rt_set_overload(rq);
1828
1829 __enable_runtime(rq);
1830
1831 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1832 }
1833
1834 /* Assumes rq->lock is held */
1835 static void rq_offline_rt(struct rq *rq)
1836 {
1837 if (rq->rt.overloaded)
1838 rt_clear_overload(rq);
1839
1840 __disable_runtime(rq);
1841
1842 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1843 }
1844
1845 /*
1846 * When switching away from the rt queue, we bring ourselves to a position
1847 * that we might want to pull RT tasks from other runqueues.
1848 */
1849 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1850 {
1851 /*
1852 * If there are other RT tasks then we will reschedule
1853 * and the scheduling of the other RT tasks will handle
1854 * the balancing. But if we are the last RT task
1855 * we may need to handle the pulling of RT tasks
1856 * now.
1857 */
1858 if (!p->on_rq || rq->rt.rt_nr_running)
1859 return;
1860
1861 if (pull_rt_task(rq))
1862 resched_task(rq->curr);
1863 }
1864
1865 void __init init_sched_rt_class(void)
1866 {
1867 unsigned int i;
1868
1869 for_each_possible_cpu(i) {
1870 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1871 GFP_KERNEL, cpu_to_node(i));
1872 }
1873 }
1874 #endif /* CONFIG_SMP */
1875
1876 /*
1877 * When switching a task to RT, we may overload the runqueue
1878 * with RT tasks. In this case we try to push them off to
1879 * other runqueues.
1880 */
1881 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1882 {
1883 int check_resched = 1;
1884
1885 /*
1886 * If we are already running, then there's nothing
1887 * that needs to be done. But if we are not running,
1888 * we may need to preempt the currently running task.
1889 * If that currently running task is also an RT task,
1890 * then see if we can move to another run queue.
1891 */
1892 if (p->on_rq && rq->curr != p) {
1893 #ifdef CONFIG_SMP
1894 if (rq->rt.overloaded && push_rt_task(rq) &&
1895 /* Don't resched if we changed runqueues */
1896 rq != task_rq(p))
1897 check_resched = 0;
1898 #endif /* CONFIG_SMP */
1899 if (check_resched && p->prio < rq->curr->prio)
1900 resched_task(rq->curr);
1901 }
1902 }
1903
1904 /*
1905 * Priority of the task has changed. This may cause
1906 * us to initiate a push or pull.
1907 */
1908 static void
1909 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1910 {
1911 if (!p->on_rq)
1912 return;
1913
1914 if (rq->curr == p) {
1915 #ifdef CONFIG_SMP
1916 /*
1917 * If our priority decreases while running, we
1918 * may need to pull tasks to this runqueue.
1919 */
1920 if (oldprio < p->prio)
1921 pull_rt_task(rq);
1922 /*
1923 * If there's a higher priority task waiting to run
1924 * then reschedule. Note, the above pull_rt_task
1925 * can release the rq lock and p could migrate.
1926 * Only reschedule if p is still on the same runqueue.
1927 */
1928 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1929 resched_task(p);
1930 #else
1931 /* For UP simply resched on drop of prio */
1932 if (oldprio < p->prio)
1933 resched_task(p);
1934 #endif /* CONFIG_SMP */
1935 } else {
1936 /*
1937 * This task is not running, but if its priority is
1938 * higher than that of the currently running task,
1939 * then reschedule.
1940 */
1941 if (p->prio < rq->curr->prio)
1942 resched_task(rq->curr);
1943 }
1944 }
1945
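/*
 * RLIMIT_RTTIME watchdog: count the ticks this task has run without sleeping
 * (rt.timeout is reset on wakeup in enqueue_task_rt()) and, once past the
 * soft limit, set cputime_expires.sched_exp so the overrun is acted upon at
 * the next cputime check.
 */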
1946 static void watchdog(struct rq *rq, struct task_struct *p)
1947 {
1948 unsigned long soft, hard;
1949
1950 /* max may change after cur was read; this will be fixed next tick */
1951 soft = task_rlimit(p, RLIMIT_RTTIME);
1952 hard = task_rlimit_max(p, RLIMIT_RTTIME);
1953
1954 if (soft != RLIM_INFINITY) {
1955 unsigned long next;
1956
1957 if (p->rt.watchdog_stamp != jiffies) {
1958 p->rt.timeout++;
1959 p->rt.watchdog_stamp = jiffies;
1960 }
1961
1962 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1963 if (p->rt.timeout > next)
1964 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1965 }
1966 }
1967
1968 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1969 {
1970 struct sched_rt_entity *rt_se = &p->rt;
1971
1972 update_curr_rt(rq);
1973
1974 watchdog(rq, p);
1975
1976 /*
1977 * RR tasks need a special form of timeslice management.
1978 * FIFO tasks have no timeslices.
1979 */
1980 if (p->policy != SCHED_RR)
1981 return;
1982
1983 if (--p->rt.time_slice)
1984 return;
1985
1986 p->rt.time_slice = sched_rr_timeslice;
1987
1988 /*
1989 * Requeue to the end of the queue if we (and all of our ancestors) are not
1990 * the only element on the queue
1991 */
1992 for_each_sched_rt_entity(rt_se) {
1993 if (rt_se->run_list.prev != rt_se->run_list.next) {
1994 requeue_task_rt(rq, p, 0);
1995 set_tsk_need_resched(p);
1996 return;
1997 }
1998 }
1999 }
2000
2001 static void set_curr_task_rt(struct rq *rq)
2002 {
2003 struct task_struct *p = rq->curr;
2004
2005 p->se.exec_start = rq_clock_task(rq);
2006
2007 /* The running task is never eligible for pushing */
2008 dequeue_pushable_task(rq, p);
2009 }
2010
2011 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2012 {
2013 /*
2014 * Time slice is 0 for SCHED_FIFO tasks
2015 */
2016 if (task->policy == SCHED_RR)
2017 return sched_rr_timeslice;
2018 else
2019 return 0;
2020 }
2021
2022 const struct sched_class rt_sched_class = {
2023 .next = &fair_sched_class,
2024 .enqueue_task = enqueue_task_rt,
2025 .dequeue_task = dequeue_task_rt,
2026 .yield_task = yield_task_rt,
2027
2028 .check_preempt_curr = check_preempt_curr_rt,
2029
2030 .pick_next_task = pick_next_task_rt,
2031 .put_prev_task = put_prev_task_rt,
2032
2033 #ifdef CONFIG_SMP
2034 .select_task_rq = select_task_rq_rt,
2035
2036 .set_cpus_allowed = set_cpus_allowed_rt,
2037 .rq_online = rq_online_rt,
2038 .rq_offline = rq_offline_rt,
2039 .post_schedule = post_schedule_rt,
2040 .task_woken = task_woken_rt,
2041 .switched_from = switched_from_rt,
2042 #endif
2043
2044 .set_curr_task = set_curr_task_rt,
2045 .task_tick = task_tick_rt,
2046
2047 .get_rr_interval = get_rr_interval_rt,
2048
2049 .prio_changed = prio_changed_rt,
2050 .switched_to = switched_to_rt,
2051 };
2052
2053 #ifdef CONFIG_SCHED_DEBUG
2054 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2055
2056 void print_rt_stats(struct seq_file *m, int cpu)
2057 {
2058 rt_rq_iter_t iter;
2059 struct rt_rq *rt_rq;
2060
2061 rcu_read_lock();
2062 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2063 print_rt_rq(m, cpu, rt_rq);
2064 rcu_read_unlock();
2065 }
2066 #endif /* CONFIG_SCHED_DEBUG */