kernel/sched_rt.c
bb44e5d1
IM
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
8f48894f
PZ
6#ifdef CONFIG_RT_GROUP_SCHED
7
8#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
398a153b
GH
10static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
11{
8f48894f
PZ
12#ifdef CONFIG_SCHED_DEBUG
13 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14#endif
398a153b
GH
15 return container_of(rt_se, struct task_struct, rt);
16}
17
398a153b
GH
18static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
19{
20 return rt_rq->rq;
21}
22
23static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
24{
25 return rt_se->rt_rq;
26}
27
28#else /* CONFIG_RT_GROUP_SCHED */
29
a1ba4d8b
PZ
30#define rt_entity_is_task(rt_se) (1)
31
8f48894f
PZ
32static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33{
34 return container_of(rt_se, struct task_struct, rt);
35}
36
398a153b
GH
37static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
38{
39 return container_of(rt_rq, struct rq, rt);
40}
41
42static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
43{
44 struct task_struct *p = rt_task_of(rt_se);
45 struct rq *rq = task_rq(p);
46
47 return &rq->rt;
48}
49
50#endif /* CONFIG_RT_GROUP_SCHED */
51
4fd29176 52#ifdef CONFIG_SMP
84de4274 53
637f5085 54static inline int rt_overloaded(struct rq *rq)
4fd29176 55{
637f5085 56 return atomic_read(&rq->rd->rto_count);
4fd29176 57}
84de4274 58
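/*
 * Mark this runqueue as RT-overloaded: set its bit in the root domain's
 * rto_mask and bump rto_count so other CPUs doing pull_rt_task() know
 * there may be pushable tasks here.
 */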
4fd29176
SR
59static inline void rt_set_overload(struct rq *rq)
60{
1f11eb6a
GH
61 if (!rq->online)
62 return;
63
c6c4927b 64 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176
SR
65 /*
66 * Make sure the mask is visible before we set
67 * the overload count. That is checked to determine
68 * if we should look at the mask. It would be a shame
69 * if we looked at the mask, but the mask was not
70 * updated yet.
71 */
72 wmb();
637f5085 73 atomic_inc(&rq->rd->rto_count);
4fd29176 74}
84de4274 75
4fd29176
SR
76static inline void rt_clear_overload(struct rq *rq)
77{
1f11eb6a
GH
78 if (!rq->online)
79 return;
80
4fd29176 81 /* the order here really doesn't matter */
637f5085 82 atomic_dec(&rq->rd->rto_count);
c6c4927b 83 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176 84}
73fe6aae 85
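/*
 * Keep rq->rt.overloaded and the root-domain overload state in sync:
 * a runqueue counts as overloaded once it has more than one RT task
 * queued and at least one of them may migrate.
 */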
398a153b 86static void update_rt_migration(struct rt_rq *rt_rq)
73fe6aae 87{
a1ba4d8b 88 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
398a153b
GH
89 if (!rt_rq->overloaded) {
90 rt_set_overload(rq_of_rt_rq(rt_rq));
91 rt_rq->overloaded = 1;
cdc8eb98 92 }
398a153b
GH
93 } else if (rt_rq->overloaded) {
94 rt_clear_overload(rq_of_rt_rq(rt_rq));
95 rt_rq->overloaded = 0;
637f5085 96 }
73fe6aae 97}
4fd29176 98
398a153b
GH
99static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
100{
a1ba4d8b
PZ
101 if (!rt_entity_is_task(rt_se))
102 return;
103
104 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
105
106 rt_rq->rt_nr_total++;
398a153b
GH
107 if (rt_se->nr_cpus_allowed > 1)
108 rt_rq->rt_nr_migratory++;
109
110 update_rt_migration(rt_rq);
111}
112
113static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
114{
a1ba4d8b
PZ
115 if (!rt_entity_is_task(rt_se))
116 return;
117
118 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
119
120 rt_rq->rt_nr_total--;
398a153b
GH
121 if (rt_se->nr_cpus_allowed > 1)
122 rt_rq->rt_nr_migratory--;
123
124 update_rt_migration(rt_rq);
125}
126
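/*
 * The pushable_tasks plist is kept sorted by priority, so the push logic
 * can always take the highest-priority candidate first.  Deleting the
 * node before re-initialising it lets a queued task be re-added at a
 * new priority.
 */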
917b627d
GH
127static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128{
129 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130 plist_node_init(&p->pushable_tasks, p->prio);
131 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132}
133
134static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135{
136 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137}
138
bcf08df3
IM
139static inline int has_pushable_tasks(struct rq *rq)
140{
141 return !plist_head_empty(&rq->rt.pushable_tasks);
142}
143
917b627d
GH
144#else
145
ceacc2c1 146static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
fa85ae24 147{
6f505b16
PZ
148}
149
ceacc2c1
PZ
150static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
151{
152}
153
b07430ac 154static inline
ceacc2c1
PZ
155void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156{
157}
158
398a153b 159static inline
ceacc2c1
PZ
160void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
161{
162}
917b627d 163
4fd29176
SR
164#endif /* CONFIG_SMP */
165
6f505b16
PZ
166static inline int on_rt_rq(struct sched_rt_entity *rt_se)
167{
168 return !list_empty(&rt_se->run_list);
169}
170
052f1dc7 171#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 172
9f0c1e56 173static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
6f505b16
PZ
174{
175 if (!rt_rq->tg)
9f0c1e56 176 return RUNTIME_INF;
6f505b16 177
ac086bc2
PZ
178 return rt_rq->rt_runtime;
179}
180
181static inline u64 sched_rt_period(struct rt_rq *rt_rq)
182{
183 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
6f505b16
PZ
184}
185
ec514c48
CX
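/*
 * With group scheduling each task group owns one rt_rq per CPU; the
 * iterator below walks the global task_groups list under RCU and yields
 * each group's rt_rq for this runqueue's CPU.
 */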
186typedef struct task_group *rt_rq_iter_t;
187
188#define for_each_rt_rq(rt_rq, iter, rq) \
189 for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
190 (&iter->list != &task_groups) && \
191 (rt_rq = iter->rt_rq[cpu_of(rq)]); \
192 iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
193
3d4b47b4
PZ
194static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
195{
196 list_add_rcu(&rt_rq->leaf_rt_rq_list,
197 &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
198}
199
200static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
201{
202 list_del_rcu(&rt_rq->leaf_rt_rq_list);
203}
204
6f505b16 205#define for_each_leaf_rt_rq(rt_rq, rq) \
80f40ee4 206 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
6f505b16 207
6f505b16
PZ
208#define for_each_sched_rt_entity(rt_se) \
209 for (; rt_se; rt_se = rt_se->parent)
210
211static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
212{
213 return rt_se->my_q;
214}
215
37dad3fc 216static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
6f505b16
PZ
217static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
218
9f0c1e56 219static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 220{
f6121f4f 221 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
74b7eb58
YZ
222 struct sched_rt_entity *rt_se;
223
0c3b9168
BS
224 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
225
226 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16 227
f6121f4f
DF
228 if (rt_rq->rt_nr_running) {
229 if (rt_se && !on_rt_rq(rt_se))
37dad3fc 230 enqueue_rt_entity(rt_se, false);
e864c499 231 if (rt_rq->highest_prio.curr < curr->prio)
1020387f 232 resched_task(curr);
6f505b16
PZ
233 }
234}
235
9f0c1e56 236static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16 237{
74b7eb58 238 struct sched_rt_entity *rt_se;
0c3b9168 239 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
74b7eb58 240
0c3b9168 241 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16
PZ
242
243 if (rt_se && on_rt_rq(rt_se))
244 dequeue_rt_entity(rt_se);
245}
246
23b0fdfc
PZ
247static inline int rt_rq_throttled(struct rt_rq *rt_rq)
248{
249 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
250}
251
252static int rt_se_boosted(struct sched_rt_entity *rt_se)
253{
254 struct rt_rq *rt_rq = group_rt_rq(rt_se);
255 struct task_struct *p;
256
257 if (rt_rq)
258 return !!rt_rq->rt_nr_boosted;
259
260 p = rt_task_of(rt_se);
261 return p->prio != p->normal_prio;
262}
263
d0b27fa7 264#ifdef CONFIG_SMP
c6c4927b 265static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7
PZ
266{
267 return cpu_rq(smp_processor_id())->rd->span;
268}
6f505b16 269#else
c6c4927b 270static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 271{
c6c4927b 272 return cpu_online_mask;
d0b27fa7
PZ
273}
274#endif
6f505b16 275
d0b27fa7
PZ
276static inline
277struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
6f505b16 278{
d0b27fa7
PZ
279 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
280}
9f0c1e56 281
ac086bc2
PZ
282static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
283{
284 return &rt_rq->tg->rt_bandwidth;
285}
286
55e12e5e 287#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
288
289static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
290{
ac086bc2
PZ
291 return rt_rq->rt_runtime;
292}
293
294static inline u64 sched_rt_period(struct rt_rq *rt_rq)
295{
296 return ktime_to_ns(def_rt_bandwidth.rt_period);
6f505b16
PZ
297}
298
ec514c48
CX
299typedef struct rt_rq *rt_rq_iter_t;
300
301#define for_each_rt_rq(rt_rq, iter, rq) \
302 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
303
3d4b47b4
PZ
304static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
305{
306}
307
308static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
309{
310}
311
6f505b16
PZ
312#define for_each_leaf_rt_rq(rt_rq, rq) \
313 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
314
6f505b16
PZ
315#define for_each_sched_rt_entity(rt_se) \
316 for (; rt_se; rt_se = NULL)
317
318static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
319{
320 return NULL;
321}
322
9f0c1e56 323static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 324{
f3ade837
JB
325 if (rt_rq->rt_nr_running)
326 resched_task(rq_of_rt_rq(rt_rq)->curr);
6f505b16
PZ
327}
328
9f0c1e56 329static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16
PZ
330{
331}
332
23b0fdfc
PZ
333static inline int rt_rq_throttled(struct rt_rq *rt_rq)
334{
335 return rt_rq->rt_throttled;
336}
d0b27fa7 337
c6c4927b 338static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 339{
c6c4927b 340 return cpu_online_mask;
d0b27fa7
PZ
341}
342
343static inline
344struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
345{
346 return &cpu_rq(cpu)->rt;
347}
348
ac086bc2
PZ
349static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
350{
351 return &def_rt_bandwidth;
352}
353
55e12e5e 354#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 355
ac086bc2 356#ifdef CONFIG_SMP
78333cdd
PZ
357/*
358 * We ran out of runtime, see if we can borrow some from our neighbours.
359 */
b79f3833 360static int do_balance_runtime(struct rt_rq *rt_rq)
ac086bc2
PZ
361{
362 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
363 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
364 int i, weight, more = 0;
365 u64 rt_period;
366
c6c4927b 367 weight = cpumask_weight(rd->span);
ac086bc2 368
0986b11b 369 raw_spin_lock(&rt_b->rt_runtime_lock);
ac086bc2 370 rt_period = ktime_to_ns(rt_b->rt_period);
c6c4927b 371 for_each_cpu(i, rd->span) {
ac086bc2
PZ
372 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
373 s64 diff;
374
375 if (iter == rt_rq)
376 continue;
377
0986b11b 378 raw_spin_lock(&iter->rt_runtime_lock);
78333cdd
PZ
379 /*
380 * Either all rqs have inf runtime and there's nothing to steal
381 * or __disable_runtime() below sets a specific rq to inf to
 382 * indicate it's been disabled and disallow stealing.
383 */
7def2be1
PZ
384 if (iter->rt_runtime == RUNTIME_INF)
385 goto next;
386
78333cdd
PZ
387 /*
388 * From runqueues with spare time, take 1/n part of their
389 * spare time, but no more than our period.
390 */
ac086bc2
PZ
391 diff = iter->rt_runtime - iter->rt_time;
392 if (diff > 0) {
58838cf3 393 diff = div_u64((u64)diff, weight);
ac086bc2
PZ
394 if (rt_rq->rt_runtime + diff > rt_period)
395 diff = rt_period - rt_rq->rt_runtime;
396 iter->rt_runtime -= diff;
397 rt_rq->rt_runtime += diff;
398 more = 1;
399 if (rt_rq->rt_runtime == rt_period) {
0986b11b 400 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2
PZ
401 break;
402 }
403 }
7def2be1 404next:
0986b11b 405 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2 406 }
0986b11b 407 raw_spin_unlock(&rt_b->rt_runtime_lock);
ac086bc2
PZ
408
409 return more;
410}
7def2be1 411
78333cdd
PZ
412/*
 413 * Ensure this RQ takes back all the runtime it lent to its neighbours.
414 */
7def2be1
PZ
415static void __disable_runtime(struct rq *rq)
416{
417 struct root_domain *rd = rq->rd;
ec514c48 418 rt_rq_iter_t iter;
7def2be1
PZ
419 struct rt_rq *rt_rq;
420
421 if (unlikely(!scheduler_running))
422 return;
423
ec514c48 424 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
425 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
426 s64 want;
427 int i;
428
0986b11b
TG
429 raw_spin_lock(&rt_b->rt_runtime_lock);
430 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
431 /*
432 * Either we're all inf and nobody needs to borrow, or we're
433 * already disabled and thus have nothing to do, or we have
434 * exactly the right amount of runtime to take out.
435 */
7def2be1
PZ
436 if (rt_rq->rt_runtime == RUNTIME_INF ||
437 rt_rq->rt_runtime == rt_b->rt_runtime)
438 goto balanced;
0986b11b 439 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7def2be1 440
78333cdd
PZ
441 /*
442 * Calculate the difference between what we started out with
 443 * and what we currently have, that's the amount of runtime
 444 * we lent out and now have to reclaim.
445 */
7def2be1
PZ
446 want = rt_b->rt_runtime - rt_rq->rt_runtime;
447
78333cdd
PZ
448 /*
449 * Greedy reclaim, take back as much as we can.
450 */
c6c4927b 451 for_each_cpu(i, rd->span) {
7def2be1
PZ
452 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
453 s64 diff;
454
78333cdd
PZ
455 /*
456 * Can't reclaim from ourselves or disabled runqueues.
457 */
f1679d08 458 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
7def2be1
PZ
459 continue;
460
0986b11b 461 raw_spin_lock(&iter->rt_runtime_lock);
7def2be1
PZ
462 if (want > 0) {
463 diff = min_t(s64, iter->rt_runtime, want);
464 iter->rt_runtime -= diff;
465 want -= diff;
466 } else {
467 iter->rt_runtime -= want;
468 want -= want;
469 }
0986b11b 470 raw_spin_unlock(&iter->rt_runtime_lock);
7def2be1
PZ
471
472 if (!want)
473 break;
474 }
475
0986b11b 476 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
477 /*
478 * We cannot be left wanting - that would mean some runtime
479 * leaked out of the system.
480 */
7def2be1
PZ
481 BUG_ON(want);
482balanced:
78333cdd
PZ
483 /*
484 * Disable all the borrow logic by pretending we have inf
485 * runtime - in which case borrowing doesn't make sense.
486 */
7def2be1 487 rt_rq->rt_runtime = RUNTIME_INF;
0986b11b
TG
488 raw_spin_unlock(&rt_rq->rt_runtime_lock);
489 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
490 }
491}
492
493static void disable_runtime(struct rq *rq)
494{
495 unsigned long flags;
496
05fa785c 497 raw_spin_lock_irqsave(&rq->lock, flags);
7def2be1 498 __disable_runtime(rq);
05fa785c 499 raw_spin_unlock_irqrestore(&rq->lock, flags);
7def2be1
PZ
500}
501
502static void __enable_runtime(struct rq *rq)
503{
ec514c48 504 rt_rq_iter_t iter;
7def2be1
PZ
505 struct rt_rq *rt_rq;
506
507 if (unlikely(!scheduler_running))
508 return;
509
78333cdd
PZ
510 /*
511 * Reset each runqueue's bandwidth settings
512 */
ec514c48 513 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
514 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
515
0986b11b
TG
516 raw_spin_lock(&rt_b->rt_runtime_lock);
517 raw_spin_lock(&rt_rq->rt_runtime_lock);
7def2be1
PZ
518 rt_rq->rt_runtime = rt_b->rt_runtime;
519 rt_rq->rt_time = 0;
baf25731 520 rt_rq->rt_throttled = 0;
0986b11b
TG
521 raw_spin_unlock(&rt_rq->rt_runtime_lock);
522 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
523 }
524}
525
526static void enable_runtime(struct rq *rq)
527{
528 unsigned long flags;
529
05fa785c 530 raw_spin_lock_irqsave(&rq->lock, flags);
7def2be1 531 __enable_runtime(rq);
05fa785c 532 raw_spin_unlock_irqrestore(&rq->lock, flags);
7def2be1
PZ
533}
534
eff6549b
PZ
535static int balance_runtime(struct rt_rq *rt_rq)
536{
537 int more = 0;
538
539 if (rt_rq->rt_time > rt_rq->rt_runtime) {
0986b11b 540 raw_spin_unlock(&rt_rq->rt_runtime_lock);
eff6549b 541 more = do_balance_runtime(rt_rq);
0986b11b 542 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
543 }
544
545 return more;
546}
55e12e5e 547#else /* !CONFIG_SMP */
eff6549b
PZ
548static inline int balance_runtime(struct rt_rq *rt_rq)
549{
550 return 0;
551}
55e12e5e 552#endif /* CONFIG_SMP */
ac086bc2 553
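/*
 * Called from the rt_bandwidth period timer: for every rt_rq in the
 * period mask, pay back up to @overrun periods worth of runtime,
 * unthrottle it once it is under its limit again, and return 1 only
 * when all of them are idle so the caller may stop the timer.
 */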
eff6549b
PZ
554static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
555{
556 int i, idle = 1;
c6c4927b 557 const struct cpumask *span;
eff6549b 558
0b148fa0 559 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
eff6549b
PZ
560 return 1;
561
562 span = sched_rt_period_mask();
c6c4927b 563 for_each_cpu(i, span) {
eff6549b
PZ
564 int enqueue = 0;
565 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
566 struct rq *rq = rq_of_rt_rq(rt_rq);
567
05fa785c 568 raw_spin_lock(&rq->lock);
eff6549b
PZ
569 if (rt_rq->rt_time) {
570 u64 runtime;
571
0986b11b 572 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
573 if (rt_rq->rt_throttled)
574 balance_runtime(rt_rq);
575 runtime = rt_rq->rt_runtime;
576 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
577 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
578 rt_rq->rt_throttled = 0;
579 enqueue = 1;
61eadef6
MG
580
581 /*
582 * Force a clock update if the CPU was idle,
583 * lest wakeup -> unthrottle time accumulate.
584 */
585 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
586 rq->skip_clock_update = -1;
eff6549b
PZ
587 }
588 if (rt_rq->rt_time || rt_rq->rt_nr_running)
589 idle = 0;
0986b11b 590 raw_spin_unlock(&rt_rq->rt_runtime_lock);
0c3b9168 591 } else if (rt_rq->rt_nr_running) {
6c3df255 592 idle = 0;
0c3b9168
BS
593 if (!rt_rq_throttled(rt_rq))
594 enqueue = 1;
595 }
eff6549b
PZ
596
597 if (enqueue)
598 sched_rt_rq_enqueue(rt_rq);
05fa785c 599 raw_spin_unlock(&rq->lock);
eff6549b
PZ
600 }
601
602 return idle;
603}
ac086bc2 604
6f505b16
PZ
605static inline int rt_se_prio(struct sched_rt_entity *rt_se)
606{
052f1dc7 607#ifdef CONFIG_RT_GROUP_SCHED
6f505b16
PZ
608 struct rt_rq *rt_rq = group_rt_rq(rt_se);
609
610 if (rt_rq)
e864c499 611 return rt_rq->highest_prio.curr;
6f505b16
PZ
612#endif
613
614 return rt_task_of(rt_se)->prio;
615}
616
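/*
 * Returns 1 when this rt_rq has used up its runtime for the current
 * period (after trying to borrow from other CPUs) and must therefore
 * be throttled and dequeued.
 */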
9f0c1e56 617static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
6f505b16 618{
9f0c1e56 619 u64 runtime = sched_rt_runtime(rt_rq);
fa85ae24 620
fa85ae24 621 if (rt_rq->rt_throttled)
23b0fdfc 622 return rt_rq_throttled(rt_rq);
fa85ae24 623
ac086bc2
PZ
624 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
625 return 0;
626
b79f3833
PZ
627 balance_runtime(rt_rq);
628 runtime = sched_rt_runtime(rt_rq);
629 if (runtime == RUNTIME_INF)
630 return 0;
ac086bc2 631
9f0c1e56 632 if (rt_rq->rt_time > runtime) {
6f505b16 633 rt_rq->rt_throttled = 1;
23b0fdfc 634 if (rt_rq_throttled(rt_rq)) {
9f0c1e56 635 sched_rt_rq_dequeue(rt_rq);
23b0fdfc
PZ
636 return 1;
637 }
fa85ae24
PZ
638 }
639
640 return 0;
641}
642
bb44e5d1
IM
643/*
644 * Update the current task's runtime statistics. Skip current tasks that
645 * are not in our scheduling class.
646 */
a9957449 647static void update_curr_rt(struct rq *rq)
bb44e5d1
IM
648{
649 struct task_struct *curr = rq->curr;
6f505b16
PZ
650 struct sched_rt_entity *rt_se = &curr->rt;
651 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
bb44e5d1
IM
652 u64 delta_exec;
653
06c3bc65 654 if (curr->sched_class != &rt_sched_class)
bb44e5d1
IM
655 return;
656
305e6835 657 delta_exec = rq->clock_task - curr->se.exec_start;
bb44e5d1
IM
658 if (unlikely((s64)delta_exec < 0))
659 delta_exec = 0;
6cfb0d5d 660
41acab88 661 schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
bb44e5d1
IM
662
663 curr->se.sum_exec_runtime += delta_exec;
f06febc9
FM
664 account_group_exec_runtime(curr, delta_exec);
665
305e6835 666 curr->se.exec_start = rq->clock_task;
d842de87 667 cpuacct_charge(curr, delta_exec);
fa85ae24 668
e9e9250b
PZ
669 sched_rt_avg_update(rq, delta_exec);
670
0b148fa0
PZ
671 if (!rt_bandwidth_enabled())
672 return;
673
354d60c2
DG
674 for_each_sched_rt_entity(rt_se) {
675 rt_rq = rt_rq_of_se(rt_se);
676
cc2991cf 677 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
0986b11b 678 raw_spin_lock(&rt_rq->rt_runtime_lock);
cc2991cf
PZ
679 rt_rq->rt_time += delta_exec;
680 if (sched_rt_runtime_exceeded(rt_rq))
681 resched_task(curr);
0986b11b 682 raw_spin_unlock(&rt_rq->rt_runtime_lock);
cc2991cf 683 }
354d60c2 684 }
bb44e5d1
IM
685}
686
398a153b 687#if defined CONFIG_SMP
e864c499
GH
688
689static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
690
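/*
 * Priority of the next-highest queued RT task on @rq (not counting the
 * running task), or MAX_RT_PRIO when there is none; used to maintain
 * rt_rq->highest_prio.next.
 */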
691static inline int next_prio(struct rq *rq)
63489e45 692{
e864c499
GH
693 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
694
695 if (next && rt_prio(next->prio))
696 return next->prio;
697 else
698 return MAX_RT_PRIO;
699}
e864c499 700
398a153b
GH
701static void
702inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
63489e45 703{
4d984277 704 struct rq *rq = rq_of_rt_rq(rt_rq);
1f11eb6a 705
398a153b 706 if (prio < prev_prio) {
4d984277 707
e864c499
GH
708 /*
709 * If the new task is higher in priority than anything on the
398a153b
GH
710 * run-queue, we know that the previous high becomes our
711 * next-highest.
e864c499 712 */
398a153b 713 rt_rq->highest_prio.next = prev_prio;
1f11eb6a
GH
714
715 if (rq->online)
4d984277 716 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1100ac91 717
e864c499
GH
718 } else if (prio == rt_rq->highest_prio.curr)
719 /*
720 * If the next task is equal in priority to the highest on
721 * the run-queue, then we implicitly know that the next highest
722 * task cannot be any lower than current
723 */
724 rt_rq->highest_prio.next = prio;
725 else if (prio < rt_rq->highest_prio.next)
726 /*
727 * Otherwise, we need to recompute next-highest
728 */
729 rt_rq->highest_prio.next = next_prio(rq);
398a153b 730}
73fe6aae 731
398a153b
GH
732static void
733dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
734{
735 struct rq *rq = rq_of_rt_rq(rt_rq);
d0b27fa7 736
398a153b
GH
737 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
738 rt_rq->highest_prio.next = next_prio(rq);
739
740 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
741 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
63489e45
SR
742}
743
398a153b
GH
744#else /* CONFIG_SMP */
745
6f505b16 746static inline
398a153b
GH
747void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
748static inline
749void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
750
751#endif /* CONFIG_SMP */
6e0534f2 752
052f1dc7 753#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
398a153b
GH
754static void
755inc_rt_prio(struct rt_rq *rt_rq, int prio)
756{
757 int prev_prio = rt_rq->highest_prio.curr;
758
759 if (prio < prev_prio)
760 rt_rq->highest_prio.curr = prio;
761
762 inc_rt_prio_smp(rt_rq, prio, prev_prio);
763}
764
765static void
766dec_rt_prio(struct rt_rq *rt_rq, int prio)
767{
768 int prev_prio = rt_rq->highest_prio.curr;
769
6f505b16 770 if (rt_rq->rt_nr_running) {
764a9d6f 771
398a153b 772 WARN_ON(prio < prev_prio);
764a9d6f 773
e864c499 774 /*
398a153b
GH
775 * This may have been our highest task, and therefore
776 * we may have some recomputation to do
e864c499 777 */
398a153b 778 if (prio == prev_prio) {
e864c499
GH
779 struct rt_prio_array *array = &rt_rq->active;
780
781 rt_rq->highest_prio.curr =
764a9d6f 782 sched_find_first_bit(array->bitmap);
e864c499
GH
783 }
784
764a9d6f 785 } else
e864c499 786 rt_rq->highest_prio.curr = MAX_RT_PRIO;
73fe6aae 787
398a153b
GH
788 dec_rt_prio_smp(rt_rq, prio, prev_prio);
789}
1f11eb6a 790
398a153b
GH
791#else
792
793static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
794static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
795
796#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
6e0534f2 797
052f1dc7 798#ifdef CONFIG_RT_GROUP_SCHED
398a153b
GH
799
800static void
801inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
802{
803 if (rt_se_boosted(rt_se))
804 rt_rq->rt_nr_boosted++;
805
806 if (rt_rq->tg)
807 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
808}
809
810static void
811dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
812{
23b0fdfc
PZ
813 if (rt_se_boosted(rt_se))
814 rt_rq->rt_nr_boosted--;
815
816 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
398a153b
GH
817}
818
819#else /* CONFIG_RT_GROUP_SCHED */
820
821static void
822inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
823{
824 start_rt_bandwidth(&def_rt_bandwidth);
825}
826
827static inline
828void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
829
830#endif /* CONFIG_RT_GROUP_SCHED */
831
832static inline
833void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
834{
835 int prio = rt_se_prio(rt_se);
836
837 WARN_ON(!rt_prio(prio));
838 rt_rq->rt_nr_running++;
839
840 inc_rt_prio(rt_rq, prio);
841 inc_rt_migration(rt_se, rt_rq);
842 inc_rt_group(rt_se, rt_rq);
843}
844
845static inline
846void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
847{
848 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
849 WARN_ON(!rt_rq->rt_nr_running);
850 rt_rq->rt_nr_running--;
851
852 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
853 dec_rt_migration(rt_se, rt_rq);
854 dec_rt_group(rt_se, rt_rq);
63489e45
SR
855}
856
37dad3fc 857static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
bb44e5d1 858{
6f505b16
PZ
859 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
860 struct rt_prio_array *array = &rt_rq->active;
861 struct rt_rq *group_rq = group_rt_rq(rt_se);
20b6331b 862 struct list_head *queue = array->queue + rt_se_prio(rt_se);
bb44e5d1 863
ad2a3f13
PZ
864 /*
 865 * Don't enqueue the group if it's throttled, or when empty.
 866 * The latter is a consequence of the former when a child group
 867 * gets throttled and the current group doesn't have any other
868 * active members.
869 */
870 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
6f505b16 871 return;
63489e45 872
3d4b47b4
PZ
873 if (!rt_rq->rt_nr_running)
874 list_add_leaf_rt_rq(rt_rq);
875
37dad3fc
TG
876 if (head)
877 list_add(&rt_se->run_list, queue);
878 else
879 list_add_tail(&rt_se->run_list, queue);
6f505b16 880 __set_bit(rt_se_prio(rt_se), array->bitmap);
78f2c7db 881
6f505b16
PZ
882 inc_rt_tasks(rt_se, rt_rq);
883}
884
ad2a3f13 885static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
6f505b16
PZ
886{
887 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
888 struct rt_prio_array *array = &rt_rq->active;
889
890 list_del_init(&rt_se->run_list);
891 if (list_empty(array->queue + rt_se_prio(rt_se)))
892 __clear_bit(rt_se_prio(rt_se), array->bitmap);
893
894 dec_rt_tasks(rt_se, rt_rq);
3d4b47b4
PZ
895 if (!rt_rq->rt_nr_running)
896 list_del_leaf_rt_rq(rt_rq);
6f505b16
PZ
897}
898
899/*
900 * Because the prio of an upper entry depends on the lower
 901 * entries, we must remove entries top-down.
6f505b16 902 */
ad2a3f13 903static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
6f505b16 904{
ad2a3f13 905 struct sched_rt_entity *back = NULL;
6f505b16 906
58d6c2d7
PZ
907 for_each_sched_rt_entity(rt_se) {
908 rt_se->back = back;
909 back = rt_se;
910 }
911
912 for (rt_se = back; rt_se; rt_se = rt_se->back) {
913 if (on_rt_rq(rt_se))
ad2a3f13
PZ
914 __dequeue_rt_entity(rt_se);
915 }
916}
917
37dad3fc 918static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
ad2a3f13
PZ
919{
920 dequeue_rt_stack(rt_se);
921 for_each_sched_rt_entity(rt_se)
37dad3fc 922 __enqueue_rt_entity(rt_se, head);
ad2a3f13
PZ
923}
924
925static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
926{
927 dequeue_rt_stack(rt_se);
928
929 for_each_sched_rt_entity(rt_se) {
930 struct rt_rq *rt_rq = group_rt_rq(rt_se);
931
932 if (rt_rq && rt_rq->rt_nr_running)
37dad3fc 933 __enqueue_rt_entity(rt_se, false);
58d6c2d7 934 }
bb44e5d1
IM
935}
936
937/*
938 * Adding/removing a task to/from a priority array:
939 */
ea87bb78 940static void
371fd7e7 941enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
6f505b16
PZ
942{
943 struct sched_rt_entity *rt_se = &p->rt;
944
371fd7e7 945 if (flags & ENQUEUE_WAKEUP)
6f505b16
PZ
946 rt_se->timeout = 0;
947
371fd7e7 948 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
c09595f6 949
917b627d
GH
950 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
951 enqueue_pushable_task(rq, p);
6f505b16
PZ
952}
953
371fd7e7 954static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 955{
6f505b16 956 struct sched_rt_entity *rt_se = &p->rt;
bb44e5d1 957
f1e14ef6 958 update_curr_rt(rq);
ad2a3f13 959 dequeue_rt_entity(rt_se);
c09595f6 960
917b627d 961 dequeue_pushable_task(rq, p);
bb44e5d1
IM
962}
963
964/*
965 * Put task to the end of the run list without the overhead of dequeue
966 * followed by enqueue.
967 */
7ebefa8c
DA
968static void
969requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
6f505b16 970{
1cdad715 971 if (on_rt_rq(rt_se)) {
7ebefa8c
DA
972 struct rt_prio_array *array = &rt_rq->active;
973 struct list_head *queue = array->queue + rt_se_prio(rt_se);
974
975 if (head)
976 list_move(&rt_se->run_list, queue);
977 else
978 list_move_tail(&rt_se->run_list, queue);
1cdad715 979 }
6f505b16
PZ
980}
981
7ebefa8c 982static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
bb44e5d1 983{
6f505b16
PZ
984 struct sched_rt_entity *rt_se = &p->rt;
985 struct rt_rq *rt_rq;
bb44e5d1 986
6f505b16
PZ
987 for_each_sched_rt_entity(rt_se) {
988 rt_rq = rt_rq_of_se(rt_se);
7ebefa8c 989 requeue_rt_entity(rt_rq, rt_se, head);
6f505b16 990 }
bb44e5d1
IM
991}
992
6f505b16 993static void yield_task_rt(struct rq *rq)
bb44e5d1 994{
7ebefa8c 995 requeue_task_rt(rq, rq->curr, 0);
bb44e5d1
IM
996}
997
e7693a36 998#ifdef CONFIG_SMP
318e0893
GH
999static int find_lowest_rq(struct task_struct *task);
1000
0017d735 1001static int
7608dec2 1002select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
e7693a36 1003{
7608dec2
PZ
1004 struct task_struct *curr;
1005 struct rq *rq;
1006 int cpu;
1007
0763a660 1008 if (sd_flag != SD_BALANCE_WAKE)
5f3edc1b
PZ
1009 return smp_processor_id();
1010
7608dec2
PZ
1011 cpu = task_cpu(p);
1012 rq = cpu_rq(cpu);
1013
1014 rcu_read_lock();
1015 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1016
318e0893 1017 /*
7608dec2 1018 * If the current task on @p's runqueue is an RT task, then
e1f47d89
SR
1019 * try to see if we can wake this RT task up on another
1020 * runqueue. Otherwise simply start this RT task
1021 * on its current runqueue.
1022 *
43fa5460
SR
1023 * We want to avoid overloading runqueues. If the woken
1024 * task is a higher priority, then it will stay on this CPU
1025 * and the lower prio task should be moved to another CPU.
1026 * Even though this will probably make the lower prio task
1027 * lose its cache, we do not want to bounce a higher task
1028 * around just because it gave up its CPU, perhaps for a
1029 * lock?
1030 *
1031 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2
PZ
1032 *
1033 * Otherwise, just let it ride on the affined RQ and the
1034 * post-schedule router will push the preempted task away
1035 *
1036 * This test is optimistic, if we get it wrong the load-balancer
1037 * will have to sort it out.
318e0893 1038 */
7608dec2
PZ
1039 if (curr && unlikely(rt_task(curr)) &&
1040 (curr->rt.nr_cpus_allowed < 2 ||
1041 curr->prio < p->prio) &&
6f505b16 1042 (p->rt.nr_cpus_allowed > 1)) {
7608dec2 1043 int target = find_lowest_rq(p);
318e0893 1044
7608dec2
PZ
1045 if (target != -1)
1046 cpu = target;
318e0893 1047 }
7608dec2 1048 rcu_read_unlock();
318e0893 1049
7608dec2 1050 return cpu;
e7693a36 1051}
7ebefa8c
DA
1052
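/*
 * A task woke at the same priority as the running task.  If @p cannot
 * run anywhere else but current can, requeue @p at the head of its queue
 * and reschedule so the push logic gets a chance to move current away.
 */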
1053static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1054{
7ebefa8c
DA
1055 if (rq->curr->rt.nr_cpus_allowed == 1)
1056 return;
1057
24600ce8 1058 if (p->rt.nr_cpus_allowed != 1
13b8bd0a
RR
1059 && cpupri_find(&rq->rd->cpupri, p, NULL))
1060 return;
24600ce8 1061
13b8bd0a
RR
1062 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1063 return;
7ebefa8c
DA
1064
1065 /*
 1066 * There appear to be other CPUs that can accept
 1067 * current and none to run 'p', so let's reschedule
1068 * to try and push current away:
1069 */
1070 requeue_task_rt(rq, p, 1);
1071 resched_task(rq->curr);
1072}
1073
e7693a36
GH
1074#endif /* CONFIG_SMP */
1075
bb44e5d1
IM
1076/*
1077 * Preempt the current task with a newly woken task if needed:
1078 */
7d478721 1079static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 1080{
45c01e82 1081 if (p->prio < rq->curr->prio) {
bb44e5d1 1082 resched_task(rq->curr);
45c01e82
GH
1083 return;
1084 }
1085
1086#ifdef CONFIG_SMP
1087 /*
1088 * If:
1089 *
1090 * - the newly woken task is of equal priority to the current task
1091 * - the newly woken task is non-migratable while current is migratable
1092 * - current will be preempted on the next reschedule
1093 *
1094 * we should check to see if current can readily move to a different
1095 * cpu. If so, we will reschedule to allow the push logic to try
1096 * to move current somewhere else, making room for our non-migratable
1097 * task.
1098 */
8dd0de8b 1099 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
7ebefa8c 1100 check_preempt_equal_prio(rq, p);
45c01e82 1101#endif
bb44e5d1
IM
1102}
1103
6f505b16
PZ
1104static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1105 struct rt_rq *rt_rq)
bb44e5d1 1106{
6f505b16
PZ
1107 struct rt_prio_array *array = &rt_rq->active;
1108 struct sched_rt_entity *next = NULL;
bb44e5d1
IM
1109 struct list_head *queue;
1110 int idx;
1111
1112 idx = sched_find_first_bit(array->bitmap);
6f505b16 1113 BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1
IM
1114
1115 queue = array->queue + idx;
6f505b16 1116 next = list_entry(queue->next, struct sched_rt_entity, run_list);
326587b8 1117
6f505b16
PZ
1118 return next;
1119}
bb44e5d1 1120
917b627d 1121static struct task_struct *_pick_next_task_rt(struct rq *rq)
6f505b16
PZ
1122{
1123 struct sched_rt_entity *rt_se;
1124 struct task_struct *p;
1125 struct rt_rq *rt_rq;
bb44e5d1 1126
6f505b16
PZ
1127 rt_rq = &rq->rt;
1128
1129 if (unlikely(!rt_rq->rt_nr_running))
1130 return NULL;
1131
23b0fdfc 1132 if (rt_rq_throttled(rt_rq))
6f505b16
PZ
1133 return NULL;
1134
1135 do {
1136 rt_se = pick_next_rt_entity(rq, rt_rq);
326587b8 1137 BUG_ON(!rt_se);
6f505b16
PZ
1138 rt_rq = group_rt_rq(rt_se);
1139 } while (rt_rq);
1140
1141 p = rt_task_of(rt_se);
305e6835 1142 p->se.exec_start = rq->clock_task;
917b627d
GH
1143
1144 return p;
1145}
1146
1147static struct task_struct *pick_next_task_rt(struct rq *rq)
1148{
1149 struct task_struct *p = _pick_next_task_rt(rq);
1150
1151 /* The running task is never eligible for pushing */
1152 if (p)
1153 dequeue_pushable_task(rq, p);
1154
bcf08df3 1155#ifdef CONFIG_SMP
3f029d3c
GH
1156 /*
1157 * We detect this state here so that we can avoid taking the RQ
1158 * lock again later if there is no need to push
1159 */
1160 rq->post_schedule = has_pushable_tasks(rq);
bcf08df3 1161#endif
3f029d3c 1162
6f505b16 1163 return p;
bb44e5d1
IM
1164}
1165
31ee529c 1166static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1 1167{
f1e14ef6 1168 update_curr_rt(rq);
bb44e5d1 1169 p->se.exec_start = 0;
917b627d
GH
1170
1171 /*
1172 * The previous task needs to be made eligible for pushing
1173 * if it is still active
1174 */
fd2f4419 1175 if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
917b627d 1176 enqueue_pushable_task(rq, p);
bb44e5d1
IM
1177}
1178
681f3e68 1179#ifdef CONFIG_SMP
6f505b16 1180
e8fa1362
SR
1181/* Only try algorithms three times */
1182#define RT_MAX_TRIES 3
1183
e8fa1362
SR
1184static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1185
f65eda4f
SR
1186static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1187{
1188 if (!task_running(rq, p) &&
96f874e2 1189 (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
6f505b16 1190 (p->rt.nr_cpus_allowed > 1))
f65eda4f
SR
1191 return 1;
1192 return 0;
1193}
1194
e8fa1362 1195/* Return the second highest RT task, NULL otherwise */
79064fbf 1196static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
e8fa1362 1197{
6f505b16
PZ
1198 struct task_struct *next = NULL;
1199 struct sched_rt_entity *rt_se;
1200 struct rt_prio_array *array;
1201 struct rt_rq *rt_rq;
e8fa1362
SR
1202 int idx;
1203
6f505b16
PZ
1204 for_each_leaf_rt_rq(rt_rq, rq) {
1205 array = &rt_rq->active;
1206 idx = sched_find_first_bit(array->bitmap);
49246274 1207next_idx:
6f505b16
PZ
1208 if (idx >= MAX_RT_PRIO)
1209 continue;
1210 if (next && next->prio < idx)
1211 continue;
1212 list_for_each_entry(rt_se, array->queue + idx, run_list) {
3d07467b
PZ
1213 struct task_struct *p;
1214
1215 if (!rt_entity_is_task(rt_se))
1216 continue;
1217
1218 p = rt_task_of(rt_se);
6f505b16
PZ
1219 if (pick_rt_task(rq, p, cpu)) {
1220 next = p;
1221 break;
1222 }
1223 }
1224 if (!next) {
1225 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1226 goto next_idx;
1227 }
f65eda4f
SR
1228 }
1229
e8fa1362
SR
1230 return next;
1231}
1232
0e3900e6 1233static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa1362 1234
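/*
 * Use cpupri to build the mask of CPUs whose highest-priority task is
 * lower priority than @task, then pick the best of them: the task's
 * last CPU if it is in the mask, otherwise a CPU close to it in the
 * sched-domain hierarchy (preferring this_cpu), otherwise any CPU in
 * the mask.  Returns -1 when no suitable CPU exists.
 */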
6e1254d2
GH
1235static int find_lowest_rq(struct task_struct *task)
1236{
1237 struct sched_domain *sd;
96f874e2 1238 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
6e1254d2
GH
1239 int this_cpu = smp_processor_id();
1240 int cpu = task_cpu(task);
06f90dbd 1241
0da938c4
SR
1242 /* Make sure the mask is initialized first */
1243 if (unlikely(!lowest_mask))
1244 return -1;
1245
6e0534f2
GH
1246 if (task->rt.nr_cpus_allowed == 1)
1247 return -1; /* No other targets possible */
6e1254d2 1248
6e0534f2
GH
1249 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1250 return -1; /* No targets found */
6e1254d2
GH
1251
1252 /*
1253 * At this point we have built a mask of cpus representing the
1254 * lowest priority tasks in the system. Now we want to elect
1255 * the best one based on our affinity and topology.
1256 *
1257 * We prioritize the last cpu that the task executed on since
1258 * it is most likely cache-hot in that location.
1259 */
96f874e2 1260 if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2
GH
1261 return cpu;
1262
1263 /*
1264 * Otherwise, we consult the sched_domains span maps to figure
1265 * out which cpu is logically closest to our hot cache data.
1266 */
e2c88063
RR
1267 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1268 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2 1269
cd4ae6ad 1270 rcu_read_lock();
e2c88063
RR
1271 for_each_domain(cpu, sd) {
1272 if (sd->flags & SD_WAKE_AFFINE) {
1273 int best_cpu;
6e1254d2 1274
e2c88063
RR
1275 /*
1276 * "this_cpu" is cheaper to preempt than a
1277 * remote processor.
1278 */
1279 if (this_cpu != -1 &&
cd4ae6ad
XF
1280 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1281 rcu_read_unlock();
e2c88063 1282 return this_cpu;
cd4ae6ad 1283 }
e2c88063
RR
1284
1285 best_cpu = cpumask_first_and(lowest_mask,
1286 sched_domain_span(sd));
cd4ae6ad
XF
1287 if (best_cpu < nr_cpu_ids) {
1288 rcu_read_unlock();
e2c88063 1289 return best_cpu;
cd4ae6ad 1290 }
6e1254d2
GH
1291 }
1292 }
cd4ae6ad 1293 rcu_read_unlock();
6e1254d2
GH
1294
1295 /*
1296 * And finally, if there were no matches within the domains
1297 * just give the caller *something* to work with from the compatible
1298 * locations.
1299 */
e2c88063
RR
1300 if (this_cpu != -1)
1301 return this_cpu;
1302
1303 cpu = cpumask_any(lowest_mask);
1304 if (cpu < nr_cpu_ids)
1305 return cpu;
1306 return -1;
07b4032c
GH
1307}
1308
1309/* Will lock the rq it finds */
4df64c0b 1310static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c
GH
1311{
1312 struct rq *lowest_rq = NULL;
07b4032c 1313 int tries;
4df64c0b 1314 int cpu;
e8fa1362 1315
07b4032c
GH
1316 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1317 cpu = find_lowest_rq(task);
1318
2de0b463 1319 if ((cpu == -1) || (cpu == rq->cpu))
e8fa1362
SR
1320 break;
1321
07b4032c
GH
1322 lowest_rq = cpu_rq(cpu);
1323
e8fa1362 1324 /* if the prio of this runqueue changed, try again */
07b4032c 1325 if (double_lock_balance(rq, lowest_rq)) {
e8fa1362
SR
1326 /*
1327 * We had to unlock the run queue. In
 1328 * the meantime, the task could have
1329 * migrated already or had its affinity changed.
1330 * Also make sure that it wasn't scheduled on its rq.
1331 */
07b4032c 1332 if (unlikely(task_rq(task) != rq ||
96f874e2
RR
1333 !cpumask_test_cpu(lowest_rq->cpu,
1334 &task->cpus_allowed) ||
07b4032c 1335 task_running(rq, task) ||
fd2f4419 1336 !task->on_rq)) {
4df64c0b 1337
05fa785c 1338 raw_spin_unlock(&lowest_rq->lock);
e8fa1362
SR
1339 lowest_rq = NULL;
1340 break;
1341 }
1342 }
1343
1344 /* If this rq is still suitable use it. */
e864c499 1345 if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa1362
SR
1346 break;
1347
1348 /* try again */
1b12bbc7 1349 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1350 lowest_rq = NULL;
1351 }
1352
1353 return lowest_rq;
1354}
1355
917b627d
GH
1356static struct task_struct *pick_next_pushable_task(struct rq *rq)
1357{
1358 struct task_struct *p;
1359
1360 if (!has_pushable_tasks(rq))
1361 return NULL;
1362
1363 p = plist_first_entry(&rq->rt.pushable_tasks,
1364 struct task_struct, pushable_tasks);
1365
1366 BUG_ON(rq->cpu != task_cpu(p));
1367 BUG_ON(task_current(rq, p));
1368 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1369
fd2f4419 1370 BUG_ON(!p->on_rq);
917b627d
GH
1371 BUG_ON(!rt_task(p));
1372
1373 return p;
1374}
1375
e8fa1362
SR
1376/*
1377 * If the current CPU has more than one RT task, see if the non
1378 * running task can migrate over to a CPU that is running a task
1379 * of lesser priority.
1380 */
697f0a48 1381static int push_rt_task(struct rq *rq)
e8fa1362
SR
1382{
1383 struct task_struct *next_task;
1384 struct rq *lowest_rq;
e8fa1362 1385
a22d7fc1
GH
1386 if (!rq->rt.overloaded)
1387 return 0;
1388
917b627d 1389 next_task = pick_next_pushable_task(rq);
e8fa1362
SR
1390 if (!next_task)
1391 return 0;
1392
49246274 1393retry:
697f0a48 1394 if (unlikely(next_task == rq->curr)) {
f65eda4f 1395 WARN_ON(1);
e8fa1362 1396 return 0;
f65eda4f 1397 }
e8fa1362
SR
1398
1399 /*
 1400 * It's possible that the next_task slipped in at a
 1401 * higher priority than current. If that's the case
1402 * just reschedule current.
1403 */
697f0a48
GH
1404 if (unlikely(next_task->prio < rq->curr->prio)) {
1405 resched_task(rq->curr);
e8fa1362
SR
1406 return 0;
1407 }
1408
697f0a48 1409 /* We might release rq lock */
e8fa1362
SR
1410 get_task_struct(next_task);
1411
1412 /* find_lock_lowest_rq locks the rq if found */
697f0a48 1413 lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa1362
SR
1414 if (!lowest_rq) {
1415 struct task_struct *task;
1416 /*
697f0a48 1417 * find lock_lowest_rq releases rq->lock
1563513d
GH
1418 * so it is possible that next_task has migrated.
1419 *
1420 * We need to make sure that the task is still on the same
1421 * run-queue and is also still the next task eligible for
1422 * pushing.
e8fa1362 1423 */
917b627d 1424 task = pick_next_pushable_task(rq);
1563513d
GH
1425 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1426 /*
25985edc 1427 * If we get here, the task hasn't moved at all, but
1563513d
GH
1428 * it has failed to push. We will not try again,
1429 * since the other cpus will pull from us when they
1430 * are ready.
1431 */
1432 dequeue_pushable_task(rq, next_task);
1433 goto out;
e8fa1362 1434 }
917b627d 1435
1563513d
GH
1436 if (!task)
1437 /* No more tasks, just exit */
1438 goto out;
1439
917b627d 1440 /*
1563513d 1441 * Something has shifted, try again.
917b627d 1442 */
1563513d
GH
1443 put_task_struct(next_task);
1444 next_task = task;
1445 goto retry;
e8fa1362
SR
1446 }
1447
697f0a48 1448 deactivate_task(rq, next_task, 0);
e8fa1362
SR
1449 set_task_cpu(next_task, lowest_rq->cpu);
1450 activate_task(lowest_rq, next_task, 0);
1451
1452 resched_task(lowest_rq->curr);
1453
1b12bbc7 1454 double_unlock_balance(rq, lowest_rq);
e8fa1362 1455
e8fa1362
SR
1456out:
1457 put_task_struct(next_task);
1458
917b627d 1459 return 1;
e8fa1362
SR
1460}
1461
e8fa1362
SR
1462static void push_rt_tasks(struct rq *rq)
1463{
1464 /* push_rt_task will return true if it moved an RT */
1465 while (push_rt_task(rq))
1466 ;
1467}
1468
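/*
 * Walk every RT-overloaded runqueue in our root domain and pull over any
 * queued (not running) RT task whose priority beats the highest one
 * queued here.  Returns 1 if something was pulled.
 */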
f65eda4f
SR
1469static int pull_rt_task(struct rq *this_rq)
1470{
80bf3171 1471 int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944 1472 struct task_struct *p;
f65eda4f 1473 struct rq *src_rq;
f65eda4f 1474
637f5085 1475 if (likely(!rt_overloaded(this_rq)))
f65eda4f
SR
1476 return 0;
1477
c6c4927b 1478 for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f
SR
1479 if (this_cpu == cpu)
1480 continue;
1481
1482 src_rq = cpu_rq(cpu);
74ab8e4f
GH
1483
1484 /*
1485 * Don't bother taking the src_rq->lock if the next highest
1486 * task is known to be lower-priority than our current task.
1487 * This may look racy, but if this value is about to go
1488 * logically higher, the src_rq will push this task away.
 1489 * And if it's going logically lower, we do not care.
1490 */
1491 if (src_rq->rt.highest_prio.next >=
1492 this_rq->rt.highest_prio.curr)
1493 continue;
1494
f65eda4f
SR
1495 /*
1496 * We can potentially drop this_rq's lock in
1497 * double_lock_balance, and another CPU could
a8728944 1498 * alter this_rq
f65eda4f 1499 */
a8728944 1500 double_lock_balance(this_rq, src_rq);
f65eda4f
SR
1501
1502 /*
1503 * Are there still pullable RT tasks?
1504 */
614ee1f6
MG
1505 if (src_rq->rt.rt_nr_running <= 1)
1506 goto skip;
f65eda4f 1507
f65eda4f
SR
1508 p = pick_next_highest_task_rt(src_rq, this_cpu);
1509
1510 /*
1511 * Do we have an RT task that preempts
1512 * the to-be-scheduled task?
1513 */
a8728944 1514 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f 1515 WARN_ON(p == src_rq->curr);
fd2f4419 1516 WARN_ON(!p->on_rq);
f65eda4f
SR
1517
1518 /*
1519 * There's a chance that p is higher in priority
1520 * than what's currently running on its cpu.
 1521 * This is just that p is waking up and hasn't
1522 * had a chance to schedule. We only pull
1523 * p if it is lower in priority than the
a8728944 1524 * current task on the run queue
f65eda4f 1525 */
a8728944 1526 if (p->prio < src_rq->curr->prio)
614ee1f6 1527 goto skip;
f65eda4f
SR
1528
1529 ret = 1;
1530
1531 deactivate_task(src_rq, p, 0);
1532 set_task_cpu(p, this_cpu);
1533 activate_task(this_rq, p, 0);
1534 /*
1535 * We continue with the search, just in
1536 * case there's an even higher prio task
25985edc 1537 * in another runqueue. (low likelihood
f65eda4f 1538 * but possible)
f65eda4f 1539 */
f65eda4f 1540 }
49246274 1541skip:
1b12bbc7 1542 double_unlock_balance(this_rq, src_rq);
f65eda4f
SR
1543 }
1544
1545 return ret;
1546}
1547
9a897c5a 1548static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f
SR
1549{
1550 /* Try to pull RT tasks here if we lower this rq's prio */
e864c499 1551 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
f65eda4f
SR
1552 pull_rt_task(rq);
1553}
1554
9a897c5a 1555static void post_schedule_rt(struct rq *rq)
e8fa1362 1556{
967fc046 1557 push_rt_tasks(rq);
e8fa1362
SR
1558}
1559
8ae121ac
GH
1560/*
1561 * If we are not running and we are not going to reschedule soon, we should
1562 * try to push tasks away now
1563 */
efbbd05a 1564static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafd 1565{
9a897c5a 1566 if (!task_running(rq, p) &&
8ae121ac 1567 !test_tsk_need_resched(rq->curr) &&
917b627d 1568 has_pushable_tasks(rq) &&
b3bc211c 1569 p->rt.nr_cpus_allowed > 1 &&
43fa5460 1570 rt_task(rq->curr) &&
b3bc211c
SR
1571 (rq->curr->rt.nr_cpus_allowed < 2 ||
1572 rq->curr->prio < p->prio))
4642dafd
SR
1573 push_rt_tasks(rq);
1574}
1575
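/*
 * The task's CPU affinity is changing: update the runqueue's migratory
 * count and the pushable list to match the new mask before the new
 * cpus_allowed is installed.
 */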
cd8ba7cd 1576static void set_cpus_allowed_rt(struct task_struct *p,
96f874e2 1577 const struct cpumask *new_mask)
73fe6aae 1578{
96f874e2 1579 int weight = cpumask_weight(new_mask);
73fe6aae
GH
1580
1581 BUG_ON(!rt_task(p));
1582
1583 /*
1584 * Update the migration status of the RQ if we have an RT task
1585 * which is running AND changing its weight value.
1586 */
fd2f4419 1587 if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
73fe6aae
GH
1588 struct rq *rq = task_rq(p);
1589
917b627d
GH
1590 if (!task_current(rq, p)) {
1591 /*
1592 * Make sure we dequeue this task from the pushable list
1593 * before going further. It will either remain off of
1594 * the list because we are no longer pushable, or it
1595 * will be requeued.
1596 */
1597 if (p->rt.nr_cpus_allowed > 1)
1598 dequeue_pushable_task(rq, p);
1599
1600 /*
1601 * Requeue if our weight is changing and still > 1
1602 */
1603 if (weight > 1)
1604 enqueue_pushable_task(rq, p);
1605
1606 }
1607
6f505b16 1608 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
73fe6aae 1609 rq->rt.rt_nr_migratory++;
6f505b16 1610 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
73fe6aae
GH
1611 BUG_ON(!rq->rt.rt_nr_migratory);
1612 rq->rt.rt_nr_migratory--;
1613 }
1614
398a153b 1615 update_rt_migration(&rq->rt);
73fe6aae
GH
1616 }
1617
96f874e2 1618 cpumask_copy(&p->cpus_allowed, new_mask);
6f505b16 1619 p->rt.nr_cpus_allowed = weight;
73fe6aae 1620}
deeeccd4 1621
bdd7c81b 1622/* Assumes rq->lock is held */
1f11eb6a 1623static void rq_online_rt(struct rq *rq)
bdd7c81b
IM
1624{
1625 if (rq->rt.overloaded)
1626 rt_set_overload(rq);
6e0534f2 1627
7def2be1
PZ
1628 __enable_runtime(rq);
1629
e864c499 1630 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b
IM
1631}
1632
1633/* Assumes rq->lock is held */
1f11eb6a 1634static void rq_offline_rt(struct rq *rq)
bdd7c81b
IM
1635{
1636 if (rq->rt.overloaded)
1637 rt_clear_overload(rq);
6e0534f2 1638
7def2be1
PZ
1639 __disable_runtime(rq);
1640
6e0534f2 1641 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b 1642}
cb469845
SR
1643
1644/*
 1645 * When switching from the rt queue, we bring ourselves to a position
1646 * that we might want to pull RT tasks from other runqueues.
1647 */
da7a735e 1648static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1649{
1650 /*
1651 * If there are other RT tasks then we will reschedule
1652 * and the scheduling of the other RT tasks will handle
1653 * the balancing. But if we are the last RT task
1654 * we may need to handle the pulling of RT tasks
1655 * now.
1656 */
fd2f4419 1657 if (p->on_rq && !rq->rt.rt_nr_running)
cb469845
SR
1658 pull_rt_task(rq);
1659}
3d8cbdf8
RR
1660
1661static inline void init_sched_rt_class(void)
1662{
1663 unsigned int i;
1664
1665 for_each_possible_cpu(i)
eaa95840 1666 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc 1667 GFP_KERNEL, cpu_to_node(i));
3d8cbdf8 1668}
cb469845
SR
1669#endif /* CONFIG_SMP */
1670
1671/*
1672 * When switching a task to RT, we may overload the runqueue
1673 * with RT tasks. In this case we try to push them off to
1674 * other runqueues.
1675 */
da7a735e 1676static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1677{
1678 int check_resched = 1;
1679
1680 /*
1681 * If we are already running, then there's nothing
1682 * that needs to be done. But if we are not running
1683 * we may need to preempt the current running task.
1684 * If that current running task is also an RT task
1685 * then see if we can move to another run queue.
1686 */
fd2f4419 1687 if (p->on_rq && rq->curr != p) {
cb469845
SR
1688#ifdef CONFIG_SMP
1689 if (rq->rt.overloaded && push_rt_task(rq) &&
1690 /* Don't resched if we changed runqueues */
1691 rq != task_rq(p))
1692 check_resched = 0;
1693#endif /* CONFIG_SMP */
1694 if (check_resched && p->prio < rq->curr->prio)
1695 resched_task(rq->curr);
1696 }
1697}
1698
1699/*
1700 * Priority of the task has changed. This may cause
1701 * us to initiate a push or pull.
1702 */
da7a735e
PZ
1703static void
1704prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 1705{
fd2f4419 1706 if (!p->on_rq)
da7a735e
PZ
1707 return;
1708
1709 if (rq->curr == p) {
cb469845
SR
1710#ifdef CONFIG_SMP
1711 /*
1712 * If our priority decreases while running, we
1713 * may need to pull tasks to this runqueue.
1714 */
1715 if (oldprio < p->prio)
1716 pull_rt_task(rq);
1717 /*
1718 * If there's a higher priority task waiting to run
6fa46fa5
SR
1719 * then reschedule. Note, the above pull_rt_task
1720 * can release the rq lock and p could migrate.
1721 * Only reschedule if p is still on the same runqueue.
cb469845 1722 */
e864c499 1723 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb469845
SR
1724 resched_task(p);
1725#else
1726 /* For UP simply resched on drop of prio */
1727 if (oldprio < p->prio)
1728 resched_task(p);
e8fa1362 1729#endif /* CONFIG_SMP */
cb469845
SR
1730 } else {
1731 /*
1732 * This task is not running, but if it is
1733 * greater than the current running task
1734 * then reschedule.
1735 */
1736 if (p->prio < rq->curr->prio)
1737 resched_task(rq->curr);
1738 }
1739}
1740
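/*
 * RLIMIT_RTTIME watchdog: rt.timeout counts ticks the task has run
 * without sleeping; once it passes the soft limit, set the cpu-timer
 * expiry so the rlimit enforcement in the posix-cpu-timer code kicks in.
 */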
78f2c7db
PZ
1741static void watchdog(struct rq *rq, struct task_struct *p)
1742{
1743 unsigned long soft, hard;
1744
78d7d407
JS
1745 /* max may change after cur was read, this will be fixed next tick */
1746 soft = task_rlimit(p, RLIMIT_RTTIME);
1747 hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db
PZ
1748
1749 if (soft != RLIM_INFINITY) {
1750 unsigned long next;
1751
1752 p->rt.timeout++;
1753 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd50 1754 if (p->rt.timeout > next)
f06febc9 1755 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db
PZ
1756 }
1757}
bb44e5d1 1758
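/*
 * Scheduler tick for RT tasks: account runtime, run the RLIMIT_RTTIME
 * watchdog and round-robin SCHED_RR tasks whose timeslice has expired.
 */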
8f4d37ec 1759static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1 1760{
67e2be02
PZ
1761 update_curr_rt(rq);
1762
78f2c7db
PZ
1763 watchdog(rq, p);
1764
bb44e5d1
IM
1765 /*
1766 * RR tasks need a special form of timeslice management.
1767 * FIFO tasks have no timeslices.
1768 */
1769 if (p->policy != SCHED_RR)
1770 return;
1771
fa717060 1772 if (--p->rt.time_slice)
bb44e5d1
IM
1773 return;
1774
fa717060 1775 p->rt.time_slice = DEF_TIMESLICE;
bb44e5d1 1776
98fbc798
DA
1777 /*
1778 * Requeue to the end of queue if we are not the only element
1779 * on the queue:
1780 */
fa717060 1781 if (p->rt.run_list.prev != p->rt.run_list.next) {
7ebefa8c 1782 requeue_task_rt(rq, p, 0);
98fbc798
DA
1783 set_tsk_need_resched(p);
1784 }
bb44e5d1
IM
1785}
1786
83b699ed
SV
1787static void set_curr_task_rt(struct rq *rq)
1788{
1789 struct task_struct *p = rq->curr;
1790
305e6835 1791 p->se.exec_start = rq->clock_task;
917b627d
GH
1792
1793 /* The running task is never eligible for pushing */
1794 dequeue_pushable_task(rq, p);
83b699ed
SV
1795}
1796
6d686f45 1797static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cea
PW
1798{
1799 /*
1800 * Time slice is 0 for SCHED_FIFO tasks
1801 */
1802 if (task->policy == SCHED_RR)
1803 return DEF_TIMESLICE;
1804 else
1805 return 0;
1806}
1807
2abdad0a 1808static const struct sched_class rt_sched_class = {
5522d5d5 1809 .next = &fair_sched_class,
bb44e5d1
IM
1810 .enqueue_task = enqueue_task_rt,
1811 .dequeue_task = dequeue_task_rt,
1812 .yield_task = yield_task_rt,
1813
1814 .check_preempt_curr = check_preempt_curr_rt,
1815
1816 .pick_next_task = pick_next_task_rt,
1817 .put_prev_task = put_prev_task_rt,
1818
681f3e68 1819#ifdef CONFIG_SMP
4ce72a2c
LZ
1820 .select_task_rq = select_task_rq_rt,
1821
73fe6aae 1822 .set_cpus_allowed = set_cpus_allowed_rt,
1f11eb6a
GH
1823 .rq_online = rq_online_rt,
1824 .rq_offline = rq_offline_rt,
9a897c5a
SR
1825 .pre_schedule = pre_schedule_rt,
1826 .post_schedule = post_schedule_rt,
efbbd05a 1827 .task_woken = task_woken_rt,
cb469845 1828 .switched_from = switched_from_rt,
681f3e68 1829#endif
bb44e5d1 1830
83b699ed 1831 .set_curr_task = set_curr_task_rt,
bb44e5d1 1832 .task_tick = task_tick_rt,
cb469845 1833
0d721cea
PW
1834 .get_rr_interval = get_rr_interval_rt,
1835
cb469845
SR
1836 .prio_changed = prio_changed_rt,
1837 .switched_to = switched_to_rt,
bb44e5d1 1838};
ada18de2
PZ
1839
1840#ifdef CONFIG_SCHED_DEBUG
1841extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1842
1843static void print_rt_stats(struct seq_file *m, int cpu)
1844{
ec514c48 1845 rt_rq_iter_t iter;
ada18de2
PZ
1846 struct rt_rq *rt_rq;
1847
1848 rcu_read_lock();
ec514c48 1849 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2
PZ
1850 print_rt_rq(m, cpu, rt_rq);
1851 rcu_read_unlock();
1852}
55e12e5e 1853#endif /* CONFIG_SCHED_DEBUG */
0e3900e6 1854