/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}
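
/*
 * Note: the wmb() above publishes the rto_mask update before the
 * rto_count increment. The read side (e.g. pull_rt_task(), which checks
 * rt_overloaded() before walking rto_mask) relies on seeing the mask
 * once it observes a non-zero count.
 */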

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
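
/*
 * Worked example (illustrative numbers): in a 4-CPU root domain
 * (weight = 4), a neighbour with rt_runtime = 20ms and rt_time = 5ms
 * has diff = 15ms of spare time, of which we take 15ms / 4 = 3.75ms,
 * clamped so that our own rt_runtime never exceeds rt_period.
 */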

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		disable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		enable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
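
/*
 * Worked example (illustrative numbers): with rt_period = 1s and
 * rt_runtime = 950ms, a throttled rt_rq that accrued rt_time = 970ms
 * has min(970ms, 1 * 950ms) of accrued time forgiven on the next period
 * tick (overrun = 1), leaving rt_time = 20ms; since 20ms < 950ms the
 * rt_rq is unthrottled and re-enqueued.
 */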

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			static bool once = false;

			rt_rq->rt_throttled = 1;

			if (!once) {
				once = true;
				printk_sched("sched: RT throttling activated\n");
			}
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}
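
/*
 * Illustration: for_each_sched_rt_entity() above walks bottom-up
 * (task -> parent group -> root), so the ->back links end up pointing
 * back down the hierarchy; the second loop then starts at the topmost
 * entity and follows ->back, giving the top-down dequeue order the
 * comment above requires.
 */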

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	cpu = task_cpu(p);

	if (p->rt.nr_cpus_allowed == 1)
		goto out;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is of higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio <= idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
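
/*
 * Summary of the fallback order above: (1) the task's last CPU when it
 * is in lowest_mask, (2) the topologically closest CPU found via the
 * SD_WAKE_AFFINE domains, preferring this_cpu where it qualifies,
 * (3) this_cpu or any remaining CPU from lowest_mask, else -1.
 */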

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	if (unlikely(task_running(rq, next_task)))
		return 0;
#endif

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT task */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1 &&
	    rt_task(rq->curr) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!rt_task(p));

	if (!p->on_rq)
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_task(rq, p);
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_task(rq, p);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (p->on_rq && !rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
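
/*
 * Worked example (illustrative numbers): with HZ = 1000 one tick is
 * USEC_PER_SEC/HZ = 1000us, so an RLIMIT_RTTIME soft limit of 2s
 * (2,000,000us) converts to next = 2000 ticks; once p->rt.timeout
 * exceeds that, sched_exp is set to the current sum_exec_runtime so
 * that the per-thread cputime expiry check fires on its next pass.
 */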

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = RR_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock_task;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return RR_TIMESLICE;
	else
		return 0;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */