/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;
static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}
static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}
static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
}
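
/*
 * Keep the global rt_overload state in sync with this runqueue: it is
 * marked overloaded only while it has more than one queued RT task and
 * at least one of them may migrate to another CPU.
 */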
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

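/*
 * inc/dec_rt_tasks track per-rq RT state as tasks come and go: the
 * number of queued RT tasks and, on SMP, the cached highest RT
 * priority plus the count of migratable tasks that feeds
 * update_rt_migration().
 */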
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

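/*
 * Note: double_lock_balance() may have to drop this_rq->lock in order
 * to take both locks in address order, and it returns nonzero when it
 * did so; every caller below revalidates its state after the call.
 */
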
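/*
 * A task is a candidate for migration if it is not currently running,
 * is allowed on the requested cpu (cpu < 0 means any cpu), and is
 * allowed to run on more than one CPU.
 */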
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
						     int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct,
				  run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

	goto retry;

 out:
	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

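/*
 * Find a runqueue that 'task' could run on right away: ideally one
 * running no RT task at all, otherwise the one whose highest RT
 * priority is the lowest, provided it is still below task's priority.
 */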
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *this_rq)
{
	struct rq *lowest_rq = NULL;
	int cpu;
	int tries;
	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);

	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		/*
		 * Scan each rq for the lowest prio.
		 */
		for_each_cpu_mask(cpu, *cpu_mask) {
			struct rq *rq = &per_cpu(runqueues, cpu);

			if (cpu == this_rq->cpu)
				continue;

			/* We look for lowest RT prio or non-rt CPU */
			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
				lowest_rq = rq;
				break;
			}

			/* no locking for now */
			if (rq->rt.highest_prio > task->prio &&
			    (!lowest_rq ||
			     rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
				lowest_rq = rq;
			}
		}

		if (!lowest_rq)
			break;

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(this_rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != this_rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(this_rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *this_rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&this_rq->lock);

	next_task = pick_next_highest_task_rt(this_rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == this_rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in at a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < this_rq->curr->prio)) {
		resched_task(this_rq->curr);
		return 0;
	}

	/* We might release this_rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, this_rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases this_rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(this_rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(this_rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 * the queue, and stop when it can't migrate (or there's
 * no more RT tasks). There may be a case where a lower
 * priority RT task has a different affinity than the
 * higher RT task. In this case the lower RT task could
 * possibly be able to migrate whereas the higher priority
 * RT task could not. We currently ignore this issue.
 * Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

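/*
 * If this runqueue's priority has dropped, scan the overloaded
 * runqueues and pull over any RT task that would preempt what we are
 * about to run, so the highest-priority runnable RT tasks keep
 * running somewhere.
 */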
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay for trying to keep
	 * cache dirtying down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non-overloaded runqueue.
			 * Clear it now.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on its run queue, and if
			 * this_rq's next task is not higher in prio
			 * than that current task either.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto bail;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */

			/*
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 bail:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}
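
/*
 * Called with rq->lock held (pull_rt_task() asserts it): if prev is an
 * RT task and this rq is about to drop below prev's priority, see if
 * there is higher-priority RT work to pull over first.
 */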
static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.rt_nr_running > 1)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

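/*
 * A woken RT task that cannot preempt the current task here
 * (p->prio >= curr's prio) might still run right away on another CPU,
 * so give the push logic a chance.
 */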
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->curr->prio))
		push_rt_tasks(rq);
}

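/*
 * RT tasks are balanced by the push/pull operations above, so the
 * generic load balancer is told there is nothing for it to move.
 */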
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
			rq->rt.rt_nr_migratory++;
		else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
# define wakeup_balance_rt(rq, p)	do { } while (0)
#endif /* CONFIG_SMP */

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

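/*
 * set_curr_task is invoked when a task becomes rq->curr without going
 * through pick_next_task_rt() (e.g. after a policy change while
 * running), so restart its exec-time accounting window here.
 */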
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};