/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
        return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
        return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rt_overload_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rto_count);
        cpu_clear(rq->cpu, rt_overload_mask);
}

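/*
 * A runqueue is "overloaded" when it has more than one runnable RT
 * task and at least one of them may migrate: only then is there
 * anything another CPU could usefully pull.
 */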
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
                rt_set_overload(rq);
        else
                rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

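/*
 * Enqueue/dequeue bookkeeping: track the RT task count and, on SMP,
 * keep the cached highest priority and the migratable-task count in
 * sync so the push/pull logic below can make cheap decisions.
 */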
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
        if (p->prio < rq->rt.highest_prio)
                rq->rt.highest_prio = p->prio;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory++;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
        if (rq->rt.rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(p->prio < rq->rt.highest_prio);
                if (p->prio == rq->rt.highest_prio) {
                        /* recalculate */
                        array = &rq->rt.active;
                        rq->rt.highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->rt.highest_prio alone */
        } else
                rq->rt.highest_prio = MAX_RT_PRIO;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory--;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
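/*
 * Wake the task on the CPU it last ran on; any rebalancing is left
 * to the push/pull logic once the task is enqueued.
 */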
static int select_task_rq_rt(struct task_struct *p, int sync)
{
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

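/*
 * Pick the highest-priority runnable RT task: the first set bit in the
 * priority bitmap indexes the queue whose head we run next.
 */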
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

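/*
 * The task is leaving the CPU: account its runtime and clear
 * exec_start so a stale timestamp is never reused.
 */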
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

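/*
 * A task can be picked for migration if it is not currently running,
 * is allowed on the target CPU (cpu == -1 means any CPU), and is not
 * pinned to a single CPU.
 */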
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
                                                     int cpu)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        assert_spin_locked(&rq->lock);

        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;

        idx = sched_find_first_bit(array->bitmap);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running is bad */
                return NULL;
        }

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        next = list_entry(queue->next, struct task_struct, run_list);
        if (unlikely(pick_rt_task(rq, next, cpu)))
                goto out;

        if (queue->next->next != queue) {
                /* same prio task */
                next = list_entry(queue->next->next, struct task_struct,
                                  run_list);
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

 retry:
        /* slower, but more flexible */
        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
        if (unlikely(idx >= MAX_RT_PRIO))
                return NULL;

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        list_for_each_entry(next, queue, run_list) {
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

        goto retry;

 out:
        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
                                      struct rq *this_rq)
{
        struct rq *lowest_rq = NULL;
        int cpu;
        int tries;
        cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);

        cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                /*
                 * Scan each rq for the lowest prio.
                 */
                for_each_cpu_mask(cpu, *cpu_mask) {
                        struct rq *rq = &per_cpu(runqueues, cpu);

                        if (cpu == this_rq->cpu)
                                continue;

                        /* We look for lowest RT prio or non-rt CPU */
                        if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                                lowest_rq = rq;
                                break;
                        }

                        /* no locking for now */
                        if (rq->rt.highest_prio > task->prio &&
                            (!lowest_rq ||
                             rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
                                lowest_rq = rq;
                        }
                }

                if (!lowest_rq)
                        break;

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(this_rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In the
                         * meantime, the task could have migrated
                         * already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != this_rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(this_rq, task) ||
                                     !task->se.on_rq)) {
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        assert_spin_locked(&rq->lock);

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock,
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        assert_spin_locked(&lowest_rq->lock);

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
 out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

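/*
 * Pull side of the balancer: scan the overloaded runqueues for the
 * highest-priority RT task that may run here, and take it if it beats
 * whatever this runqueue would schedule next.
 */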
static int pull_rt_task(struct rq *this_rq)
{
        struct task_struct *next;
        struct task_struct *p;
        struct rq *src_rq;
        cpumask_t *rto_cpumask;
        int this_cpu = this_rq->cpu;
        int cpu;
        int ret = 0;

        assert_spin_locked(&this_rq->lock);

        /*
         * If cpusets are used, and we have overlapping
         * run queue cpusets, then this algorithm may not catch all.
         * This is just the price you pay for trying to keep
         * dirtying caches down on large SMP machines.
         */
        if (likely(!rt_overloaded()))
                return 0;

        next = pick_next_task_rt(this_rq);

        rto_cpumask = rt_overload();

        for_each_cpu_mask(cpu, *rto_cpumask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
                        /*
                         * It is possible that overlapping cpusets
                         * will miss clearing a non overloaded runqueue.
                         * Clear it now.
                         */
                        if (double_lock_balance(this_rq, src_rq)) {
                                /* unlocked our runqueue lock */
                                struct task_struct *old_next = next;
                                next = pick_next_task_rt(this_rq);
                                if (next != old_next)
                                        ret = 1;
                        }
                        if (likely(src_rq->rt.rt_nr_running <= 1))
                                /*
                                 * Small chance that this_rq->curr changed
                                 * but it's really harmless here.
                                 */
                                rt_clear_overload(this_rq);
                        else
                                /*
                                 * Heh, the src_rq is now overloaded, since
                                 * we already have the src_rq lock, go straight
                                 * to pulling tasks from it.
                                 */
                                goto try_pulling;
                        spin_unlock(&src_rq->lock);
                        continue;
                }

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;
                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1) {
                        spin_unlock(&src_rq->lock);
                        continue;
                }

 try_pulling:
                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue or
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto bail;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         */

                        /*
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;
                }
 bail:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

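/*
 * prev is being descheduled; if everything left on this runqueue is of
 * lower priority than prev was, see if another CPU has RT work for us.
 */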
static void schedule_balance_rt(struct rq *rq,
                                struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) &&
            rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.rt_nr_running > 1)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

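/*
 * A woken RT task that did not preempt current may still be runnable
 * elsewhere; try pushing it (and any other queued RT tasks) away.
 */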
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
            (p->prio >= rq->curr->prio))
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}
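
/*
 * An affinity change can turn a pinned task into a migratable one and
 * vice versa, so the runqueue's migratory count (and with it the
 * overload state) must be updated together with the mask.
 */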
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->nr_cpus_allowed <= 1) && (weight > 1))
                        rq->rt.rt_nr_migratory++;
                else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed = *new_mask;
        p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)   do { } while (0)
# define schedule_balance_rt(rq, prev)  do { } while (0)
# define wakeup_balance_rt(rq, p)       do { } while (0)
#endif /* CONFIG_SMP */

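/*
 * Per-tick work: account runtime and, for SCHED_RR, rotate the task to
 * the tail of its queue when its timeslice runs out. SCHED_FIFO tasks
 * are never rotated.
 */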
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

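/*
 * rq->curr became ours without going through pick_next_task_rt()
 * (e.g. after a policy or priority change); stamp its start time.
 */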
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

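/*
 * The RT scheduling class method table. .next chains to the fair
 * class, which the core scheduler consults when no RT task is
 * runnable.
 */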
const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};