1 /*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 */
20
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/nmi.h>
24 #include <linux/init.h>
25 #include <asm/uaccess.h>
26 #include <linux/highmem.h>
27 #include <linux/smp_lock.h>
28 #include <asm/mmu_context.h>
29 #include <linux/interrupt.h>
30 #include <linux/completion.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/security.h>
33 #include <linux/notifier.h>
34 #include <linux/profile.h>
35 #include <linux/suspend.h>
36 #include <linux/blkdev.h>
37 #include <linux/delay.h>
38 #include <linux/smp.h>
39 #include <linux/threads.h>
40 #include <linux/timer.h>
41 #include <linux/rcupdate.h>
42 #include <linux/cpu.h>
43 #include <linux/cpuset.h>
44 #include <linux/percpu.h>
45 #include <linux/kthread.h>
46 #include <linux/seq_file.h>
47 #include <linux/syscalls.h>
48 #include <linux/times.h>
49 #include <linux/acct.h>
50 #include <asm/tlb.h>
51
52 #include <asm/unistd.h>
53
54 /*
55 * Convert user-nice values [ -20 ... 0 ... 19 ]
56 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
57 * and back.
58 */
59 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
60 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
61 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
62
63 /*
64 * 'User priority' is the nice value converted to something we
65 * can work with better when scaling various scheduler parameters,
66 * it's a [ 0 ... 39 ] range.
67 */
68 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
69 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
70 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
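/*
 * Worked example (MAX_RT_PRIO is 100 and MAX_PRIO is 140 in this
 * kernel, so normal tasks occupy static priorities 100..139):
 *
 *   NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139
 *   PRIO_TO_NICE(120) == 0
 *   USER_PRIO(120) == 20, MAX_USER_PRIO == 40
 */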
71
72 /*
73 * Some helpers for converting nanosecond timing to jiffy resolution
74 */
75 #define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
76 #define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
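/*
 * Example, assuming HZ == 1000 (a common configuration for this era):
 * one jiffy is 1000000 ns, so NS_TO_JIFFIES(2500000) == 2 and
 * JIFFIES_TO_NS(5) == 5000000. Note that the integer division in
 * NS_TO_JIFFIES silently drops sub-jiffy remainders.
 */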
77
78 /*
79 * These are the 'tuning knobs' of the scheduler:
80 *
81 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
82 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
83 * Timeslices get refilled after they expire.
84 */
85 #define MIN_TIMESLICE max(5 * HZ / 1000, 1)
86 #define DEF_TIMESLICE (100 * HZ / 1000)
87 #define ON_RUNQUEUE_WEIGHT 30
88 #define CHILD_PENALTY 95
89 #define PARENT_PENALTY 100
90 #define EXIT_WEIGHT 3
91 #define PRIO_BONUS_RATIO 25
92 #define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
93 #define INTERACTIVE_DELTA 2
94 #define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS)
95 #define STARVATION_LIMIT (MAX_SLEEP_AVG)
96 #define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG))
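/*
 * Plugging in the defaults (HZ == 1000 assumed for the jiffy figures):
 *
 *   MAX_BONUS        == 40 * 25 / 100     == 10
 *   DEF_TIMESLICE    == 100 * 1000 / 1000 == 100 jiffies (100ms)
 *   MAX_SLEEP_AVG    == 100 * 10          == 1000 jiffies (1s)
 *   NS_MAX_SLEEP_AVG == 1000000000 ns
 *
 * i.e. a task's sleep_avg saturates after about one second of
 * accumulated sleep credit.
 */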
97
98 /*
99 * If a task is 'interactive' then we reinsert it in the active
100 * array after it has expired its current timeslice. (it will not
101 * continue to run immediately, it will still round-robin with
102 * other interactive tasks.)
103 *
104 * This part scales the interactivity limit depending on niceness.
105 *
106 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
107 * Here are a few examples of different nice levels:
108 *
109 * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
110 * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
111 * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0]
112 * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
113 * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
114 *
115 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
116 * priority range a task can explore, a value of '1' means the
117 * task is rated interactive.)
118 *
119 * I.e. nice +19 tasks can never get 'interactive' enough to be
120 * reinserted into the active array. And only heavily CPU-hog nice -20
121 * tasks will be expired. Default nice 0 tasks are somewhere in between:
122 * it takes some effort for them to get interactive, but it's not
123 * too hard.
124 */
125
126 #define CURRENT_BONUS(p) \
127 (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
128 MAX_SLEEP_AVG)
129
130 #define GRANULARITY (10 * HZ / 1000 ? : 1)
131
132 #ifdef CONFIG_SMP
133 #define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
134 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
135 num_online_cpus())
136 #else
137 #define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
138 (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
139 #endif
140
141 #define SCALE(v1,v1_max,v2_max) \
142 (v1) * (v2_max) / (v1_max)
143
144 #define DELTA(p) \
145 (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
146
147 #define TASK_INTERACTIVE(p) \
148 ((p)->prio <= (p)->static_prio - DELTA(p))
149
150 #define INTERACTIVE_SLEEP(p) \
151 (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
152 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
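/*
 * Worked example for a nice 0 task, using MAX_BONUS == 10 and
 * MAX_SLEEP_AVG == 1000 jiffies from the defaults above:
 *
 *   DELTA(p) == SCALE(0, 40, 10) + 2 == 2
 *   TASK_INTERACTIVE(p) is true once p->prio <= p->static_prio - 2,
 *   i.e. once the task has earned a dynamic bonus of at least 2
 *   INTERACTIVE_SLEEP(p) == JIFFIES_TO_NS(1000 * (5 + 2 + 1) / 10 - 1)
 *                        == JIFFIES_TO_NS(799), roughly 0.8 seconds
 */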
153
154 #define TASK_PREEMPTS_CURR(p, rq) \
155 ((p)->prio < (rq)->curr->prio)
156
157 /*
158 * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
159 * to time slice values: [800ms ... 100ms ... 5ms]
160 *
161 * The higher a thread's priority, the bigger timeslices
162 * it gets during one round of execution. But even the lowest
163 * priority thread gets MIN_TIMESLICE worth of execution time.
164 */
165
166 #define SCALE_PRIO(x, prio) \
167 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
168
169 static inline unsigned int task_timeslice(task_t *p)
170 {
171 if (p->static_prio < NICE_TO_PRIO(0))
172 return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
173 else
174 return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
175 }
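/*
 * Worked examples, assuming HZ == 1000 (so DEF_TIMESLICE == 100 and
 * MIN_TIMESLICE == 5 jiffies) and the 100..139 static priority range:
 *
 *   nice -20: static_prio 100 < NICE_TO_PRIO(0), so
 *             SCALE_PRIO(400, 100) == max(400 * 40 / 20, 5) == 800ms
 *   nice   0: SCALE_PRIO(100, 120) == max(100 * 20 / 20, 5) == 100ms
 *   nice +19: SCALE_PRIO(100, 139) == max(100 *  1 / 20, 5) ==   5ms
 *
 * matching the [800ms ... 100ms ... 5ms] range quoted above.
 */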
176 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
177 < (long long) (sd)->cache_hot_time)
178
179 /*
180 * These are the runqueue data structures:
181 */
182
183 #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
184
185 typedef struct runqueue runqueue_t;
186
187 struct prio_array {
188 unsigned int nr_active;
189 unsigned long bitmap[BITMAP_SIZE];
190 struct list_head queue[MAX_PRIO];
191 };
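/*
 * Picking the next task is O(1): sched_find_first_bit() on ->bitmap
 * yields the highest populated priority, and the head of the matching
 * ->queue list is the task to run. With MAX_PRIO == 140, BITMAP_SIZE
 * works out to (141 + 7) / 8 == 18 bytes, rounded up to 3 longs on
 * 64-bit (5 on 32-bit).
 */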
192
193 /*
194 * This is the main, per-CPU runqueue data structure.
195 *
196 * Locking rule: in those places that want to lock multiple runqueues
197 * (such as the load balancing or the thread migration code), lock
198 * acquire operations must be ordered by ascending runqueue address.
199 */
200 struct runqueue {
201 spinlock_t lock;
202
203 /*
204 * nr_running and cpu_load should be in the same cacheline because
205 * remote CPUs use both these fields when doing load calculation.
206 */
207 unsigned long nr_running;
208 #ifdef CONFIG_SMP
209 unsigned long cpu_load[3];
210 #endif
211 unsigned long long nr_switches;
212
213 /*
214 * This is part of a global counter where only the total sum
215 * over all CPUs matters. A task can increase this counter on
216 * one CPU and if it got migrated afterwards it may decrease
217 * it on another CPU. Always updated under the runqueue lock:
218 */
219 unsigned long nr_uninterruptible;
220
221 unsigned long expired_timestamp;
222 unsigned long long timestamp_last_tick;
223 task_t *curr, *idle;
224 struct mm_struct *prev_mm;
225 prio_array_t *active, *expired, arrays[2];
226 int best_expired_prio;
227 atomic_t nr_iowait;
228
229 #ifdef CONFIG_SMP
230 struct sched_domain *sd;
231
232 /* For active balancing */
233 int active_balance;
234 int push_cpu;
235
236 task_t *migration_thread;
237 struct list_head migration_queue;
238 #endif
239
240 #ifdef CONFIG_SCHEDSTATS
241 /* latency stats */
242 struct sched_info rq_sched_info;
243
244 /* sys_sched_yield() stats */
245 unsigned long yld_exp_empty;
246 unsigned long yld_act_empty;
247 unsigned long yld_both_empty;
248 unsigned long yld_cnt;
249
250 /* schedule() stats */
251 unsigned long sched_switch;
252 unsigned long sched_cnt;
253 unsigned long sched_goidle;
254
255 /* try_to_wake_up() stats */
256 unsigned long ttwu_cnt;
257 unsigned long ttwu_local;
258 #endif
259 };
260
261 static DEFINE_PER_CPU(struct runqueue, runqueues);
262
263 #define for_each_domain(cpu, domain) \
264 for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
265
266 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
267 #define this_rq() (&__get_cpu_var(runqueues))
268 #define task_rq(p) cpu_rq(task_cpu(p))
269 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
270
271 /*
272 * Default context-switch locking:
273 */
274 #ifndef prepare_arch_switch
275 # define prepare_arch_switch(rq, next) do { } while (0)
276 # define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
277 # define task_running(rq, p) ((rq)->curr == (p))
278 #endif
279
280 /*
281 * task_rq_lock - lock the runqueue a given task resides on and disable
282 * interrupts. Note the ordering: we can safely lookup the task_rq without
283 * explicitly disabling preemption.
284 */
285 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
286 __acquires(rq->lock)
287 {
288 struct runqueue *rq;
289
290 repeat_lock_task:
291 local_irq_save(*flags);
292 rq = task_rq(p);
293 spin_lock(&rq->lock);
294 if (unlikely(rq != task_rq(p))) {
295 spin_unlock_irqrestore(&rq->lock, *flags);
296 goto repeat_lock_task;
297 }
298 return rq;
299 }
300
301 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
302 __releases(rq->lock)
303 {
304 spin_unlock_irqrestore(&rq->lock, *flags);
305 }
306
307 #ifdef CONFIG_SCHEDSTATS
308 /*
309 * bump this up when changing the output format or the meaning of an existing
310 * format, so that tools can adapt (or abort)
311 */
312 #define SCHEDSTAT_VERSION 11
313
314 static int show_schedstat(struct seq_file *seq, void *v)
315 {
316 int cpu;
317
318 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
319 seq_printf(seq, "timestamp %lu\n", jiffies);
320 for_each_online_cpu(cpu) {
321 runqueue_t *rq = cpu_rq(cpu);
322 #ifdef CONFIG_SMP
323 struct sched_domain *sd;
324 int dcnt = 0;
325 #endif
326
327 /* runqueue-specific stats */
328 seq_printf(seq,
329 "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
330 cpu, rq->yld_both_empty,
331 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
332 rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
333 rq->ttwu_cnt, rq->ttwu_local,
334 rq->rq_sched_info.cpu_time,
335 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
336
337 seq_printf(seq, "\n");
338
339 #ifdef CONFIG_SMP
340 /* domain-specific stats */
341 for_each_domain(cpu, sd) {
342 enum idle_type itype;
343 char mask_str[NR_CPUS];
344
345 cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
346 seq_printf(seq, "domain%d %s", dcnt++, mask_str);
347 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
348 itype++) {
349 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
350 sd->lb_cnt[itype],
351 sd->lb_balanced[itype],
352 sd->lb_failed[itype],
353 sd->lb_imbalance[itype],
354 sd->lb_gained[itype],
355 sd->lb_hot_gained[itype],
356 sd->lb_nobusyq[itype],
357 sd->lb_nobusyg[itype]);
358 }
359 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n",
360 sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
361 sd->sbe_pushed, sd->sbe_attempts,
362 sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
363 }
364 #endif
365 }
366 return 0;
367 }
368
369 static int schedstat_open(struct inode *inode, struct file *file)
370 {
371 unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
372 char *buf = kmalloc(size, GFP_KERNEL);
373 struct seq_file *m;
374 int res;
375
376 if (!buf)
377 return -ENOMEM;
378 res = single_open(file, show_schedstat, NULL);
379 if (!res) {
380 m = file->private_data;
381 m->buf = buf;
382 m->size = size;
383 } else
384 kfree(buf);
385 return res;
386 }
387
388 struct file_operations proc_schedstat_operations = {
389 .open = schedstat_open,
390 .read = seq_read,
391 .llseek = seq_lseek,
392 .release = single_release,
393 };
394
395 # define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
396 # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
397 #else /* !CONFIG_SCHEDSTATS */
398 # define schedstat_inc(rq, field) do { } while (0)
399 # define schedstat_add(rq, field, amt) do { } while (0)
400 #endif
401
402 /*
403 * rq_lock - lock a given runqueue and disable interrupts.
404 */
405 static inline runqueue_t *this_rq_lock(void)
406 __acquires(rq->lock)
407 {
408 runqueue_t *rq;
409
410 local_irq_disable();
411 rq = this_rq();
412 spin_lock(&rq->lock);
413
414 return rq;
415 }
416
417 #ifdef CONFIG_SCHED_SMT
418 static int cpu_and_siblings_are_idle(int cpu)
419 {
420 int sib;
421 for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
422 if (idle_cpu(sib))
423 continue;
424 return 0;
425 }
426
427 return 1;
428 }
429 #else
430 #define cpu_and_siblings_are_idle(A) idle_cpu(A)
431 #endif
432
433 #ifdef CONFIG_SCHEDSTATS
434 /*
435 * Called when a process is dequeued from the active array and given
436 * the cpu. We should note that with the exception of interactive
437 * tasks, the expired queue will become the active queue after the active
438 * queue is empty, without explicitly dequeuing and requeuing tasks in the
439 * expired queue. (Interactive tasks may be requeued directly to the
440 * active queue, thus delaying tasks in the expired queue from running;
441 * see scheduler_tick()).
442 *
443 * This function is only called from sched_info_arrive(), rather than
444 * dequeue_task(). Even though a task may be queued and dequeued multiple
445 * times as it is shuffled about, we're really interested in knowing how
446 * long it was from the *first* time it was queued to the time that it
447 * finally hit a cpu.
448 */
449 static inline void sched_info_dequeued(task_t *t)
450 {
451 t->sched_info.last_queued = 0;
452 }
453
454 /*
455 * Called when a task finally hits the cpu. We can now calculate how
456 * long it was waiting to run. We also note when it began so that we
457 * can keep stats on how long its timeslice is.
458 */
459 static inline void sched_info_arrive(task_t *t)
460 {
461 unsigned long now = jiffies, diff = 0;
462 struct runqueue *rq = task_rq(t);
463
464 if (t->sched_info.last_queued)
465 diff = now - t->sched_info.last_queued;
466 sched_info_dequeued(t);
467 t->sched_info.run_delay += diff;
468 t->sched_info.last_arrival = now;
469 t->sched_info.pcnt++;
470
471 if (!rq)
472 return;
473
474 rq->rq_sched_info.run_delay += diff;
475 rq->rq_sched_info.pcnt++;
476 }
477
478 /*
479 * Called when a process is queued into either the active or expired
480 * array. The time is noted and later used to determine how long the
481 * task had to wait to reach the cpu. Since the expired queue will
482 * become the active queue after active queue is empty, without dequeuing
483 * and requeuing any tasks, we are interested in queuing to either. It
484 * is unusual but not impossible for tasks to be dequeued and immediately
485 * requeued in the same or another array: this can happen in sched_yield(),
486 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
487 * to runqueue.
488 *
489 * This function is only called from enqueue_task(), but also only updates
490 * the timestamp if it is not already set. It's assumed that
491 * sched_info_dequeued() will clear that stamp when appropriate.
492 */
493 static inline void sched_info_queued(task_t *t)
494 {
495 if (!t->sched_info.last_queued)
496 t->sched_info.last_queued = jiffies;
497 }
498
499 /*
500 * Called when a process ceases being the active-running process, either
501 * voluntarily or involuntarily. Now we can calculate how long we ran.
502 */
503 static inline void sched_info_depart(task_t *t)
504 {
505 struct runqueue *rq = task_rq(t);
506 unsigned long diff = jiffies - t->sched_info.last_arrival;
507
508 t->sched_info.cpu_time += diff;
509
510 if (rq)
511 rq->rq_sched_info.cpu_time += diff;
512 }
513
514 /*
515 * Called when tasks are switched involuntarily due, typically, to expiring
516 * their time slice. (This may also be called when switching to or from
517 * the idle task.) We are only called when prev != next.
518 */
519 static inline void sched_info_switch(task_t *prev, task_t *next)
520 {
521 struct runqueue *rq = task_rq(prev);
522
523 /*
524 * prev now departs the cpu. It's not interesting to record
525 * stats about how efficient we were at scheduling the idle
526 * process, however.
527 */
528 if (prev != rq->idle)
529 sched_info_depart(prev);
530
531 if (next != rq->idle)
532 sched_info_arrive(next);
533 }
534 #else
535 #define sched_info_queued(t) do { } while (0)
536 #define sched_info_switch(t, next) do { } while (0)
537 #endif /* CONFIG_SCHEDSTATS */
538
539 /*
540 * Adding/removing a task to/from a priority array:
541 */
542 static void dequeue_task(struct task_struct *p, prio_array_t *array)
543 {
544 array->nr_active--;
545 list_del(&p->run_list);
546 if (list_empty(array->queue + p->prio))
547 __clear_bit(p->prio, array->bitmap);
548 }
549
550 static void enqueue_task(struct task_struct *p, prio_array_t *array)
551 {
552 sched_info_queued(p);
553 list_add_tail(&p->run_list, array->queue + p->prio);
554 __set_bit(p->prio, array->bitmap);
555 array->nr_active++;
556 p->array = array;
557 }
558
559 /*
560 * Put task to the end of the run list without the overhead of dequeue
561 * followed by enqueue.
562 */
563 static void requeue_task(struct task_struct *p, prio_array_t *array)
564 {
565 list_move_tail(&p->run_list, array->queue + p->prio);
566 }
567
568 static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
569 {
570 list_add(&p->run_list, array->queue + p->prio);
571 __set_bit(p->prio, array->bitmap);
572 array->nr_active++;
573 p->array = array;
574 }
575
576 /*
577 * effective_prio - return the priority that is based on the static
578 * priority but is modified by bonuses/penalties.
579 *
580 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
581 * into the -5 ... 0 ... +5 bonus/penalty range.
582 *
583 * We use 25% of the full 0...39 priority range so that:
584 *
585 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
586 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
587 *
588 * Both properties are important to certain workloads.
589 */
590 static int effective_prio(task_t *p)
591 {
592 int bonus, prio;
593
594 if (rt_task(p))
595 return p->prio;
596
597 bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
598
599 prio = p->static_prio - bonus;
600 if (prio < MAX_RT_PRIO)
601 prio = MAX_RT_PRIO;
602 if (prio > MAX_PRIO-1)
603 prio = MAX_PRIO-1;
604 return prio;
605 }
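/*
 * Example: with MAX_BONUS == 10 the bonus computed above lies in
 * [-5, +5]. A nice 0 task (static_prio 120) with a fully saturated
 * sleep_avg runs at prio 115, while the same task as a pure CPU hog
 * (sleep_avg == 0) decays to prio 125.
 */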
606
607 /*
608 * __activate_task - move a task to the runqueue.
609 */
610 static inline void __activate_task(task_t *p, runqueue_t *rq)
611 {
612 enqueue_task(p, rq->active);
613 rq->nr_running++;
614 }
615
616 /*
617 * __activate_idle_task - move idle task to the _front_ of runqueue.
618 */
619 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
620 {
621 enqueue_task_head(p, rq->active);
622 rq->nr_running++;
623 }
624
625 static void recalc_task_prio(task_t *p, unsigned long long now)
626 {
627 /* Caller must always ensure 'now >= p->timestamp' */
628 unsigned long long __sleep_time = now - p->timestamp;
629 unsigned long sleep_time;
630
631 if (__sleep_time > NS_MAX_SLEEP_AVG)
632 sleep_time = NS_MAX_SLEEP_AVG;
633 else
634 sleep_time = (unsigned long)__sleep_time;
635
636 if (likely(sleep_time > 0)) {
637 /*
638 * User tasks that sleep a long time are categorised as
639 * idle and will get just interactive status to stay active &
640 * prevent them suddenly becoming cpu hogs and starving
641 * other processes.
642 */
643 if (p->mm && p->activated != -1 &&
644 sleep_time > INTERACTIVE_SLEEP(p)) {
645 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
646 DEF_TIMESLICE);
647 } else {
648 /*
649 * The lower the sleep avg a task has the more
650 * rapidly it will rise with sleep time.
651 */
652 sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
653
654 /*
655 * Tasks waking from uninterruptible sleep are
656 * limited in their sleep_avg rise as they
657 * are likely to be waiting on I/O
658 */
659 if (p->activated == -1 && p->mm) {
660 if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
661 sleep_time = 0;
662 else if (p->sleep_avg + sleep_time >=
663 INTERACTIVE_SLEEP(p)) {
664 p->sleep_avg = INTERACTIVE_SLEEP(p);
665 sleep_time = 0;
666 }
667 }
668
669 /*
670 * This code gives a bonus to interactive tasks.
671 *
672 * The boost works by updating the 'average sleep time'
673 * value here, based on ->timestamp. The more time a
674 * task spends sleeping, the higher the average gets -
675 * and the higher the priority boost gets as well.
676 */
677 p->sleep_avg += sleep_time;
678
679 if (p->sleep_avg > NS_MAX_SLEEP_AVG)
680 p->sleep_avg = NS_MAX_SLEEP_AVG;
681 }
682 }
683
684 p->prio = effective_prio(p);
685 }
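/*
 * Concrete effect of the uninterruptible clamp above: a nice 0 task
 * waking from disk sleep (p->activated == -1) can grow its sleep_avg
 * only up to INTERACTIVE_SLEEP(p), about 799 jiffies with the default
 * numbers, so an I/O-bound hog cannot ride the bonus past the
 * just-interactive level.
 */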
686
687 /*
688 * activate_task - move a task to the runqueue and do priority recalculation
689 *
690 * Update all the scheduling statistics stuff. (sleep average
691 * calculation, priority modifiers, etc.)
692 */
693 static void activate_task(task_t *p, runqueue_t *rq, int local)
694 {
695 unsigned long long now;
696
697 now = sched_clock();
698 #ifdef CONFIG_SMP
699 if (!local) {
700 /* Compensate for drifting sched_clock */
701 runqueue_t *this_rq = this_rq();
702 now = (now - this_rq->timestamp_last_tick)
703 + rq->timestamp_last_tick;
704 }
705 #endif
706
707 recalc_task_prio(p, now);
708
709 /*
710 * This checks to make sure it's not an uninterruptible task
711 * that is now waking up.
712 */
713 if (!p->activated) {
714 /*
715 * Tasks which were woken up by interrupts (ie. hw events)
716 * are most likely of interactive nature. So we give them
717 * the credit of extending their sleep time to the period
718 * of time they spend on the runqueue, waiting for execution
719 * on a CPU, first time around:
720 */
721 if (in_interrupt())
722 p->activated = 2;
723 else {
724 /*
725 * Normal first-time wakeups get a credit too for
726 * on-runqueue time, but it will be weighted down:
727 */
728 p->activated = 1;
729 }
730 }
731 p->timestamp = now;
732
733 __activate_task(p, rq);
734 }
735
736 /*
737 * deactivate_task - remove a task from the runqueue.
738 */
739 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
740 {
741 rq->nr_running--;
742 dequeue_task(p, p->array);
743 p->array = NULL;
744 }
745
746 /*
747 * resched_task - mark a task 'to be rescheduled now'.
748 *
749 * On UP this means the setting of the need_resched flag, on SMP it
750 * might also involve a cross-CPU call to trigger the scheduler on
751 * the target CPU.
752 */
753 #ifdef CONFIG_SMP
754 static void resched_task(task_t *p)
755 {
756 int need_resched, nrpolling;
757
758 assert_spin_locked(&task_rq(p)->lock);
759
760 /* minimise the chance of sending an interrupt to poll_idle() */
761 nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
762 need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
763 nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
764
765 if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
766 smp_send_reschedule(task_cpu(p));
767 }
768 #else
769 static inline void resched_task(task_t *p)
770 {
771 set_tsk_need_resched(p);
772 }
773 #endif
774
775 /**
776 * task_curr - is this task currently executing on a CPU?
777 * @p: the task in question.
778 */
779 inline int task_curr(const task_t *p)
780 {
781 return cpu_curr(task_cpu(p)) == p;
782 }
783
784 #ifdef CONFIG_SMP
785 enum request_type {
786 REQ_MOVE_TASK,
787 REQ_SET_DOMAIN,
788 };
789
790 typedef struct {
791 struct list_head list;
792 enum request_type type;
793
794 /* For REQ_MOVE_TASK */
795 task_t *task;
796 int dest_cpu;
797
798 /* For REQ_SET_DOMAIN */
799 struct sched_domain *sd;
800
801 struct completion done;
802 } migration_req_t;
803
804 /*
805 * The task's runqueue lock must be held.
806 * Returns true if you have to wait for migration thread.
807 */
808 static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
809 {
810 runqueue_t *rq = task_rq(p);
811
812 /*
813 * If the task is not on a runqueue (and not running), then
814 * it is sufficient to simply update the task's cpu field.
815 */
816 if (!p->array && !task_running(rq, p)) {
817 set_task_cpu(p, dest_cpu);
818 return 0;
819 }
820
821 init_completion(&req->done);
822 req->type = REQ_MOVE_TASK;
823 req->task = p;
824 req->dest_cpu = dest_cpu;
825 list_add(&req->list, &rq->migration_queue);
826 return 1;
827 }
828
829 /*
830 * wait_task_inactive - wait for a thread to unschedule.
831 *
832 * The caller must ensure that the task *will* unschedule sometime soon,
833 * else this function might spin for a *long* time. This function can't
834 * be called with interrupts off, or it may introduce deadlock with
835 * smp_call_function() if an IPI is sent by the same process we are
836 * waiting to become inactive.
837 */
838 void wait_task_inactive(task_t * p)
839 {
840 unsigned long flags;
841 runqueue_t *rq;
842 int preempted;
843
844 repeat:
845 rq = task_rq_lock(p, &flags);
846 /* Must be off runqueue entirely, not preempted. */
847 if (unlikely(p->array || task_running(rq, p))) {
848 /* If it's preempted, we yield. It could be a while. */
849 preempted = !task_running(rq, p);
850 task_rq_unlock(rq, &flags);
851 cpu_relax();
852 if (preempted)
853 yield();
854 goto repeat;
855 }
856 task_rq_unlock(rq, &flags);
857 }
858
859 /***
860 * kick_process - kick a running thread to enter/exit the kernel
861 * @p: the to-be-kicked thread
862 *
863 * Cause a process which is running on another CPU to enter
864 * kernel-mode, without any delay. (to get signals handled.)
865 *
866 * NOTE: this function doesn't have to take the runqueue lock,
867 * because all it wants to ensure is that the remote task enters
868 * the kernel. If the IPI races and the task has been migrated
869 * to another CPU then no harm is done and the purpose has been
870 * achieved as well.
871 */
872 void kick_process(task_t *p)
873 {
874 int cpu;
875
876 preempt_disable();
877 cpu = task_cpu(p);
878 if ((cpu != smp_processor_id()) && task_curr(p))
879 smp_send_reschedule(cpu);
880 preempt_enable();
881 }
882
883 /*
884 * Return a low guess at the load of a migration-source cpu.
885 *
886 * We want to under-estimate the load of migration sources, to
887 * balance conservatively.
888 */
889 static inline unsigned long source_load(int cpu, int type)
890 {
891 runqueue_t *rq = cpu_rq(cpu);
892 unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
893 if (type == 0)
894 return load_now;
895
896 return min(rq->cpu_load[type-1], load_now);
897 }
898
899 /*
900 * Return a high guess at the load of a migration-target cpu
901 */
902 static inline unsigned long target_load(int cpu, int type)
903 {
904 runqueue_t *rq = cpu_rq(cpu);
905 unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
906 if (type == 0)
907 return load_now;
908
909 return max(rq->cpu_load[type-1], load_now);
910 }
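/*
 * Example: with SCHED_LOAD_SCALE (128 in this kernel) and 3 runnable
 * tasks, load_now == 384. If the decayed history cpu_load[type-1] is
 * 256, source_load() reports min(256, 384) == 256 (a low guess for a
 * queue we might pull from) while target_load() reports
 * max(256, 384) == 384 (a high guess for a queue we might push to),
 * biasing both decisions against unnecessary migration.
 */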
911
912 #endif
913
914 /*
915 * wake_idle() will wake a task on an idle cpu if task->cpu is
916 * not idle and an idle cpu is available. The span of cpus to
917 * search starts with cpus closest then further out as needed,
918 * so we always favor a closer, idle cpu.
919 *
920 * Returns the CPU we should wake onto.
921 */
922 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
923 static int wake_idle(int cpu, task_t *p)
924 {
925 cpumask_t tmp;
926 struct sched_domain *sd;
927 int i;
928
929 if (idle_cpu(cpu))
930 return cpu;
931
932 for_each_domain(cpu, sd) {
933 if (sd->flags & SD_WAKE_IDLE) {
934 cpus_and(tmp, sd->span, p->cpus_allowed);
935 for_each_cpu_mask(i, tmp) {
936 if (idle_cpu(i))
937 return i;
938 }
939 }
940 else
941 break;
942 }
943 return cpu;
944 }
945 #else
946 static inline int wake_idle(int cpu, task_t *p)
947 {
948 return cpu;
949 }
950 #endif
951
952 /***
953 * try_to_wake_up - wake up a thread
954 * @p: the to-be-woken-up thread
955 * @state: the mask of task states that can be woken
956 * @sync: do a synchronous wakeup?
957 *
958 * Put it on the run-queue if it's not already there. The "current"
959 * thread is always on the run-queue (except when the actual
960 * re-schedule is in progress), and as such you're allowed to do
961 * the simpler "current->state = TASK_RUNNING" to mark yourself
962 * runnable without the overhead of this.
963 *
964 * returns failure only if the task is already active.
965 */
966 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
967 {
968 int cpu, this_cpu, success = 0;
969 unsigned long flags;
970 long old_state;
971 runqueue_t *rq;
972 #ifdef CONFIG_SMP
973 unsigned long load, this_load;
974 struct sched_domain *sd, *this_sd = NULL;
975 int new_cpu;
976 #endif
977
978 rq = task_rq_lock(p, &flags);
979 old_state = p->state;
980 if (!(old_state & state))
981 goto out;
982
983 if (p->array)
984 goto out_running;
985
986 cpu = task_cpu(p);
987 this_cpu = smp_processor_id();
988
989 #ifdef CONFIG_SMP
990 if (unlikely(task_running(rq, p)))
991 goto out_activate;
992
993 new_cpu = cpu;
994
995 schedstat_inc(rq, ttwu_cnt);
996 if (cpu == this_cpu) {
997 schedstat_inc(rq, ttwu_local);
998 goto out_set_cpu;
999 }
1000
1001 for_each_domain(this_cpu, sd) {
1002 if (cpu_isset(cpu, sd->span)) {
1003 schedstat_inc(sd, ttwu_wake_remote);
1004 this_sd = sd;
1005 break;
1006 }
1007 }
1008
1009 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1010 goto out_set_cpu;
1011
1012 /*
1013 * Check for affine wakeup and passive balancing possibilities.
1014 */
1015 if (this_sd) {
1016 int idx = this_sd->wake_idx;
1017 unsigned int imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
1018
1019 load = source_load(cpu, idx);
1020 this_load = target_load(this_cpu, idx);
1021
1022 /*
1023 * If sync wakeup then subtract the (maximum possible) effect of
1024 * the currently running task from the load of the current CPU:
1025 */
1026 if (sync)
1027 this_load -= SCHED_LOAD_SCALE;
1028
1029 /* Don't pull the task off an idle CPU to a busy one */
1030 if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
1031 goto out_set_cpu;
1032
1033 new_cpu = this_cpu; /* Wake to this CPU if we can */
1034
1035 if ((this_sd->flags & SD_WAKE_AFFINE) &&
1036 !task_hot(p, rq->timestamp_last_tick, this_sd)) {
1037 /*
1038 * This domain has SD_WAKE_AFFINE and p is cache cold
1039 * in this domain.
1040 */
1041 schedstat_inc(this_sd, ttwu_move_affine);
1042 goto out_set_cpu;
1043 } else if ((this_sd->flags & SD_WAKE_BALANCE) &&
1044 imbalance*this_load <= 100*load) {
1045 /*
1046 * This domain has SD_WAKE_BALANCE and there is
1047 * an imbalance.
1048 */
1049 schedstat_inc(this_sd, ttwu_move_balance);
1050 goto out_set_cpu;
1051 }
1052 }
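/*
 * Numeric sketch of the SD_WAKE_BALANCE test above, assuming the
 * common imbalance_pct of 125 (so imbalance == 112): the wakeup is
 * pulled to this CPU only when 112 * this_load <= 100 * load, i.e.
 * when the waking task's old CPU is at least ~12% more loaded than
 * we are.
 */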
1053
1054 new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
1055 out_set_cpu:
1056 new_cpu = wake_idle(new_cpu, p);
1057 if (new_cpu != cpu) {
1058 set_task_cpu(p, new_cpu);
1059 task_rq_unlock(rq, &flags);
1060 /* might preempt at this point */
1061 rq = task_rq_lock(p, &flags);
1062 old_state = p->state;
1063 if (!(old_state & state))
1064 goto out;
1065 if (p->array)
1066 goto out_running;
1067
1068 this_cpu = smp_processor_id();
1069 cpu = task_cpu(p);
1070 }
1071
1072 out_activate:
1073 #endif /* CONFIG_SMP */
1074 if (old_state == TASK_UNINTERRUPTIBLE) {
1075 rq->nr_uninterruptible--;
1076 /*
1077 * Tasks on involuntary sleep don't earn
1078 * sleep_avg beyond just interactive state.
1079 */
1080 p->activated = -1;
1081 }
1082
1083 /*
1084 * Sync wakeups (i.e. those types of wakeups where the waker
1085 * has indicated that it will leave the CPU in short order)
1086 * don't trigger a preemption, if the woken up task will run on
1087 * this cpu. (in this case the 'I will reschedule' promise of
1088 * the waker guarantees that the freshly woken up task is going
1089 * to be considered on this CPU.)
1090 */
1091 activate_task(p, rq, cpu == this_cpu);
1092 if (!sync || cpu != this_cpu) {
1093 if (TASK_PREEMPTS_CURR(p, rq))
1094 resched_task(rq->curr);
1095 }
1096 success = 1;
1097
1098 out_running:
1099 p->state = TASK_RUNNING;
1100 out:
1101 task_rq_unlock(rq, &flags);
1102
1103 return success;
1104 }
1105
1106 int fastcall wake_up_process(task_t * p)
1107 {
1108 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1109 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1110 }
1111
1112 EXPORT_SYMBOL(wake_up_process);
1113
1114 int fastcall wake_up_state(task_t *p, unsigned int state)
1115 {
1116 return try_to_wake_up(p, state, 0);
1117 }
1118
1119 #ifdef CONFIG_SMP
1120 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1121 struct sched_domain *sd);
1122 #endif
1123
1124 /*
1125 * Perform scheduler related setup for a newly forked process p.
1126 * p is forked by current.
1127 */
1128 void fastcall sched_fork(task_t *p)
1129 {
1130 /*
1131 * We mark the process as running here, but have not actually
1132 * inserted it onto the runqueue yet. This guarantees that
1133 * nobody will actually run it, and a signal or other external
1134 * event cannot wake it up and insert it on the runqueue either.
1135 */
1136 p->state = TASK_RUNNING;
1137 INIT_LIST_HEAD(&p->run_list);
1138 p->array = NULL;
1139 spin_lock_init(&p->switch_lock);
1140 #ifdef CONFIG_SCHEDSTATS
1141 memset(&p->sched_info, 0, sizeof(p->sched_info));
1142 #endif
1143 #ifdef CONFIG_PREEMPT
1144 /*
1145 * During context-switch we hold precisely one spinlock, which
1146 * schedule_tail drops. (in the common case it's this_rq()->lock,
1147 * but it also can be p->switch_lock.) So we compensate with a count
1148 * of 1. Also, we want to start with kernel preemption disabled.
1149 */
1150 p->thread_info->preempt_count = 1;
1151 #endif
1152 /*
1153 * Share the timeslice between parent and child, thus the
1154 * total amount of pending timeslices in the system doesn't change,
1155 * resulting in more scheduling fairness.
1156 */
1157 local_irq_disable();
1158 p->time_slice = (current->time_slice + 1) >> 1;
1159 /*
1160 * The remainder of the first timeslice might be recovered by
1161 * the parent if the child exits early enough.
1162 */
1163 p->first_time_slice = 1;
1164 current->time_slice >>= 1;
1165 p->timestamp = sched_clock();
1166 if (unlikely(!current->time_slice)) {
1167 /*
1168 * This case is rare, it happens when the parent has only
1169 * a single jiffy left from its timeslice. Taking the
1170 * runqueue lock is not a problem.
1171 */
1172 current->time_slice = 1;
1173 preempt_disable();
1174 scheduler_tick();
1175 local_irq_enable();
1176 preempt_enable();
1177 } else
1178 local_irq_enable();
1179 }
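/*
 * Timeslice-split example: a parent holding 100 jiffies forks; the
 * child receives (100 + 1) >> 1 == 50 and the parent keeps 50, so no
 * new timeslice is minted by forking. If the parent has only 1 jiffy
 * left, the child gets that 1 jiffy, the parent drops to 0, and the
 * unlikely branch above hands the parent a single jiffy and lets
 * scheduler_tick() expire it.
 */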
1180
1181 /*
1182 * wake_up_new_task - wake up a newly created task for the first time.
1183 *
1184 * This function will do some initial scheduler statistics housekeeping
1185 * that must be done for every newly created context, then puts the task
1186 * on the runqueue and wakes it.
1187 */
1188 void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
1189 {
1190 unsigned long flags;
1191 int this_cpu, cpu;
1192 runqueue_t *rq, *this_rq;
1193
1194 rq = task_rq_lock(p, &flags);
1195 cpu = task_cpu(p);
1196 this_cpu = smp_processor_id();
1197
1198 BUG_ON(p->state != TASK_RUNNING);
1199
1200 /*
1201 * We decrease the sleep average of forking parents
1202 * and children as well, to keep max-interactive tasks
1203 * from forking tasks that are max-interactive. The parent
1204 * (current) is done further down, under its lock.
1205 */
1206 p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
1207 CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1208
1209 p->prio = effective_prio(p);
1210
1211 if (likely(cpu == this_cpu)) {
1212 if (!(clone_flags & CLONE_VM)) {
1213 /*
1214 * The VM isn't cloned, so we're in a good position to
1215 * do child-runs-first in anticipation of an exec. This
1216 * usually avoids a lot of COW overhead.
1217 */
1218 if (unlikely(!current->array))
1219 __activate_task(p, rq);
1220 else {
1221 p->prio = current->prio;
1222 list_add_tail(&p->run_list, &current->run_list);
1223 p->array = current->array;
1224 p->array->nr_active++;
1225 rq->nr_running++;
1226 }
1227 set_need_resched();
1228 } else
1229 /* Run child last */
1230 __activate_task(p, rq);
1231 /*
1232 * We skip the following code due to cpu == this_cpu
1233 *
1234 * task_rq_unlock(rq, &flags);
1235 * this_rq = task_rq_lock(current, &flags);
1236 */
1237 this_rq = rq;
1238 } else {
1239 this_rq = cpu_rq(this_cpu);
1240
1241 /*
1242 * Not the local CPU - must adjust timestamp. This should
1243 * get optimised away in the !CONFIG_SMP case.
1244 */
1245 p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
1246 + rq->timestamp_last_tick;
1247 __activate_task(p, rq);
1248 if (TASK_PREEMPTS_CURR(p, rq))
1249 resched_task(rq->curr);
1250
1251 /*
1252 * Parent and child are on different CPUs, now get the
1253 * parent runqueue to update the parent's ->sleep_avg:
1254 */
1255 task_rq_unlock(rq, &flags);
1256 this_rq = task_rq_lock(current, &flags);
1257 }
1258 current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
1259 PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1260 task_rq_unlock(this_rq, &flags);
1261 }
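/*
 * Fork-penalty example, using MAX_BONUS == 10 and MAX_SLEEP_AVG ==
 * 1000 jiffies from the defaults: a maximally interactive parent
 * (CURRENT_BONUS == 10) forks a child whose sleep_avg becomes
 * 10 * 95 / 100 * 1000 / 10 == 900 jiffies (note the integer
 * truncation of 10 * 95 / 100 to 9), while PARENT_PENALTY == 100
 * leaves the parent's own average unchanged.
 */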
1262
1263 /*
1264 * Potentially available exiting-child timeslices are
1265 * retrieved here - this way the parent does not get
1266 * penalized for creating too many threads.
1267 *
1268 * (this cannot be used to 'generate' timeslices
1269 * artificially, because any timeslice recovered here
1270 * was given away by the parent in the first place.)
1271 */
1272 void fastcall sched_exit(task_t * p)
1273 {
1274 unsigned long flags;
1275 runqueue_t *rq;
1276
1277 /*
1278 * If the child was a (relative-) CPU hog then decrease
1279 * the sleep_avg of the parent as well.
1280 */
1281 rq = task_rq_lock(p->parent, &flags);
1282 if (p->first_time_slice) {
1283 p->parent->time_slice += p->time_slice;
1284 if (unlikely(p->parent->time_slice > task_timeslice(p)))
1285 p->parent->time_slice = task_timeslice(p);
1286 }
1287 if (p->sleep_avg < p->parent->sleep_avg)
1288 p->parent->sleep_avg = p->parent->sleep_avg /
1289 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
1290 (EXIT_WEIGHT + 1);
1291 task_rq_unlock(rq, &flags);
1292 }
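/*
 * With EXIT_WEIGHT == 3 the blend above is
 *
 *   parent->sleep_avg = 3/4 * parent->sleep_avg + 1/4 * child->sleep_avg
 *
 * so each exiting CPU-hog child drags an interactive parent a quarter
 * of the way toward the child's (lower) average.
 */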
1293
1294 /**
1295 * finish_task_switch - clean up after a task-switch
1296 * @prev: the thread we just switched away from.
1297 *
1298 * We enter this with the runqueue still locked, and finish_arch_switch()
1299 * will unlock it along with doing any other architecture-specific cleanup
1300 * actions.
1301 *
1302 * Note that we may have delayed dropping an mm in context_switch(). If
1303 * so, we finish that here outside of the runqueue lock. (Doing it
1304 * with the lock held can cause deadlocks; see schedule() for
1305 * details.)
1306 */
1307 static inline void finish_task_switch(task_t *prev)
1308 __releases(rq->lock)
1309 {
1310 runqueue_t *rq = this_rq();
1311 struct mm_struct *mm = rq->prev_mm;
1312 unsigned long prev_task_flags;
1313
1314 rq->prev_mm = NULL;
1315
1316 /*
1317 * A task struct has one reference for the use as "current".
1318 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
1319 * calls schedule one last time. The schedule call will never return,
1320 * and the scheduled task must drop that reference.
1321 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
1322 * still held, otherwise prev could be scheduled on another cpu, die
1323 * there before we look at prev->state, and then the reference would
1324 * be dropped twice.
1325 * Manfred Spraul <manfred@colorfullife.com>
1326 */
1327 prev_task_flags = prev->flags;
1328 finish_arch_switch(rq, prev);
1329 if (mm)
1330 mmdrop(mm);
1331 if (unlikely(prev_task_flags & PF_DEAD))
1332 put_task_struct(prev);
1333 }
1334
1335 /**
1336 * schedule_tail - first thing a freshly forked thread must call.
1337 * @prev: the thread we just switched away from.
1338 */
1339 asmlinkage void schedule_tail(task_t *prev)
1340 __releases(rq->lock)
1341 {
1342 finish_task_switch(prev);
1343
1344 if (current->set_child_tid)
1345 put_user(current->pid, current->set_child_tid);
1346 }
1347
1348 /*
1349 * context_switch - switch to the new MM and the new
1350 * thread's register state.
1351 */
1352 static inline
1353 task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
1354 {
1355 struct mm_struct *mm = next->mm;
1356 struct mm_struct *oldmm = prev->active_mm;
1357
1358 if (unlikely(!mm)) {
1359 next->active_mm = oldmm;
1360 atomic_inc(&oldmm->mm_count);
1361 enter_lazy_tlb(oldmm, next);
1362 } else
1363 switch_mm(oldmm, mm, next);
1364
1365 if (unlikely(!prev->mm)) {
1366 prev->active_mm = NULL;
1367 WARN_ON(rq->prev_mm);
1368 rq->prev_mm = oldmm;
1369 }
1370
1371 /* Here we just switch the register state and the stack. */
1372 switch_to(prev, next, prev);
1373
1374 return prev;
1375 }
1376
1377 /*
1378 * nr_running, nr_uninterruptible and nr_context_switches:
1379 *
1380 * externally visible scheduler statistics: current number of runnable
1381 * threads, current number of uninterruptible-sleeping threads, total
1382 * number of context switches performed since bootup.
1383 */
1384 unsigned long nr_running(void)
1385 {
1386 unsigned long i, sum = 0;
1387
1388 for_each_online_cpu(i)
1389 sum += cpu_rq(i)->nr_running;
1390
1391 return sum;
1392 }
1393
1394 unsigned long nr_uninterruptible(void)
1395 {
1396 unsigned long i, sum = 0;
1397
1398 for_each_cpu(i)
1399 sum += cpu_rq(i)->nr_uninterruptible;
1400
1401 /*
1402 * Since we read the counters lockless, it might be slightly
1403 * inaccurate. Do not allow it to go below zero though:
1404 */
1405 if (unlikely((long)sum < 0))
1406 sum = 0;
1407
1408 return sum;
1409 }
1410
1411 unsigned long long nr_context_switches(void)
1412 {
1413 unsigned long long i, sum = 0;
1414
1415 for_each_cpu(i)
1416 sum += cpu_rq(i)->nr_switches;
1417
1418 return sum;
1419 }
1420
1421 unsigned long nr_iowait(void)
1422 {
1423 unsigned long i, sum = 0;
1424
1425 for_each_cpu(i)
1426 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1427
1428 return sum;
1429 }
1430
1431 #ifdef CONFIG_SMP
1432
1433 /*
1434 * double_rq_lock - safely lock two runqueues
1435 *
1436 * Note this does not disable interrupts like task_rq_lock,
1437 * you need to do so manually before calling.
1438 */
1439 static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1440 __acquires(rq1->lock)
1441 __acquires(rq2->lock)
1442 {
1443 if (rq1 == rq2) {
1444 spin_lock(&rq1->lock);
1445 __acquire(rq2->lock); /* Fake it out ;) */
1446 } else {
1447 if (rq1 < rq2) {
1448 spin_lock(&rq1->lock);
1449 spin_lock(&rq2->lock);
1450 } else {
1451 spin_lock(&rq2->lock);
1452 spin_lock(&rq1->lock);
1453 }
1454 }
1455 }
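/*
 * Example of why the address ordering matters: if CPU0 did
 * lock(A); lock(B) while CPU1 did lock(B); lock(A), each could hold
 * one lock and spin forever on the other (ABBA deadlock). Ordering
 * the acquisitions by ascending runqueue address makes both CPUs
 * take A before B, so one simply waits for the other.
 */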
1456
1457 /*
1458 * double_rq_unlock - safely unlock two runqueues
1459 *
1460 * Note this does not restore interrupts like task_rq_unlock,
1461 * you need to do so manually after calling.
1462 */
1463 static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
1464 __releases(rq1->lock)
1465 __releases(rq2->lock)
1466 {
1467 spin_unlock(&rq1->lock);
1468 if (rq1 != rq2)
1469 spin_unlock(&rq2->lock);
1470 else
1471 __release(rq2->lock);
1472 }
1473
1474 /*
1475 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1476 */
1477 static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1478 __releases(this_rq->lock)
1479 __acquires(busiest->lock)
1480 __acquires(this_rq->lock)
1481 {
1482 if (unlikely(!spin_trylock(&busiest->lock))) {
1483 if (busiest < this_rq) {
1484 spin_unlock(&this_rq->lock);
1485 spin_lock(&busiest->lock);
1486 spin_lock(&this_rq->lock);
1487 } else
1488 spin_lock(&busiest->lock);
1489 }
1490 }
1491
1492 /*
1493 * find_idlest_cpu - find the least busy runqueue.
1494 */
1495 static int find_idlest_cpu(struct task_struct *p, int this_cpu,
1496 struct sched_domain *sd)
1497 {
1498 unsigned long load, min_load, this_load;
1499 int i, min_cpu;
1500 cpumask_t mask;
1501
1502 min_cpu = UINT_MAX;
1503 min_load = ULONG_MAX;
1504
1505 cpus_and(mask, sd->span, p->cpus_allowed);
1506
1507 for_each_cpu_mask(i, mask) {
1508 load = target_load(i, sd->wake_idx);
1509
1510 if (load < min_load) {
1511 min_cpu = i;
1512 min_load = load;
1513
1514 /* break out early on an idle CPU: */
1515 if (!min_load)
1516 break;
1517 }
1518 }
1519
1520 /* add +1 to account for the new task */
1521 this_load = source_load(this_cpu, sd->wake_idx) + SCHED_LOAD_SCALE;
1522
1523 /*
1524 * With the addition of the new task to the current
1525 * CPU, would there be an imbalance between this
1526 * CPU and the idlest CPU?
1527 *
1528 * Use half of the balancing threshold - new-context is
1529 * a good opportunity to balance.
1530 */
1531 if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100)
1532 return min_cpu;
1533
1534 return this_cpu;
1535 }
1536
1537 /*
1538 * If dest_cpu is allowed for this process, migrate the task to it.
1539 * This is accomplished by forcing the cpu_allowed mask to only
1540 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
1541 * the cpu_allowed mask is restored.
1542 */
1543 static void sched_migrate_task(task_t *p, int dest_cpu)
1544 {
1545 migration_req_t req;
1546 runqueue_t *rq;
1547 unsigned long flags;
1548
1549 rq = task_rq_lock(p, &flags);
1550 if (!cpu_isset(dest_cpu, p->cpus_allowed)
1551 || unlikely(cpu_is_offline(dest_cpu)))
1552 goto out;
1553
1554 /* force the process onto the specified CPU */
1555 if (migrate_task(p, dest_cpu, &req)) {
1556 /* Need to wait for migration thread (might exit: take ref). */
1557 struct task_struct *mt = rq->migration_thread;
1558 get_task_struct(mt);
1559 task_rq_unlock(rq, &flags);
1560 wake_up_process(mt);
1561 put_task_struct(mt);
1562 wait_for_completion(&req.done);
1563 return;
1564 }
1565 out:
1566 task_rq_unlock(rq, &flags);
1567 }
1568
1569 /*
1570 * sched_exec(): find the highest-level, exec-balance-capable
1571 * domain and try to migrate the task to the least loaded CPU.
1572 *
1573 * execve() is a valuable balancing opportunity, because at this point
1574 * the task has the smallest effective memory and cache footprint.
1575 */
1576 void sched_exec(void)
1577 {
1578 struct sched_domain *tmp, *sd = NULL;
1579 int new_cpu, this_cpu = get_cpu();
1580
1581 /* Prefer the current CPU if there's only this task running */
1582 if (this_rq()->nr_running <= 1)
1583 goto out;
1584
1585 for_each_domain(this_cpu, tmp)
1586 if (tmp->flags & SD_BALANCE_EXEC)
1587 sd = tmp;
1588
1589 if (sd) {
1590 schedstat_inc(sd, sbe_attempts);
1591 new_cpu = find_idlest_cpu(current, this_cpu, sd);
1592 if (new_cpu != this_cpu) {
1593 schedstat_inc(sd, sbe_pushed);
1594 put_cpu();
1595 sched_migrate_task(current, new_cpu);
1596 return;
1597 }
1598 }
1599 out:
1600 put_cpu();
1601 }
1602
1603 /*
1604 * pull_task - move a task from a remote runqueue to the local runqueue.
1605 * Both runqueues must be locked.
1606 */
1607 static inline
1608 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1609 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
1610 {
1611 dequeue_task(p, src_array);
1612 src_rq->nr_running--;
1613 set_task_cpu(p, this_cpu);
1614 this_rq->nr_running++;
1615 enqueue_task(p, this_array);
1616 p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
1617 + this_rq->timestamp_last_tick;
1618 /*
1619 * Note that idle threads have a prio of MAX_PRIO, so this test
1620 * is always true for them.
1621 */
1622 if (TASK_PREEMPTS_CURR(p, this_rq))
1623 resched_task(this_rq->curr);
1624 }
1625
1626 /*
1627 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1628 */
1629 static inline
1630 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
1631 struct sched_domain *sd, enum idle_type idle, int *all_pinned)
1632 {
1633 /*
1634 * We do not migrate tasks that are:
1635 * 1) running (obviously), or
1636 * 2) cannot be migrated to this CPU due to cpus_allowed, or
1637 * 3) are cache-hot on their current CPU.
1638 */
1639 if (!cpu_isset(this_cpu, p->cpus_allowed))
1640 return 0;
1641 *all_pinned = 0;
1642
1643 if (task_running(rq, p))
1644 return 0;
1645
1646 /*
1647 * Aggressive migration if:
1648 * 1) the [whole] cpu is idle, or
1649 * 2) too many balance attempts have failed.
1650 */
1651
1652 if (cpu_and_siblings_are_idle(this_cpu) || \
1653 sd->nr_balance_failed > sd->cache_nice_tries)
1654 return 1;
1655
1656 if (task_hot(p, rq->timestamp_last_tick, sd))
1657 return 0;
1658 return 1;
1659 }
1660
1661 /*
1662 * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
1663 * as part of a balancing operation within "domain". Returns the number of
1664 * tasks moved.
1665 *
1666 * Called with both runqueues locked.
1667 */
1668 static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
1669 unsigned long max_nr_move, struct sched_domain *sd,
1670 enum idle_type idle, int *all_pinned)
1671 {
1672 prio_array_t *array, *dst_array;
1673 struct list_head *head, *curr;
1674 int idx, pulled = 0, pinned = 0;
1675 task_t *tmp;
1676
1677 if (max_nr_move == 0)
1678 goto out;
1679
1680 pinned = 1;
1681
1682 /*
1683 * We first consider expired tasks. Those will likely not be
1684 * executed in the near future, and they are most likely to
1685 * be cache-cold, thus switching CPUs has the least effect
1686 * on them.
1687 */
1688 if (busiest->expired->nr_active) {
1689 array = busiest->expired;
1690 dst_array = this_rq->expired;
1691 } else {
1692 array = busiest->active;
1693 dst_array = this_rq->active;
1694 }
1695
1696 new_array:
1697 /* Start searching at priority 0: */
1698 idx = 0;
1699 skip_bitmap:
1700 if (!idx)
1701 idx = sched_find_first_bit(array->bitmap);
1702 else
1703 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1704 if (idx >= MAX_PRIO) {
1705 if (array == busiest->expired && busiest->active->nr_active) {
1706 array = busiest->active;
1707 dst_array = this_rq->active;
1708 goto new_array;
1709 }
1710 goto out;
1711 }
1712
1713 head = array->queue + idx;
1714 curr = head->prev;
1715 skip_queue:
1716 tmp = list_entry(curr, task_t, run_list);
1717
1718 curr = curr->prev;
1719
1720 if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
1721 if (curr != head)
1722 goto skip_queue;
1723 idx++;
1724 goto skip_bitmap;
1725 }
1726
1727 #ifdef CONFIG_SCHEDSTATS
1728 if (task_hot(tmp, busiest->timestamp_last_tick, sd))
1729 schedstat_inc(sd, lb_hot_gained[idle]);
1730 #endif
1731
1732 pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
1733 pulled++;
1734
1735 /* We only want to steal up to the prescribed number of tasks. */
1736 if (pulled < max_nr_move) {
1737 if (curr != head)
1738 goto skip_queue;
1739 idx++;
1740 goto skip_bitmap;
1741 }
1742 out:
1743 /*
1744 * Right now, this is the only place pull_task() is called,
1745 * so we can safely collect pull_task() stats here rather than
1746 * inside pull_task().
1747 */
1748 schedstat_add(sd, lb_gained[idle], pulled);
1749
1750 if (all_pinned)
1751 *all_pinned = pinned;
1752 return pulled;
1753 }
1754
1755 /*
1756 * find_busiest_group finds and returns the busiest CPU group within the
1757 * domain. It calculates and returns the number of tasks which should be
1758 * moved to restore balance via the imbalance parameter.
1759 */
1760 static struct sched_group *
1761 find_busiest_group(struct sched_domain *sd, int this_cpu,
1762 unsigned long *imbalance, enum idle_type idle)
1763 {
1764 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
1765 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
1766 int load_idx;
1767
1768 max_load = this_load = total_load = total_pwr = 0;
1769 if (idle == NOT_IDLE)
1770 load_idx = sd->busy_idx;
1771 else if (idle == NEWLY_IDLE)
1772 load_idx = sd->newidle_idx;
1773 else
1774 load_idx = sd->idle_idx;
1775
1776 do {
1777 unsigned long load;
1778 int local_group;
1779 int i;
1780
1781 local_group = cpu_isset(this_cpu, group->cpumask);
1782
1783 /* Tally up the load of all CPUs in the group */
1784 avg_load = 0;
1785
1786 for_each_cpu_mask(i, group->cpumask) {
1787 /* Bias balancing toward cpus of our domain */
1788 if (local_group)
1789 load = target_load(i, load_idx);
1790 else
1791 load = source_load(i, load_idx);
1792
1793 avg_load += load;
1794 }
1795
1796 total_load += avg_load;
1797 total_pwr += group->cpu_power;
1798
1799 /* Adjust by relative CPU power of the group */
1800 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1801
1802 if (local_group) {
1803 this_load = avg_load;
1804 this = group;
1805 goto nextgroup;
1806 } else if (avg_load > max_load) {
1807 max_load = avg_load;
1808 busiest = group;
1809 }
1810 nextgroup:
1811 group = group->next;
1812 } while (group != sd->groups);
1813
1814 if (!busiest || this_load >= max_load)
1815 goto out_balanced;
1816
1817 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
1818
1819 if (this_load >= avg_load ||
1820 100*max_load <= sd->imbalance_pct*this_load)
1821 goto out_balanced;
1822
1823 /*
1824 * We're trying to get all the cpus to the average_load, so we don't
1825 * want to push ourselves above the average load, nor do we wish to
1826 * reduce the max loaded cpu below the average load, as either of these
1827 * actions would just result in more rebalancing later, and ping-pong
1828 * tasks around. Thus we look for the minimum possible imbalance.
1829 * Negative imbalances (*we* are more loaded than anyone else) will
1830 * be counted as no imbalance for these purposes -- we can't fix that
1831 * by pulling tasks to us. Be careful of negative numbers as they'll
1832 * appear as very large values with unsigned longs.
1833 */
1834 /* How much load to actually move to equalise the imbalance */
1835 *imbalance = min((max_load - avg_load) * busiest->cpu_power,
1836 (avg_load - this_load) * this->cpu_power)
1837 / SCHED_LOAD_SCALE;
1838
1839 if (*imbalance < SCHED_LOAD_SCALE) {
1840 unsigned long pwr_now = 0, pwr_move = 0;
1841 unsigned long tmp;
1842
1843 if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
1844 *imbalance = 1;
1845 return busiest;
1846 }
1847
1848 /*
1849 * OK, we don't have enough imbalance to justify moving tasks,
1850 * however we may be able to increase total CPU power used by
1851 * moving them.
1852 */
1853
1854 pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
1855 pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
1856 pwr_now /= SCHED_LOAD_SCALE;
1857
1858 /* Amount of load we'd subtract */
1859 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
1860 if (max_load > tmp)
1861 pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
1862 max_load - tmp);
1863
1864 /* Amount of load we'd add */
1865 if (max_load*busiest->cpu_power <
1866 SCHED_LOAD_SCALE*SCHED_LOAD_SCALE)
1867 tmp = max_load*busiest->cpu_power/this->cpu_power;
1868 else
1869 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
1870 pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
1871 pwr_move /= SCHED_LOAD_SCALE;
1872
1873 /* Move if we gain throughput */
1874 if (pwr_move <= pwr_now)
1875 goto out_balanced;
1876
1877 *imbalance = 1;
1878 return busiest;
1879 }
1880
1881 /* Get rid of the scaling factor, rounding down as we divide */
1882 *imbalance = *imbalance / SCHED_LOAD_SCALE;
1883 return busiest;
1884
1885 out_balanced:
1886
1887 *imbalance = 0;
1888 return NULL;
1889 }
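/*
 * Worked example (SCHED_LOAD_SCALE == 128, two groups each with
 * cpu_power == 128): the busiest group carries max_load == 512
 * (4 tasks) and we carry this_load == 128 (1 task), so
 * avg_load == 128 * 640 / 256 == 320. Then
 *
 *   *imbalance = min((512 - 320) * 128, (320 - 128) * 128) / 128 == 192
 *
 * which is >= SCHED_LOAD_SCALE, so the small-imbalance special case
 * is skipped and the final scale-down yields 192 / 128 == 1 task to
 * move.
 */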
1890
1891 /*
1892 * find_busiest_queue - find the busiest runqueue among the cpus in group.
1893 */
1894 static runqueue_t *find_busiest_queue(struct sched_group *group)
1895 {
1896 unsigned long load, max_load = 0;
1897 runqueue_t *busiest = NULL;
1898 int i;
1899
1900 for_each_cpu_mask(i, group->cpumask) {
1901 load = source_load(i, 0);
1902
1903 if (load > max_load) {
1904 max_load = load;
1905 busiest = cpu_rq(i);
1906 }
1907 }
1908
1909 return busiest;
1910 }
1911
1912 /*
1913 * Check this_cpu to ensure it is balanced within domain. Attempt to move
1914 * tasks if there is an imbalance.
1915 *
1916 * Called with this_rq unlocked.
1917 */
1918 static int load_balance(int this_cpu, runqueue_t *this_rq,
1919 struct sched_domain *sd, enum idle_type idle)
1920 {
1921 struct sched_group *group;
1922 runqueue_t *busiest;
1923 unsigned long imbalance;
1924 int nr_moved, all_pinned;
1925 int active_balance = 0;
1926
1927 spin_lock(&this_rq->lock);
1928 schedstat_inc(sd, lb_cnt[idle]);
1929
1930 group = find_busiest_group(sd, this_cpu, &imbalance, idle);
1931 if (!group) {
1932 schedstat_inc(sd, lb_nobusyg[idle]);
1933 goto out_balanced;
1934 }
1935
1936 busiest = find_busiest_queue(group);
1937 if (!busiest) {
1938 schedstat_inc(sd, lb_nobusyq[idle]);
1939 goto out_balanced;
1940 }
1941
1942 BUG_ON(busiest == this_rq);
1943
1944 schedstat_add(sd, lb_imbalance[idle], imbalance);
1945
1946 nr_moved = 0;
1947 if (busiest->nr_running > 1) {
1948 /*
1949 * Attempt to move tasks. If find_busiest_group has found
1950 * an imbalance but busiest->nr_running <= 1, the group is
1951 * still unbalanced. nr_moved simply stays zero, so it is
1952 * correctly treated as an imbalance.
1953 */
1954 double_lock_balance(this_rq, busiest);
1955 nr_moved = move_tasks(this_rq, this_cpu, busiest,
1956 imbalance, sd, idle,
1957 &all_pinned);
1958 spin_unlock(&busiest->lock);
1959
1960 /* All tasks on this runqueue were pinned by CPU affinity */
1961 if (unlikely(all_pinned))
1962 goto out_balanced;
1963 }
1964
1965 spin_unlock(&this_rq->lock);
1966
1967 if (!nr_moved) {
1968 schedstat_inc(sd, lb_failed[idle]);
1969 sd->nr_balance_failed++;
1970
1971 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
1972
1973 spin_lock(&busiest->lock);
1974 if (!busiest->active_balance) {
1975 busiest->active_balance = 1;
1976 busiest->push_cpu = this_cpu;
1977 active_balance = 1;
1978 }
1979 spin_unlock(&busiest->lock);
1980 if (active_balance)
1981 wake_up_process(busiest->migration_thread);
1982
1983 /*
1984 * We've kicked active balancing, reset the failure
1985 * counter.
1986 */
1987 sd->nr_balance_failed = sd->cache_nice_tries+1;
1988 }
1989 } else
1990 sd->nr_balance_failed = 0;
1991
1992 if (likely(!active_balance)) {
1993 /* We were unbalanced, so reset the balancing interval */
1994 sd->balance_interval = sd->min_interval;
1995 } else {
1996 /*
1997 * If we've begun active balancing, start to back off. This
1998 * case may not be covered by the all_pinned logic if there
1999 * is only 1 task on the busy runqueue (because we don't call
2000 * move_tasks).
2001 */
2002 if (sd->balance_interval < sd->max_interval)
2003 sd->balance_interval *= 2;
2004 }
2005
2006 return nr_moved;
2007
2008 out_balanced:
2009 spin_unlock(&this_rq->lock);
2010
2011 schedstat_inc(sd, lb_balanced[idle]);
2012
2013 sd->nr_balance_failed = 0;
2014 /* tune up the balancing interval */
2015 if (sd->balance_interval < sd->max_interval)
2016 sd->balance_interval *= 2;
2017
2018 return 0;
2019 }
2020
2021 /*
2022 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2023 * tasks if there is an imbalance.
2024 *
2025 * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
2026 * this_rq is locked.
2027 */
2028 static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2029 struct sched_domain *sd)
2030 {
2031 struct sched_group *group;
2032 runqueue_t *busiest = NULL;
2033 unsigned long imbalance;
2034 int nr_moved = 0;
2035
2036 schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
2037 group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
2038 if (!group) {
2039 schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
2040 goto out_balanced;
2041 }
2042
2043 busiest = find_busiest_queue(group);
2044 if (!busiest) {
2045 schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
2046 goto out_balanced;
2047 }
2048
2049 BUG_ON(busiest == this_rq);
2050
2051 /* Attempt to move tasks */
2052 double_lock_balance(this_rq, busiest);
2053
2054 schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
2055 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2056 imbalance, sd, NEWLY_IDLE, NULL);
2057 if (!nr_moved)
2058 schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
2059 else
2060 sd->nr_balance_failed = 0;
2061
2062 spin_unlock(&busiest->lock);
2063 return nr_moved;
2064
2065 out_balanced:
2066 schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
2067 sd->nr_balance_failed = 0;
2068 return 0;
2069 }
2070
2071 /*
2072 * idle_balance is called by schedule() if this_cpu is about to become
2073 * idle. Attempts to pull tasks from other CPUs.
2074 */
2075 static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
2076 {
2077 struct sched_domain *sd;
2078
2079 for_each_domain(this_cpu, sd) {
2080 if (sd->flags & SD_BALANCE_NEWIDLE) {
2081 if (load_balance_newidle(this_cpu, this_rq, sd)) {
2082 /* We've pulled tasks over so stop searching */
2083 break;
2084 }
2085 }
2086 }
2087 }
2088
2089 /*
2090 * active_load_balance is run by migration threads. It pushes running tasks
2091 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
2092 * running on each physical CPU where possible, and avoids physical /
2093 * logical imbalances.
2094 *
2095 * Called with busiest_rq locked.
2096 */
2097 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
2098 {
2099 struct sched_domain *sd;
2100 runqueue_t *target_rq;
2101 int target_cpu = busiest_rq->push_cpu;
2102
2103 if (busiest_rq->nr_running <= 1)
2104 /* no task to move */
2105 return;
2106
2107 target_rq = cpu_rq(target_cpu);
2108
2109 /*
2110 * This condition is "impossible"; if it occurs
2111 * we need to fix it. Originally reported by
2112 * Bjorn Helgaas on a 128-cpu setup.
2113 */
2114 BUG_ON(busiest_rq == target_rq);
2115
2116 /* move a task from busiest_rq to target_rq */
2117 double_lock_balance(busiest_rq, target_rq);
2118
2119 /* Search for an sd spanning us and the target CPU. */
2120 for_each_domain(target_cpu, sd)
2121 if ((sd->flags & SD_LOAD_BALANCE) &&
2122 cpu_isset(busiest_cpu, sd->span))
2123 break;
2124
2125 if (unlikely(sd == NULL))
2126 goto out;
2127
2128 schedstat_inc(sd, alb_cnt);
2129
2130 if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
2131 schedstat_inc(sd, alb_pushed);
2132 else
2133 schedstat_inc(sd, alb_failed);
2134 out:
2135 spin_unlock(&target_rq->lock);
2136 }
2137
2138 /*
2139 * rebalance_tick will get called every timer tick, on every CPU.
2140 *
2141 * It checks each scheduling domain to see if it is due to be balanced,
2142 * and initiates a balancing operation if so.
2143 *
2144 * Balancing parameters are set up in arch_init_sched_domains.
2145 */
2146
2147 /* Don't have all balancing operations going off at once */
2148 #define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)
2149
2150 static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2151 enum idle_type idle)
2152 {
2153 unsigned long old_load, this_load;
2154 unsigned long j = jiffies + CPU_OFFSET(this_cpu);
2155 struct sched_domain *sd;
2156 int i;
2157
2158 this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
2159 /* Update our load */
2160 for (i = 0; i < 3; i++) {
2161 unsigned long new_load = this_load;
2162 int scale = 1 << i;
2163 old_load = this_rq->cpu_load[i];
2164 /*
2165 * Round up the averaging division if load is increasing. This
2166 * prevents us from getting stuck on 9 if the load is 10, for
2167 * example.
2168 */
2169 if (new_load > old_load)
2170 new_load += scale-1;
2171 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
2172 }
2173
2174 for_each_domain(this_cpu, sd) {
2175 unsigned long interval;
2176
2177 if (!(sd->flags & SD_LOAD_BALANCE))
2178 continue;
2179
2180 interval = sd->balance_interval;
2181 if (idle != SCHED_IDLE)
2182 interval *= sd->busy_factor;
2183
2184 /* scale ms to jiffies */
2185 interval = msecs_to_jiffies(interval);
2186 if (unlikely(!interval))
2187 interval = 1;
2188
2189 if (j - sd->last_balance >= interval) {
2190 if (load_balance(this_cpu, this_rq, sd, idle)) {
2191 /* We've pulled tasks over so no longer idle */
2192 idle = NOT_IDLE;
2193 }
2194 sd->last_balance += interval;
2195 }
2196 }
2197 }
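
/*
 * A worked instance of the cpu_load[] update in rebalance_tick():
 * suppose cpu_load[2] is 9 and the instantaneous load is 10, so
 * scale = 4. Without the round-up the average would never converge:
 *
 *	(9*3 + 10) / 4 = 37/4 = 9	(stuck at 9)
 *
 * With new_load bumped by scale-1 = 3 it reaches the new value:
 *
 *	(9*3 + 13) / 4 = 40/4 = 10
 *
 * cpu_load[0] (scale = 1) always tracks the instantaneous load, and
 * the higher indices give progressively slower-moving averages.
 */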
2198 #else
2199 /*
2200 * on UP we do not need to balance between CPUs:
2201 */
2202 static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
2203 {
2204 }
2205 static inline void idle_balance(int cpu, runqueue_t *rq)
2206 {
2207 }
2208 #endif
2209
2210 static inline int wake_priority_sleeper(runqueue_t *rq)
2211 {
2212 int ret = 0;
2213 #ifdef CONFIG_SCHED_SMT
2214 spin_lock(&rq->lock);
2215 /*
2216 * If an SMT sibling task has been put to sleep for priority
2217 * reasons, reschedule the idle task to see if it can now run.
2218 */
2219 if (rq->nr_running) {
2220 resched_task(rq->idle);
2221 ret = 1;
2222 }
2223 spin_unlock(&rq->lock);
2224 #endif
2225 return ret;
2226 }
2227
2228 DEFINE_PER_CPU(struct kernel_stat, kstat);
2229
2230 EXPORT_PER_CPU_SYMBOL(kstat);
2231
2232 /*
2233 * This is called on clock ticks and on context switches.
2234 * Bank in p->sched_time the ns elapsed since the last tick or switch.
2235 */
2236 static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
2237 unsigned long long now)
2238 {
2239 unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
2240 p->sched_time += now - last;
2241 }
2242
2243 /*
2244 * Return current->sched_time plus any more ns on the sched_clock
2245 * that have not yet been banked.
2246 */
2247 unsigned long long current_sched_time(const task_t *tsk)
2248 {
2249 unsigned long long ns;
2250 unsigned long flags;
2251 local_irq_save(flags);
2252 ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick);
2253 ns = tsk->sched_time + (sched_clock() - ns);
2254 local_irq_restore(flags);
2255 return ns;
2256 }
2257
2258 /*
2259 * We place interactive tasks back into the active array, if possible.
2260 *
2261 * To guarantee that this does not starve expired tasks we ignore the
2262 * interactivity of a task if the first expired task had to wait more
2263 * than a 'reasonable' amount of time. This deadline timeout is
2264 * load-dependent, as the frequency of array switches decreases with
2265 * increasing number of running tasks. We also ignore the interactivity
2266 * if a better static_prio task has expired:
2267 */
2268 #define EXPIRED_STARVING(rq) \
2269 ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
2270 (jiffies - (rq)->expired_timestamp >= \
2271 STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
2272 ((rq)->curr->static_prio > (rq)->best_expired_prio))
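
/*
 * For example, on a runqueue with 4 running tasks the first expired
 * task must have waited at least 4 * STARVATION_LIMIT + 1 jiffies
 * before EXPIRED_STARVING() forces interactive tasks into the expired
 * array too; with more runnable tasks the deadline grows
 * proportionally, matching the lower frequency of array switches.
 */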
2273
2274 /*
2275 * Account user cpu time to a process.
2276 * @p: the process that the cpu time gets accounted to
2278 * @cputime: the cpu time spent in user space since the last update
2279 */
2280 void account_user_time(struct task_struct *p, cputime_t cputime)
2281 {
2282 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2283 cputime64_t tmp;
2284
2285 p->utime = cputime_add(p->utime, cputime);
2286
2287 /* Add user time to cpustat. */
2288 tmp = cputime_to_cputime64(cputime);
2289 if (TASK_NICE(p) > 0)
2290 cpustat->nice = cputime64_add(cpustat->nice, tmp);
2291 else
2292 cpustat->user = cputime64_add(cpustat->user, tmp);
2293 }
2294
2295 /*
2296 * Account system cpu time to a process.
2297 * @p: the process that the cpu time gets accounted to
2298 * @hardirq_offset: the offset to subtract from hardirq_count()
2299 * @cputime: the cpu time spent in kernel space since the last update
2300 */
2301 void account_system_time(struct task_struct *p, int hardirq_offset,
2302 cputime_t cputime)
2303 {
2304 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2305 runqueue_t *rq = this_rq();
2306 cputime64_t tmp;
2307
2308 p->stime = cputime_add(p->stime, cputime);
2309
2310 /* Add system time to cpustat. */
2311 tmp = cputime_to_cputime64(cputime);
2312 if (hardirq_count() - hardirq_offset)
2313 cpustat->irq = cputime64_add(cpustat->irq, tmp);
2314 else if (softirq_count())
2315 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
2316 else if (p != rq->idle)
2317 cpustat->system = cputime64_add(cpustat->system, tmp);
2318 else if (atomic_read(&rq->nr_iowait) > 0)
2319 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
2320 else
2321 cpustat->idle = cputime64_add(cpustat->idle, tmp);
2322 /* Account for system time used */
2323 acct_update_integrals(p);
2324 /* Update rss highwater mark */
2325 update_mem_hiwater(p);
2326 }
2327
2328 /*
2329 * Account for involuntary wait time.
2330 * @p: the process from which the cpu time has been stolen
2331 * @steal: the cpu time spent in involuntary wait
2332 */
2333 void account_steal_time(struct task_struct *p, cputime_t steal)
2334 {
2335 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2336 cputime64_t tmp = cputime_to_cputime64(steal);
2337 runqueue_t *rq = this_rq();
2338
2339 if (p == rq->idle) {
2340 p->stime = cputime_add(p->stime, steal);
2341 if (atomic_read(&rq->nr_iowait) > 0)
2342 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
2343 else
2344 cpustat->idle = cputime64_add(cpustat->idle, tmp);
2345 } else
2346 cpustat->steal = cputime64_add(cpustat->steal, tmp);
2347 }
2348
2349 /*
2350 * This function gets called by the timer code, with HZ frequency.
2351 * We call it with interrupts disabled.
2352 *
2353 * It also gets called by the fork code, when changing the parent's
2354 * timeslices.
2355 */
2356 void scheduler_tick(void)
2357 {
2358 int cpu = smp_processor_id();
2359 runqueue_t *rq = this_rq();
2360 task_t *p = current;
2361 unsigned long long now = sched_clock();
2362
2363 update_cpu_clock(p, rq, now);
2364
2365 rq->timestamp_last_tick = now;
2366
2367 if (p == rq->idle) {
2368 if (wake_priority_sleeper(rq))
2369 goto out;
2370 rebalance_tick(cpu, rq, SCHED_IDLE);
2371 return;
2372 }
2373
2374 /* Task might have expired already, but not scheduled off yet */
2375 if (p->array != rq->active) {
2376 set_tsk_need_resched(p);
2377 goto out;
2378 }
2379 spin_lock(&rq->lock);
2380 /*
2381 * The task was running during this tick - update the
2382 * time slice counter. Note: we do not update a thread's
2383 * priority until it either goes to sleep or uses up its
2384 * timeslice. This makes it possible for interactive tasks
2385 * to use up their timeslices at their highest priority levels.
2386 */
2387 if (rt_task(p)) {
2388 /*
2389 * RR tasks need a special form of timeslice management.
2390 * FIFO tasks have no timeslices.
2391 */
2392 if ((p->policy == SCHED_RR) && !--p->time_slice) {
2393 p->time_slice = task_timeslice(p);
2394 p->first_time_slice = 0;
2395 set_tsk_need_resched(p);
2396
2397 /* put it at the end of the queue: */
2398 requeue_task(p, rq->active);
2399 }
2400 goto out_unlock;
2401 }
2402 if (!--p->time_slice) {
2403 dequeue_task(p, rq->active);
2404 set_tsk_need_resched(p);
2405 p->prio = effective_prio(p);
2406 p->time_slice = task_timeslice(p);
2407 p->first_time_slice = 0;
2408
2409 if (!rq->expired_timestamp)
2410 rq->expired_timestamp = jiffies;
2411 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
2412 enqueue_task(p, rq->expired);
2413 if (p->static_prio < rq->best_expired_prio)
2414 rq->best_expired_prio = p->static_prio;
2415 } else
2416 enqueue_task(p, rq->active);
2417 } else {
2418 /*
2419 * Prevent a too long timeslice allowing a task to monopolize
2420 * the CPU. We do this by splitting up the timeslice into
2421 * smaller pieces.
2422 *
2423 * Note: this does not mean the task's timeslices expire or
2424 * get lost in any way, they just might be preempted by
2425 * another task of equal priority. (one with higher
2426 * priority would have preempted this task already.) We
2427 * requeue this task to the end of the list on this priority
2428 * level, which is in essence a round-robin of tasks with
2429 * equal priority.
2430 *
2431 * This only applies to tasks in the interactive
2432 * delta range with at least TIMESLICE_GRANULARITY to requeue.
2433 */
2434 if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
2435 p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
2436 (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
2437 (p->array == rq->active)) {
2438
2439 requeue_task(p, rq->active);
2440 set_tsk_need_resched(p);
2441 }
2442 }
2443 out_unlock:
2444 spin_unlock(&rq->lock);
2445 out:
2446 rebalance_tick(cpu, rq, NOT_IDLE);
2447 }
2448
2449 #ifdef CONFIG_SCHED_SMT
2450 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
2451 {
2452 struct sched_domain *sd = this_rq->sd;
2453 cpumask_t sibling_map;
2454 int i;
2455
2456 if (!(sd->flags & SD_SHARE_CPUPOWER))
2457 return;
2458
2459 /*
2460 * Unlock the current runqueue because we have to lock in
2461 * CPU order to avoid deadlocks. Caller knows that we might
2462 * unlock. We keep IRQs disabled.
2463 */
2464 spin_unlock(&this_rq->lock);
2465
2466 sibling_map = sd->span;
2467
2468 for_each_cpu_mask(i, sibling_map)
2469 spin_lock(&cpu_rq(i)->lock);
2470 /*
2471 * We clear this CPU from the mask. This both simplifies the
2472 * inner loop and keeps this_rq locked when we exit:
2473 */
2474 cpu_clear(this_cpu, sibling_map);
2475
2476 for_each_cpu_mask(i, sibling_map) {
2477 runqueue_t *smt_rq = cpu_rq(i);
2478
2479 /*
2480 * If an SMT sibling task is sleeping due to priority
2481 * reasons, wake it up now.
2482 */
2483 if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
2484 resched_task(smt_rq->idle);
2485 }
2486
2487 for_each_cpu_mask(i, sibling_map)
2488 spin_unlock(&cpu_rq(i)->lock);
2489 /*
2490 * We exit with this_cpu's rq still held and IRQs
2491 * still disabled:
2492 */
2493 }
2494
2495 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
2496 {
2497 struct sched_domain *sd = this_rq->sd;
2498 cpumask_t sibling_map;
2499 prio_array_t *array;
2500 int ret = 0, i;
2501 task_t *p;
2502
2503 if (!(sd->flags & SD_SHARE_CPUPOWER))
2504 return 0;
2505
2506 /*
2507 * The same locking rules and details apply as for
2508 * wake_sleeping_dependent():
2509 */
2510 spin_unlock(&this_rq->lock);
2511 sibling_map = sd->span;
2512 for_each_cpu_mask(i, sibling_map)
2513 spin_lock(&cpu_rq(i)->lock);
2514 cpu_clear(this_cpu, sibling_map);
2515
2516 /*
2517 * Establish next task to be run - it might have gone away because
2518 * we released the runqueue lock above:
2519 */
2520 if (!this_rq->nr_running)
2521 goto out_unlock;
2522 array = this_rq->active;
2523 if (!array->nr_active)
2524 array = this_rq->expired;
2525 BUG_ON(!array->nr_active);
2526
2527 p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
2528 task_t, run_list);
2529
2530 for_each_cpu_mask(i, sibling_map) {
2531 runqueue_t *smt_rq = cpu_rq(i);
2532 task_t *smt_curr = smt_rq->curr;
2533
2534 /*
2535 * If a user task with lower static priority than the
2536 * running task on the SMT sibling is trying to schedule,
2537 * delay it till there is proportionately less timeslice
2538 * left of the sibling task to prevent a lower priority
2539 * task from using an unfair proportion of the
2540 * physical cpu's resources. -ck
2541 */
2542 if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
2543 task_timeslice(p) || rt_task(smt_curr)) &&
2544 p->mm && smt_curr->mm && !rt_task(p))
2545 ret = 1;
2546
2547 /*
2548 * Reschedule a lower priority task on the SMT sibling,
2549 * or wake it up if it has been put to sleep for priority
2550 * reasons.
2551 */
2552 if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
2553 task_timeslice(smt_curr) || rt_task(p)) &&
2554 smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
2555 (smt_curr == smt_rq->idle && smt_rq->nr_running))
2556 resched_task(smt_curr);
2557 }
2558 out_unlock:
2559 for_each_cpu_mask(i, sibling_map)
2560 spin_unlock(&cpu_rq(i)->lock);
2561 return ret;
2562 }
2563 #else
2564 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
2565 {
2566 }
2567
2568 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
2569 {
2570 return 0;
2571 }
2572 #endif
2573
2574 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
2575
2576 void fastcall add_preempt_count(int val)
2577 {
2578 /*
2579 * Underflow?
2580 */
2581 BUG_ON((preempt_count() < 0));
2582 preempt_count() += val;
2583 /*
2584 * Spinlock count overflowing soon?
2585 */
2586 BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
2587 }
2588 EXPORT_SYMBOL(add_preempt_count);
2589
2590 void fastcall sub_preempt_count(int val)
2591 {
2592 /*
2593 * Underflow?
2594 */
2595 BUG_ON(val > preempt_count());
2596 /*
2597 * Is the spinlock portion underflowing?
2598 */
2599 BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
2600 preempt_count() -= val;
2601 }
2602 EXPORT_SYMBOL(sub_preempt_count);
2603
2604 #endif
2605
2606 /*
2607 * schedule() is the main scheduler function.
2608 */
2609 asmlinkage void __sched schedule(void)
2610 {
2611 long *switch_count;
2612 task_t *prev, *next;
2613 runqueue_t *rq;
2614 prio_array_t *array;
2615 struct list_head *queue;
2616 unsigned long long now;
2617 unsigned long run_time;
2618 int cpu, idx;
2619
2620 /*
2621 * Test if we are atomic. Since do_exit() needs to call into
2622 * schedule() atomically, we ignore that path for now.
2623 * Otherwise, whine if we are scheduling when we should not be.
2624 */
2625 if (likely(!current->exit_state)) {
2626 if (unlikely(in_atomic())) {
2627 printk(KERN_ERR "scheduling while atomic: "
2628 "%s/0x%08x/%d\n",
2629 current->comm, preempt_count(), current->pid);
2630 dump_stack();
2631 }
2632 }
2633 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2634
2635 need_resched:
2636 preempt_disable();
2637 prev = current;
2638 release_kernel_lock(prev);
2639 need_resched_nonpreemptible:
2640 rq = this_rq();
2641
2642 /*
2643 * The idle thread is not allowed to schedule!
2644 * Remove this check after it has been exercised a bit.
2645 */
2646 if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
2647 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
2648 dump_stack();
2649 }
2650
2651 schedstat_inc(rq, sched_cnt);
2652 now = sched_clock();
2653 if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
2654 run_time = now - prev->timestamp;
2655 if (unlikely((long long)(now - prev->timestamp) < 0))
2656 run_time = 0;
2657 } else
2658 run_time = NS_MAX_SLEEP_AVG;
2659
2660 /*
2661 * Tasks are charged proportionately less run_time at high sleep_avg to
2662 * delay them losing their interactive status
2663 */
2664 run_time /= (CURRENT_BONUS(prev) ? : 1);
2665
2666 spin_lock_irq(&rq->lock);
2667
2668 if (unlikely(prev->flags & PF_DEAD))
2669 prev->state = EXIT_DEAD;
2670
2671 switch_count = &prev->nivcsw;
2672 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2673 switch_count = &prev->nvcsw;
2674 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
2675 unlikely(signal_pending(prev))))
2676 prev->state = TASK_RUNNING;
2677 else {
2678 if (prev->state == TASK_UNINTERRUPTIBLE)
2679 rq->nr_uninterruptible++;
2680 deactivate_task(prev, rq);
2681 }
2682 }
2683
2684 cpu = smp_processor_id();
2685 if (unlikely(!rq->nr_running)) {
2686 go_idle:
2687 idle_balance(cpu, rq);
2688 if (!rq->nr_running) {
2689 next = rq->idle;
2690 rq->expired_timestamp = 0;
2691 wake_sleeping_dependent(cpu, rq);
2692 /*
2693 * wake_sleeping_dependent() might have released
2694 * the runqueue, so break out if we got new
2695 * tasks meanwhile:
2696 */
2697 if (!rq->nr_running)
2698 goto switch_tasks;
2699 }
2700 } else {
2701 if (dependent_sleeper(cpu, rq)) {
2702 next = rq->idle;
2703 goto switch_tasks;
2704 }
2705 /*
2706 * dependent_sleeper() releases and reacquires the runqueue
2707 * lock, hence go into the idle loop if the rq went
2708 * empty meanwhile:
2709 */
2710 if (unlikely(!rq->nr_running))
2711 goto go_idle;
2712 }
2713
2714 array = rq->active;
2715 if (unlikely(!array->nr_active)) {
2716 /*
2717 * Switch the active and expired arrays.
2718 */
2719 schedstat_inc(rq, sched_switch);
2720 rq->active = rq->expired;
2721 rq->expired = array;
2722 array = rq->active;
2723 rq->expired_timestamp = 0;
2724 rq->best_expired_prio = MAX_PRIO;
2725 }
2726
2727 idx = sched_find_first_bit(array->bitmap);
2728 queue = array->queue + idx;
2729 next = list_entry(queue->next, task_t, run_list);
2730
2731 if (!rt_task(next) && next->activated > 0) {
2732 unsigned long long delta = now - next->timestamp;
2733 if (unlikely((long long)(now - next->timestamp) < 0))
2734 delta = 0;
2735
2736 if (next->activated == 1)
2737 delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
2738
2739 array = next->array;
2740 dequeue_task(next, array);
2741 recalc_task_prio(next, next->timestamp + delta);
2742 enqueue_task(next, array);
2743 }
2744 next->activated = 0;
2745 switch_tasks:
2746 if (next == rq->idle)
2747 schedstat_inc(rq, sched_goidle);
2748 prefetch(next);
2749 clear_tsk_need_resched(prev);
2750 rcu_qsctr_inc(task_cpu(prev));
2751
2752 update_cpu_clock(prev, rq, now);
2753
2754 prev->sleep_avg -= run_time;
2755 if ((long)prev->sleep_avg <= 0)
2756 prev->sleep_avg = 0;
2757 prev->timestamp = prev->last_ran = now;
2758
2759 sched_info_switch(prev, next);
2760 if (likely(prev != next)) {
2761 next->timestamp = now;
2762 rq->nr_switches++;
2763 rq->curr = next;
2764 ++*switch_count;
2765
2766 prepare_arch_switch(rq, next);
2767 prev = context_switch(rq, prev, next);
2768 barrier();
2769
2770 finish_task_switch(prev);
2771 } else
2772 spin_unlock_irq(&rq->lock);
2773
2774 prev = current;
2775 if (unlikely(reacquire_kernel_lock(prev) < 0))
2776 goto need_resched_nonpreemptible;
2777 preempt_enable_no_resched();
2778 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2779 goto need_resched;
2780 }
2781
2782 EXPORT_SYMBOL(schedule);
2783
2784 #ifdef CONFIG_PREEMPT
2785 /*
2786 * this is the entry point to schedule() from in-kernel preemption
2787 * off of preempt_enable. Kernel preemption off the return-from-interrupt
2788 * path is handled separately by preempt_schedule_irq() below.
2789 */
2790 asmlinkage void __sched preempt_schedule(void)
2791 {
2792 struct thread_info *ti = current_thread_info();
2793 #ifdef CONFIG_PREEMPT_BKL
2794 struct task_struct *task = current;
2795 int saved_lock_depth;
2796 #endif
2797 /*
2798 * If there is a non-zero preempt_count or interrupts are disabled,
2799 * we do not want to preempt the current task. Just return..
2800 */
2801 if (unlikely(ti->preempt_count || irqs_disabled()))
2802 return;
2803
2804 need_resched:
2805 add_preempt_count(PREEMPT_ACTIVE);
2806 /*
2807 * We keep the big kernel semaphore locked, but we
2808 * clear ->lock_depth so that schedule() doesn't
2809 * auto-release the semaphore:
2810 */
2811 #ifdef CONFIG_PREEMPT_BKL
2812 saved_lock_depth = task->lock_depth;
2813 task->lock_depth = -1;
2814 #endif
2815 schedule();
2816 #ifdef CONFIG_PREEMPT_BKL
2817 task->lock_depth = saved_lock_depth;
2818 #endif
2819 sub_preempt_count(PREEMPT_ACTIVE);
2820
2821 /* we could miss a preemption opportunity between schedule and now */
2822 barrier();
2823 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2824 goto need_resched;
2825 }
2826
2827 EXPORT_SYMBOL(preempt_schedule);
2828
2829 /*
2830 * this is the entry point to schedule() from kernel preemption
2831 * off of irq context.
2832 * Note that this is called and returns with irqs disabled. This
2833 * protects us against recursive calls from irq context.
2834 */
2835 asmlinkage void __sched preempt_schedule_irq(void)
2836 {
2837 struct thread_info *ti = current_thread_info();
2838 #ifdef CONFIG_PREEMPT_BKL
2839 struct task_struct *task = current;
2840 int saved_lock_depth;
2841 #endif
2842 /* Catch callers which need to be fixed */
2843 BUG_ON(ti->preempt_count || !irqs_disabled());
2844
2845 need_resched:
2846 add_preempt_count(PREEMPT_ACTIVE);
2847 /*
2848 * We keep the big kernel semaphore locked, but we
2849 * clear ->lock_depth so that schedule() doesn't
2850 * auto-release the semaphore:
2851 */
2852 #ifdef CONFIG_PREEMPT_BKL
2853 saved_lock_depth = task->lock_depth;
2854 task->lock_depth = -1;
2855 #endif
2856 local_irq_enable();
2857 schedule();
2858 local_irq_disable();
2859 #ifdef CONFIG_PREEMPT_BKL
2860 task->lock_depth = saved_lock_depth;
2861 #endif
2862 sub_preempt_count(PREEMPT_ACTIVE);
2863
2864 /* we could miss a preemption opportunity between schedule and now */
2865 barrier();
2866 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2867 goto need_resched;
2868 }
2869
2870 #endif /* CONFIG_PREEMPT */
2871
2872 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
2873 {
2874 task_t *p = curr->private;
2875 return try_to_wake_up(p, mode, sync);
2876 }
2877
2878 EXPORT_SYMBOL(default_wake_function);
2879
2880 /*
2881 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
2882 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
2883 * number) then we wake all the non-exclusive tasks and one exclusive task.
2884 *
2885 * There are circumstances in which we can try to wake a task which has already
2886 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
2887 * zero in this (rare) case, and we handle it by continuing to scan the queue.
2888 */
2889 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2890 int nr_exclusive, int sync, void *key)
2891 {
2892 struct list_head *tmp, *next;
2893
2894 list_for_each_safe(tmp, next, &q->task_list) {
2895 wait_queue_t *curr;
2896 unsigned flags;
2897 curr = list_entry(tmp, wait_queue_t, task_list);
2898 flags = curr->flags;
2899 if (curr->func(curr, mode, sync, key) &&
2900 (flags & WQ_FLAG_EXCLUSIVE) &&
2901 !--nr_exclusive)
2902 break;
2903 }
2904 }
2905
2906 /**
2907 * __wake_up - wake up threads blocked on a waitqueue.
2908 * @q: the waitqueue
2909 * @mode: which threads
2910 * @nr_exclusive: how many wake-one or wake-many threads to wake up
2911 * @key: is directly passed to the wakeup function
2912 */
2913 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
2914 int nr_exclusive, void *key)
2915 {
2916 unsigned long flags;
2917
2918 spin_lock_irqsave(&q->lock, flags);
2919 __wake_up_common(q, mode, nr_exclusive, 0, key);
2920 spin_unlock_irqrestore(&q->lock, flags);
2921 }
2922
2923 EXPORT_SYMBOL(__wake_up);
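
/*
 * The usual waiter/waker pairing served by __wake_up(), as a minimal
 * sketch using the wait_event_interruptible() helper from
 * <linux/wait.h> (my_wq and my_cond are illustrative names, not part
 * of this file):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	waiter:
 *		wait_event_interruptible(my_wq, my_cond != 0);
 *
 *	waker:
 *		my_cond = 1;
 *		wake_up(&my_wq);
 *
 * wait_event_interruptible() re-checks the condition after every
 * wakeup, which is why a wakeup racing with a task that has already
 * started running (see the comment above __wake_up_common()) is
 * harmless.
 */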
2924
2925 /*
2926 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2927 */
2928 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
2929 {
2930 __wake_up_common(q, mode, 1, 0, NULL);
2931 }
2932
2933 /**
2934 * __wake_up_sync - wake up threads blocked on a waitqueue.
2935 * @q: the waitqueue
2936 * @mode: which threads
2937 * @nr_exclusive: how many wake-one or wake-many threads to wake up
2938 *
2939 * The sync wakeup differs in that the waker knows that it will schedule
2940 * away soon, so while the target thread will be woken up, it will not
2941 * be migrated to another CPU - ie. the two threads are 'synchronized'
2942 * with each other. This can prevent needless bouncing between CPUs.
2943 *
2944 * On UP it can prevent extra preemption.
2945 */
2946 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2947 {
2948 unsigned long flags;
2949 int sync = 1;
2950
2951 if (unlikely(!q))
2952 return;
2953
2954 if (unlikely(!nr_exclusive))
2955 sync = 0;
2956
2957 spin_lock_irqsave(&q->lock, flags);
2958 __wake_up_common(q, mode, nr_exclusive, sync, NULL);
2959 spin_unlock_irqrestore(&q->lock, flags);
2960 }
2961 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
2962
2963 void fastcall complete(struct completion *x)
2964 {
2965 unsigned long flags;
2966
2967 spin_lock_irqsave(&x->wait.lock, flags);
2968 x->done++;
2969 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2970 1, 0, NULL);
2971 spin_unlock_irqrestore(&x->wait.lock, flags);
2972 }
2973 EXPORT_SYMBOL(complete);
2974
2975 void fastcall complete_all(struct completion *x)
2976 {
2977 unsigned long flags;
2978
2979 spin_lock_irqsave(&x->wait.lock, flags);
2980 x->done += UINT_MAX/2;
2981 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2982 0, 0, NULL);
2983 spin_unlock_irqrestore(&x->wait.lock, flags);
2984 }
2985 EXPORT_SYMBOL(complete_all);
2986
2987 void fastcall __sched wait_for_completion(struct completion *x)
2988 {
2989 might_sleep();
2990 spin_lock_irq(&x->wait.lock);
2991 if (!x->done) {
2992 DECLARE_WAITQUEUE(wait, current);
2993
2994 wait.flags |= WQ_FLAG_EXCLUSIVE;
2995 __add_wait_queue_tail(&x->wait, &wait);
2996 do {
2997 __set_current_state(TASK_UNINTERRUPTIBLE);
2998 spin_unlock_irq(&x->wait.lock);
2999 schedule();
3000 spin_lock_irq(&x->wait.lock);
3001 } while (!x->done);
3002 __remove_wait_queue(&x->wait, &wait);
3003 }
3004 x->done--;
3005 spin_unlock_irq(&x->wait.lock);
3006 }
3007 EXPORT_SYMBOL(wait_for_completion);
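
/*
 * The typical handshake built on the completion helpers above, e.g.
 * waiting for a helper thread to finish. A minimal sketch; "done" and
 * the worker are illustrative:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	... start the worker, handing it &done ...
 *	wait_for_completion(&done);
 *
 * and in the worker, once its job is finished:
 *
 *	complete(&done);
 *
 * Because x->done is a counter rather than a flag, a complete() that
 * fires before the waiter reaches wait_for_completion() is not lost.
 */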
3008
3009 unsigned long fastcall __sched
3010 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3011 {
3012 might_sleep();
3013
3014 spin_lock_irq(&x->wait.lock);
3015 if (!x->done) {
3016 DECLARE_WAITQUEUE(wait, current);
3017
3018 wait.flags |= WQ_FLAG_EXCLUSIVE;
3019 __add_wait_queue_tail(&x->wait, &wait);
3020 do {
3021 __set_current_state(TASK_UNINTERRUPTIBLE);
3022 spin_unlock_irq(&x->wait.lock);
3023 timeout = schedule_timeout(timeout);
3024 spin_lock_irq(&x->wait.lock);
3025 if (!timeout) {
3026 __remove_wait_queue(&x->wait, &wait);
3027 goto out;
3028 }
3029 } while (!x->done);
3030 __remove_wait_queue(&x->wait, &wait);
3031 }
3032 x->done--;
3033 out:
3034 spin_unlock_irq(&x->wait.lock);
3035 return timeout;
3036 }
3037 EXPORT_SYMBOL(wait_for_completion_timeout);
3038
3039 int fastcall __sched wait_for_completion_interruptible(struct completion *x)
3040 {
3041 int ret = 0;
3042
3043 might_sleep();
3044
3045 spin_lock_irq(&x->wait.lock);
3046 if (!x->done) {
3047 DECLARE_WAITQUEUE(wait, current);
3048
3049 wait.flags |= WQ_FLAG_EXCLUSIVE;
3050 __add_wait_queue_tail(&x->wait, &wait);
3051 do {
3052 if (signal_pending(current)) {
3053 ret = -ERESTARTSYS;
3054 __remove_wait_queue(&x->wait, &wait);
3055 goto out;
3056 }
3057 __set_current_state(TASK_INTERRUPTIBLE);
3058 spin_unlock_irq(&x->wait.lock);
3059 schedule();
3060 spin_lock_irq(&x->wait.lock);
3061 } while (!x->done);
3062 __remove_wait_queue(&x->wait, &wait);
3063 }
3064 x->done--;
3065 out:
3066 spin_unlock_irq(&x->wait.lock);
3067
3068 return ret;
3069 }
3070 EXPORT_SYMBOL(wait_for_completion_interruptible);
3071
3072 unsigned long fastcall __sched
3073 wait_for_completion_interruptible_timeout(struct completion *x,
3074 unsigned long timeout)
3075 {
3076 might_sleep();
3077
3078 spin_lock_irq(&x->wait.lock);
3079 if (!x->done) {
3080 DECLARE_WAITQUEUE(wait, current);
3081
3082 wait.flags |= WQ_FLAG_EXCLUSIVE;
3083 __add_wait_queue_tail(&x->wait, &wait);
3084 do {
3085 if (signal_pending(current)) {
3086 timeout = -ERESTARTSYS;
3087 __remove_wait_queue(&x->wait, &wait);
3088 goto out;
3089 }
3090 __set_current_state(TASK_INTERRUPTIBLE);
3091 spin_unlock_irq(&x->wait.lock);
3092 timeout = schedule_timeout(timeout);
3093 spin_lock_irq(&x->wait.lock);
3094 if (!timeout) {
3095 __remove_wait_queue(&x->wait, &wait);
3096 goto out;
3097 }
3098 } while (!x->done);
3099 __remove_wait_queue(&x->wait, &wait);
3100 }
3101 x->done--;
3102 out:
3103 spin_unlock_irq(&x->wait.lock);
3104 return timeout;
3105 }
3106 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3107
3108
3109 #define SLEEP_ON_VAR \
3110 unsigned long flags; \
3111 wait_queue_t wait; \
3112 init_waitqueue_entry(&wait, current);
3113
3114 #define SLEEP_ON_HEAD \
3115 spin_lock_irqsave(&q->lock,flags); \
3116 __add_wait_queue(q, &wait); \
3117 spin_unlock(&q->lock);
3118
3119 #define SLEEP_ON_TAIL \
3120 spin_lock_irq(&q->lock); \
3121 __remove_wait_queue(q, &wait); \
3122 spin_unlock_irqrestore(&q->lock, flags);
3123
3124 void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3125 {
3126 SLEEP_ON_VAR
3127
3128 current->state = TASK_INTERRUPTIBLE;
3129
3130 SLEEP_ON_HEAD
3131 schedule();
3132 SLEEP_ON_TAIL
3133 }
3134
3135 EXPORT_SYMBOL(interruptible_sleep_on);
3136
3137 long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3138 {
3139 SLEEP_ON_VAR
3140
3141 current->state = TASK_INTERRUPTIBLE;
3142
3143 SLEEP_ON_HEAD
3144 timeout = schedule_timeout(timeout);
3145 SLEEP_ON_TAIL
3146
3147 return timeout;
3148 }
3149
3150 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3151
3152 void fastcall __sched sleep_on(wait_queue_head_t *q)
3153 {
3154 SLEEP_ON_VAR
3155
3156 current->state = TASK_UNINTERRUPTIBLE;
3157
3158 SLEEP_ON_HEAD
3159 schedule();
3160 SLEEP_ON_TAIL
3161 }
3162
3163 EXPORT_SYMBOL(sleep_on);
3164
3165 long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3166 {
3167 SLEEP_ON_VAR
3168
3169 current->state = TASK_UNINTERRUPTIBLE;
3170
3171 SLEEP_ON_HEAD
3172 timeout = schedule_timeout(timeout);
3173 SLEEP_ON_TAIL
3174
3175 return timeout;
3176 }
3177
3178 EXPORT_SYMBOL(sleep_on_timeout);
3179
3180 void set_user_nice(task_t *p, long nice)
3181 {
3182 unsigned long flags;
3183 prio_array_t *array;
3184 runqueue_t *rq;
3185 int old_prio, new_prio, delta;
3186
3187 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3188 return;
3189 /*
3190 * We have to be careful, if called from sys_setpriority(),
3191 * the task might be in the middle of scheduling on another CPU.
3192 */
3193 rq = task_rq_lock(p, &flags);
3194 /*
3195 * The RT priorities are set via sched_setscheduler(), but we still
3196 * allow the 'normal' nice value to be set - but as expected
3197 * it won't have any effect on scheduling as long as the task
3198 * is not SCHED_NORMAL:
3199 */
3200 if (rt_task(p)) {
3201 p->static_prio = NICE_TO_PRIO(nice);
3202 goto out_unlock;
3203 }
3204 array = p->array;
3205 if (array)
3206 dequeue_task(p, array);
3207
3208 old_prio = p->prio;
3209 new_prio = NICE_TO_PRIO(nice);
3210 delta = new_prio - old_prio;
3211 p->static_prio = NICE_TO_PRIO(nice);
3212 p->prio += delta;
3213
3214 if (array) {
3215 enqueue_task(p, array);
3216 /*
3217 * If the task increased its priority or is running and
3218 * lowered its priority, then reschedule its CPU:
3219 */
3220 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3221 resched_task(rq->curr);
3222 }
3223 out_unlock:
3224 task_rq_unlock(rq, &flags);
3225 }
3226
3227 EXPORT_SYMBOL(set_user_nice);
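
/*
 * Example: a SCHED_NORMAL task at nice 0 moved to nice -5 has its
 * static_prio lowered by 5 and its dynamic prio shifted by the same
 * delta; since delta < 0 means the priority was raised, resched_task()
 * is called on that runqueue's current task so the change can take
 * effect immediately.
 */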
3228
3229 /*
3230 * can_nice - check if a task can reduce its nice value
3231 * @p: task
3232 * @nice: nice value
3233 */
3234 int can_nice(const task_t *p, const int nice)
3235 {
3236 /* convert nice value [19,-20] to rlimit style value [0,39] */
3237 int nice_rlim = 19 - nice;
3238 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3239 capable(CAP_SYS_NICE));
3240 }
3241
3242 #ifdef __ARCH_WANT_SYS_NICE
3243
3244 /*
3245 * sys_nice - change the priority of the current process.
3246 * @increment: priority increment
3247 *
3248 * sys_setpriority is a more generic, but much slower function that
3249 * does similar things.
3250 */
3251 asmlinkage long sys_nice(int increment)
3252 {
3253 int retval;
3254 long nice;
3255
3256 /*
3257 * Setpriority might change our priority at the same moment.
3258 * We don't have to worry. Conceptually one call occurs first
3259 * and we have a single winner.
3260 */
3261 if (increment < -40)
3262 increment = -40;
3263 if (increment > 40)
3264 increment = 40;
3265
3266 nice = PRIO_TO_NICE(current->static_prio) + increment;
3267 if (nice < -20)
3268 nice = -20;
3269 if (nice > 19)
3270 nice = 19;
3271
3272 if (increment < 0 && !can_nice(current, nice))
3273 return -EPERM;
3274
3275 retval = security_task_setnice(current, nice);
3276 if (retval)
3277 return retval;
3278
3279 set_user_nice(current, nice);
3280 return 0;
3281 }
3282
3283 #endif
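
/*
 * Observable effect of the clamping in sys_nice(), assuming a libc
 * nice() wrapper that forwards to this syscall (a sketch, not a
 * statement about any particular libc):
 *
 *	nice(100)	increment clamped to 40, nice ends up at 19
 *	nice(-1)	fails with -EPERM unless can_nice() allows it
 *			(RLIMIT_NICE or CAP_SYS_NICE)
 */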
3284
3285 /**
3286 * task_prio - return the priority value of a given task.
3287 * @p: the task in question.
3288 *
3289 * This is the priority value as seen by users in /proc.
3290 * RT tasks are offset by -200. Normal tasks are centered
3291 * around 0, value goes from -16 to +15.
3292 */
3293 int task_prio(const task_t *p)
3294 {
3295 return p->prio - MAX_RT_PRIO;
3296 }
3297
3298 /**
3299 * task_nice - return the nice value of a given task.
3300 * @p: the task in question.
3301 */
3302 int task_nice(const task_t *p)
3303 {
3304 return TASK_NICE(p);
3305 }
3306
3307 /*
3308 * The only users of task_nice are binfmt_elf and binfmt_elf32.
3309 * binfmt_elf is no longer modular, but binfmt_elf32 still is.
3310 * Therefore, task_nice is needed if there is a compat_mode.
3311 */
3312 #ifdef CONFIG_COMPAT
3313 EXPORT_SYMBOL_GPL(task_nice);
3314 #endif
3315
3316 /**
3317 * idle_cpu - is a given cpu idle currently?
3318 * @cpu: the processor in question.
3319 */
3320 int idle_cpu(int cpu)
3321 {
3322 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3323 }
3324
3325 EXPORT_SYMBOL_GPL(idle_cpu);
3326
3327 /**
3328 * idle_task - return the idle task for a given cpu.
3329 * @cpu: the processor in question.
3330 */
3331 task_t *idle_task(int cpu)
3332 {
3333 return cpu_rq(cpu)->idle;
3334 }
3335
3336 /**
3337 * find_process_by_pid - find a process with a matching PID value.
3338 * @pid: the pid in question.
3339 */
3340 static inline task_t *find_process_by_pid(pid_t pid)
3341 {
3342 return pid ? find_task_by_pid(pid) : current;
3343 }
3344
3345 /* Actually do priority change: must hold rq lock. */
3346 static void __setscheduler(struct task_struct *p, int policy, int prio)
3347 {
3348 BUG_ON(p->array);
3349 p->policy = policy;
3350 p->rt_priority = prio;
3351 if (policy != SCHED_NORMAL)
3352 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
3353 else
3354 p->prio = p->static_prio;
3355 }
3356
3357 /**
3358 * sched_setscheduler - change the scheduling policy and/or RT priority of
3359 * a thread.
3360 * @p: the task in question.
3361 * @policy: new policy.
3362 * @param: structure containing the new RT priority.
3363 */
3364 int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
3365 {
3366 int retval;
3367 int oldprio, oldpolicy = -1;
3368 prio_array_t *array;
3369 unsigned long flags;
3370 runqueue_t *rq;
3371
3372 recheck:
3373 /* double check policy once rq lock held */
3374 if (policy < 0)
3375 policy = oldpolicy = p->policy;
3376 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
3377 policy != SCHED_NORMAL)
3378 return -EINVAL;
3379 /*
3380 * Valid priorities for SCHED_FIFO and SCHED_RR are
3381 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
3382 */
3383 if (param->sched_priority < 0 ||
3384 param->sched_priority > MAX_USER_RT_PRIO-1)
3385 return -EINVAL;
3386 if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
3387 return -EINVAL;
3388
3389 if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
3390 param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur &&
3391 !capable(CAP_SYS_NICE))
3392 return -EPERM;
3393 if ((current->euid != p->euid) && (current->euid != p->uid) &&
3394 !capable(CAP_SYS_NICE))
3395 return -EPERM;
3396
3397 retval = security_task_setscheduler(p, policy, param);
3398 if (retval)
3399 return retval;
3400 /*
3401 * To be able to change p->policy safely, the appropriate
3402 * runqueue lock must be held.
3403 */
3404 rq = task_rq_lock(p, &flags);
3405 /* recheck policy now with rq lock held */
3406 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3407 policy = oldpolicy = -1;
3408 task_rq_unlock(rq, &flags);
3409 goto recheck;
3410 }
3411 array = p->array;
3412 if (array)
3413 deactivate_task(p, rq);
3414 oldprio = p->prio;
3415 __setscheduler(p, policy, param->sched_priority);
3416 if (array) {
3417 __activate_task(p, rq);
3418 /*
3419 * Reschedule if we are currently running on this runqueue and
3420 * our priority decreased, or if we are not currently running on
3421 * this runqueue and our priority is higher than the current's
3422 */
3423 if (task_running(rq, p)) {
3424 if (p->prio > oldprio)
3425 resched_task(rq->curr);
3426 } else if (TASK_PREEMPTS_CURR(p, rq))
3427 resched_task(rq->curr);
3428 }
3429 task_rq_unlock(rq, &flags);
3430 return 0;
3431 }
3432 EXPORT_SYMBOL_GPL(sched_setscheduler);
3433
3434 static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3435 {
3436 int retval;
3437 struct sched_param lparam;
3438 struct task_struct *p;
3439
3440 if (!param || pid < 0)
3441 return -EINVAL;
3442 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3443 return -EFAULT;
3444 read_lock_irq(&tasklist_lock);
3445 p = find_process_by_pid(pid);
3446 if (!p) {
3447 read_unlock_irq(&tasklist_lock);
3448 return -ESRCH;
3449 }
3450 retval = sched_setscheduler(p, policy, &lparam);
3451 read_unlock_irq(&tasklist_lock);
3452 return retval;
3453 }
3454
3455 /**
3456 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3457 * @pid: the pid in question.
3458 * @policy: new policy.
3459 * @param: structure containing the new RT priority.
3460 */
3461 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3462 struct sched_param __user *param)
3463 {
3464 return do_sched_setscheduler(pid, policy, param);
3465 }
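
/*
 * Userspace view of the syscall above, as a minimal sketch using the
 * POSIX wrapper (priority 50 is an arbitrary example value):
 *
 *	#include <sched.h>
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	int err = sched_setscheduler(0, SCHED_FIFO, &sp);
 *
 * err is -1 with errno set to EPERM when neither CAP_SYS_NICE nor
 * RLIMIT_RTPRIO admits the requested priority, per the permission
 * checks above.
 */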
3466
3467 /**
3468 * sys_sched_setparam - set/change the RT priority of a thread
3469 * @pid: the pid in question.
3470 * @param: structure containing the new RT priority.
3471 */
3472 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
3473 {
3474 return do_sched_setscheduler(pid, -1, param);
3475 }
3476
3477 /**
3478 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3479 * @pid: the pid in question.
3480 */
3481 asmlinkage long sys_sched_getscheduler(pid_t pid)
3482 {
3483 int retval = -EINVAL;
3484 task_t *p;
3485
3486 if (pid < 0)
3487 goto out_nounlock;
3488
3489 retval = -ESRCH;
3490 read_lock(&tasklist_lock);
3491 p = find_process_by_pid(pid);
3492 if (p) {
3493 retval = security_task_getscheduler(p);
3494 if (!retval)
3495 retval = p->policy;
3496 }
3497 read_unlock(&tasklist_lock);
3498
3499 out_nounlock:
3500 return retval;
3501 }
3502
3503 /**
3504 * sys_sched_getparam - get the RT priority of a thread
3505 * @pid: the pid in question.
3506 * @param: structure containing the RT priority.
3507 */
3508 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
3509 {
3510 struct sched_param lp;
3511 int retval = -EINVAL;
3512 task_t *p;
3513
3514 if (!param || pid < 0)
3515 goto out_nounlock;
3516
3517 read_lock(&tasklist_lock);
3518 p = find_process_by_pid(pid);
3519 retval = -ESRCH;
3520 if (!p)
3521 goto out_unlock;
3522
3523 retval = security_task_getscheduler(p);
3524 if (retval)
3525 goto out_unlock;
3526
3527 lp.sched_priority = p->rt_priority;
3528 read_unlock(&tasklist_lock);
3529
3530 /*
3531 * This one might sleep; we cannot do it with a spinlock held ...
3532 */
3533 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3534
3535 out_nounlock:
3536 return retval;
3537
3538 out_unlock:
3539 read_unlock(&tasklist_lock);
3540 return retval;
3541 }
3542
3543 long sched_setaffinity(pid_t pid, cpumask_t new_mask)
3544 {
3545 task_t *p;
3546 int retval;
3547 cpumask_t cpus_allowed;
3548
3549 lock_cpu_hotplug();
3550 read_lock(&tasklist_lock);
3551
3552 p = find_process_by_pid(pid);
3553 if (!p) {
3554 read_unlock(&tasklist_lock);
3555 unlock_cpu_hotplug();
3556 return -ESRCH;
3557 }
3558
3559 /*
3560 * It is not safe to call set_cpus_allowed with the
3561 * tasklist_lock held. We will bump the task_struct's
3562 * usage count and then drop tasklist_lock.
3563 */
3564 get_task_struct(p);
3565 read_unlock(&tasklist_lock);
3566
3567 retval = -EPERM;
3568 if ((current->euid != p->euid) && (current->euid != p->uid) &&
3569 !capable(CAP_SYS_NICE))
3570 goto out_unlock;
3571
3572 cpus_allowed = cpuset_cpus_allowed(p);
3573 cpus_and(new_mask, new_mask, cpus_allowed);
3574 retval = set_cpus_allowed(p, new_mask);
3575
3576 out_unlock:
3577 put_task_struct(p);
3578 unlock_cpu_hotplug();
3579 return retval;
3580 }
3581
3582 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3583 cpumask_t *new_mask)
3584 {
3585 if (len < sizeof(cpumask_t)) {
3586 memset(new_mask, 0, sizeof(cpumask_t));
3587 } else if (len > sizeof(cpumask_t)) {
3588 len = sizeof(cpumask_t);
3589 }
3590 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3591 }
3592
3593 /**
3594 * sys_sched_setaffinity - set the cpu affinity of a process
3595 * @pid: pid of the process
3596 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3597 * @user_mask_ptr: user-space pointer to the new cpu mask
3598 */
3599 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
3600 unsigned long __user *user_mask_ptr)
3601 {
3602 cpumask_t new_mask;
3603 int retval;
3604
3605 retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
3606 if (retval)
3607 return retval;
3608
3609 return sched_setaffinity(pid, new_mask);
3610 }
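
/*
 * Userspace view of sys_sched_setaffinity(), pinning the caller to
 * CPU 0. A minimal sketch using the glibc wrapper, which passes the
 * mask size for us (error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	sched_setaffinity(0, sizeof(set), &set);
 *
 * The kernel additionally intersects the requested mask with the
 * task's cpuset, as sched_setaffinity() above shows.
 */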
3611
3612 /*
3613 * Represents all CPUs present in the system.
3614 * In systems capable of hotplug, this map could dynamically grow
3615 * as new CPUs are detected in the system via any platform-specific
3616 * method, such as ACPI, for example.
3617 */
3618
3619 cpumask_t cpu_present_map;
3620 EXPORT_SYMBOL(cpu_present_map);
3621
3622 #ifndef CONFIG_SMP
3623 cpumask_t cpu_online_map = CPU_MASK_ALL;
3624 cpumask_t cpu_possible_map = CPU_MASK_ALL;
3625 #endif
3626
3627 long sched_getaffinity(pid_t pid, cpumask_t *mask)
3628 {
3629 int retval;
3630 task_t *p;
3631
3632 lock_cpu_hotplug();
3633 read_lock(&tasklist_lock);
3634
3635 retval = -ESRCH;
3636 p = find_process_by_pid(pid);
3637 if (!p)
3638 goto out_unlock;
3639
3640 retval = 0;
3641 cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
3642
3643 out_unlock:
3644 read_unlock(&tasklist_lock);
3645 unlock_cpu_hotplug();
3646 if (retval)
3647 return retval;
3648
3649 return 0;
3650 }
3651
3652 /**
3653 * sys_sched_getaffinity - get the cpu affinity of a process
3654 * @pid: pid of the process
3655 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3656 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3657 */
3658 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
3659 unsigned long __user *user_mask_ptr)
3660 {
3661 int ret;
3662 cpumask_t mask;
3663
3664 if (len < sizeof(cpumask_t))
3665 return -EINVAL;
3666
3667 ret = sched_getaffinity(pid, &mask);
3668 if (ret < 0)
3669 return ret;
3670
3671 if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
3672 return -EFAULT;
3673
3674 return sizeof(cpumask_t);
3675 }
3676
3677 /**
3678 * sys_sched_yield - yield the current processor to other threads.
3679 *
3680 * this function yields the current CPU by moving the calling thread
3681 * to the expired array. If there are no other threads running on this
3682 * CPU then this function will return.
3683 */
3684 asmlinkage long sys_sched_yield(void)
3685 {
3686 runqueue_t *rq = this_rq_lock();
3687 prio_array_t *array = current->array;
3688 prio_array_t *target = rq->expired;
3689
3690 schedstat_inc(rq, yld_cnt);
3691 /*
3692 * We implement yielding by moving the task into the expired
3693 * queue.
3694 *
3695 * (special rule: RT tasks will just roundrobin in the active
3696 * array.)
3697 */
3698 if (rt_task(current))
3699 target = rq->active;
3700
3701 if (current->array->nr_active == 1) {
3702 schedstat_inc(rq, yld_act_empty);
3703 if (!rq->expired->nr_active)
3704 schedstat_inc(rq, yld_both_empty);
3705 } else if (!rq->expired->nr_active)
3706 schedstat_inc(rq, yld_exp_empty);
3707
3708 if (array != target) {
3709 dequeue_task(current, array);
3710 enqueue_task(current, target);
3711 } else
3712 /*
3713 * requeue_task is cheaper so perform that if possible.
3714 */
3715 requeue_task(current, array);
3716
3717 /*
3718 * Since we are going to call schedule() anyway, there's
3719 * no need to preempt or enable interrupts:
3720 */
3721 __release(rq->lock);
3722 _raw_spin_unlock(&rq->lock);
3723 preempt_enable_no_resched();
3724
3725 schedule();
3726
3727 return 0;
3728 }
3729
3730 static inline void __cond_resched(void)
3731 {
3732 do {
3733 add_preempt_count(PREEMPT_ACTIVE);
3734 schedule();
3735 sub_preempt_count(PREEMPT_ACTIVE);
3736 } while (need_resched());
3737 }
3738
3739 int __sched cond_resched(void)
3740 {
3741 if (need_resched()) {
3742 __cond_resched();
3743 return 1;
3744 }
3745 return 0;
3746 }
3747
3748 EXPORT_SYMBOL(cond_resched);
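
/*
 * Typical use of cond_resched(): a long-running loop in process
 * context that must not monopolise the CPU on non-preemptible
 * kernels (process_item() is an illustrative placeholder):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */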
3749
3750 /*
3751 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
3752 * call schedule, and on return reacquire the lock.
3753 *
3754 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
3755 * operations here to prevent schedule() from being called twice (once via
3756 * spin_unlock(), once by hand).
3757 */
3758 int cond_resched_lock(spinlock_t * lock)
3759 {
3760 int ret = 0;
3761
3762 if (need_lockbreak(lock)) {
3763 spin_unlock(lock);
3764 cpu_relax();
3765 ret = 1;
3766 spin_lock(lock);
3767 }
3768 if (need_resched()) {
3769 _raw_spin_unlock(lock);
3770 preempt_enable_no_resched();
3771 __cond_resched();
3772 ret = 1;
3773 spin_lock(lock);
3774 }
3775 return ret;
3776 }
3777
3778 EXPORT_SYMBOL(cond_resched_lock);
3779
3780 int __sched cond_resched_softirq(void)
3781 {
3782 BUG_ON(!in_softirq());
3783
3784 if (need_resched()) {
3785 __local_bh_enable();
3786 __cond_resched();
3787 local_bh_disable();
3788 return 1;
3789 }
3790 return 0;
3791 }
3792
3793 EXPORT_SYMBOL(cond_resched_softirq);
3794
3795
3796 /**
3797 * yield - yield the current processor to other threads.
3798 *
3799 * this is a shortcut for kernel-space yielding - it marks the
3800 * thread runnable and calls sys_sched_yield().
3801 */
3802 void __sched yield(void)
3803 {
3804 set_current_state(TASK_RUNNING);
3805 sys_sched_yield();
3806 }
3807
3808 EXPORT_SYMBOL(yield);
3809
3810 /*
3811 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
3812 * that process accounting knows that this is a task in IO wait state.
3813 *
3814 * But don't do that if it is a deliberate, throttling IO wait (this task
3815 * has set its backing_dev_info: the queue against which it should throttle)
3816 */
3817 void __sched io_schedule(void)
3818 {
3819 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3820
3821 atomic_inc(&rq->nr_iowait);
3822 schedule();
3823 atomic_dec(&rq->nr_iowait);
3824 }
3825
3826 EXPORT_SYMBOL(io_schedule);
3827
3828 long __sched io_schedule_timeout(long timeout)
3829 {
3830 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3831 long ret;
3832
3833 atomic_inc(&rq->nr_iowait);
3834 ret = schedule_timeout(timeout);
3835 atomic_dec(&rq->nr_iowait);
3836 return ret;
3837 }
3838
3839 /**
3840 * sys_sched_get_priority_max - return maximum RT priority.
3841 * @policy: scheduling class.
3842 *
3843 * this syscall returns the maximum rt_priority that can be used
3844 * by a given scheduling class.
3845 */
3846 asmlinkage long sys_sched_get_priority_max(int policy)
3847 {
3848 int ret = -EINVAL;
3849
3850 switch (policy) {
3851 case SCHED_FIFO:
3852 case SCHED_RR:
3853 ret = MAX_USER_RT_PRIO-1;
3854 break;
3855 case SCHED_NORMAL:
3856 ret = 0;
3857 break;
3858 }
3859 return ret;
3860 }
3861
3862 /**
3863 * sys_sched_get_priority_min - return minimum RT priority.
3864 * @policy: scheduling class.
3865 *
3866 * this syscall returns the minimum rt_priority that can be used
3867 * by a given scheduling class.
3868 */
3869 asmlinkage long sys_sched_get_priority_min(int policy)
3870 {
3871 int ret = -EINVAL;
3872
3873 switch (policy) {
3874 case SCHED_FIFO:
3875 case SCHED_RR:
3876 ret = 1;
3877 break;
3878 case SCHED_NORMAL:
3879 ret = 0;
3880 }
3881 return ret;
3882 }
3883
3884 /**
3885 * sys_sched_rr_get_interval - return the default timeslice of a process.
3886 * @pid: pid of the process.
3887 * @interval: userspace pointer to the timeslice value.
3888 *
3889 * this syscall writes the default timeslice value of a given process
3890 * into the user-space timespec buffer. A value of '0' means infinity.
3891 */
3892 asmlinkage
3893 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
3894 {
3895 int retval = -EINVAL;
3896 struct timespec t;
3897 task_t *p;
3898
3899 if (pid < 0)
3900 goto out_nounlock;
3901
3902 retval = -ESRCH;
3903 read_lock(&tasklist_lock);
3904 p = find_process_by_pid(pid);
3905 if (!p)
3906 goto out_unlock;
3907
3908 retval = security_task_getscheduler(p);
3909 if (retval)
3910 goto out_unlock;
3911
3912 jiffies_to_timespec(p->policy & SCHED_FIFO ?
3913 0 : task_timeslice(p), &t);
3914 read_unlock(&tasklist_lock);
3915 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3916 out_nounlock:
3917 return retval;
3918 out_unlock:
3919 read_unlock(&tasklist_lock);
3920 return retval;
3921 }
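
/*
 * Userspace view of the syscall above, as a sketch using the POSIX
 * wrapper:
 *
 *	#include <sched.h>
 *	#include <time.h>
 *
 *	struct timespec ts;
 *
 *	sched_rr_get_interval(0, &ts);
 *
 * For a SCHED_RR task, ts then holds its round-robin timeslice; for a
 * SCHED_FIFO task the kernel reports 0, i.e. an unlimited slice.
 */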
3922
3923 static inline struct task_struct *eldest_child(struct task_struct *p)
3924 {
3925 if (list_empty(&p->children)) return NULL;
3926 return list_entry(p->children.next,struct task_struct,sibling);
3927 }
3928
3929 static inline struct task_struct *older_sibling(struct task_struct *p)
3930 {
3931 if (p->sibling.prev==&p->parent->children) return NULL;
3932 return list_entry(p->sibling.prev,struct task_struct,sibling);
3933 }
3934
3935 static inline struct task_struct *younger_sibling(struct task_struct *p)
3936 {
3937 if (p->sibling.next==&p->parent->children) return NULL;
3938 return list_entry(p->sibling.next,struct task_struct,sibling);
3939 }
3940
3941 static void show_task(task_t * p)
3942 {
3943 task_t *relative;
3944 unsigned state;
3945 unsigned long free = 0;
3946 static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
3947
3948 printk("%-13.13s ", p->comm);
3949 state = p->state ? __ffs(p->state) + 1 : 0;
3950 if (state < ARRAY_SIZE(stat_nam))
3951 printk(stat_nam[state]);
3952 else
3953 printk("?");
3954 #if (BITS_PER_LONG == 32)
3955 if (state == TASK_RUNNING)
3956 printk(" running ");
3957 else
3958 printk(" %08lX ", thread_saved_pc(p));
3959 #else
3960 if (state == TASK_RUNNING)
3961 printk(" running task ");
3962 else
3963 printk(" %016lx ", thread_saved_pc(p));
3964 #endif
3965 #ifdef CONFIG_DEBUG_STACK_USAGE
3966 {
3967 unsigned long * n = (unsigned long *) (p->thread_info+1);
3968 while (!*n)
3969 n++;
3970 free = (unsigned long) n - (unsigned long)(p->thread_info+1);
3971 }
3972 #endif
3973 printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
3974 if ((relative = eldest_child(p)))
3975 printk("%5d ", relative->pid);
3976 else
3977 printk(" ");
3978 if ((relative = younger_sibling(p)))
3979 printk("%7d", relative->pid);
3980 else
3981 printk(" ");
3982 if ((relative = older_sibling(p)))
3983 printk(" %5d", relative->pid);
3984 else
3985 printk(" ");
3986 if (!p->mm)
3987 printk(" (L-TLB)\n");
3988 else
3989 printk(" (NOTLB)\n");
3990
3991 if (state != TASK_RUNNING)
3992 show_stack(p, NULL);
3993 }
3994
3995 void show_state(void)
3996 {
3997 task_t *g, *p;
3998
3999 #if (BITS_PER_LONG == 32)
4000 printk("\n"
4001 " sibling\n");
4002 printk(" task PC pid father child younger older\n");
4003 #else
4004 printk("\n"
4005 " sibling\n");
4006 printk(" task PC pid father child younger older\n");
4007 #endif
4008 read_lock(&tasklist_lock);
4009 do_each_thread(g, p) {
4010 /*
4011 * reset the NMI-timeout; listing all tasks on a slow
4012 * console might take a lot of time:
4013 */
4014 touch_nmi_watchdog();
4015 show_task(p);
4016 } while_each_thread(g, p);
4017
4018 read_unlock(&tasklist_lock);
4019 }
4020
4021 void __devinit init_idle(task_t *idle, int cpu)
4022 {
4023 runqueue_t *rq = cpu_rq(cpu);
4024 unsigned long flags;
4025
4026 idle->sleep_avg = 0;
4027 idle->array = NULL;
4028 idle->prio = MAX_PRIO;
4029 idle->state = TASK_RUNNING;
4030 idle->cpus_allowed = cpumask_of_cpu(cpu);
4031 set_task_cpu(idle, cpu);
4032
4033 spin_lock_irqsave(&rq->lock, flags);
4034 rq->curr = rq->idle = idle;
4035 set_tsk_need_resched(idle);
4036 spin_unlock_irqrestore(&rq->lock, flags);
4037
4038 /* Set the preempt count _outside_ the spinlocks! */
4039 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
4040 idle->thread_info->preempt_count = (idle->lock_depth >= 0);
4041 #else
4042 idle->thread_info->preempt_count = 0;
4043 #endif
4044 }
4045
4046 /*
4047 * On systems that switch off the HZ timer, nohz_cpu_mask
4048 * indicates which cpus entered this state. This is used by the
4049 * RCU update code to wait only for active cpus. On systems
4050 * that do not switch off the HZ timer, nohz_cpu_mask should
4051 * always be CPU_MASK_NONE.
4052 */
4053 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
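/*
 * Hedged sketch of how an architecture's tickless idle loop might use
 * this mask (the arch helpers named here are hypothetical; the exact
 * hooks vary by architecture):
 *
 *	cpu_set(cpu, nohz_cpu_mask);	(about to stop the HZ tick)
 *	arch_stop_hz_timer();
 *	... sleep until the next event ...
 *	arch_restart_hz_timer();
 *	cpu_clear(cpu, nohz_cpu_mask);	(tick is running again)
 */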
4054
4055 #ifdef CONFIG_SMP
4056 /*
4057 * This is how migration works:
4058 *
4059 * 1) we queue a migration_req_t structure in the source CPU's
4060 * runqueue and wake up that CPU's migration thread.
4061 * 2) we wait on the request's completion => our thread blocks.
4062 * 3) migration thread wakes up (implicitly it forces the migrated
4063 * thread off the CPU)
4064 * 4) it gets the migration request and checks whether the migrated
4065 * task is still in the wrong runqueue.
4066 * 5) if it's in the wrong runqueue then the migration thread removes
4067 * it and puts it into the right queue.
4068 * 6) the migration thread completes the request.
4069 * 7) we wake up and the migration is done.
4070 */
4071
4072 /*
4073 * Change a given task's CPU affinity. Migrate the thread to a
4074 * proper CPU and schedule it away if the CPU it's executing on
4075 * is removed from the allowed bitmask.
4076 *
4077 * NOTE: the caller must have a valid reference to the task; the
4078 * task must not exit() & deallocate itself prematurely. The
4079 * call is not atomic; no spinlocks may be held.
4080 */
4081 int set_cpus_allowed(task_t *p, cpumask_t new_mask)
4082 {
4083 unsigned long flags;
4084 int ret = 0;
4085 migration_req_t req;
4086 runqueue_t *rq;
4087
4088 rq = task_rq_lock(p, &flags);
4089 if (!cpus_intersects(new_mask, cpu_online_map)) {
4090 ret = -EINVAL;
4091 goto out;
4092 }
4093
4094 p->cpus_allowed = new_mask;
4095 /* Can the task run on the task's current CPU? If so, we're done */
4096 if (cpu_isset(task_cpu(p), new_mask))
4097 goto out;
4098
4099 if (migrate_task(p, any_online_cpu(new_mask), &req)) {
4100 /* Need help from migration thread: drop lock and wait. */
4101 task_rq_unlock(rq, &flags);
4102 wake_up_process(rq->migration_thread);
4103 wait_for_completion(&req.done);
4104 tlb_migrate_finish(p->mm);
4105 return 0;
4106 }
4107 out:
4108 task_rq_unlock(rq, &flags);
4109 return ret;
4110 }
4111
4112 EXPORT_SYMBOL_GPL(set_cpus_allowed);
4113
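/*
 * Hedged usage sketch of set_cpus_allowed() above: pinning a kernel
 * thread to CPU 2 ("my_thread" is hypothetical). The caller must hold
 * a reference to the task and may not hold any spinlocks:
 *
 *	struct task_struct *p = my_thread;
 *
 *	if (set_cpus_allowed(p, cpumask_of_cpu(2)) < 0)
 *		printk(KERN_WARNING "could not pin thread to CPU2\n");
 */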
4114 /*
4115 * Move a task (which must not be current) off this cpu, onto the dest
4116 * cpu. We do this either because the task can no longer run here
4117 * (set_cpus_allowed() moved it away, or its CPU is going down), or
4118 * because we are rebalancing the task on exec (sched_exec).
4119 *
4120 * So we race with normal scheduler movements, but that's OK, as long
4121 * as the task is no longer on this CPU.
4122 */
4123 static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4124 {
4125 runqueue_t *rq_dest, *rq_src;
4126
4127 if (unlikely(cpu_is_offline(dest_cpu)))
4128 return;
4129
4130 rq_src = cpu_rq(src_cpu);
4131 rq_dest = cpu_rq(dest_cpu);
4132
4133 double_rq_lock(rq_src, rq_dest);
4134 /* Already moved. */
4135 if (task_cpu(p) != src_cpu)
4136 goto out;
4137 /* Affinity changed (again). */
4138 if (!cpu_isset(dest_cpu, p->cpus_allowed))
4139 goto out;
4140
4141 set_task_cpu(p, dest_cpu);
4142 if (p->array) {
4143 /*
4144 * Sync timestamp with rq_dest's before activating.
4145 * The same thing could be achieved by doing this step
4146 * afterwards, and pretending it was a local activate.
4147 * This way is cleaner and logically correct.
4148 */
4149 p->timestamp = p->timestamp - rq_src->timestamp_last_tick
4150 + rq_dest->timestamp_last_tick;
4151 deactivate_task(p, rq_src);
4152 activate_task(p, rq_dest, 0);
4153 if (TASK_PREEMPTS_CURR(p, rq_dest))
4154 resched_task(rq_dest->curr);
4155 }
4156
4157 out:
4158 double_rq_unlock(rq_src, rq_dest);
4159 }
4160
4161 /*
4162 * migration_thread - this is a highprio system thread that performs
4163 * thread migration by bumping a thread off its CPU and then 'pushing'
4164 * it onto another runqueue.
4165 */
4166 static int migration_thread(void * data)
4167 {
4168 runqueue_t *rq;
4169 int cpu = (long)data;
4170
4171 rq = cpu_rq(cpu);
4172 BUG_ON(rq->migration_thread != current);
4173
4174 set_current_state(TASK_INTERRUPTIBLE);
4175 while (!kthread_should_stop()) {
4176 struct list_head *head;
4177 migration_req_t *req;
4178
4179 if (current->flags & PF_FREEZE)
4180 refrigerator(PF_FREEZE);
4181
4182 spin_lock_irq(&rq->lock);
4183
4184 if (cpu_is_offline(cpu)) {
4185 spin_unlock_irq(&rq->lock);
4186 goto wait_to_die;
4187 }
4188
4189 if (rq->active_balance) {
4190 active_load_balance(rq, cpu);
4191 rq->active_balance = 0;
4192 }
4193
4194 head = &rq->migration_queue;
4195
4196 if (list_empty(head)) {
4197 spin_unlock_irq(&rq->lock);
4198 schedule();
4199 set_current_state(TASK_INTERRUPTIBLE);
4200 continue;
4201 }
4202 req = list_entry(head->next, migration_req_t, list);
4203 list_del_init(head->next);
4204
4205 if (req->type == REQ_MOVE_TASK) {
4206 spin_unlock(&rq->lock);
4207 __migrate_task(req->task, cpu, req->dest_cpu);
4208 local_irq_enable();
4209 } else if (req->type == REQ_SET_DOMAIN) {
4210 rq->sd = req->sd;
4211 spin_unlock_irq(&rq->lock);
4212 } else {
4213 spin_unlock_irq(&rq->lock);
4214 WARN_ON(1);
4215 }
4216
4217 complete(&req->done);
4218 }
4219 __set_current_state(TASK_RUNNING);
4220 return 0;
4221
4222 wait_to_die:
4223 /* Wait for kthread_stop */
4224 set_current_state(TASK_INTERRUPTIBLE);
4225 while (!kthread_should_stop()) {
4226 schedule();
4227 set_current_state(TASK_INTERRUPTIBLE);
4228 }
4229 __set_current_state(TASK_RUNNING);
4230 return 0;
4231 }
4232
4233 #ifdef CONFIG_HOTPLUG_CPU
4234 /* Figure out where a task on the dead CPU should go; use force if necessary. */
4235 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
4236 {
4237 int dest_cpu;
4238 cpumask_t mask;
4239
4240 /* On same node? */
4241 mask = node_to_cpumask(cpu_to_node(dead_cpu));
4242 cpus_and(mask, mask, tsk->cpus_allowed);
4243 dest_cpu = any_online_cpu(mask);
4244
4245 /* On any allowed CPU? */
4246 if (dest_cpu == NR_CPUS)
4247 dest_cpu = any_online_cpu(tsk->cpus_allowed);
4248
4249 /* No more Mr. Nice Guy. */
4250 if (dest_cpu == NR_CPUS) {
4251 cpus_setall(tsk->cpus_allowed);
4252 dest_cpu = any_online_cpu(tsk->cpus_allowed);
4253
4254 /*
4255 * Don't tell them about moving exiting tasks or
4256 * kernel threads (both mm NULL), since they never
4257 * leave kernel.
4258 */
4259 if (tsk->mm && printk_ratelimit())
4260 printk(KERN_INFO "process %d (%s) no "
4261 "longer affine to cpu%d\n",
4262 tsk->pid, tsk->comm, dead_cpu);
4263 }
4264 __migrate_task(tsk, dead_cpu, dest_cpu);
4265 }
4266
4267 /*
4268 * While a dead CPU has no uninterruptible tasks queued at this point,
4269 * it might still have a nonzero ->nr_uninterruptible counter, because
4270 * for performance reasons the counter is not strictly tracking tasks to
4271 * their home CPUs. So we just add the counter to another CPU's counter,
4272 * to keep the global sum constant after CPU-down:
4273 */
4274 static void migrate_nr_uninterruptible(runqueue_t *rq_src)
4275 {
4276 runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
4277 unsigned long flags;
4278
4279 local_irq_save(flags);
4280 double_rq_lock(rq_src, rq_dest);
4281 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
4282 rq_src->nr_uninterruptible = 0;
4283 double_rq_unlock(rq_src, rq_dest);
4284 local_irq_restore(flags);
4285 }
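/*
 * Worked example: if the dead CPU's runqueue shows nr_uninterruptible
 * == 3 and the chosen online runqueue shows 1, the transfer above
 * leaves them at 0 and 4 respectively, so the system-wide sum reported
 * by nr_uninterruptible() stays at 4.
 */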
4286
4287 /* Run through task list and migrate tasks from the dead cpu. */
4288 static void migrate_live_tasks(int src_cpu)
4289 {
4290 struct task_struct *tsk, *t;
4291
4292 write_lock_irq(&tasklist_lock);
4293
4294 do_each_thread(t, tsk) {
4295 if (tsk == current)
4296 continue;
4297
4298 if (task_cpu(tsk) == src_cpu)
4299 move_task_off_dead_cpu(src_cpu, tsk);
4300 } while_each_thread(t, tsk);
4301
4302 write_unlock_irq(&tasklist_lock);
4303 }
4304
4305 /* Schedules the idle task to be the next runnable task on the current
4306 * CPU. It does so by boosting its priority to the highest possible and
4307 * adding it to the _front_ of the runqueue. Used by the CPU offline code.
4308 */
4309 void sched_idle_next(void)
4310 {
4311 int cpu = smp_processor_id();
4312 runqueue_t *rq = this_rq();
4313 struct task_struct *p = rq->idle;
4314 unsigned long flags;
4315
4316 /* cpu has to be offline */
4317 BUG_ON(cpu_online(cpu));
4318
4319 /* Strictly not necessary, since the rest of the CPUs are stopped by
4320 * now and interrupts are disabled on the current cpu.
4321 */
4322 spin_lock_irqsave(&rq->lock, flags);
4323
4324 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
4325 /* Add idle task to the _front_ of its priority queue */
4326 __activate_idle_task(p, rq);
4327
4328 spin_unlock_irqrestore(&rq->lock, flags);
4329 }
4330
4331 /* Ensures that the idle task is using init_mm right before its cpu goes
4332 * offline.
4333 */
4334 void idle_task_exit(void)
4335 {
4336 struct mm_struct *mm = current->active_mm;
4337
4338 BUG_ON(cpu_online(smp_processor_id()));
4339
4340 if (mm != &init_mm)
4341 switch_mm(mm, &init_mm, current);
4342 mmdrop(mm);
4343 }
4344
4345 static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
4346 {
4347 struct runqueue *rq = cpu_rq(dead_cpu);
4348
4349 /* Must be exiting, otherwise would be on tasklist. */
4350 BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD);
4351
4352 /* Cannot have done final schedule yet: would have vanished. */
4353 BUG_ON(tsk->flags & PF_DEAD);
4354
4355 get_task_struct(tsk);
4356
4357 /*
4358 * Drop lock around migration; if someone else moves it,
4359 * that's OK. No task can be added to this CPU, so iteration is
4360 * fine.
4361 */
4362 spin_unlock_irq(&rq->lock);
4363 move_task_off_dead_cpu(dead_cpu, tsk);
4364 spin_lock_irq(&rq->lock);
4365
4366 put_task_struct(tsk);
4367 }
4368
4369 /* release_task() removes task from tasklist, so we won't find dead tasks. */
4370 static void migrate_dead_tasks(unsigned int dead_cpu)
4371 {
4372 unsigned arr, i;
4373 struct runqueue *rq = cpu_rq(dead_cpu);
4374
4375 for (arr = 0; arr < 2; arr++) {
4376 for (i = 0; i < MAX_PRIO; i++) {
4377 struct list_head *list = &rq->arrays[arr].queue[i];
4378 while (!list_empty(list))
4379 migrate_dead(dead_cpu,
4380 list_entry(list->next, task_t,
4381 run_list));
4382 }
4383 }
4384 }
4385 #endif /* CONFIG_HOTPLUG_CPU */
4386
4387 /*
4388 * migration_call - callback that gets triggered on CPU hotplug events.
4389 * Here we can start up the necessary migration thread for the new CPU.
4390 */
4391 static int migration_call(struct notifier_block *nfb, unsigned long action,
4392 void *hcpu)
4393 {
4394 int cpu = (long)hcpu;
4395 struct task_struct *p;
4396 struct runqueue *rq;
4397 unsigned long flags;
4398
4399 switch (action) {
4400 case CPU_UP_PREPARE:
4401 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
4402 if (IS_ERR(p))
4403 return NOTIFY_BAD;
4404 p->flags |= PF_NOFREEZE;
4405 kthread_bind(p, cpu);
4406 /* Must be high prio: stop_machine expects to yield to it. */
4407 rq = task_rq_lock(p, &flags);
4408 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
4409 task_rq_unlock(rq, &flags);
4410 cpu_rq(cpu)->migration_thread = p;
4411 break;
4412 case CPU_ONLINE:
4413 /* Strictly unnecessary, as first user will wake it. */
4414 wake_up_process(cpu_rq(cpu)->migration_thread);
4415 break;
4416 #ifdef CONFIG_HOTPLUG_CPU
4417 case CPU_UP_CANCELED:
4418 /* Unbind it from offline cpu so it can run. Fall thru. */
4419 kthread_bind(cpu_rq(cpu)->migration_thread, smp_processor_id());
4420 kthread_stop(cpu_rq(cpu)->migration_thread);
4421 cpu_rq(cpu)->migration_thread = NULL;
4422 break;
4423 case CPU_DEAD:
4424 migrate_live_tasks(cpu);
4425 rq = cpu_rq(cpu);
4426 kthread_stop(rq->migration_thread);
4427 rq->migration_thread = NULL;
4428 /* Idle task back to normal (off runqueue, low prio) */
4429 rq = task_rq_lock(rq->idle, &flags);
4430 deactivate_task(rq->idle, rq);
4431 rq->idle->static_prio = MAX_PRIO;
4432 __setscheduler(rq->idle, SCHED_NORMAL, 0);
4433 migrate_dead_tasks(cpu);
4434 task_rq_unlock(rq, &flags);
4435 migrate_nr_uninterruptible(rq);
4436 BUG_ON(rq->nr_running != 0);
4437
4438 /* No need to migrate the tasks: callers that did not take
4439 * lock_cpu_hotplug() only get best-effort migration. Just
4440 * wake up the requestors. */
4441 spin_lock_irq(&rq->lock);
4442 while (!list_empty(&rq->migration_queue)) {
4443 migration_req_t *req;
4444 req = list_entry(rq->migration_queue.next,
4445 migration_req_t, list);
4446 BUG_ON(req->type != REQ_MOVE_TASK);
4447 list_del_init(&req->list);
4448 complete(&req->done);
4449 }
4450 spin_unlock_irq(&rq->lock);
4451 break;
4452 #endif
4453 }
4454 return NOTIFY_OK;
4455 }
4456
4457 /* Register at highest priority so that task migration (migrate_live_tasks)
4458 * happens before everything else.
4459 */
4460 static struct notifier_block __devinitdata migration_notifier = {
4461 .notifier_call = migration_call,
4462 .priority = 10
4463 };
4464
4465 int __init migration_init(void)
4466 {
4467 void *cpu = (void *)(long)smp_processor_id();
4468 /* Start one for boot CPU. */
4469 migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4470 migration_call(&migration_notifier, CPU_ONLINE, cpu);
4471 register_cpu_notifier(&migration_notifier);
4472 return 0;
4473 }
4474 #endif
4475
4476 #ifdef CONFIG_SMP
4477 #define SCHED_DOMAIN_DEBUG
4478 #ifdef SCHED_DOMAIN_DEBUG
4479 static void sched_domain_debug(struct sched_domain *sd, int cpu)
4480 {
4481 int level = 0;
4482
4483 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
4484
4485 do {
4486 int i;
4487 char str[NR_CPUS];
4488 struct sched_group *group = sd->groups;
4489 cpumask_t groupmask;
4490
4491 cpumask_scnprintf(str, NR_CPUS, sd->span);
4492 cpus_clear(groupmask);
4493
4494 printk(KERN_DEBUG);
4495 for (i = 0; i < level + 1; i++)
4496 printk(" ");
4497 printk("domain %d: ", level);
4498
4499 if (!(sd->flags & SD_LOAD_BALANCE)) {
4500 printk("does not load-balance\n");
4501 if (sd->parent)
4502 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent\n");
4503 break;
4504 }
4505
4506 printk("span %s\n", str);
4507
4508 if (!cpu_isset(cpu, sd->span))
4509 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
4510 if (!cpu_isset(cpu, group->cpumask))
4511 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
4512
4513 printk(KERN_DEBUG);
4514 for (i = 0; i < level + 2; i++)
4515 printk(" ");
4516 printk("groups:");
4517 do {
4518 if (!group) {
4519 printk("\n");
4520 printk(KERN_ERR "ERROR: group is NULL\n");
4521 break;
4522 }
4523
4524 if (!group->cpu_power) {
4525 printk("\n");
4526 printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
4527 }
4528
4529 if (!cpus_weight(group->cpumask)) {
4530 printk("\n");
4531 printk(KERN_ERR "ERROR: empty group\n");
4532 }
4533
4534 if (cpus_intersects(groupmask, group->cpumask)) {
4535 printk("\n");
4536 printk(KERN_ERR "ERROR: repeated CPUs\n");
4537 }
4538
4539 cpus_or(groupmask, groupmask, group->cpumask);
4540
4541 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
4542 printk(" %s", str);
4543
4544 group = group->next;
4545 } while (group != sd->groups);
4546 printk("\n");
4547
4548 if (!cpus_equal(sd->span, groupmask))
4549 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
4550
4551 level++;
4552 sd = sd->parent;
4553
4554 if (sd) {
4555 if (!cpus_subset(groupmask, sd->span))
4556 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
4557 }
4558
4559 } while (sd);
4560 }
4561 #else
4562 #define sched_domain_debug(sd, cpu) do { } while (0)
4563 #endif
4564
4565 /*
4566 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
4567 * hold the hotplug lock.
4568 */
4569 void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
4570 {
4571 migration_req_t req;
4572 unsigned long flags;
4573 runqueue_t *rq = cpu_rq(cpu);
4574 int local = 1;
4575
4576 sched_domain_debug(sd, cpu);
4577
4578 spin_lock_irqsave(&rq->lock, flags);
4579
4580 if (cpu == smp_processor_id() || !cpu_online(cpu)) {
4581 rq->sd = sd;
4582 } else {
4583 init_completion(&req.done);
4584 req.type = REQ_SET_DOMAIN;
4585 req.sd = sd;
4586 list_add(&req.list, &rq->migration_queue);
4587 local = 0;
4588 }
4589
4590 spin_unlock_irqrestore(&rq->lock, flags);
4591
4592 if (!local) {
4593 wake_up_process(rq->migration_thread);
4594 wait_for_completion(&req.done);
4595 }
4596 }
4597
4598 /* cpus with isolated domains */
4599 cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
4600
4601 /* Set up the mask of cpus configured for isolated domains */
4602 static int __init isolated_cpu_setup(char *str)
4603 {
4604 int ints[NR_CPUS], i;
4605
4606 str = get_options(str, ARRAY_SIZE(ints), ints);
4607 cpus_clear(cpu_isolated_map);
4608 for (i = 1; i <= ints[0]; i++)
4609 if (ints[i] < NR_CPUS)
4610 cpu_set(ints[i], cpu_isolated_map);
4611 return 1;
4612 }
4613
4614 __setup ("isolcpus=", isolated_cpu_setup);
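/*
 * Usage example: booting with "isolcpus=2,3" keeps CPUs 2 and 3 out of
 * the default sched domains, so only tasks explicitly bound to them
 * (e.g. via sched_setaffinity) will run there.
 */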
4615
4616 /*
4617 * init_sched_build_groups takes an array of groups, the cpumask we wish
4618 * to span, and a pointer to a function which identifies what group a CPU
4619 * belongs to. The return value of group_fn must be a valid index into the
4620 * groups[] array, and must be >= 0 and < NR_CPUS (because we keep
4621 * track of the groups covered with a cpumask_t).
4622 *
4623 * init_sched_build_groups will build a circular linked list of the groups
4624 * covered by the given span, and will set each group's ->cpumask correctly,
4625 * and ->cpu_power to 0.
4626 */
4627 void __devinit init_sched_build_groups(struct sched_group groups[],
4628 cpumask_t span, int (*group_fn)(int cpu))
4629 {
4630 struct sched_group *first = NULL, *last = NULL;
4631 cpumask_t covered = CPU_MASK_NONE;
4632 int i;
4633
4634 for_each_cpu_mask(i, span) {
4635 int group = group_fn(i);
4636 struct sched_group *sg = &groups[group];
4637 int j;
4638
4639 if (cpu_isset(i, covered))
4640 continue;
4641
4642 sg->cpumask = CPU_MASK_NONE;
4643 sg->cpu_power = 0;
4644
4645 for_each_cpu_mask(j, span) {
4646 if (group_fn(j) != group)
4647 continue;
4648
4649 cpu_set(j, covered);
4650 cpu_set(j, sg->cpumask);
4651 }
4652 if (!first)
4653 first = sg;
4654 if (last)
4655 last->next = sg;
4656 last = sg;
4657 }
4658 last->next = first;
4659 }
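/*
 * Hedged usage sketch of init_sched_build_groups() above, building one
 * group per CPU with a trivial identity group_fn ("my_groups" and
 * "my_group_fn" are hypothetical):
 *
 *	static struct sched_group my_groups[NR_CPUS];
 *
 *	static int my_group_fn(int cpu)
 *	{
 *		return cpu;
 *	}
 *
 *	init_sched_build_groups(my_groups, cpu_online_map, &my_group_fn);
 *
 * Afterwards each my_groups[i] spans exactly CPU i, the groups form a
 * circular list via ->next, and every ->cpu_power is 0 (callers are
 * expected to set it up, as arch_init_sched_domains() does below).
 */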
4660
4661
4662 #ifdef ARCH_HAS_SCHED_DOMAIN
4663 extern void __devinit arch_init_sched_domains(void);
4664 extern void __devinit arch_destroy_sched_domains(void);
4665 #else
4666 #ifdef CONFIG_SCHED_SMT
4667 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
4668 static struct sched_group sched_group_cpus[NR_CPUS];
4669 static int __devinit cpu_to_cpu_group(int cpu)
4670 {
4671 return cpu;
4672 }
4673 #endif
4674
4675 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
4676 static struct sched_group sched_group_phys[NR_CPUS];
4677 static int __devinit cpu_to_phys_group(int cpu)
4678 {
4679 #ifdef CONFIG_SCHED_SMT
4680 return first_cpu(cpu_sibling_map[cpu]);
4681 #else
4682 return cpu;
4683 #endif
4684 }
4685
4686 #ifdef CONFIG_NUMA
4687
4688 static DEFINE_PER_CPU(struct sched_domain, node_domains);
4689 static struct sched_group sched_group_nodes[MAX_NUMNODES];
4690 static int __devinit cpu_to_node_group(int cpu)
4691 {
4692 return cpu_to_node(cpu);
4693 }
4694 #endif
4695
4696 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4697 /*
4698 * The domains setup code relies on siblings not spanning
4699 * multiple nodes. Make sure the architecture has a proper
4700 * siblings map:
4701 */
4702 static void check_sibling_maps(void)
4703 {
4704 int i, j;
4705
4706 for_each_online_cpu(i) {
4707 for_each_cpu_mask(j, cpu_sibling_map[i]) {
4708 if (cpu_to_node(i) != cpu_to_node(j)) {
4709 printk(KERN_INFO "warning: CPU %d siblings map "
4710 "to different node - isolating "
4711 "them.\n", i);
4712 cpu_sibling_map[i] = cpumask_of_cpu(i);
4713 break;
4714 }
4715 }
4716 }
4717 }
4718 #endif
4719
4720 /*
4721 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
4722 */
4723 static void __devinit arch_init_sched_domains(void)
4724 {
4725 int i;
4726 cpumask_t cpu_default_map;
4727
4728 #if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4729 check_sibling_maps();
4730 #endif
4731 /*
4732 * Set up a mask of cpus without special-case scheduling requirements.
4733 * For now this just excludes isolated cpus, but could be used to
4734 * exclude other special cases in the future.
4735 */
4736 cpus_complement(cpu_default_map, cpu_isolated_map);
4737 cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
4738
4739 /*
4740 * Set up domains. Isolated domains just stay on the dummy domain.
4741 */
4742 for_each_cpu_mask(i, cpu_default_map) {
4743 int group;
4744 struct sched_domain *sd = NULL, *p;
4745 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
4746
4747 cpus_and(nodemask, nodemask, cpu_default_map);
4748
4749 #ifdef CONFIG_NUMA
4750 sd = &per_cpu(node_domains, i);
4751 group = cpu_to_node_group(i);
4752 *sd = SD_NODE_INIT;
4753 sd->span = cpu_default_map;
4754 sd->groups = &sched_group_nodes[group];
4755 #endif
4756
4757 p = sd;
4758 sd = &per_cpu(phys_domains, i);
4759 group = cpu_to_phys_group(i);
4760 *sd = SD_CPU_INIT;
4761 sd->span = nodemask;
4762 sd->parent = p;
4763 sd->groups = &sched_group_phys[group];
4764
4765 #ifdef CONFIG_SCHED_SMT
4766 p = sd;
4767 sd = &per_cpu(cpu_domains, i);
4768 group = cpu_to_cpu_group(i);
4769 *sd = SD_SIBLING_INIT;
4770 sd->span = cpu_sibling_map[i];
4771 cpus_and(sd->span, sd->span, cpu_default_map);
4772 sd->parent = p;
4773 sd->groups = &sched_group_cpus[group];
4774 #endif
4775 }
4776
4777 #ifdef CONFIG_SCHED_SMT
4778 /* Set up CPU (sibling) groups */
4779 for_each_online_cpu(i) {
4780 cpumask_t this_sibling_map = cpu_sibling_map[i];
4781 cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
4782 if (i != first_cpu(this_sibling_map))
4783 continue;
4784
4785 init_sched_build_groups(sched_group_cpus, this_sibling_map,
4786 &cpu_to_cpu_group);
4787 }
4788 #endif
4789
4790 /* Set up physical groups */
4791 for (i = 0; i < MAX_NUMNODES; i++) {
4792 cpumask_t nodemask = node_to_cpumask(i);
4793
4794 cpus_and(nodemask, nodemask, cpu_default_map);
4795 if (cpus_empty(nodemask))
4796 continue;
4797
4798 init_sched_build_groups(sched_group_phys, nodemask,
4799 &cpu_to_phys_group);
4800 }
4801
4802 #ifdef CONFIG_NUMA
4803 /* Set up node groups */
4804 init_sched_build_groups(sched_group_nodes, cpu_default_map,
4805 &cpu_to_node_group);
4806 #endif
4807
4808 /* Calculate CPU power for physical packages and nodes */
4809 for_each_cpu_mask(i, cpu_default_map) {
4810 int power;
4811 struct sched_domain *sd;
4812 #ifdef CONFIG_SCHED_SMT
4813 sd = &per_cpu(cpu_domains, i);
4814 power = SCHED_LOAD_SCALE;
4815 sd->groups->cpu_power = power;
4816 #endif
4817
4818 sd = &per_cpu(phys_domains, i);
4819 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
4820 (cpus_weight(sd->groups->cpumask)-1) / 10;
4821 sd->groups->cpu_power = power;
4822
4823 #ifdef CONFIG_NUMA
4824 if (i == first_cpu(sd->groups->cpumask)) {
4825 /* Only add "power" once for each physical package. */
4826 sd = &per_cpu(node_domains, i);
4827 sd->groups->cpu_power += power;
4828 }
4829 #endif
4830 }
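/*
 * Worked example, assuming SCHED_LOAD_SCALE == 128: a physical package
 * with two SMT siblings gets cpu_power = 128 + 128 * (2-1) / 10 = 140
 * (integer division), i.e. two siblings are rated only ~10% stronger
 * than a single full CPU, which discourages the balancer from treating
 * a sibling as a whole extra processor.
 */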
4831
4832 /* Attach the domains */
4833 for_each_online_cpu(i) {
4834 struct sched_domain *sd;
4835 #ifdef CONFIG_SCHED_SMT
4836 sd = &per_cpu(cpu_domains, i);
4837 #else
4838 sd = &per_cpu(phys_domains, i);
4839 #endif
4840 cpu_attach_domain(sd, i);
4841 }
4842 }
4843
4844 #ifdef CONFIG_HOTPLUG_CPU
4845 static void __devinit arch_destroy_sched_domains(void)
4846 {
4847 /* Do nothing: everything is statically allocated. */
4848 }
4849 #endif
4850
4851 #endif /* ARCH_HAS_SCHED_DOMAIN */
4852
4853 /*
4854 * Initial dummy domain for early boot and for hotplug cpu. Being static,
4855 * it is initialized to zero, so all balancing flags are cleared, which is
4856 * what we want.
4857 */
4858 static struct sched_domain sched_domain_dummy;
4859
4860 #ifdef CONFIG_HOTPLUG_CPU
4861 /*
4862 * Force a reinitialization of the sched domains hierarchy. The domains
4863 * and groups cannot be updated in place without racing with the balancing
4864 * code, so we temporarily attach all running cpus to a "dummy" domain
4865 * which will prevent rebalancing while the sched domains are recalculated.
4866 */
4867 static int update_sched_domains(struct notifier_block *nfb,
4868 unsigned long action, void *hcpu)
4869 {
4870 int i;
4871
4872 switch (action) {
4873 case CPU_UP_PREPARE:
4874 case CPU_DOWN_PREPARE:
4875 for_each_online_cpu(i)
4876 cpu_attach_domain(&sched_domain_dummy, i);
4877 arch_destroy_sched_domains();
4878 return NOTIFY_OK;
4879
4880 case CPU_UP_CANCELED:
4881 case CPU_DOWN_FAILED:
4882 case CPU_ONLINE:
4883 case CPU_DEAD:
4884 /*
4885 * Fall through and re-initialise the domains.
4886 */
4887 break;
4888 default:
4889 return NOTIFY_DONE;
4890 }
4891
4892 /* The hotplug lock is already held by cpu_up/cpu_down */
4893 arch_init_sched_domains();
4894
4895 return NOTIFY_OK;
4896 }
4897 #endif
4898
4899 void __init sched_init_smp(void)
4900 {
4901 lock_cpu_hotplug();
4902 arch_init_sched_domains();
4903 unlock_cpu_hotplug();
4904 /* XXX: Theoretical race here - CPU may be hotplugged now */
4905 hotcpu_notifier(update_sched_domains, 0);
4906 }
4907 #else
4908 void __init sched_init_smp(void)
4909 {
4910 }
4911 #endif /* CONFIG_SMP */
4912
4913 int in_sched_functions(unsigned long addr)
4914 {
4915 /* Linker adds these: start and end of __sched functions */
4916 extern char __sched_text_start[], __sched_text_end[];
4917 return in_lock_functions(addr) ||
4918 (addr >= (unsigned long)__sched_text_start
4919 && addr < (unsigned long)__sched_text_end);
4920 }
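/*
 * Hedged usage sketch: tagging a function __sched places it in the
 * .sched.text linker section, so in_sched_functions() above reports
 * true for its addresses and stack walkers such as get_wchan() can
 * skip scheduling internals ("my_wait_event" is hypothetical):
 *
 *	static int __sched my_wait_event(void)
 *	{
 *		schedule();
 *		return 0;
 *	}
 */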
4921
4922 void __init sched_init(void)
4923 {
4924 runqueue_t *rq;
4925 int i, j, k;
4926
4927 for (i = 0; i < NR_CPUS; i++) {
4928 prio_array_t *array;
4929
4930 rq = cpu_rq(i);
4931 spin_lock_init(&rq->lock);
4932 rq->nr_running = 0;
4933 rq->active = rq->arrays;
4934 rq->expired = rq->arrays + 1;
4935 rq->best_expired_prio = MAX_PRIO;
4936
4937 #ifdef CONFIG_SMP
4938 rq->sd = &sched_domain_dummy;
4939 for (j = 1; j < 3; j++)
4940 rq->cpu_load[j] = 0;
4941 rq->active_balance = 0;
4942 rq->push_cpu = 0;
4943 rq->migration_thread = NULL;
4944 INIT_LIST_HEAD(&rq->migration_queue);
4945 #endif
4946 atomic_set(&rq->nr_iowait, 0);
4947
4948 for (j = 0; j < 2; j++) {
4949 array = rq->arrays + j;
4950 for (k = 0; k < MAX_PRIO; k++) {
4951 INIT_LIST_HEAD(array->queue + k);
4952 __clear_bit(k, array->bitmap);
4953 }
4954 /* delimiter for bitsearch: an always-set bit at MAX_PRIO */
4955 __set_bit(MAX_PRIO, array->bitmap);
4956 }
4957 }
4958
4959 /*
4960 * The boot idle thread does lazy MMU switching as well:
4961 */
4962 atomic_inc(&init_mm.mm_count);
4963 enter_lazy_tlb(&init_mm, current);
4964
4965 /*
4966 * Make us the idle thread. Technically, schedule() should not be
4967 * called from this thread, however somewhere below it might be,
4968 * but because we are the idle thread, we just pick up running again
4969 * when this runqueue becomes "idle".
4970 */
4971 init_idle(current, smp_processor_id());
4972 }
4973
4974 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4975 void __might_sleep(char *file, int line)
4976 {
4977 #if defined(in_atomic)
4978 static unsigned long prev_jiffy; /* ratelimiting */
4979
4980 if ((in_atomic() || irqs_disabled()) &&
4981 system_state == SYSTEM_RUNNING && !oops_in_progress) {
4982 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
4983 return;
4984 prev_jiffy = jiffies;
4985 printk(KERN_ERR "Debug: sleeping function called from invalid"
4986 " context at %s:%d\n", file, line);
4987 printk("in_atomic():%d, irqs_disabled():%d\n",
4988 in_atomic(), irqs_disabled());
4989 dump_stack();
4990 }
4991 #endif
4992 }
4993 EXPORT_SYMBOL(__might_sleep);
4994 #endif
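/*
 * Hedged usage sketch ("my_buffered_write" is hypothetical): a function
 * that may block should call the might_sleep() macro, which invokes
 * __might_sleep(__FILE__, __LINE__) above when
 * CONFIG_DEBUG_SPINLOCK_SLEEP is enabled, so calls from atomic context
 * are flagged even on paths that happen not to sleep:
 *
 *	void my_buffered_write(void)
 *	{
 *		might_sleep();
 *		...
 *	}
 */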
4995
4996 #ifdef CONFIG_MAGIC_SYSRQ
4997 void normalize_rt_tasks(void)
4998 {
4999 struct task_struct *p;
5000 prio_array_t *array;
5001 unsigned long flags;
5002 runqueue_t *rq;
5003
5004 read_lock_irq(&tasklist_lock);
5005 for_each_process (p) {
5006 if (!rt_task(p))
5007 continue;
5008
5009 rq = task_rq_lock(p, &flags);
5010
5011 array = p->array;
5012 if (array)
5013 deactivate_task(p, task_rq(p));
5014 __setscheduler(p, SCHED_NORMAL, 0);
5015 if (array) {
5016 __activate_task(p, task_rq(p));
5017 resched_task(rq->curr);
5018 }
5019
5020 task_rq_unlock(rq, &flags);
5021 }
5022 read_unlock_irq(&tasklist_lock);
5023 }
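/*
 * Usage note: this is normally reached via the magic SysRq 'n' key
 * (e.g. "echo n > /proc/sysrq-trigger", assuming the sysrq table wires
 * that key to it), demoting all SCHED_FIFO/SCHED_RR tasks to
 * SCHED_NORMAL to recover from a runaway real-time task.
 */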
5024
5025 #endif /* CONFIG_MAGIC_SYSRQ */