/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4 34#include <linux/highmem.h>
1da177e4
LT
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
c59ede7b 37#include <linux/capability.h>
1da177e4
LT
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
9a11b49a 40#include <linux/debug_locks.h>
cdd6c482 41#include <linux/perf_event.h>
1da177e4
LT
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
7dfb7103 45#include <linux/freezer.h>
198e2f18 46#include <linux/vmalloc.h>
1da177e4
LT
47#include <linux/blkdev.h>
48#include <linux/delay.h>
b488893a 49#include <linux/pid_namespace.h>
1da177e4
LT
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
b5aadf7f 57#include <linux/proc_fs.h>
1da177e4 58#include <linux/seq_file.h>
e692ab53 59#include <linux/sysctl.h>
1da177e4
LT
60#include <linux/syscalls.h>
61#include <linux/times.h>
8f0ab514 62#include <linux/tsacct_kern.h>
c6fd91f0 63#include <linux/kprobes.h>
0ff92245 64#include <linux/delayacct.h>
dff06c15 65#include <linux/unistd.h>
f5ff8422 66#include <linux/pagemap.h>
8f4d37ec 67#include <linux/hrtimer.h>
30914a58 68#include <linux/tick.h>
f00b45c1
PZ
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
6cd8a4bb 71#include <linux/ftrace.h>
5a0e3ad6 72#include <linux/slab.h>
f1c6f1a7 73#include <linux/init_task.h>
1da177e4 74
5517d86b 75#include <asm/tlb.h>
838225b4 76#include <asm/irq_regs.h>
e6e6685a
GC
77#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h>
79#endif
1da177e4 80
029632fb 81#include "sched.h"
391e43da 82#include "../workqueue_sched.h"
6e0534f2 83
a8d154b0 84#define CREATE_TRACE_POINTS
ad8d75ff 85#include <trace/events/sched.h>
a8d154b0 86
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif
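
/*
 * Usage sketch (illustrative only, not part of the upstream file): the
 * feature bits above are exported through the "sched_features" debugfs file
 * created by sched_init_debug(), so -- assuming CONFIG_SCHED_DEBUG=y and
 * debugfs mounted at /sys/kernel/debug -- userspace can flip a bit at run
 * time roughly like:
 *
 *	int fd = open("/sys/kernel/debug/sched_features", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		// writing "NO_<NAME>" clears the bit, "<NAME>" sets it
 *		write(fd, "NO_TTWU_QUEUE", sizeof("NO_TTWU_QUEUE") - 1);
 *		close(fd);
 *	}
 *
 * TTWU_QUEUE is only an example; the valid names come from features.h.
 */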

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
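
/*
 * Worked example (illustrative): with the defaults above, sched_rt_period is
 * 1,000,000 us and sched_rt_runtime is 950,000 us, so realtime tasks may
 * consume at most 95% of every one-second period; the remaining 50 ms is
 * left for non-RT tasks. Both values are tunable at run time through the
 * kernel.sched_rt_period_us and kernel.sched_rt_runtime_us sysctls.
 */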

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();
	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

#else /* CONFIG_NO_HZ */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}
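
/*
 * Usage sketch (illustrative, with a made-up visitor name): callers walk the
 * whole hierarchy under RCU, handing in a "down" visitor and, when nothing
 * needs doing on the way back up, the tg_nop() stub below, e.g.:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 *
 * The walk aborts early as soon as a visitor returns non-zero.
 */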

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

void update_cpu_load(struct rq *this_rq);

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, flags);
}

/*
 * activate_task - move a task to the runqueue.
 */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

VP
716#ifdef CONFIG_IRQ_TIME_ACCOUNTING
717
305e6835
VP
718/*
719 * There are no locks covering percpu hardirq/softirq time.
720 * They are only modified in account_system_vtime, on corresponding CPU
721 * with interrupts disabled. So, writes are safe.
722 * They are read and saved off onto struct rq in update_rq_clock().
723 * This may result in other CPU reading this CPU's irq time and can
724 * race with irq/account_system_vtime on this CPU. We would either get old
8e92c201
PZ
725 * or new value with a side effect of accounting a slice of irq time to wrong
726 * task when irq is in progress while we read rq->clock. That is a worthy
727 * compromise in place of having locks on each irq in account_system_time.
305e6835 728 */
b52bfee4
VP
729static DEFINE_PER_CPU(u64, cpu_hardirq_time);
730static DEFINE_PER_CPU(u64, cpu_softirq_time);
731
732static DEFINE_PER_CPU(u64, irq_start_time);
733static int sched_clock_irqtime;
734
735void enable_sched_clock_irqtime(void)
736{
737 sched_clock_irqtime = 1;
738}
739
740void disable_sched_clock_irqtime(void)
741{
742 sched_clock_irqtime = 0;
743}
744
8e92c201
PZ
745#ifndef CONFIG_64BIT
746static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
747
748static inline void irq_time_write_begin(void)
749{
750 __this_cpu_inc(irq_time_seq.sequence);
751 smp_wmb();
752}
753
754static inline void irq_time_write_end(void)
755{
756 smp_wmb();
757 __this_cpu_inc(irq_time_seq.sequence);
758}
759
760static inline u64 irq_time_read(int cpu)
761{
762 u64 irq_time;
763 unsigned seq;
764
765 do {
766 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
767 irq_time = per_cpu(cpu_softirq_time, cpu) +
768 per_cpu(cpu_hardirq_time, cpu);
769 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
770
771 return irq_time;
772}
773#else /* CONFIG_64BIT */
774static inline void irq_time_write_begin(void)
775{
776}
777
778static inline void irq_time_write_end(void)
779{
780}
781
782static inline u64 irq_time_read(int cpu)
305e6835 783{
305e6835
VP
784 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
785}
8e92c201 786#endif /* CONFIG_64BIT */
305e6835 787
fe44d621
PZ
788/*
789 * Called before incrementing preempt_count on {soft,}irq_enter
790 * and before decrementing preempt_count on {soft,}irq_exit.
791 */
b52bfee4
VP
792void account_system_vtime(struct task_struct *curr)
793{
794 unsigned long flags;
fe44d621 795 s64 delta;
b52bfee4 796 int cpu;
b52bfee4
VP
797
798 if (!sched_clock_irqtime)
799 return;
800
801 local_irq_save(flags);
802
b52bfee4 803 cpu = smp_processor_id();
fe44d621
PZ
804 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
805 __this_cpu_add(irq_start_time, delta);
806
	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special
	 * task that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif
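
/*
 * Worked example (illustrative): with HZ=1000, TICK_NSEC is 1,000,000 ns, so
 * steal_ticks(2500000) takes the __iter_div_u64_rem() path and returns 2 --
 * two whole ticks of stolen time. The sub-tick remainder is picked up on a
 * later update, because the caller below only advances
 * rq->prev_steal_time_rq by whole ticks.
 */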

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_branch((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;

		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ]))
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ]))
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio)
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see set_task_rq().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
	}

	__set_task_cpu(p, new_cpu);
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int dest_cpu;
	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));

	/* Look for allowed, online CPU in same node. */
	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
			return dest_cpu;

	/* Any allowed, online CPU? */
	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
	if (dest_cpu < nr_cpu_ids)
		return dest_cpu;

	/* No more Mr. Nice Guy. */
	dest_cpu = cpuset_cpus_allowed_fallback(p);
	/*
	 * Don't tell them about moving exiting tasks or
	 * kernel threads (both mm NULL), since they never
	 * leave kernel.
	 */
	if (p->mm && printk_ratelimit()) {
		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
				task_pid_nr(p), p->comm, cpu);
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
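
/*
 * Worked example (illustrative): update_avg() keeps an exponential moving
 * average with a weight of 1/8, i.e. avg += (sample - avg) / 8. Starting
 * from *avg == 800, a sample of 1600 moves the average to 900, so a single
 * outlier only shifts the estimate modestly.
 */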
#endif

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	trace_sched_wakeup(p, true);
	check_preempt_curr(rq, p, wake_flags);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	if (rq->idle_stamp) {
		u64 delta = rq->clock - rq->idle_stamp;
		u64 max = 2*sysctl_sched_migration_cost;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue;
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, as the task
 * is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}

void scheduler_ipi(void)
{
	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_cpu) {
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;

}
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
PZ
1516static int
1517try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1518{
1da177e4 1519 unsigned long flags;
c05fbafb 1520 int cpu, success = 0;
2398f2c6 1521
04e2f174 1522 smp_wmb();
013fdb80 1523 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 1524 if (!(p->state & state))
1da177e4
LT
1525 goto out;
1526
c05fbafb 1527 success = 1; /* we're going to change ->state */
1da177e4 1528 cpu = task_cpu(p);
1da177e4 1529
c05fbafb
PZ
1530 if (p->on_rq && ttwu_remote(p, wake_flags))
1531 goto stat;
1da177e4 1532
1da177e4 1533#ifdef CONFIG_SMP
e9c84311 1534 /*
c05fbafb
PZ
1535 * If the owning (remote) cpu is still in the middle of schedule() with
1536 * this task as prev, wait until its done referencing the task.
e9c84311 1537 */
e4a52bcb
PZ
1538 while (p->on_cpu) {
1539#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1540 /*
d6aa8f85
PZ
1541 * In case the architecture enables interrupts in
1542 * context_switch(), we cannot busy wait, since that
1543 * would lead to deadlocks when an interrupt hits and
1544 * tries to wake up @prev. So bail and do a complete
1545 * remote wakeup.
e4a52bcb 1546 */
d6aa8f85 1547 if (ttwu_activate_remote(p, wake_flags))
c05fbafb 1548 goto stat;
d6aa8f85 1549#else
e4a52bcb 1550 cpu_relax();
d6aa8f85 1551#endif
371fd7e7 1552 }
0970d299 1553 /*
e4a52bcb 1554 * Pairs with the smp_wmb() in finish_lock_switch().
0970d299 1555 */
e4a52bcb 1556 smp_rmb();
1da177e4 1557
a8e4f2ea 1558 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 1559 p->state = TASK_WAKING;
e7693a36 1560
e4a52bcb 1561 if (p->sched_class->task_waking)
74f8e4b2 1562 p->sched_class->task_waking(p);
efbbd05a 1563
7608dec2 1564 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
1565 if (task_cpu(p) != cpu) {
1566 wake_flags |= WF_MIGRATED;
e4a52bcb 1567 set_task_cpu(p, cpu);
f339b9dc 1568 }
1da177e4 1569#endif /* CONFIG_SMP */
1da177e4 1570
c05fbafb
PZ
1571 ttwu_queue(p, cpu);
1572stat:
b84cb5df 1573 ttwu_stat(p, cpu, wake_flags);
1da177e4 1574out:
013fdb80 1575 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
1576
1577 return success;
1578}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	BUG_ON(rq != this_rq());
	BUG_ON(p == current);
	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	if (!p->on_rq)
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes. Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
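
/*
 * Usage sketch (illustrative, with made-up variable names): the classic
 * pattern built on wake_up_process() is a sleeper that sets its state
 * before testing the condition, paired with a waker that publishes the
 * condition before waking:
 *
 *	// sleeper
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!work_pending)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 *	// waker
 *	work_pending = true;
 *	wake_up_process(sleeper_task);
 *
 * try_to_wake_up() above only wakes tasks whose state matches the requested
 * mask, which is why the sleeper must change its state before the test.
 */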

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}

/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	set_task_cpu(p, cpu);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT_COUNT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif

	put_cpu();
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

4866cde0
NP
1834/**
1835 * prepare_task_switch - prepare to switch tasks
1836 * @rq: the runqueue preparing to switch
421cee29 1837 * @prev: the current task that is being switched out
4866cde0
NP
1838 * @next: the task we are going to switch to.
1839 *
1840 * This is called with the rq lock held and interrupts off. It must
1841 * be paired with a subsequent finish_task_switch after the context
1842 * switch.
1843 *
1844 * prepare_task_switch sets up locking and calls architecture specific
1845 * hooks.
1846 */
e107be36
AK
1847static inline void
1848prepare_task_switch(struct rq *rq, struct task_struct *prev,
1849 struct task_struct *next)
4866cde0 1850{
fe4b04fa
PZ
1851 sched_info_switch(prev, next);
1852 perf_event_task_sched_out(prev, next);
e107be36 1853 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
1854 prepare_lock_switch(rq, next);
1855 prepare_arch_switch(next);
fe4b04fa 1856 trace_sched_switch(prev, next);
4866cde0
NP
1857}
1858
1da177e4
LT
1859/**
1860 * finish_task_switch - clean up after a task-switch
344babaa 1861 * @rq: runqueue associated with task-switch
1da177e4
LT
1862 * @prev: the thread we just switched away from.
1863 *
4866cde0
NP
1864 * finish_task_switch must be called after the context switch, paired
1865 * with a prepare_task_switch call before the context switch.
1866 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1867 * and do any other architecture-specific cleanup actions.
1da177e4
LT
1868 *
1869 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 1870 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
1871 * with the lock held can cause deadlocks; see schedule() for
1872 * details.)
1873 */
a9957449 1874static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
1875 __releases(rq->lock)
1876{
1da177e4 1877 struct mm_struct *mm = rq->prev_mm;
55a101f8 1878 long prev_state;
1da177e4
LT
1879
1880 rq->prev_mm = NULL;
1881
1882 /*
1883 * A task struct has one reference for the use as "current".
c394cc9f 1884 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
1885 * schedule one last time. The schedule call will never return, and
1886 * the scheduled task must drop that reference.
c394cc9f 1887 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
1888 * still held, otherwise prev could be scheduled on another cpu, die
1889 * there before we look at prev->state, and then the reference would
1890 * be dropped twice.
1891 * Manfred Spraul <manfred@colorfullife.com>
1892 */
55a101f8 1893 prev_state = prev->state;
4866cde0 1894 finish_arch_switch(prev);
8381f65d
JI
1895#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1896 local_irq_disable();
1897#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
a8d757ef 1898 perf_event_task_sched_in(prev, current);
8381f65d
JI
1899#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1900 local_irq_enable();
1901#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
4866cde0 1902 finish_lock_switch(rq, prev);
e8fa1362 1903
e107be36 1904 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
1905 if (mm)
1906 mmdrop(mm);
c394cc9f 1907 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 1908 /*
1909 * Remove function-return probe instances associated with this
1910 * task and put them back on the free list.
9761eea8 1911 */
c6fd91f0 1912 kprobe_flush_task(prev);
1da177e4 1913 put_task_struct(prev);
c6fd91f0 1914 }
1da177e4
LT
1915}
1916
3f029d3c
GH
1917#ifdef CONFIG_SMP
1918
1919/* assumes rq->lock is held */
1920static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1921{
1922 if (prev->sched_class->pre_schedule)
1923 prev->sched_class->pre_schedule(rq, prev);
1924}
1925
1926/* rq->lock is NOT held, but preemption is disabled */
1927static inline void post_schedule(struct rq *rq)
1928{
1929 if (rq->post_schedule) {
1930 unsigned long flags;
1931
05fa785c 1932 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
1933 if (rq->curr->sched_class->post_schedule)
1934 rq->curr->sched_class->post_schedule(rq);
05fa785c 1935 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
1936
1937 rq->post_schedule = 0;
1938 }
1939}
1940
1941#else
da19ab51 1942
3f029d3c
GH
1943static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1944{
1945}
1946
1947static inline void post_schedule(struct rq *rq)
1948{
1da177e4
LT
1949}
1950
3f029d3c
GH
1951#endif
1952
1da177e4
LT
1953/**
1954 * schedule_tail - first thing a freshly forked thread must call.
1955 * @prev: the thread we just switched away from.
1956 */
36c8b586 1957asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
1958 __releases(rq->lock)
1959{
70b97a7f
IM
1960 struct rq *rq = this_rq();
1961
4866cde0 1962 finish_task_switch(rq, prev);
da19ab51 1963
3f029d3c
GH
1964 /*
1965 * FIXME: do we need to worry about rq being invalidated by the
1966 * task_switch?
1967 */
1968 post_schedule(rq);
70b97a7f 1969
4866cde0
NP
1970#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1971 /* In this case, finish_task_switch does not reenable preemption */
1972 preempt_enable();
1973#endif
1da177e4 1974 if (current->set_child_tid)
b488893a 1975 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
1976}
1977
1978/*
1979 * context_switch - switch to the new MM and the new
1980 * thread's register state.
1981 */
dd41f596 1982static inline void
70b97a7f 1983context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 1984 struct task_struct *next)
1da177e4 1985{
dd41f596 1986 struct mm_struct *mm, *oldmm;
1da177e4 1987
e107be36 1988 prepare_task_switch(rq, prev, next);
fe4b04fa 1989
dd41f596
IM
1990 mm = next->mm;
1991 oldmm = prev->active_mm;
9226d125
ZA
1992 /*
1993 * For paravirt, this is coupled with an exit in switch_to to
1994 * combine the page table reload and the switch backend into
1995 * one hypercall.
1996 */
224101ed 1997 arch_start_context_switch(prev);
9226d125 1998
31915ab4 1999 if (!mm) {
1da177e4
LT
2000 next->active_mm = oldmm;
2001 atomic_inc(&oldmm->mm_count);
2002 enter_lazy_tlb(oldmm, next);
2003 } else
2004 switch_mm(oldmm, mm, next);
2005
31915ab4 2006 if (!prev->mm) {
1da177e4 2007 prev->active_mm = NULL;
1da177e4
LT
2008 rq->prev_mm = oldmm;
2009 }
3a5f5e48
IM
2010 /*
2011 * The runqueue lock will be released by the next task
2012 * (which is an invalid locking op, but in the case of
2013 * the scheduler it's an obvious special-case), so we
2014 * do an early lockdep release here:
2015 */
2016#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 2017 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 2018#endif
1da177e4
LT
2019
2020 /* Here we just switch the register state and the stack. */
2021 switch_to(prev, next, prev);
2022
dd41f596
IM
2023 barrier();
2024 /*
2025 * this_rq must be evaluated again because prev may have moved
2026 * CPUs since it called schedule(), thus the 'rq' on its stack
2027 * frame will be invalid.
2028 */
2029 finish_task_switch(this_rq(), prev);
1da177e4
LT
2030}
2031
2032/*
2033 * nr_running, nr_uninterruptible and nr_context_switches:
2034 *
2035 * externally visible scheduler statistics: current number of runnable
2036 * threads, current number of uninterruptible-sleeping threads, total
2037 * number of context switches performed since bootup.
2038 */
2039unsigned long nr_running(void)
2040{
2041 unsigned long i, sum = 0;
2042
2043 for_each_online_cpu(i)
2044 sum += cpu_rq(i)->nr_running;
2045
2046 return sum;
f711f609 2047}
1da177e4
LT
2048
2049unsigned long nr_uninterruptible(void)
f711f609 2050{
1da177e4 2051 unsigned long i, sum = 0;
f711f609 2052
0a945022 2053 for_each_possible_cpu(i)
1da177e4 2054 sum += cpu_rq(i)->nr_uninterruptible;
f711f609
GS
2055
2056 /*
1da177e4
LT
2057 * Since we read the counters lockless, it might be slightly
2058 * inaccurate. Do not allow it to go below zero though:
f711f609 2059 */
1da177e4
LT
2060 if (unlikely((long)sum < 0))
2061 sum = 0;
f711f609 2062
1da177e4 2063 return sum;
f711f609 2064}
f711f609 2065
1da177e4 2066unsigned long long nr_context_switches(void)
46cb4b7c 2067{
cc94abfc
SR
2068 int i;
2069 unsigned long long sum = 0;
46cb4b7c 2070
0a945022 2071 for_each_possible_cpu(i)
1da177e4 2072 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2073
1da177e4
LT
2074 return sum;
2075}
483b4ee6 2076
1da177e4
LT
2077unsigned long nr_iowait(void)
2078{
2079 unsigned long i, sum = 0;
483b4ee6 2080
0a945022 2081 for_each_possible_cpu(i)
1da177e4 2082 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2083
1da177e4
LT
2084 return sum;
2085}
483b4ee6 2086
8c215bd3 2087unsigned long nr_iowait_cpu(int cpu)
69d25870 2088{
8c215bd3 2089 struct rq *this = cpu_rq(cpu);
69d25870
AV
2090 return atomic_read(&this->nr_iowait);
2091}
46cb4b7c 2092
69d25870
AV
2093unsigned long this_cpu_load(void)
2094{
2095 struct rq *this = this_rq();
2096 return this->cpu_load[0];
2097}
e790fb0b 2098
46cb4b7c 2099
dce48a84
TG
2100/* Variables and functions for calc_load */
2101static atomic_long_t calc_load_tasks;
2102static unsigned long calc_load_update;
2103unsigned long avenrun[3];
2104EXPORT_SYMBOL(avenrun);
46cb4b7c 2105
74f5187a
PZ
2106static long calc_load_fold_active(struct rq *this_rq)
2107{
2108 long nr_active, delta = 0;
2109
2110 nr_active = this_rq->nr_running;
2111 nr_active += (long) this_rq->nr_uninterruptible;
2112
2113 if (nr_active != this_rq->calc_load_active) {
2114 delta = nr_active - this_rq->calc_load_active;
2115 this_rq->calc_load_active = nr_active;
2116 }
2117
2118 return delta;
2119}
2120
0f004f5a
PZ
2121static unsigned long
2122calc_load(unsigned long load, unsigned long exp, unsigned long active)
2123{
2124 load *= exp;
2125 load += active * (FIXED_1 - exp);
2126 load += 1UL << (FSHIFT - 1);
2127 return load >> FSHIFT;
2128}
2129
74f5187a
PZ
2130#ifdef CONFIG_NO_HZ
2131/*
2132 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2133 *
2134 * When making the ILB scale, we should try to pull this in as well.
2135 */
2136static atomic_long_t calc_load_tasks_idle;
2137
029632fb 2138void calc_load_account_idle(struct rq *this_rq)
74f5187a
PZ
2139{
2140 long delta;
2141
2142 delta = calc_load_fold_active(this_rq);
2143 if (delta)
2144 atomic_long_add(delta, &calc_load_tasks_idle);
2145}
2146
2147static long calc_load_fold_idle(void)
2148{
2149 long delta = 0;
2150
2151 /*
2152 * It's got a race; we don't care...
2153 */
2154 if (atomic_long_read(&calc_load_tasks_idle))
2155 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2156
2157 return delta;
2158}
0f004f5a
PZ
2159
2160/**
2161 * fixed_power_int - compute: x^n, in O(log n) time
2162 *
2163 * @x: base of the power
2164 * @frac_bits: fractional bits of @x
2165 * @n: power to raise @x to.
2166 *
2167 * By exploiting the relation between the definition of the natural power
2168 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2169 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2170 * (where: n_i \elem {0, 1}, the binary vector representing n),
2171 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2172 * of course trivially computable in O(log_2 n), the length of our binary
2173 * vector.
2174 */
2175static unsigned long
2176fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2177{
2178 unsigned long result = 1UL << frac_bits;
2179
2180 if (n) for (;;) {
2181 if (n & 1) {
2182 result *= x;
2183 result += 1UL << (frac_bits - 1);
2184 result >>= frac_bits;
2185 }
2186 n >>= 1;
2187 if (!n)
2188 break;
2189 x *= x;
2190 x += 1UL << (frac_bits - 1);
2191 x >>= frac_bits;
2192 }
2193
2194 return result;
2195}
2196
2197/*
2198 * a1 = a0 * e + a * (1 - e)
2199 *
2200 * a2 = a1 * e + a * (1 - e)
2201 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2202 * = a0 * e^2 + a * (1 - e) * (1 + e)
2203 *
2204 * a3 = a2 * e + a * (1 - e)
2205 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2206 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2207 *
2208 * ...
2209 *
2210 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2211 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2212 * = a0 * e^n + a * (1 - e^n)
2213 *
2214 * [1] application of the geometric series:
2215 *
2216 * n 1 - x^(n+1)
2217 * S_n := \Sum x^i = -------------
2218 * i=0 1 - x
2219 */
2220static unsigned long
2221calc_load_n(unsigned long load, unsigned long exp,
2222 unsigned long active, unsigned int n)
2223{
2224
2225 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2226}
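
/*
 * Illustrative userspace sketch (not kernel code): it mirrors calc_load(),
 * fixed_power_int() and calc_load_n() from above and shows that folding n
 * missed periods at once via the closed form agrees, up to fixed-point
 * rounding, with applying calc_load() n times -- exactly the geometric
 * series identity derived in the comment. FSHIFT = 11 and EXP_1 = 1884 are
 * the usual values from <linux/sched.h> (an assumption of this sketch).
 * Build with e.g. "gcc -O2 loadavg_demo.c && ./a.out".
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);
	return load >> FSHIFT;
}

/* same square-and-multiply walk as fixed_power_int() above */
static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
				     unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) for (;;) {
		if (n & 1) {
			result *= x;
			result += 1UL << (frac_bits - 1);
			result >>= frac_bits;
		}
		n >>= 1;
		if (!n)
			break;
		x *= x;
		x += 1UL << (frac_bits - 1);
		x >>= frac_bits;
	}
	return result;
}

static unsigned long calc_load_n(unsigned long load, unsigned long exp,
				 unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}

int main(void)
{
	unsigned long a0 = 3 * FIXED_1;	/* old 1-min average: 3.00 */
	unsigned long active = 0;	/* nothing runnable while idle */
	unsigned long iter = a0;
	unsigned int i, n = 8;		/* 8 missed LOAD_FREQ periods */

	for (i = 0; i < n; i++)
		iter = calc_load(iter, EXP_1, active);

	printf("iterated   : %lu\n", iter);
	printf("closed form: %lu\n", calc_load_n(a0, EXP_1, active, n));
	return 0;
}
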
2227
2228/*
2229 * NO_HZ can leave us missing all per-cpu ticks calling
2230 * calc_load_account_active(), but since an idle CPU folds its delta into
2231 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
2232 * in the pending idle delta if our idle period crossed a load cycle boundary.
2233 *
2234 * Once we've updated the global active value, we need to apply the exponential
2235 * weights adjusted to the number of cycles missed.
2236 */
2237static void calc_global_nohz(unsigned long ticks)
2238{
2239 long delta, active, n;
2240
2241 if (time_before(jiffies, calc_load_update))
2242 return;
2243
2244 /*
2245 * If we crossed a calc_load_update boundary, make sure to fold
2246 * any pending idle changes, the respective CPUs might have
2247 * missed the tick driven calc_load_account_active() update
2248 * due to NO_HZ.
2249 */
2250 delta = calc_load_fold_idle();
2251 if (delta)
2252 atomic_long_add(delta, &calc_load_tasks);
2253
2254 /*
2255 * If we were idle for multiple load cycles, apply them.
2256 */
2257 if (ticks >= LOAD_FREQ) {
2258 n = ticks / LOAD_FREQ;
2259
2260 active = atomic_long_read(&calc_load_tasks);
2261 active = active > 0 ? active * FIXED_1 : 0;
2262
2263 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2264 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2265 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2266
2267 calc_load_update += n * LOAD_FREQ;
2268 }
2269
2270 /*
2271 * It's possible the remainder of the above division also crosses
2272 * a LOAD_FREQ period, the regular check in calc_global_load()
2273 * which comes after this will take care of that.
2274 *
2275 * Consider us being 11 ticks before a cycle completion, and us
2276 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
2277 * age us 4 cycles, and the test in calc_global_load() will
2278 * pick up the final one.
2279 */
2280}
74f5187a 2281#else
029632fb 2282void calc_load_account_idle(struct rq *this_rq)
74f5187a
PZ
2283{
2284}
2285
2286static inline long calc_load_fold_idle(void)
2287{
2288 return 0;
2289}
0f004f5a
PZ
2290
2291static void calc_global_nohz(unsigned long ticks)
2292{
2293}
74f5187a
PZ
2294#endif
2295
2d02494f
TG
2296/**
2297 * get_avenrun - get the load average array
2298 * @loads: pointer to dest load array
2299 * @offset: offset to add
2300 * @shift: shift count to shift the result left
2301 *
2302 * These values are estimates at best, so no need for locking.
2303 */
2304void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2305{
2306 loads[0] = (avenrun[0] + offset) << shift;
2307 loads[1] = (avenrun[1] + offset) << shift;
2308 loads[2] = (avenrun[2] + offset) << shift;
46cb4b7c 2309}
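
/*
 * Illustrative userspace sketch (not kernel code): shows how a fixed-point
 * avenrun[] value, as exported by get_avenrun() above, turns into the
 * familiar "0.52"-style number. FSHIFT = 11 and the LOAD_INT/LOAD_FRAC
 * split are the usual definitions from <linux/sched.h> (an assumption of
 * this sketch); the same conversion is what /proc/loadavg prints.
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	/* e.g. 0.52 runnable tasks on average: 0.52 * 2048 ~= 1065 */
	unsigned long avnrun = 1065;

	printf("%lu.%02lu\n", LOAD_INT(avnrun), LOAD_FRAC(avnrun));
	return 0;
}
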
46cb4b7c 2310
46cb4b7c 2311/*
dce48a84
TG
2312 * calc_global_load - update the avenrun load estimates 10 ticks after the
2313 * CPUs have updated calc_load_tasks.
7835b98b 2314 */
0f004f5a 2315void calc_global_load(unsigned long ticks)
7835b98b 2316{
dce48a84 2317 long active;
1da177e4 2318
0f004f5a
PZ
2319 calc_global_nohz(ticks);
2320
2321 if (time_before(jiffies, calc_load_update + 10))
dce48a84 2322 return;
1da177e4 2323
dce48a84
TG
2324 active = atomic_long_read(&calc_load_tasks);
2325 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 2326
dce48a84
TG
2327 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2328 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2329 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 2330
dce48a84
TG
2331 calc_load_update += LOAD_FREQ;
2332}
1da177e4 2333
dce48a84 2334/*
74f5187a
PZ
2335 * Called from update_cpu_load() to periodically update this CPU's
2336 * active count.
dce48a84
TG
2337 */
2338static void calc_load_account_active(struct rq *this_rq)
2339{
74f5187a 2340 long delta;
08c183f3 2341
74f5187a
PZ
2342 if (time_before(jiffies, this_rq->calc_load_update))
2343 return;
783609c6 2344
74f5187a
PZ
2345 delta = calc_load_fold_active(this_rq);
2346 delta += calc_load_fold_idle();
2347 if (delta)
dce48a84 2348 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
2349
2350 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
2351}
2352
fdf3e95d
VP
2353/*
2354 * The exact cpuload at various idx values, calculated at every tick would be
2355 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2356 *
2357 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
2358 * on nth tick when cpu may be busy, then we have:
2359 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2360 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2361 *
2362 * decay_load_missed() below does efficient calculation of
2363 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2364 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2365 *
2366 * The calculation is approximated on a 128 point scale.
2367 * degrade_zero_ticks is the number of ticks after which load at any
2368 * particular idx is approximated to be zero.
2369 * degrade_factor is a precomputed table, a row for each load idx.
2370 * Each column corresponds to degradation factor for a power of two ticks,
2371 * based on 128 point scale.
2372 * Example:
2373 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2374 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2375 *
2376 * With this power of 2 load factors, we can degrade the load n times
2377 * by looking at 1 bits in n and doing as many mult/shift instead of
2378 * n mult/shifts needed by the exact degradation.
2379 */
2380#define DEGRADE_SHIFT 7
2381static const unsigned char
2382 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2383static const unsigned char
2384 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2385 {0, 0, 0, 0, 0, 0, 0, 0},
2386 {64, 32, 8, 0, 0, 0, 0, 0},
2387 {96, 72, 40, 12, 1, 0, 0},
2388 {112, 98, 75, 43, 15, 1, 0},
2389 {120, 112, 98, 76, 45, 16, 2} };
2390
2391/*
2392 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
2393 * would be when CPU is idle and so we just decay the old load without
2394 * adding any new load.
2395 */
2396static unsigned long
2397decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2398{
2399 int j = 0;
2400
2401 if (!missed_updates)
2402 return load;
2403
2404 if (missed_updates >= degrade_zero_ticks[idx])
2405 return 0;
2406
2407 if (idx == 1)
2408 return load >> missed_updates;
2409
2410 while (missed_updates) {
2411 if (missed_updates % 2)
2412 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2413
2414 missed_updates >>= 1;
2415 j++;
2416 }
2417 return load;
2418}
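
/*
 * Illustrative userspace sketch (not kernel code): it copies the degrade
 * tables and decay_load_missed() from above and compares the table-driven
 * decay for a run of missed ticks against the exact per-tick decay
 * load = load * (2^idx - 1) / 2^idx, to show how close the 128-point
 * approximation is.
 */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5
#define DEGRADE_SHIFT 7

static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] =
					{0, 8, 32, 64, 128};
static const unsigned char
	degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
					{0, 0, 0, 0, 0, 0, 0, 0},
					{64, 32, 8, 0, 0, 0, 0, 0},
					{96, 72, 40, 12, 1, 0, 0},
					{112, 98, 75, 43, 15, 1, 0},
					{120, 112, 98, 76, 45, 16, 2} };

/* same bit-decomposition walk as decay_load_missed() above */
static unsigned long decay_load_missed(unsigned long load,
				       unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;
	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;
	if (idx == 1)
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
		missed_updates >>= 1;
		j++;
	}
	return load;
}

int main(void)
{
	int idx = 2;			/* decay factor 3/4 per missed tick */
	unsigned long load = 1000, exact = load;
	unsigned long missed = 8, i;

	for (i = 0; i < missed; i++)
		exact = exact * ((1UL << idx) - 1) >> idx;

	printf("table-driven: %lu, exact: %lu\n",
	       decay_load_missed(load, missed, idx), exact);
	return 0;
}
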
2419
46cb4b7c 2420/*
dd41f596 2421 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
2422 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2423 * every tick. We fix it up based on jiffies.
46cb4b7c 2424 */
029632fb 2425void update_cpu_load(struct rq *this_rq)
46cb4b7c 2426{
495eca49 2427 unsigned long this_load = this_rq->load.weight;
fdf3e95d
VP
2428 unsigned long curr_jiffies = jiffies;
2429 unsigned long pending_updates;
dd41f596 2430 int i, scale;
46cb4b7c 2431
dd41f596 2432 this_rq->nr_load_updates++;
46cb4b7c 2433
fdf3e95d
VP
2434 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
2435 if (curr_jiffies == this_rq->last_load_update_tick)
2436 return;
2437
2438 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2439 this_rq->last_load_update_tick = curr_jiffies;
2440
dd41f596 2441 /* Update our load: */
fdf3e95d
VP
2442 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2443 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 2444 unsigned long old_load, new_load;
7d1e6a9b 2445
dd41f596 2446 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 2447
dd41f596 2448 old_load = this_rq->cpu_load[i];
fdf3e95d 2449 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 2450 new_load = this_load;
a25707f3
IM
2451 /*
2452 * Round up the averaging division if load is increasing. This
2453 * prevents us from getting stuck on 9 if the load is 10, for
2454 * example.
2455 */
2456 if (new_load > old_load)
fdf3e95d
VP
2457 new_load += scale - 1;
2458
2459 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 2460 }
da2b71ed
SS
2461
2462 sched_avg_update(this_rq);
fdf3e95d
VP
2463}
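
/*
 * Illustrative userspace sketch (not kernel code): shows why the
 * "new_load += scale - 1" round-up above is needed. Without it the
 * integer average (old * (scale - 1) + new) >> i can never reach a
 * target that is only one unit above the current value, e.g. it sticks
 * at 9 when the instantaneous load is 10.
 */
#include <stdio.h>

int main(void)
{
	unsigned long old_load = 9, target = 10;
	int i = 1, scale = 1 << i;
	unsigned long plain, rounded, t;

	plain = rounded = old_load;
	for (t = 0; t < 10; t++) {
		unsigned long nl = target;

		plain = (plain * (scale - 1) + target) >> i;
		if (nl > rounded)
			nl += scale - 1;	/* round up when increasing */
		rounded = (rounded * (scale - 1) + nl) >> i;
	}
	printf("after 10 ticks: without round-up %lu, with round-up %lu\n",
	       plain, rounded);
	return 0;
}
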
2464
2465static void update_cpu_load_active(struct rq *this_rq)
2466{
2467 update_cpu_load(this_rq);
46cb4b7c 2468
74f5187a 2469 calc_load_account_active(this_rq);
46cb4b7c
SS
2470}
2471
dd41f596 2472#ifdef CONFIG_SMP
8a0be9ef 2473
46cb4b7c 2474/*
38022906
PZ
2475 * sched_exec - execve() is a valuable balancing opportunity, because at
2476 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2477 */
38022906 2478void sched_exec(void)
46cb4b7c 2479{
38022906 2480 struct task_struct *p = current;
1da177e4 2481 unsigned long flags;
0017d735 2482 int dest_cpu;
46cb4b7c 2483
8f42ced9 2484 raw_spin_lock_irqsave(&p->pi_lock, flags);
7608dec2 2485 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
0017d735
PZ
2486 if (dest_cpu == smp_processor_id())
2487 goto unlock;
38022906 2488
8f42ced9 2489 if (likely(cpu_active(dest_cpu))) {
969c7921 2490 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2491
8f42ced9
PZ
2492 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2493 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2494 return;
2495 }
0017d735 2496unlock:
8f42ced9 2497 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2498}
dd41f596 2499
1da177e4
LT
2500#endif
2501
1da177e4 2502DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2503DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2504
2505EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2506EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4
LT
2507
2508/*
c5f8d995 2509 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 2510 * @p in case that task is currently running.
c5f8d995
HS
2511 *
2512 * Called with task_rq_lock() held on @rq.
1da177e4 2513 */
c5f8d995
HS
2514static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2515{
2516 u64 ns = 0;
2517
2518 if (task_current(rq, p)) {
2519 update_rq_clock(rq);
305e6835 2520 ns = rq->clock_task - p->se.exec_start;
c5f8d995
HS
2521 if ((s64)ns < 0)
2522 ns = 0;
2523 }
2524
2525 return ns;
2526}
2527
bb34d92f 2528unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 2529{
1da177e4 2530 unsigned long flags;
41b86e9c 2531 struct rq *rq;
bb34d92f 2532 u64 ns = 0;
48f24c4d 2533
41b86e9c 2534 rq = task_rq_lock(p, &flags);
c5f8d995 2535 ns = do_task_delta_exec(p, rq);
0122ec5b 2536 task_rq_unlock(rq, p, &flags);
1508487e 2537
c5f8d995
HS
2538 return ns;
2539}
f06febc9 2540
c5f8d995
HS
2541/*
2542 * Return accounted runtime for the task.
2543 * In case the task is currently running, return the runtime plus current's
2544 * pending runtime that has not been accounted yet.
2545 */
2546unsigned long long task_sched_runtime(struct task_struct *p)
2547{
2548 unsigned long flags;
2549 struct rq *rq;
2550 u64 ns = 0;
2551
2552 rq = task_rq_lock(p, &flags);
2553 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 2554 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2555
2556 return ns;
2557}
48f24c4d 2558
54c707e9
GC
2559#ifdef CONFIG_CGROUP_CPUACCT
2560struct cgroup_subsys cpuacct_subsys;
2561struct cpuacct root_cpuacct;
2562#endif
2563
be726ffd
GC
2564static inline void task_group_account_field(struct task_struct *p, int index,
2565 u64 tmp)
54c707e9
GC
2566{
2567#ifdef CONFIG_CGROUP_CPUACCT
2568 struct kernel_cpustat *kcpustat;
2569 struct cpuacct *ca;
2570#endif
2571 /*
2572 * Since all updates are sure to touch the root cgroup, we
2573 * get ourselves ahead and touch it first. If the root cgroup
2574 * is the only cgroup, then nothing else should be necessary.
2575 *
2576 */
2577 __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
2578
2579#ifdef CONFIG_CGROUP_CPUACCT
2580 if (unlikely(!cpuacct_subsys.active))
2581 return;
2582
2583 rcu_read_lock();
2584 ca = task_ca(p);
2585 while (ca && (ca != &root_cpuacct)) {
2586 kcpustat = this_cpu_ptr(ca->cpustat);
2587 kcpustat->cpustat[index] += tmp;
2588 ca = parent_ca(ca);
2589 }
2590 rcu_read_unlock();
2591#endif
2592}
2593
2594
1da177e4
LT
2595/*
2596 * Account user cpu time to a process.
2597 * @p: the process that the cpu time gets accounted to
1da177e4 2598 * @cputime: the cpu time spent in user space since the last update
457533a7 2599 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4 2600 */
457533a7
MS
2601void account_user_time(struct task_struct *p, cputime_t cputime,
2602 cputime_t cputime_scaled)
1da177e4 2603{
3292beb3 2604 int index;
1da177e4 2605
457533a7 2606 /* Add user time to process. */
1da177e4 2607 p->utime = cputime_add(p->utime, cputime);
457533a7 2608 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 2609 account_group_user_time(p, cputime);
1da177e4 2610
3292beb3 2611 index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
ef12fefa 2612
1c77f38a 2613 /* Add user time to cpustat. */
54c707e9 2614 task_group_account_field(p, index, cputime);
1c77f38a 2615
49b5cf34
JL
2616 /* Account for user time used */
2617 acct_update_integrals(p);
1da177e4
LT
2618}
2619
94886b84
LV
2620/*
2621 * Account guest cpu time to a process.
2622 * @p: the process that the cpu time gets accounted to
2623 * @cputime: the cpu time spent in virtual machine since the last update
457533a7 2624 * @cputime_scaled: cputime scaled by cpu frequency
94886b84 2625 */
457533a7
MS
2626static void account_guest_time(struct task_struct *p, cputime_t cputime,
2627 cputime_t cputime_scaled)
94886b84 2628{
3292beb3
GC
2629 u64 tmp;
2630 u64 *cpustat = kcpustat_this_cpu->cpustat;
94886b84
LV
2631
2632 tmp = cputime_to_cputime64(cputime);
2633
457533a7 2634 /* Add guest time to process. */
94886b84 2635 p->utime = cputime_add(p->utime, cputime);
457533a7 2636 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 2637 account_group_user_time(p, cputime);
94886b84
LV
2638 p->gtime = cputime_add(p->gtime, cputime);
2639
457533a7 2640 /* Add guest time to cpustat. */
ce0e7b28 2641 if (TASK_NICE(p) > 0) {
3292beb3
GC
2642 cpustat[CPUTIME_NICE] += tmp;
2643 cpustat[CPUTIME_GUEST_NICE] += tmp;
ce0e7b28 2644 } else {
3292beb3
GC
2645 cpustat[CPUTIME_USER] += tmp;
2646 cpustat[CPUTIME_GUEST] += tmp;
ce0e7b28 2647 }
94886b84
LV
2648}
2649
70a89a66
VP
2650/*
2651 * Account system cpu time to a process and desired cpustat field
2652 * @p: the process that the cpu time gets accounted to
2653 * @cputime: the cpu time spent in kernel space since the last update
2654 * @cputime_scaled: cputime scaled by cpu frequency
2655 * @index: index of the cpustat field that has to be updated
2656 */
2657static inline
2658void __account_system_time(struct task_struct *p, cputime_t cputime,
3292beb3 2659 cputime_t cputime_scaled, int index)
70a89a66 2660{
70a89a66
VP
2661 /* Add system time to process. */
2662 p->stime = cputime_add(p->stime, cputime);
2663 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
2664 account_group_system_time(p, cputime);
2665
2666 /* Add system time to cpustat. */
54c707e9 2667 task_group_account_field(p, index, cputime);
70a89a66
VP
2668
2669 /* Account for system time used */
2670 acct_update_integrals(p);
2671}
2672
1da177e4
LT
2673/*
2674 * Account system cpu time to a process.
2675 * @p: the process that the cpu time gets accounted to
2676 * @hardirq_offset: the offset to subtract from hardirq_count()
2677 * @cputime: the cpu time spent in kernel space since the last update
457533a7 2678 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4
LT
2679 */
2680void account_system_time(struct task_struct *p, int hardirq_offset,
457533a7 2681 cputime_t cputime, cputime_t cputime_scaled)
1da177e4 2682{
3292beb3 2683 int index;
1da177e4 2684
983ed7a6 2685 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
457533a7 2686 account_guest_time(p, cputime, cputime_scaled);
983ed7a6
HH
2687 return;
2688 }
94886b84 2689
1da177e4 2690 if (hardirq_count() - hardirq_offset)
3292beb3 2691 index = CPUTIME_IRQ;
75e1056f 2692 else if (in_serving_softirq())
3292beb3 2693 index = CPUTIME_SOFTIRQ;
1da177e4 2694 else
3292beb3 2695 index = CPUTIME_SYSTEM;
ef12fefa 2696
3292beb3 2697 __account_system_time(p, cputime, cputime_scaled, index);
1da177e4
LT
2698}
2699
c66f08be 2700/*
1da177e4 2701 * Account for involuntary wait time.
544b4a1f 2702 * @cputime: the cpu time spent in involuntary wait
c66f08be 2703 */
79741dd3 2704void account_steal_time(cputime_t cputime)
c66f08be 2705{
3292beb3
GC
2706 u64 *cpustat = kcpustat_this_cpu->cpustat;
2707 u64 cputime64 = cputime_to_cputime64(cputime);
79741dd3 2708
3292beb3 2709 cpustat[CPUTIME_STEAL] += cputime64;
c66f08be
MN
2710}
2711
1da177e4 2712/*
79741dd3
MS
2713 * Account for idle time.
2714 * @cputime: the cpu time spent in idle wait
1da177e4 2715 */
79741dd3 2716void account_idle_time(cputime_t cputime)
1da177e4 2717{
3292beb3
GC
2718 u64 *cpustat = kcpustat_this_cpu->cpustat;
2719 u64 cputime64 = cputime_to_cputime64(cputime);
70b97a7f 2720 struct rq *rq = this_rq();
1da177e4 2721
79741dd3 2722 if (atomic_read(&rq->nr_iowait) > 0)
3292beb3 2723 cpustat[CPUTIME_IOWAIT] += cputime64;
79741dd3 2724 else
3292beb3 2725 cpustat[CPUTIME_IDLE] += cputime64;
1da177e4
LT
2726}
2727
e6e6685a
GC
2728static __always_inline bool steal_account_process_tick(void)
2729{
2730#ifdef CONFIG_PARAVIRT
2731 if (static_branch(&paravirt_steal_enabled)) {
2732 u64 steal, st = 0;
2733
2734 steal = paravirt_steal_clock(smp_processor_id());
2735 steal -= this_rq()->prev_steal_time;
2736
2737 st = steal_ticks(steal);
2738 this_rq()->prev_steal_time += st * TICK_NSEC;
2739
2740 account_steal_time(st);
2741 return st;
2742 }
2743#endif
2744 return false;
2745}
2746
79741dd3
MS
2747#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2748
abb74cef
VP
2749#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2750/*
2751 * Account a tick to a process and cpustat
2752 * @p: the process that the cpu time gets accounted to
2753 * @user_tick: is the tick from userspace
2754 * @rq: the pointer to rq
2755 *
2756 * Tick demultiplexing follows the order
2757 * - pending hardirq update
2758 * - pending softirq update
2759 * - user_time
2760 * - idle_time
2761 * - system time
2762 * - check for guest_time
2763 * - else account as system_time
2764 *
2765 * The check for hardirq is done for both system and user time, as there is
2766 * no timer going off while we are on hardirq and hence we may never get an
2767 * opportunity to update it solely in system time.
2768 * p->stime and friends are only updated on system time and not on irq or
2769 * softirq time, as those do not count in task exec_runtime any more.
2770 */
2771static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2772 struct rq *rq)
2773{
2774 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3292beb3
GC
2775 u64 tmp = cputime_to_cputime64(cputime_one_jiffy);
2776 u64 *cpustat = kcpustat_this_cpu->cpustat;
abb74cef 2777
e6e6685a
GC
2778 if (steal_account_process_tick())
2779 return;
2780
abb74cef 2781 if (irqtime_account_hi_update()) {
3292beb3 2782 cpustat[CPUTIME_IRQ] += tmp;
abb74cef 2783 } else if (irqtime_account_si_update()) {
3292beb3 2784 cpustat[CPUTIME_SOFTIRQ] += tmp;
414bee9b
VP
2785 } else if (this_cpu_ksoftirqd() == p) {
2786 /*
2787 * ksoftirqd time does not get accounted in cpu_softirq_time.
2788 * So, we have to handle it separately here.
2789 * Also, p->stime needs to be updated for ksoftirqd.
2790 */
2791 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3292beb3 2792 CPUTIME_SOFTIRQ);
abb74cef
VP
2793 } else if (user_tick) {
2794 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2795 } else if (p == rq->idle) {
2796 account_idle_time(cputime_one_jiffy);
2797 } else if (p->flags & PF_VCPU) { /* System time or guest time */
2798 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
2799 } else {
2800 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3292beb3 2801 CPUTIME_SYSTEM);
abb74cef
VP
2802 }
2803}
2804
2805static void irqtime_account_idle_ticks(int ticks)
2806{
2807 int i;
2808 struct rq *rq = this_rq();
2809
2810 for (i = 0; i < ticks; i++)
2811 irqtime_account_process_tick(current, 0, rq);
2812}
544b4a1f 2813#else /* CONFIG_IRQ_TIME_ACCOUNTING */
abb74cef
VP
2814static void irqtime_account_idle_ticks(int ticks) {}
2815static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2816 struct rq *rq) {}
544b4a1f 2817#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
79741dd3
MS
2818
2819/*
2820 * Account a single tick of cpu time.
2821 * @p: the process that the cpu time gets accounted to
2822 * @user_tick: indicates if the tick is a user or a system tick
2823 */
2824void account_process_tick(struct task_struct *p, int user_tick)
2825{
a42548a1 2826 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
79741dd3
MS
2827 struct rq *rq = this_rq();
2828
abb74cef
VP
2829 if (sched_clock_irqtime) {
2830 irqtime_account_process_tick(p, user_tick, rq);
2831 return;
2832 }
2833
e6e6685a
GC
2834 if (steal_account_process_tick())
2835 return;
2836
79741dd3 2837 if (user_tick)
a42548a1 2838 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
f5f293a4 2839 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
a42548a1 2840 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
79741dd3
MS
2841 one_jiffy_scaled);
2842 else
a42548a1 2843 account_idle_time(cputime_one_jiffy);
79741dd3
MS
2844}
2845
2846/*
2847 * Account multiple ticks of steal time.
2849 * @ticks: number of stolen ticks
2850 */
2851void account_steal_ticks(unsigned long ticks)
2852{
2853 account_steal_time(jiffies_to_cputime(ticks));
2854}
2855
2856/*
2857 * Account multiple ticks of idle time.
2858 * @ticks: number of idle ticks
2859 */
2860void account_idle_ticks(unsigned long ticks)
2861{
abb74cef
VP
2862
2863 if (sched_clock_irqtime) {
2864 irqtime_account_idle_ticks(ticks);
2865 return;
2866 }
2867
79741dd3 2868 account_idle_time(jiffies_to_cputime(ticks));
1da177e4
LT
2869}
2870
79741dd3
MS
2871#endif
2872
49048622
BS
2873/*
2874 * Use precise platform statistics if available:
2875 */
2876#ifdef CONFIG_VIRT_CPU_ACCOUNTING
d180c5bc 2877void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2878{
d99ca3b9
HS
2879 *ut = p->utime;
2880 *st = p->stime;
49048622
BS
2881}
2882
0cf55e1e 2883void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2884{
0cf55e1e
HS
2885 struct task_cputime cputime;
2886
2887 thread_group_cputime(p, &cputime);
2888
2889 *ut = cputime.utime;
2890 *st = cputime.stime;
49048622
BS
2891}
2892#else
761b1d26
HS
2893
2894#ifndef nsecs_to_cputime
b7b20df9 2895# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
761b1d26
HS
2896#endif
2897
d180c5bc 2898void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2899{
d99ca3b9 2900 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
49048622
BS
2901
2902 /*
2903 * Use CFS's precise accounting:
2904 */
d180c5bc 2905 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
49048622
BS
2906
2907 if (total) {
e75e863d 2908 u64 temp = rtime;
d180c5bc 2909
e75e863d 2910 temp *= utime;
49048622 2911 do_div(temp, total);
d180c5bc
HS
2912 utime = (cputime_t)temp;
2913 } else
2914 utime = rtime;
49048622 2915
d180c5bc
HS
2916 /*
2917 * Compare with previous values, to keep monotonicity:
2918 */
761b1d26 2919 p->prev_utime = max(p->prev_utime, utime);
d99ca3b9 2920 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
49048622 2921
d99ca3b9
HS
2922 *ut = p->prev_utime;
2923 *st = p->prev_stime;
49048622
BS
2924}
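
/*
 * Illustrative userspace sketch (not kernel code): it mimics the scaling
 * done by task_times() above. The tick-sampled utime/stime only fix the
 * user/system ratio; the precise CFS runtime (rtime) is then split in
 * that ratio, and the prev_* clamps keep both values monotonic across
 * calls. The numbers below are made up for the illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long utime = 300, stime = 100;	/* tick samples */
	unsigned long long rtime = 1000;		/* precise runtime */
	unsigned long long prev_utime = 0, prev_stime = 0;
	unsigned long long total = utime + stime, scaled;

	scaled = total ? rtime * utime / total : rtime;

	if (scaled > prev_utime)
		prev_utime = scaled;		/* max(prev_utime, utime) */
	if (rtime - prev_utime > prev_stime)
		prev_stime = rtime - prev_utime; /* max(prev_stime, rtime - prev_utime) */

	printf("ut=%llu st=%llu\n", prev_utime, prev_stime);
	return 0;
}
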
2925
0cf55e1e
HS
2926/*
2927 * Must be called with siglock held.
2928 */
2929void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2930{
0cf55e1e
HS
2931 struct signal_struct *sig = p->signal;
2932 struct task_cputime cputime;
2933 cputime_t rtime, utime, total;
49048622 2934
0cf55e1e 2935 thread_group_cputime(p, &cputime);
49048622 2936
0cf55e1e
HS
2937 total = cputime_add(cputime.utime, cputime.stime);
2938 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
49048622 2939
0cf55e1e 2940 if (total) {
e75e863d 2941 u64 temp = rtime;
49048622 2942
e75e863d 2943 temp *= cputime.utime;
0cf55e1e
HS
2944 do_div(temp, total);
2945 utime = (cputime_t)temp;
2946 } else
2947 utime = rtime;
2948
2949 sig->prev_utime = max(sig->prev_utime, utime);
2950 sig->prev_stime = max(sig->prev_stime,
2951 cputime_sub(rtime, sig->prev_utime));
2952
2953 *ut = sig->prev_utime;
2954 *st = sig->prev_stime;
49048622 2955}
49048622 2956#endif
49048622 2957
7835b98b
CL
2958/*
2959 * This function gets called by the timer code, with HZ frequency.
2960 * We call it with interrupts disabled.
7835b98b
CL
2961 */
2962void scheduler_tick(void)
2963{
7835b98b
CL
2964 int cpu = smp_processor_id();
2965 struct rq *rq = cpu_rq(cpu);
dd41f596 2966 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2967
2968 sched_clock_tick();
dd41f596 2969
05fa785c 2970 raw_spin_lock(&rq->lock);
3e51f33f 2971 update_rq_clock(rq);
fdf3e95d 2972 update_cpu_load_active(rq);
fa85ae24 2973 curr->sched_class->task_tick(rq, curr, 0);
05fa785c 2974 raw_spin_unlock(&rq->lock);
7835b98b 2975
e9d2b064 2976 perf_event_task_tick();
e220d2dc 2977
e418e1c2 2978#ifdef CONFIG_SMP
6eb57e0d 2979 rq->idle_balance = idle_cpu(cpu);
dd41f596 2980 trigger_load_balance(rq, cpu);
e418e1c2 2981#endif
1da177e4
LT
2982}
2983
132380a0 2984notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2985{
2986 if (in_lock_functions(addr)) {
2987 addr = CALLER_ADDR2;
2988 if (in_lock_functions(addr))
2989 addr = CALLER_ADDR3;
2990 }
2991 return addr;
2992}
1da177e4 2993
7e49fcce
SR
2994#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2995 defined(CONFIG_PREEMPT_TRACER))
2996
43627582 2997void __kprobes add_preempt_count(int val)
1da177e4 2998{
6cd8a4bb 2999#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3000 /*
3001 * Underflow?
3002 */
9a11b49a
IM
3003 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3004 return;
6cd8a4bb 3005#endif
1da177e4 3006 preempt_count() += val;
6cd8a4bb 3007#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3008 /*
3009 * Spinlock count overflowing soon?
3010 */
33859f7f
MOS
3011 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3012 PREEMPT_MASK - 10);
6cd8a4bb
SR
3013#endif
3014 if (preempt_count() == val)
3015 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
3016}
3017EXPORT_SYMBOL(add_preempt_count);
3018
43627582 3019void __kprobes sub_preempt_count(int val)
1da177e4 3020{
6cd8a4bb 3021#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3022 /*
3023 * Underflow?
3024 */
01e3eb82 3025 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 3026 return;
1da177e4
LT
3027 /*
3028 * Is the spinlock portion underflowing?
3029 */
9a11b49a
IM
3030 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3031 !(preempt_count() & PREEMPT_MASK)))
3032 return;
6cd8a4bb 3033#endif
9a11b49a 3034
6cd8a4bb
SR
3035 if (preempt_count() == val)
3036 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
3037 preempt_count() -= val;
3038}
3039EXPORT_SYMBOL(sub_preempt_count);
3040
3041#endif
3042
3043/*
dd41f596 3044 * Print scheduling while atomic bug:
1da177e4 3045 */
dd41f596 3046static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3047{
838225b4
SS
3048 struct pt_regs *regs = get_irq_regs();
3049
3df0fc5b
PZ
3050 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3051 prev->comm, prev->pid, preempt_count());
838225b4 3052
dd41f596 3053 debug_show_held_locks(prev);
e21f5b15 3054 print_modules();
dd41f596
IM
3055 if (irqs_disabled())
3056 print_irqtrace_events(prev);
838225b4
SS
3057
3058 if (regs)
3059 show_regs(regs);
3060 else
3061 dump_stack();
dd41f596 3062}
1da177e4 3063
dd41f596
IM
3064/*
3065 * Various schedule()-time debugging checks and statistics:
3066 */
3067static inline void schedule_debug(struct task_struct *prev)
3068{
1da177e4 3069 /*
41a2d6cf 3070 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
3071 * schedule() atomically, we ignore that path for now.
3072 * Otherwise, whine if we are scheduling when we should not be.
3073 */
3f33a7ce 3074 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596 3075 __schedule_bug(prev);
b3fbab05 3076 rcu_sleep_check();
dd41f596 3077
1da177e4
LT
3078 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3079
2d72376b 3080 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
3081}
3082
6cecd084 3083static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 3084{
61eadef6 3085 if (prev->on_rq || rq->skip_clock_update < 0)
a64692a3 3086 update_rq_clock(rq);
6cecd084 3087 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
3088}
3089
dd41f596
IM
3090/*
3091 * Pick up the highest-prio task:
3092 */
3093static inline struct task_struct *
b67802ea 3094pick_next_task(struct rq *rq)
dd41f596 3095{
5522d5d5 3096 const struct sched_class *class;
dd41f596 3097 struct task_struct *p;
1da177e4
LT
3098
3099 /*
dd41f596
IM
3100 * Optimization: we know that if all tasks are in
3101 * the fair class we can call that function directly:
1da177e4 3102 */
953bfcd1 3103 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
fb8d4724 3104 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
3105 if (likely(p))
3106 return p;
1da177e4
LT
3107 }
3108
34f971f6 3109 for_each_class(class) {
fb8d4724 3110 p = class->pick_next_task(rq);
dd41f596
IM
3111 if (p)
3112 return p;
dd41f596 3113 }
34f971f6
PZ
3114
3115 BUG(); /* the idle class will always have a runnable task */
dd41f596 3116}
1da177e4 3117
dd41f596 3118/*
c259e01a 3119 * __schedule() is the main scheduler function.
dd41f596 3120 */
c259e01a 3121static void __sched __schedule(void)
dd41f596
IM
3122{
3123 struct task_struct *prev, *next;
67ca7bde 3124 unsigned long *switch_count;
dd41f596 3125 struct rq *rq;
31656519 3126 int cpu;
dd41f596 3127
ff743345
PZ
3128need_resched:
3129 preempt_disable();
dd41f596
IM
3130 cpu = smp_processor_id();
3131 rq = cpu_rq(cpu);
25502a6c 3132 rcu_note_context_switch(cpu);
dd41f596 3133 prev = rq->curr;
dd41f596 3134
dd41f596 3135 schedule_debug(prev);
1da177e4 3136
31656519 3137 if (sched_feat(HRTICK))
f333fdc9 3138 hrtick_clear(rq);
8f4d37ec 3139
05fa785c 3140 raw_spin_lock_irq(&rq->lock);
1da177e4 3141
246d86b5 3142 switch_count = &prev->nivcsw;
1da177e4 3143 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 3144 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3145 prev->state = TASK_RUNNING;
21aa9af0 3146 } else {
2acca55e
PZ
3147 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3148 prev->on_rq = 0;
3149
21aa9af0 3150 /*
2acca55e
PZ
3151 * If a worker went to sleep, notify and ask workqueue
3152 * whether it wants to wake up a task to maintain
3153 * concurrency.
21aa9af0
TH
3154 */
3155 if (prev->flags & PF_WQ_WORKER) {
3156 struct task_struct *to_wakeup;
3157
3158 to_wakeup = wq_worker_sleeping(prev, cpu);
3159 if (to_wakeup)
3160 try_to_wake_up_local(to_wakeup);
3161 }
21aa9af0 3162 }
dd41f596 3163 switch_count = &prev->nvcsw;
1da177e4
LT
3164 }
3165
3f029d3c 3166 pre_schedule(rq, prev);
f65eda4f 3167
dd41f596 3168 if (unlikely(!rq->nr_running))
1da177e4 3169 idle_balance(cpu, rq);
1da177e4 3170
df1c99d4 3171 put_prev_task(rq, prev);
b67802ea 3172 next = pick_next_task(rq);
f26f9aff
MG
3173 clear_tsk_need_resched(prev);
3174 rq->skip_clock_update = 0;
1da177e4 3175
1da177e4 3176 if (likely(prev != next)) {
1da177e4
LT
3177 rq->nr_switches++;
3178 rq->curr = next;
3179 ++*switch_count;
3180
dd41f596 3181 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 3182 /*
246d86b5
ON
3183 * The context switch have flipped the stack from under us
3184 * and restored the local variables which were saved when
3185 * this task called schedule() in the past. prev == current
3186 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
3187 */
3188 cpu = smp_processor_id();
3189 rq = cpu_rq(cpu);
1da177e4 3190 } else
05fa785c 3191 raw_spin_unlock_irq(&rq->lock);
1da177e4 3192
3f029d3c 3193 post_schedule(rq);
1da177e4 3194
1da177e4 3195 preempt_enable_no_resched();
ff743345 3196 if (need_resched())
1da177e4
LT
3197 goto need_resched;
3198}
c259e01a 3199
9c40cef2
TG
3200static inline void sched_submit_work(struct task_struct *tsk)
3201{
3202 if (!tsk->state)
3203 return;
3204 /*
3205 * If we are going to sleep and we have plugged IO queued,
3206 * make sure to submit it to avoid deadlocks.
3207 */
3208 if (blk_needs_flush_plug(tsk))
3209 blk_schedule_flush_plug(tsk);
3210}
3211
6ebbe7a0 3212asmlinkage void __sched schedule(void)
c259e01a 3213{
9c40cef2
TG
3214 struct task_struct *tsk = current;
3215
3216 sched_submit_work(tsk);
c259e01a
TG
3217 __schedule();
3218}
1da177e4
LT
3219EXPORT_SYMBOL(schedule);
3220
c08f7829 3221#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
0d66bf6d 3222
c6eb3dda
PZ
3223static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
3224{
c6eb3dda 3225 if (lock->owner != owner)
307bf980 3226 return false;
0d66bf6d
PZ
3227
3228 /*
c6eb3dda
PZ
3229 * Ensure we emit the owner->on_cpu dereference _after_ checking that
3230 * lock->owner still matches owner. If that fails, owner might
3231 * point to free()d memory; if it still matches, the rcu_read_lock()
3232 * ensures the memory stays valid.
0d66bf6d 3233 */
c6eb3dda 3234 barrier();
0d66bf6d 3235
307bf980 3236 return owner->on_cpu;
c6eb3dda 3237}
0d66bf6d 3238
c6eb3dda
PZ
3239/*
3240 * Look out! "owner" is an entirely speculative pointer
3241 * access and not reliable.
3242 */
3243int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
3244{
3245 if (!sched_feat(OWNER_SPIN))
3246 return 0;
0d66bf6d 3247
307bf980 3248 rcu_read_lock();
c6eb3dda
PZ
3249 while (owner_running(lock, owner)) {
3250 if (need_resched())
307bf980 3251 break;
0d66bf6d 3252
335d7afb 3253 arch_mutex_cpu_relax();
0d66bf6d 3254 }
307bf980 3255 rcu_read_unlock();
4b402210 3256
c6eb3dda 3257 /*
307bf980
TG
3258 * We break out of the loop above on need_resched() or when the
3259 * owner has changed, which is a sign of heavy contention. Return
3260 * success only when lock->owner is NULL.
c6eb3dda 3261 */
307bf980 3262 return lock->owner == NULL;
0d66bf6d
PZ
3263}
3264#endif
3265
1da177e4
LT
3266#ifdef CONFIG_PREEMPT
3267/*
2ed6e34f 3268 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3269 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3270 * occur there and call schedule directly.
3271 */
d1f74e20 3272asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
3273{
3274 struct thread_info *ti = current_thread_info();
6478d880 3275
1da177e4
LT
3276 /*
3277 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3278 * we do not want to preempt the current task. Just return..
1da177e4 3279 */
beed33a8 3280 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
3281 return;
3282
3a5c359a 3283 do {
d1f74e20 3284 add_preempt_count_notrace(PREEMPT_ACTIVE);
c259e01a 3285 __schedule();
d1f74e20 3286 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 3287
3a5c359a
AK
3288 /*
3289 * Check again in case we missed a preemption opportunity
3290 * between schedule and now.
3291 */
3292 barrier();
5ed0cec0 3293 } while (need_resched());
1da177e4 3294}
1da177e4
LT
3295EXPORT_SYMBOL(preempt_schedule);
3296
3297/*
2ed6e34f 3298 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3299 * off of irq context.
3300 * Note that this is called and returns with irqs disabled. This will
3301 * protect us against recursive calling from irq.
3302 */
3303asmlinkage void __sched preempt_schedule_irq(void)
3304{
3305 struct thread_info *ti = current_thread_info();
6478d880 3306
2ed6e34f 3307 /* Catch callers which need to be fixed */
1da177e4
LT
3308 BUG_ON(ti->preempt_count || !irqs_disabled());
3309
3a5c359a
AK
3310 do {
3311 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a 3312 local_irq_enable();
c259e01a 3313 __schedule();
3a5c359a 3314 local_irq_disable();
3a5c359a 3315 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 3316
3a5c359a
AK
3317 /*
3318 * Check again in case we missed a preemption opportunity
3319 * between schedule and now.
3320 */
3321 barrier();
5ed0cec0 3322 } while (need_resched());
1da177e4
LT
3323}
3324
3325#endif /* CONFIG_PREEMPT */
3326
63859d4f 3327int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3328 void *key)
1da177e4 3329{
63859d4f 3330 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3331}
1da177e4
LT
3332EXPORT_SYMBOL(default_wake_function);
3333
3334/*
41a2d6cf
IM
3335 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3336 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
3337 * number) then we wake all the non-exclusive tasks and one exclusive task.
3338 *
3339 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 3340 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
3341 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3342 */
78ddb08f 3343static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 3344 int nr_exclusive, int wake_flags, void *key)
1da177e4 3345{
2e45874c 3346 wait_queue_t *curr, *next;
1da177e4 3347
2e45874c 3348 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
3349 unsigned flags = curr->flags;
3350
63859d4f 3351 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 3352 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
3353 break;
3354 }
3355}
3356
3357/**
3358 * __wake_up - wake up threads blocked on a waitqueue.
3359 * @q: the waitqueue
3360 * @mode: which threads
3361 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 3362 * @key: is directly passed to the wakeup function
50fa610a
DH
3363 *
3364 * It may be assumed that this function implies a write memory barrier before
3365 * changing the task state if and only if any tasks are woken up.
1da177e4 3366 */
7ad5b3a5 3367void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 3368 int nr_exclusive, void *key)
1da177e4
LT
3369{
3370 unsigned long flags;
3371
3372 spin_lock_irqsave(&q->lock, flags);
3373 __wake_up_common(q, mode, nr_exclusive, 0, key);
3374 spin_unlock_irqrestore(&q->lock, flags);
3375}
1da177e4
LT
3376EXPORT_SYMBOL(__wake_up);
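
/*
 * Hedged sketch (not part of this file) of the usual wait-queue pattern;
 * the "my_*" names are hypothetical. The consumer blocks in wait_event()
 * until the condition is true; the producer sets the condition and calls
 * wake_up(), which ends up in __wake_up() above. DECLARE_WAIT_QUEUE_HEAD(),
 * wait_event() and wake_up() come from <linux/wait.h>.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_data_ready;

static void my_producer(void)
{
	my_data_ready = 1;
	wake_up(&my_waitq);			/* TASK_NORMAL, non-exclusive */
}

static void my_consumer(void)
{
	wait_event(my_waitq, my_data_ready);	/* sleeps until condition holds */
	/* ... consume the data ... */
}
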
3377
3378/*
3379 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3380 */
7ad5b3a5 3381void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1da177e4
LT
3382{
3383 __wake_up_common(q, mode, 1, 0, NULL);
3384}
22c43c81 3385EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 3386
4ede816a
DL
3387void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3388{
3389 __wake_up_common(q, mode, 1, 0, key);
3390}
bf294b41 3391EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4ede816a 3392
1da177e4 3393/**
4ede816a 3394 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
3395 * @q: the waitqueue
3396 * @mode: which threads
3397 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 3398 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
3399 *
3400 * The sync wakeup differs in that the waker knows that it will schedule
3401 * away soon, so while the target thread will be woken up, it will not
3402 * be migrated to another CPU - ie. the two threads are 'synchronized'
3403 * with each other. This can prevent needless bouncing between CPUs.
3404 *
3405 * On UP it can prevent extra preemption.
50fa610a
DH
3406 *
3407 * It may be assumed that this function implies a write memory barrier before
3408 * changing the task state if and only if any tasks are woken up.
1da177e4 3409 */
4ede816a
DL
3410void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3411 int nr_exclusive, void *key)
1da177e4
LT
3412{
3413 unsigned long flags;
7d478721 3414 int wake_flags = WF_SYNC;
1da177e4
LT
3415
3416 if (unlikely(!q))
3417 return;
3418
3419 if (unlikely(!nr_exclusive))
7d478721 3420 wake_flags = 0;
1da177e4
LT
3421
3422 spin_lock_irqsave(&q->lock, flags);
7d478721 3423 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
3424 spin_unlock_irqrestore(&q->lock, flags);
3425}
4ede816a
DL
3426EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3427
3428/*
3429 * __wake_up_sync - see __wake_up_sync_key()
3430 */
3431void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3432{
3433 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
3434}
1da177e4
LT
3435EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
3436
65eb3dc6
KD
3437/**
3438 * complete: - signals a single thread waiting on this completion
3439 * @x: holds the state of this particular completion
3440 *
3441 * This will wake up a single thread waiting on this completion. Threads will be
3442 * awakened in the same order in which they were queued.
3443 *
3444 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
3445 *
3446 * It may be assumed that this function implies a write memory barrier before
3447 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3448 */
b15136e9 3449void complete(struct completion *x)
1da177e4
LT
3450{
3451 unsigned long flags;
3452
3453 spin_lock_irqsave(&x->wait.lock, flags);
3454 x->done++;
d9514f6c 3455 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
3456 spin_unlock_irqrestore(&x->wait.lock, flags);
3457}
3458EXPORT_SYMBOL(complete);
3459
65eb3dc6
KD
3460/**
3461 * complete_all: - signals all threads waiting on this completion
3462 * @x: holds the state of this particular completion
3463 *
3464 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
3465 *
3466 * It may be assumed that this function implies a write memory barrier before
3467 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3468 */
b15136e9 3469void complete_all(struct completion *x)
1da177e4
LT
3470{
3471 unsigned long flags;
3472
3473 spin_lock_irqsave(&x->wait.lock, flags);
3474 x->done += UINT_MAX/2;
d9514f6c 3475 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
3476 spin_unlock_irqrestore(&x->wait.lock, flags);
3477}
3478EXPORT_SYMBOL(complete_all);
3479
8cbbe86d
AK
3480static inline long __sched
3481do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3482{
1da177e4
LT
3483 if (!x->done) {
3484 DECLARE_WAITQUEUE(wait, current);
3485
a93d2f17 3486 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 3487 do {
94d3d824 3488 if (signal_pending_state(state, current)) {
ea71a546
ON
3489 timeout = -ERESTARTSYS;
3490 break;
8cbbe86d
AK
3491 }
3492 __set_current_state(state);
1da177e4
LT
3493 spin_unlock_irq(&x->wait.lock);
3494 timeout = schedule_timeout(timeout);
3495 spin_lock_irq(&x->wait.lock);
ea71a546 3496 } while (!x->done && timeout);
1da177e4 3497 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
3498 if (!x->done)
3499 return timeout;
1da177e4
LT
3500 }
3501 x->done--;
ea71a546 3502 return timeout ?: 1;
1da177e4 3503}
1da177e4 3504
8cbbe86d
AK
3505static long __sched
3506wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3507{
1da177e4
LT
3508 might_sleep();
3509
3510 spin_lock_irq(&x->wait.lock);
8cbbe86d 3511 timeout = do_wait_for_common(x, timeout, state);
1da177e4 3512 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
3513 return timeout;
3514}
1da177e4 3515
65eb3dc6
KD
3516/**
3517 * wait_for_completion: - waits for completion of a task
3518 * @x: holds the state of this particular completion
3519 *
3520 * This waits to be signaled for completion of a specific task. It is NOT
3521 * interruptible and there is no timeout.
3522 *
3523 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3524 * and interrupt capability. Also see complete().
3525 */
b15136e9 3526void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
3527{
3528 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 3529}
8cbbe86d 3530EXPORT_SYMBOL(wait_for_completion);
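/*
 * Hedged sketch: the classic handshake of waiting (uninterruptibly) for a
 * worker kthread to reach a known point before continuing.  kthread_run()
 * is real API; the names and the one-shot structure are illustrative only.
 */
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>

static DECLARE_COMPLETION(worker_ready);

static int my_worker(void *unused)
{
	/* ... bring per-thread state up ... */
	complete(&worker_ready);
	/* ... main loop ... */
	return 0;
}

static int my_start_worker(void)
{
	struct task_struct *tsk = kthread_run(my_worker, NULL, "my_worker");

	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wait_for_completion(&worker_ready);	/* cannot be interrupted */
	return 0;
}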
1da177e4 3531
65eb3dc6
KD
3532/**
3533 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3534 * @x: holds the state of this particular completion
3535 * @timeout: timeout value in jiffies
3536 *
3537 * This waits for either a completion of a specific task to be signaled or for a
3538 * specified timeout to expire. The timeout is in jiffies. It is not
3539 * interruptible.
c6dc7f05
BF
3540 *
3541 * The return value is 0 if timed out, and positive (at least 1, or number of
3542 * jiffies left till timeout) if completed.
65eb3dc6 3543 */
b15136e9 3544unsigned long __sched
8cbbe86d 3545wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 3546{
8cbbe86d 3547 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 3548}
8cbbe86d 3549EXPORT_SYMBOL(wait_for_completion_timeout);
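/*
 * Hedged sketch of checking the return value: 0 means the timeout expired
 * with the completion still pending, a positive value is the number of
 * jiffies left.  my_dev/cmd_done are the hypothetical names from the
 * earlier sketch; the 500ms budget is arbitrary.
 */
static int my_wait_cmd(struct my_dev *dev)
{
	unsigned long left;

	left = wait_for_completion_timeout(&dev->cmd_done,
					   msecs_to_jiffies(500));
	if (!left)
		return -ETIMEDOUT;	/* hardware never answered */
	return 0;			/* completed with 'left' jiffies to spare */
}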
1da177e4 3550
65eb3dc6
KD
3551/**
3552 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3553 * @x: holds the state of this particular completion
3554 *
3555 * This waits for completion of a specific task to be signaled. It is
3556 * interruptible.
c6dc7f05
BF
3557 *
3558 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3559 */
8cbbe86d 3560int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 3561{
51e97990
AK
3562 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3563 if (t == -ERESTARTSYS)
3564 return t;
3565 return 0;
0fec171c 3566}
8cbbe86d 3567EXPORT_SYMBOL(wait_for_completion_interruptible);
1da177e4 3568
65eb3dc6
KD
3569/**
3570 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3571 * @x: holds the state of this particular completion
3572 * @timeout: timeout value in jiffies
3573 *
3574 * This waits for either a completion of a specific task to be signaled or for a
3575 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
c6dc7f05
BF
3576 *
3577 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3578 * positive (at least 1, or number of jiffies left till timeout) if completed.
65eb3dc6 3579 */
6bf41237 3580long __sched
8cbbe86d
AK
3581wait_for_completion_interruptible_timeout(struct completion *x,
3582 unsigned long timeout)
0fec171c 3583{
8cbbe86d 3584 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 3585}
8cbbe86d 3586EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
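/*
 * Hedged sketch of the three-way return contract documented above: negative
 * on signal, zero on timeout, positive on completion.  The wrapper name and
 * the one-second budget are illustrative.
 */
static int my_wait_event(struct completion *x)
{
	long ret = wait_for_completion_interruptible_timeout(x, HZ);

	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* one second elapsed, not completed */
	return 0;			/* completed; 'ret' jiffies were left */
}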
1da177e4 3587
65eb3dc6
KD
3588/**
3589 * wait_for_completion_killable: - waits for completion of a task (killable)
3590 * @x: holds the state of this particular completion
3591 *
3592 * This waits to be signaled for completion of a specific task. It can be
3593 * interrupted by a kill signal.
c6dc7f05
BF
3594 *
3595 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3596 */
009e577e
MW
3597int __sched wait_for_completion_killable(struct completion *x)
3598{
3599 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3600 if (t == -ERESTARTSYS)
3601 return t;
3602 return 0;
3603}
3604EXPORT_SYMBOL(wait_for_completion_killable);
3605
0aa12fb4
SW
3606/**
3607 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3608 * @x: holds the state of this particular completion
3609 * @timeout: timeout value in jiffies
3610 *
3611 * This waits for either a completion of a specific task to be
3612 * signaled or for a specified timeout to expire. It can be
3613 * interrupted by a kill signal. The timeout is in jiffies.
c6dc7f05
BF
3614 *
3615 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3616 * positive (at least 1, or number of jiffies left till timeout) if completed.
0aa12fb4 3617 */
6bf41237 3618long __sched
0aa12fb4
SW
3619wait_for_completion_killable_timeout(struct completion *x,
3620 unsigned long timeout)
3621{
3622 return wait_for_common(x, timeout, TASK_KILLABLE);
3623}
3624EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3625
be4de352
DC
3626/**
3627 * try_wait_for_completion - try to decrement a completion without blocking
3628 * @x: completion structure
3629 *
3630 * Returns: 0 if a decrement cannot be done without blocking
3631 * 1 if a decrement succeeded.
3632 *
3633 * If a completion is being used as a counting completion,
3634 * attempt to decrement the counter without blocking. This
3635 * enables us to avoid waiting if the resource the completion
3636 * is protecting is not available.
3637 */
3638bool try_wait_for_completion(struct completion *x)
3639{
7539a3b3 3640 unsigned long flags;
be4de352
DC
3641 int ret = 1;
3642
7539a3b3 3643 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3644 if (!x->done)
3645 ret = 0;
3646 else
3647 x->done--;
7539a3b3 3648 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3649 return ret;
3650}
3651EXPORT_SYMBOL(try_wait_for_completion);
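/*
 * Hedged sketch: when a completion is used as a counter of available
 * resources, try_wait_for_completion() consumes one unit only if that is
 * possible without sleeping.  The wrapper and its fallback are illustrative.
 */
static bool my_try_get_slot(struct completion *slots)
{
	if (try_wait_for_completion(slots))
		return true;	/* decremented ->done, slot acquired */
	return false;		/* would have blocked; caller must defer */
}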
3652
3653/**
3654 * completion_done - Test to see if a completion has any waiters
3655 * @x: completion structure
3656 *
3657 * Returns: 0 if there are waiters (wait_for_completion() in progress)
3658 * 1 if there are no waiters.
3659 *
3660 */
3661bool completion_done(struct completion *x)
3662{
7539a3b3 3663 unsigned long flags;
be4de352
DC
3664 int ret = 1;
3665
7539a3b3 3666 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3667 if (!x->done)
3668 ret = 0;
7539a3b3 3669 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3670 return ret;
3671}
3672EXPORT_SYMBOL(completion_done);
3673
8cbbe86d
AK
3674static long __sched
3675sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 3676{
0fec171c
IM
3677 unsigned long flags;
3678 wait_queue_t wait;
3679
3680 init_waitqueue_entry(&wait, current);
1da177e4 3681
8cbbe86d 3682 __set_current_state(state);
1da177e4 3683
8cbbe86d
AK
3684 spin_lock_irqsave(&q->lock, flags);
3685 __add_wait_queue(q, &wait);
3686 spin_unlock(&q->lock);
3687 timeout = schedule_timeout(timeout);
3688 spin_lock_irq(&q->lock);
3689 __remove_wait_queue(q, &wait);
3690 spin_unlock_irqrestore(&q->lock, flags);
3691
3692 return timeout;
3693}
3694
3695void __sched interruptible_sleep_on(wait_queue_head_t *q)
3696{
3697 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3698}
1da177e4
LT
3699EXPORT_SYMBOL(interruptible_sleep_on);
3700
0fec171c 3701long __sched
95cdf3b7 3702interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3703{
8cbbe86d 3704 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 3705}
1da177e4
LT
3706EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3707
0fec171c 3708void __sched sleep_on(wait_queue_head_t *q)
1da177e4 3709{
8cbbe86d 3710 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3711}
1da177e4
LT
3712EXPORT_SYMBOL(sleep_on);
3713
0fec171c 3714long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3715{
8cbbe86d 3716 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 3717}
1da177e4
LT
3718EXPORT_SYMBOL(sleep_on_timeout);
3719
b29739f9
IM
3720#ifdef CONFIG_RT_MUTEXES
3721
3722/*
3723 * rt_mutex_setprio - set the current priority of a task
3724 * @p: task
3725 * @prio: prio value (kernel-internal form)
3726 *
3727 * This function changes the 'effective' priority of a task. It does
3728 * not touch ->normal_prio like __setscheduler().
3729 *
3730 * Used by the rt_mutex code to implement priority inheritance logic.
3731 */
36c8b586 3732void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3733{
83b699ed 3734 int oldprio, on_rq, running;
70b97a7f 3735 struct rq *rq;
83ab0aa0 3736 const struct sched_class *prev_class;
b29739f9
IM
3737
3738 BUG_ON(prio < 0 || prio > MAX_PRIO);
3739
0122ec5b 3740 rq = __task_rq_lock(p);
b29739f9 3741
a8027073 3742 trace_sched_pi_setprio(p, prio);
d5f9f942 3743 oldprio = p->prio;
83ab0aa0 3744 prev_class = p->sched_class;
fd2f4419 3745 on_rq = p->on_rq;
051a1d1a 3746 running = task_current(rq, p);
0e1f3483 3747 if (on_rq)
69be72c1 3748 dequeue_task(rq, p, 0);
0e1f3483
HS
3749 if (running)
3750 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
3751
3752 if (rt_prio(prio))
3753 p->sched_class = &rt_sched_class;
3754 else
3755 p->sched_class = &fair_sched_class;
3756
b29739f9
IM
3757 p->prio = prio;
3758
0e1f3483
HS
3759 if (running)
3760 p->sched_class->set_curr_task(rq);
da7a735e 3761 if (on_rq)
371fd7e7 3762 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845 3763
da7a735e 3764 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 3765 __task_rq_unlock(rq);
b29739f9
IM
3766}
3767
3768#endif
3769
36c8b586 3770void set_user_nice(struct task_struct *p, long nice)
1da177e4 3771{
dd41f596 3772 int old_prio, delta, on_rq;
1da177e4 3773 unsigned long flags;
70b97a7f 3774 struct rq *rq;
1da177e4
LT
3775
3776 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3777 return;
3778 /*
3779 * We have to be careful, if called from sys_setpriority(),
3780 * the task might be in the middle of scheduling on another CPU.
3781 */
3782 rq = task_rq_lock(p, &flags);
3783 /*
3784 * The RT priorities are set via sched_setscheduler(), but we still
3785 * allow the 'normal' nice value to be set - but as expected
3786 * it won't have any effect on scheduling while the task is
dd41f596 3787 * SCHED_FIFO/SCHED_RR:
1da177e4 3788 */
e05606d3 3789 if (task_has_rt_policy(p)) {
1da177e4
LT
3790 p->static_prio = NICE_TO_PRIO(nice);
3791 goto out_unlock;
3792 }
fd2f4419 3793 on_rq = p->on_rq;
c09595f6 3794 if (on_rq)
69be72c1 3795 dequeue_task(rq, p, 0);
1da177e4 3796
1da177e4 3797 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3798 set_load_weight(p);
b29739f9
IM
3799 old_prio = p->prio;
3800 p->prio = effective_prio(p);
3801 delta = p->prio - old_prio;
1da177e4 3802
dd41f596 3803 if (on_rq) {
371fd7e7 3804 enqueue_task(rq, p, 0);
1da177e4 3805 /*
d5f9f942
AM
3806 * If the task increased its priority or is running and
3807 * lowered its priority, then reschedule its CPU:
1da177e4 3808 */
d5f9f942 3809 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
3810 resched_task(rq->curr);
3811 }
3812out_unlock:
0122ec5b 3813 task_rq_unlock(rq, p, &flags);
1da177e4 3814}
1da177e4
LT
3815EXPORT_SYMBOL(set_user_nice);
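/*
 * Hedged sketch: kernel threads commonly lower their own priority with
 * set_user_nice() right after starting, so background work does not compete
 * with interactive tasks.  The thread function name is made up.
 */
#include <linux/kthread.h>

static int my_background_thread(void *unused)
{
	set_user_nice(current, 10);	/* nice +10: reduce our CPU share */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}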
3816
e43379f1
MM
3817/*
3818 * can_nice - check if a task can reduce its nice value
3819 * @p: task
3820 * @nice: nice value
3821 */
36c8b586 3822int can_nice(const struct task_struct *p, const int nice)
e43379f1 3823{
024f4747
MM
3824 /* convert nice value [19,-20] to rlimit style value [1,40] */
3825 int nice_rlim = 20 - nice;
48f24c4d 3826
78d7d407 3827 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3828 capable(CAP_SYS_NICE));
3829}
3830
1da177e4
LT
3831#ifdef __ARCH_WANT_SYS_NICE
3832
3833/*
3834 * sys_nice - change the priority of the current process.
3835 * @increment: priority increment
3836 *
3837 * sys_setpriority is a more generic, but much slower function that
3838 * does similar things.
3839 */
5add95d4 3840SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3841{
48f24c4d 3842 long nice, retval;
1da177e4
LT
3843
3844 /*
3845 * Setpriority might change our priority at the same moment.
3846 * We don't have to worry. Conceptually one call occurs first
3847 * and we have a single winner.
3848 */
e43379f1
MM
3849 if (increment < -40)
3850 increment = -40;
1da177e4
LT
3851 if (increment > 40)
3852 increment = 40;
3853
2b8f836f 3854 nice = TASK_NICE(current) + increment;
1da177e4
LT
3855 if (nice < -20)
3856 nice = -20;
3857 if (nice > 19)
3858 nice = 19;
3859
e43379f1
MM
3860 if (increment < 0 && !can_nice(current, nice))
3861 return -EPERM;
3862
1da177e4
LT
3863 retval = security_task_setnice(current, nice);
3864 if (retval)
3865 return retval;
3866
3867 set_user_nice(current, nice);
3868 return 0;
3869}
3870
3871#endif
3872
3873/**
3874 * task_prio - return the priority value of a given task.
3875 * @p: the task in question.
3876 *
3877 * This is the priority value as seen by users in /proc.
3878 * RT tasks are offset by -200. Normal tasks are centered
3879 * around 0, value goes from -16 to +15.
3880 */
36c8b586 3881int task_prio(const struct task_struct *p)
1da177e4
LT
3882{
3883 return p->prio - MAX_RT_PRIO;
3884}
3885
3886/**
3887 * task_nice - return the nice value of a given task.
3888 * @p: the task in question.
3889 */
36c8b586 3890int task_nice(const struct task_struct *p)
1da177e4
LT
3891{
3892 return TASK_NICE(p);
3893}
150d8bed 3894EXPORT_SYMBOL(task_nice);
1da177e4
LT
3895
3896/**
3897 * idle_cpu - is a given cpu idle currently?
3898 * @cpu: the processor in question.
3899 */
3900int idle_cpu(int cpu)
3901{
908a3283
TG
3902 struct rq *rq = cpu_rq(cpu);
3903
3904 if (rq->curr != rq->idle)
3905 return 0;
3906
3907 if (rq->nr_running)
3908 return 0;
3909
3910#ifdef CONFIG_SMP
3911 if (!llist_empty(&rq->wake_list))
3912 return 0;
3913#endif
3914
3915 return 1;
1da177e4
LT
3916}
3917
1da177e4
LT
3918/**
3919 * idle_task - return the idle task for a given cpu.
3920 * @cpu: the processor in question.
3921 */
36c8b586 3922struct task_struct *idle_task(int cpu)
1da177e4
LT
3923{
3924 return cpu_rq(cpu)->idle;
3925}
3926
3927/**
3928 * find_process_by_pid - find a process with a matching PID value.
3929 * @pid: the pid in question.
3930 */
a9957449 3931static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3932{
228ebcbe 3933 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3934}
3935
3936/* Actually do priority change: must hold rq lock. */
dd41f596
IM
3937static void
3938__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 3939{
1da177e4
LT
3940 p->policy = policy;
3941 p->rt_priority = prio;
b29739f9
IM
3942 p->normal_prio = normal_prio(p);
3943 /* we are holding p->pi_lock already */
3944 p->prio = rt_mutex_getprio(p);
ffd44db5
PZ
3945 if (rt_prio(p->prio))
3946 p->sched_class = &rt_sched_class;
3947 else
3948 p->sched_class = &fair_sched_class;
2dd73a4f 3949 set_load_weight(p);
1da177e4
LT
3950}
3951
c69e8d9c
DH
3952/*
3953 * check the target process has a UID that matches the current process's
3954 */
3955static bool check_same_owner(struct task_struct *p)
3956{
3957 const struct cred *cred = current_cred(), *pcred;
3958 bool match;
3959
3960 rcu_read_lock();
3961 pcred = __task_cred(p);
b0e77598
SH
3962 if (cred->user->user_ns == pcred->user->user_ns)
3963 match = (cred->euid == pcred->euid ||
3964 cred->euid == pcred->uid);
3965 else
3966 match = false;
c69e8d9c
DH
3967 rcu_read_unlock();
3968 return match;
3969}
3970
961ccddd 3971static int __sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 3972 const struct sched_param *param, bool user)
1da177e4 3973{
83b699ed 3974 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 3975 unsigned long flags;
83ab0aa0 3976 const struct sched_class *prev_class;
70b97a7f 3977 struct rq *rq;
ca94c442 3978 int reset_on_fork;
1da177e4 3979
66e5393a
SR
3980 /* may grab non-irq protected spin_locks */
3981 BUG_ON(in_interrupt());
1da177e4
LT
3982recheck:
3983 /* double check policy once rq lock held */
ca94c442
LP
3984 if (policy < 0) {
3985 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3986 policy = oldpolicy = p->policy;
ca94c442
LP
3987 } else {
3988 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3989 policy &= ~SCHED_RESET_ON_FORK;
3990
3991 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3992 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3993 policy != SCHED_IDLE)
3994 return -EINVAL;
3995 }
3996
1da177e4
LT
3997 /*
3998 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3999 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4000 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
4001 */
4002 if (param->sched_priority < 0 ||
95cdf3b7 4003 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 4004 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 4005 return -EINVAL;
e05606d3 4006 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
4007 return -EINVAL;
4008
37e4ab3f
OC
4009 /*
4010 * Allow unprivileged RT tasks to decrease priority:
4011 */
961ccddd 4012 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 4013 if (rt_policy(policy)) {
a44702e8
ON
4014 unsigned long rlim_rtprio =
4015 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
4016
4017 /* can't set/change the rt policy */
4018 if (policy != p->policy && !rlim_rtprio)
4019 return -EPERM;
4020
4021 /* can't increase priority */
4022 if (param->sched_priority > p->rt_priority &&
4023 param->sched_priority > rlim_rtprio)
4024 return -EPERM;
4025 }
c02aa73b 4026
dd41f596 4027 /*
c02aa73b
DH
4028 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4029 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 4030 */
c02aa73b
DH
4031 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4032 if (!can_nice(p, TASK_NICE(p)))
4033 return -EPERM;
4034 }
5fe1d75f 4035
37e4ab3f 4036 /* can't change other user's priorities */
c69e8d9c 4037 if (!check_same_owner(p))
37e4ab3f 4038 return -EPERM;
ca94c442
LP
4039
4040 /* Normal users shall not reset the sched_reset_on_fork flag */
4041 if (p->sched_reset_on_fork && !reset_on_fork)
4042 return -EPERM;
37e4ab3f 4043 }
1da177e4 4044
725aad24 4045 if (user) {
b0ae1981 4046 retval = security_task_setscheduler(p);
725aad24
JF
4047 if (retval)
4048 return retval;
4049 }
4050
b29739f9
IM
4051 /*
4052 * make sure no PI-waiters arrive (or leave) while we are
4053 * changing the priority of the task:
0122ec5b 4054 *
25985edc 4055 * To be able to change p->policy safely, the appropriate
1da177e4
LT
4056 * runqueue lock must be held.
4057 */
0122ec5b 4058 rq = task_rq_lock(p, &flags);
dc61b1d6 4059
34f971f6
PZ
4060 /*
4061 * Changing the policy of the stop threads is a very bad idea
4062 */
4063 if (p == rq->stop) {
0122ec5b 4064 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
4065 return -EINVAL;
4066 }
4067
a51e9198
DF
4068 /*
4069 * If not changing anything there's no need to proceed further:
4070 */
4071 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4072 param->sched_priority == p->rt_priority))) {
4073
4074 __task_rq_unlock(rq);
4075 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4076 return 0;
4077 }
4078
dc61b1d6
PZ
4079#ifdef CONFIG_RT_GROUP_SCHED
4080 if (user) {
4081 /*
4082 * Do not allow realtime tasks into groups that have no runtime
4083 * assigned.
4084 */
4085 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
4086 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4087 !task_group_is_autogroup(task_group(p))) {
0122ec5b 4088 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
4089 return -EPERM;
4090 }
4091 }
4092#endif
4093
1da177e4
LT
4094 /* recheck policy now with rq lock held */
4095 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4096 policy = oldpolicy = -1;
0122ec5b 4097 task_rq_unlock(rq, p, &flags);
1da177e4
LT
4098 goto recheck;
4099 }
fd2f4419 4100 on_rq = p->on_rq;
051a1d1a 4101 running = task_current(rq, p);
0e1f3483 4102 if (on_rq)
2e1cb74a 4103 deactivate_task(rq, p, 0);
0e1f3483
HS
4104 if (running)
4105 p->sched_class->put_prev_task(rq, p);
f6b53205 4106
ca94c442
LP
4107 p->sched_reset_on_fork = reset_on_fork;
4108
1da177e4 4109 oldprio = p->prio;
83ab0aa0 4110 prev_class = p->sched_class;
dd41f596 4111 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 4112
0e1f3483
HS
4113 if (running)
4114 p->sched_class->set_curr_task(rq);
da7a735e 4115 if (on_rq)
dd41f596 4116 activate_task(rq, p, 0);
cb469845 4117
da7a735e 4118 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 4119 task_rq_unlock(rq, p, &flags);
b29739f9 4120
95e02ca9
TG
4121 rt_mutex_adjust_pi(p);
4122
1da177e4
LT
4123 return 0;
4124}
961ccddd
RR
4125
4126/**
4127 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4128 * @p: the task in question.
4129 * @policy: new policy.
4130 * @param: structure containing the new RT priority.
4131 *
4132 * NOTE that the task may already be dead.
4133 */
4134int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4135 const struct sched_param *param)
961ccddd
RR
4136{
4137 return __sched_setscheduler(p, policy, param, true);
4138}
1da177e4
LT
4139EXPORT_SYMBOL_GPL(sched_setscheduler);
4140
961ccddd
RR
4141/**
4142 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4143 * @p: the task in question.
4144 * @policy: new policy.
4145 * @param: structure containing the new RT priority.
4146 *
4147 * Just like sched_setscheduler, only don't bother checking if the
4148 * current context has permission. For example, this is needed in
4149 * stop_machine(): we create temporary high priority worker threads,
4150 * but our caller might not have that capability.
4151 */
4152int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4153 const struct sched_param *param)
961ccddd
RR
4154{
4155 return __sched_setscheduler(p, policy, param, false);
4156}
4157
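/*
 * Hedged usage sketch (not part of this file): giving a latency-critical
 * kthread a real-time policy.  In-kernel callers typically use the _nocheck
 * variant just above, since the calling context's credentials are not
 * relevant; the priority value 50 is arbitrary and purely illustrative.
 */
static void my_make_rt(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 50 };

	sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
}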
95cdf3b7
IM
4158static int
4159do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4160{
1da177e4
LT
4161 struct sched_param lparam;
4162 struct task_struct *p;
36c8b586 4163 int retval;
1da177e4
LT
4164
4165 if (!param || pid < 0)
4166 return -EINVAL;
4167 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4168 return -EFAULT;
5fe1d75f
ON
4169
4170 rcu_read_lock();
4171 retval = -ESRCH;
1da177e4 4172 p = find_process_by_pid(pid);
5fe1d75f
ON
4173 if (p != NULL)
4174 retval = sched_setscheduler(p, policy, &lparam);
4175 rcu_read_unlock();
36c8b586 4176
1da177e4
LT
4177 return retval;
4178}
4179
4180/**
4181 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4182 * @pid: the pid in question.
4183 * @policy: new policy.
4184 * @param: structure containing the new RT priority.
4185 */
5add95d4
HC
4186SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4187 struct sched_param __user *, param)
1da177e4 4188{
c21761f1
JB
4189 /* negative values for policy are not valid */
4190 if (policy < 0)
4191 return -EINVAL;
4192
1da177e4
LT
4193 return do_sched_setscheduler(pid, policy, param);
4194}
4195
4196/**
4197 * sys_sched_setparam - set/change the RT priority of a thread
4198 * @pid: the pid in question.
4199 * @param: structure containing the new RT priority.
4200 */
5add95d4 4201SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4202{
4203 return do_sched_setscheduler(pid, -1, param);
4204}
4205
4206/**
4207 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4208 * @pid: the pid in question.
4209 */
5add95d4 4210SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4211{
36c8b586 4212 struct task_struct *p;
3a5c359a 4213 int retval;
1da177e4
LT
4214
4215 if (pid < 0)
3a5c359a 4216 return -EINVAL;
1da177e4
LT
4217
4218 retval = -ESRCH;
5fe85be0 4219 rcu_read_lock();
1da177e4
LT
4220 p = find_process_by_pid(pid);
4221 if (p) {
4222 retval = security_task_getscheduler(p);
4223 if (!retval)
ca94c442
LP
4224 retval = p->policy
4225 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4226 }
5fe85be0 4227 rcu_read_unlock();
1da177e4
LT
4228 return retval;
4229}
4230
4231/**
ca94c442 4232 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4233 * @pid: the pid in question.
4234 * @param: structure containing the RT priority.
4235 */
5add95d4 4236SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4237{
4238 struct sched_param lp;
36c8b586 4239 struct task_struct *p;
3a5c359a 4240 int retval;
1da177e4
LT
4241
4242 if (!param || pid < 0)
3a5c359a 4243 return -EINVAL;
1da177e4 4244
5fe85be0 4245 rcu_read_lock();
1da177e4
LT
4246 p = find_process_by_pid(pid);
4247 retval = -ESRCH;
4248 if (!p)
4249 goto out_unlock;
4250
4251 retval = security_task_getscheduler(p);
4252 if (retval)
4253 goto out_unlock;
4254
4255 lp.sched_priority = p->rt_priority;
5fe85be0 4256 rcu_read_unlock();
1da177e4
LT
4257
4258 /*
4259 * This one might sleep, we cannot do it with a spinlock held ...
4260 */
4261 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4262
1da177e4
LT
4263 return retval;
4264
4265out_unlock:
5fe85be0 4266 rcu_read_unlock();
1da177e4
LT
4267 return retval;
4268}
4269
96f874e2 4270long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4271{
5a16f3d3 4272 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4273 struct task_struct *p;
4274 int retval;
1da177e4 4275
95402b38 4276 get_online_cpus();
23f5d142 4277 rcu_read_lock();
1da177e4
LT
4278
4279 p = find_process_by_pid(pid);
4280 if (!p) {
23f5d142 4281 rcu_read_unlock();
95402b38 4282 put_online_cpus();
1da177e4
LT
4283 return -ESRCH;
4284 }
4285
23f5d142 4286 /* Prevent p going away */
1da177e4 4287 get_task_struct(p);
23f5d142 4288 rcu_read_unlock();
1da177e4 4289
5a16f3d3
RR
4290 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4291 retval = -ENOMEM;
4292 goto out_put_task;
4293 }
4294 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4295 retval = -ENOMEM;
4296 goto out_free_cpus_allowed;
4297 }
1da177e4 4298 retval = -EPERM;
b0e77598 4299 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
1da177e4
LT
4300 goto out_unlock;
4301
b0ae1981 4302 retval = security_task_setscheduler(p);
e7834f8f
DQ
4303 if (retval)
4304 goto out_unlock;
4305
5a16f3d3
RR
4306 cpuset_cpus_allowed(p, cpus_allowed);
4307 cpumask_and(new_mask, in_mask, cpus_allowed);
49246274 4308again:
5a16f3d3 4309 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 4310
8707d8b8 4311 if (!retval) {
5a16f3d3
RR
4312 cpuset_cpus_allowed(p, cpus_allowed);
4313 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4314 /*
4315 * We must have raced with a concurrent cpuset
4316 * update. Just reset the cpus_allowed to the
4317 * cpuset's cpus_allowed
4318 */
5a16f3d3 4319 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4320 goto again;
4321 }
4322 }
1da177e4 4323out_unlock:
5a16f3d3
RR
4324 free_cpumask_var(new_mask);
4325out_free_cpus_allowed:
4326 free_cpumask_var(cpus_allowed);
4327out_put_task:
1da177e4 4328 put_task_struct(p);
95402b38 4329 put_online_cpus();
1da177e4
LT
4330 return retval;
4331}
4332
4333static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4334 struct cpumask *new_mask)
1da177e4 4335{
96f874e2
RR
4336 if (len < cpumask_size())
4337 cpumask_clear(new_mask);
4338 else if (len > cpumask_size())
4339 len = cpumask_size();
4340
1da177e4
LT
4341 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4342}
4343
4344/**
4345 * sys_sched_setaffinity - set the cpu affinity of a process
4346 * @pid: pid of the process
4347 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4348 * @user_mask_ptr: user-space pointer to the new cpu mask
4349 */
5add95d4
HC
4350SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4351 unsigned long __user *, user_mask_ptr)
1da177e4 4352{
5a16f3d3 4353 cpumask_var_t new_mask;
1da177e4
LT
4354 int retval;
4355
5a16f3d3
RR
4356 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4357 return -ENOMEM;
1da177e4 4358
5a16f3d3
RR
4359 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4360 if (retval == 0)
4361 retval = sched_setaffinity(pid, new_mask);
4362 free_cpumask_var(new_mask);
4363 return retval;
1da177e4
LT
4364}
4365
96f874e2 4366long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4367{
36c8b586 4368 struct task_struct *p;
31605683 4369 unsigned long flags;
1da177e4 4370 int retval;
1da177e4 4371
95402b38 4372 get_online_cpus();
23f5d142 4373 rcu_read_lock();
1da177e4
LT
4374
4375 retval = -ESRCH;
4376 p = find_process_by_pid(pid);
4377 if (!p)
4378 goto out_unlock;
4379
e7834f8f
DQ
4380 retval = security_task_getscheduler(p);
4381 if (retval)
4382 goto out_unlock;
4383
013fdb80 4384 raw_spin_lock_irqsave(&p->pi_lock, flags);
96f874e2 4385 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
013fdb80 4386 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4387
4388out_unlock:
23f5d142 4389 rcu_read_unlock();
95402b38 4390 put_online_cpus();
1da177e4 4391
9531b62f 4392 return retval;
1da177e4
LT
4393}
4394
4395/**
4396 * sys_sched_getaffinity - get the cpu affinity of a process
4397 * @pid: pid of the process
4398 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4399 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4400 */
5add95d4
HC
4401SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4402 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4403{
4404 int ret;
f17c8607 4405 cpumask_var_t mask;
1da177e4 4406
84fba5ec 4407 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4408 return -EINVAL;
4409 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4410 return -EINVAL;
4411
f17c8607
RR
4412 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4413 return -ENOMEM;
1da177e4 4414
f17c8607
RR
4415 ret = sched_getaffinity(pid, mask);
4416 if (ret == 0) {
8bc037fb 4417 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4418
4419 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4420 ret = -EFAULT;
4421 else
cd3d8031 4422 ret = retlen;
f17c8607
RR
4423 }
4424 free_cpumask_var(mask);
1da177e4 4425
f17c8607 4426 return ret;
1da177e4
LT
4427}
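/*
 * Hedged userspace sketch (not kernel code): exercising the two syscalls
 * above through their glibc wrappers by pinning the calling process to CPU 0
 * and reading the mask back.  Error handling is deliberately minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set))	/* pid 0 == self */
		perror("sched_setaffinity");

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("bound to CPU0: %d\n", CPU_ISSET(0, &set));
	return 0;
}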
4428
4429/**
4430 * sys_sched_yield - yield the current processor to other threads.
4431 *
dd41f596
IM
4432 * This function yields the current CPU to other tasks. If there are no
4433 * other threads running on this CPU then this function will return.
1da177e4 4434 */
5add95d4 4435SYSCALL_DEFINE0(sched_yield)
1da177e4 4436{
70b97a7f 4437 struct rq *rq = this_rq_lock();
1da177e4 4438
2d72376b 4439 schedstat_inc(rq, yld_count);
4530d7ab 4440 current->sched_class->yield_task(rq);
1da177e4
LT
4441
4442 /*
4443 * Since we are going to call schedule() anyway, there's
4444 * no need to preempt or enable interrupts:
4445 */
4446 __release(rq->lock);
8a25d5de 4447 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4448 do_raw_spin_unlock(&rq->lock);
1da177e4
LT
4449 preempt_enable_no_resched();
4450
4451 schedule();
4452
4453 return 0;
4454}
4455
d86ee480
PZ
4456static inline int should_resched(void)
4457{
4458 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4459}
4460
e7b38404 4461static void __cond_resched(void)
1da177e4 4462{
e7aaaa69 4463 add_preempt_count(PREEMPT_ACTIVE);
c259e01a 4464 __schedule();
e7aaaa69 4465 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
4466}
4467
02b67cc3 4468int __sched _cond_resched(void)
1da177e4 4469{
d86ee480 4470 if (should_resched()) {
1da177e4
LT
4471 __cond_resched();
4472 return 1;
4473 }
4474 return 0;
4475}
02b67cc3 4476EXPORT_SYMBOL(_cond_resched);
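/*
 * Hedged sketch: long kernel-side loops sprinkle cond_resched() (which ends
 * up in _cond_resched() above) so that, on non-preemptible kernels, a
 * pending higher-priority task can run between iterations.  The checksum
 * loop below is purely illustrative.
 */
static u32 my_slow_checksum(const u8 *buf, size_t len)
{
	u32 sum = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		sum += buf[i];
		if ((i & 0xffff) == 0)
			cond_resched();	/* voluntary preemption point */
	}
	return sum;
}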
1da177e4
LT
4477
4478/*
613afbf8 4479 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4480 * call schedule, and on return reacquire the lock.
4481 *
41a2d6cf 4482 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4483 * operations here to prevent schedule() from being called twice (once via
4484 * spin_unlock(), once by hand).
4485 */
613afbf8 4486int __cond_resched_lock(spinlock_t *lock)
1da177e4 4487{
d86ee480 4488 int resched = should_resched();
6df3cecb
JK
4489 int ret = 0;
4490
f607c668
PZ
4491 lockdep_assert_held(lock);
4492
95c354fe 4493 if (spin_needbreak(lock) || resched) {
1da177e4 4494 spin_unlock(lock);
d86ee480 4495 if (resched)
95c354fe
NP
4496 __cond_resched();
4497 else
4498 cpu_relax();
6df3cecb 4499 ret = 1;
1da177e4 4500 spin_lock(lock);
1da177e4 4501 }
6df3cecb 4502 return ret;
1da177e4 4503}
613afbf8 4504EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4505
613afbf8 4506int __sched __cond_resched_softirq(void)
1da177e4
LT
4507{
4508 BUG_ON(!in_softirq());
4509
d86ee480 4510 if (should_resched()) {
98d82567 4511 local_bh_enable();
1da177e4
LT
4512 __cond_resched();
4513 local_bh_disable();
4514 return 1;
4515 }
4516 return 0;
4517}
613afbf8 4518EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4519
1da177e4
LT
4520/**
4521 * yield - yield the current processor to other threads.
4522 *
72fd4a35 4523 * This is a shortcut for kernel-space yielding - it marks the
1da177e4
LT
4524 * thread runnable and calls sys_sched_yield().
4525 */
4526void __sched yield(void)
4527{
4528 set_current_state(TASK_RUNNING);
4529 sys_sched_yield();
4530}
1da177e4
LT
4531EXPORT_SYMBOL(yield);
4532
d95f4122
MG
4533/**
4534 * yield_to - yield the current processor to another thread in
4535 * your thread group, or accelerate that thread toward the
4536 * processor it's on.
16addf95
RD
4537 * @p: target task
4538 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4539 *
4540 * It's the caller's job to ensure that the target task struct
4541 * can't go away on us before we can do any checks.
4542 *
4543 * Returns true if we indeed boosted the target task.
4544 */
4545bool __sched yield_to(struct task_struct *p, bool preempt)
4546{
4547 struct task_struct *curr = current;
4548 struct rq *rq, *p_rq;
4549 unsigned long flags;
4550 bool yielded = 0;
4551
4552 local_irq_save(flags);
4553 rq = this_rq();
4554
4555again:
4556 p_rq = task_rq(p);
4557 double_rq_lock(rq, p_rq);
4558 while (task_rq(p) != p_rq) {
4559 double_rq_unlock(rq, p_rq);
4560 goto again;
4561 }
4562
4563 if (!curr->sched_class->yield_to_task)
4564 goto out;
4565
4566 if (curr->sched_class != p->sched_class)
4567 goto out;
4568
4569 if (task_running(p_rq, p) || p->state)
4570 goto out;
4571
4572 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4573 if (yielded) {
d95f4122 4574 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4575 /*
4576 * Make p's CPU reschedule; pick_next_entity takes care of
4577 * fairness.
4578 */
4579 if (preempt && rq != p_rq)
4580 resched_task(p_rq->curr);
916671c0
MG
4581 } else {
4582 /*
4583 * We might have set it in task_yield_fair(), but are
4584 * not going to schedule(), so don't want to skip
4585 * the next update.
4586 */
4587 rq->skip_clock_update = 0;
6d1cafd8 4588 }
d95f4122
MG
4589
4590out:
4591 double_rq_unlock(rq, p_rq);
4592 local_irq_restore(flags);
4593
4594 if (yielded)
4595 schedule();
4596
4597 return yielded;
4598}
4599EXPORT_SYMBOL_GPL(yield_to);
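/*
 * Hedged sketch: the typical consumer of yield_to() is a hypervisor that
 * detects a vCPU spinning on a lock held by a preempted sibling vCPU and
 * boosts that sibling.  Everything below is illustrative pseudologic, not
 * the KVM implementation.
 */
static void my_boost_lock_holder(struct task_struct *holder)
{
	/*
	 * 'true' asks for preemption in favour of 'holder' if it sits on
	 * another runqueue; the return value says whether the donation
	 * actually happened.
	 */
	if (yield_to(holder, true))
		pr_debug("boosted lock holder %d\n", holder->pid);
}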
4600
1da177e4 4601/*
41a2d6cf 4602 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4603 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
4604 */
4605void __sched io_schedule(void)
4606{
54d35f29 4607 struct rq *rq = raw_rq();
1da177e4 4608
0ff92245 4609 delayacct_blkio_start();
1da177e4 4610 atomic_inc(&rq->nr_iowait);
73c10101 4611 blk_flush_plug(current);
8f0dfc34 4612 current->in_iowait = 1;
1da177e4 4613 schedule();
8f0dfc34 4614 current->in_iowait = 0;
1da177e4 4615 atomic_dec(&rq->nr_iowait);
0ff92245 4616 delayacct_blkio_end();
1da177e4 4617}
1da177e4
LT
4618EXPORT_SYMBOL(io_schedule);
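/*
 * Hedged sketch: callers usually reach io_schedule() from inside a
 * prepare_to_wait()/finish_wait() loop while a block request is in flight,
 * so the sleep is charged to iowait rather than ordinary sleep.  The wait
 * queue and the completion flag below are illustrative.
 */
#include <linux/wait.h>

static void my_wait_for_io(wait_queue_head_t *wq, bool *io_done)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (*io_done)
			break;
		io_schedule();		/* sleeps with rq->nr_iowait elevated */
	}
	finish_wait(wq, &wait);
}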
4619
4620long __sched io_schedule_timeout(long timeout)
4621{
54d35f29 4622 struct rq *rq = raw_rq();
1da177e4
LT
4623 long ret;
4624
0ff92245 4625 delayacct_blkio_start();
1da177e4 4626 atomic_inc(&rq->nr_iowait);
73c10101 4627 blk_flush_plug(current);
8f0dfc34 4628 current->in_iowait = 1;
1da177e4 4629 ret = schedule_timeout(timeout);
8f0dfc34 4630 current->in_iowait = 0;
1da177e4 4631 atomic_dec(&rq->nr_iowait);
0ff92245 4632 delayacct_blkio_end();
1da177e4
LT
4633 return ret;
4634}
4635
4636/**
4637 * sys_sched_get_priority_max - return maximum RT priority.
4638 * @policy: scheduling class.
4639 *
4640 * this syscall returns the maximum rt_priority that can be used
4641 * by a given scheduling class.
4642 */
5add95d4 4643SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4644{
4645 int ret = -EINVAL;
4646
4647 switch (policy) {
4648 case SCHED_FIFO:
4649 case SCHED_RR:
4650 ret = MAX_USER_RT_PRIO-1;
4651 break;
4652 case SCHED_NORMAL:
b0a9499c 4653 case SCHED_BATCH:
dd41f596 4654 case SCHED_IDLE:
1da177e4
LT
4655 ret = 0;
4656 break;
4657 }
4658 return ret;
4659}
4660
4661/**
4662 * sys_sched_get_priority_min - return minimum RT priority.
4663 * @policy: scheduling class.
4664 *
4665 * this syscall returns the minimum rt_priority that can be used
4666 * by a given scheduling class.
4667 */
5add95d4 4668SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4669{
4670 int ret = -EINVAL;
4671
4672 switch (policy) {
4673 case SCHED_FIFO:
4674 case SCHED_RR:
4675 ret = 1;
4676 break;
4677 case SCHED_NORMAL:
b0a9499c 4678 case SCHED_BATCH:
dd41f596 4679 case SCHED_IDLE:
1da177e4
LT
4680 ret = 0;
4681 }
4682 return ret;
4683}
4684
4685/**
4686 * sys_sched_rr_get_interval - return the default timeslice of a process.
4687 * @pid: pid of the process.
4688 * @interval: userspace pointer to the timeslice value.
4689 *
4690 * this syscall writes the default timeslice value of a given process
4691 * into the user-space timespec buffer. A value of '0' means infinity.
4692 */
17da2bd9 4693SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4694 struct timespec __user *, interval)
1da177e4 4695{
36c8b586 4696 struct task_struct *p;
a4ec24b4 4697 unsigned int time_slice;
dba091b9
TG
4698 unsigned long flags;
4699 struct rq *rq;
3a5c359a 4700 int retval;
1da177e4 4701 struct timespec t;
1da177e4
LT
4702
4703 if (pid < 0)
3a5c359a 4704 return -EINVAL;
1da177e4
LT
4705
4706 retval = -ESRCH;
1a551ae7 4707 rcu_read_lock();
1da177e4
LT
4708 p = find_process_by_pid(pid);
4709 if (!p)
4710 goto out_unlock;
4711
4712 retval = security_task_getscheduler(p);
4713 if (retval)
4714 goto out_unlock;
4715
dba091b9
TG
4716 rq = task_rq_lock(p, &flags);
4717 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4718 task_rq_unlock(rq, p, &flags);
a4ec24b4 4719
1a551ae7 4720 rcu_read_unlock();
a4ec24b4 4721 jiffies_to_timespec(time_slice, &t);
1da177e4 4722 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4723 return retval;
3a5c359a 4724
1da177e4 4725out_unlock:
1a551ae7 4726 rcu_read_unlock();
1da177e4
LT
4727 return retval;
4728}
4729
7c731e0a 4730static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4731
82a1fcb9 4732void sched_show_task(struct task_struct *p)
1da177e4 4733{
1da177e4 4734 unsigned long free = 0;
36c8b586 4735 unsigned state;
1da177e4 4736
1da177e4 4737 state = p->state ? __ffs(p->state) + 1 : 0;
28d0686c 4738 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4739 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4740#if BITS_PER_LONG == 32
1da177e4 4741 if (state == TASK_RUNNING)
3df0fc5b 4742 printk(KERN_CONT " running ");
1da177e4 4743 else
3df0fc5b 4744 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4745#else
4746 if (state == TASK_RUNNING)
3df0fc5b 4747 printk(KERN_CONT " running task ");
1da177e4 4748 else
3df0fc5b 4749 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4750#endif
4751#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4752 free = stack_not_used(p);
1da177e4 4753#endif
3df0fc5b 4754 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
aa47b7e0
DR
4755 task_pid_nr(p), task_pid_nr(p->real_parent),
4756 (unsigned long)task_thread_info(p)->flags);
1da177e4 4757
5fb5e6de 4758 show_stack(p, NULL);
1da177e4
LT
4759}
4760
e59e2ae2 4761void show_state_filter(unsigned long state_filter)
1da177e4 4762{
36c8b586 4763 struct task_struct *g, *p;
1da177e4 4764
4bd77321 4765#if BITS_PER_LONG == 32
3df0fc5b
PZ
4766 printk(KERN_INFO
4767 " task PC stack pid father\n");
1da177e4 4768#else
3df0fc5b
PZ
4769 printk(KERN_INFO
4770 " task PC stack pid father\n");
1da177e4 4771#endif
510f5acc 4772 rcu_read_lock();
1da177e4
LT
4773 do_each_thread(g, p) {
4774 /*
4775 * reset the NMI-timeout, listing all files on a slow
25985edc 4776 * console might take a lot of time:
1da177e4
LT
4777 */
4778 touch_nmi_watchdog();
39bc89fd 4779 if (!state_filter || (p->state & state_filter))
82a1fcb9 4780 sched_show_task(p);
1da177e4
LT
4781 } while_each_thread(g, p);
4782
04c9167f
JF
4783 touch_all_softlockup_watchdogs();
4784
dd41f596
IM
4785#ifdef CONFIG_SCHED_DEBUG
4786 sysrq_sched_debug_show();
4787#endif
510f5acc 4788 rcu_read_unlock();
e59e2ae2
IM
4789 /*
4790 * Only show locks if all tasks are dumped:
4791 */
93335a21 4792 if (!state_filter)
e59e2ae2 4793 debug_show_all_locks();
1da177e4
LT
4794}
4795
1df21055
IM
4796void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4797{
dd41f596 4798 idle->sched_class = &idle_sched_class;
1df21055
IM
4799}
4800
f340c0d1
IM
4801/**
4802 * init_idle - set up an idle thread for a given CPU
4803 * @idle: task in question
4804 * @cpu: cpu the idle task belongs to
4805 *
4806 * NOTE: this function does not set the idle thread's NEED_RESCHED
4807 * flag, to make booting more robust.
4808 */
5c1e1767 4809void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 4810{
70b97a7f 4811 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4812 unsigned long flags;
4813
05fa785c 4814 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 4815
dd41f596 4816 __sched_fork(idle);
06b83b5f 4817 idle->state = TASK_RUNNING;
dd41f596
IM
4818 idle->se.exec_start = sched_clock();
4819
1e1b6c51 4820 do_set_cpus_allowed(idle, cpumask_of(cpu));
6506cf6c
PZ
4821 /*
4822 * We're having a chicken and egg problem, even though we are
4823 * holding rq->lock, the cpu isn't yet set to this cpu so the
4824 * lockdep check in task_group() will fail.
4825 *
4826 * Similar case to sched_fork(). / Alternatively we could
4827 * use task_rq_lock() here and obtain the other rq->lock.
4828 *
4829 * Silence PROVE_RCU
4830 */
4831 rcu_read_lock();
dd41f596 4832 __set_task_cpu(idle, cpu);
6506cf6c 4833 rcu_read_unlock();
1da177e4 4834
1da177e4 4835 rq->curr = rq->idle = idle;
3ca7a440
PZ
4836#if defined(CONFIG_SMP)
4837 idle->on_cpu = 1;
4866cde0 4838#endif
05fa785c 4839 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
4840
4841 /* Set the preempt count _outside_ the spinlocks! */
a1261f54 4842 task_thread_info(idle)->preempt_count = 0;
625f2a37 4843
dd41f596
IM
4844 /*
4845 * The idle tasks have their own, simple scheduling class:
4846 */
4847 idle->sched_class = &idle_sched_class;
868baf07 4848 ftrace_graph_init_idle_task(idle, cpu);
f1c6f1a7
CE
4849#if defined(CONFIG_SMP)
4850 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4851#endif
1da177e4
LT
4852}
4853
1da177e4 4854#ifdef CONFIG_SMP
1e1b6c51
KM
4855void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4856{
4857 if (p->sched_class && p->sched_class->set_cpus_allowed)
4858 p->sched_class->set_cpus_allowed(p, new_mask);
4939602a
PZ
4859
4860 cpumask_copy(&p->cpus_allowed, new_mask);
4861 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
1e1b6c51
KM
4862}
4863
1da177e4
LT
4864/*
4865 * This is how migration works:
4866 *
969c7921
TH
4867 * 1) we invoke migration_cpu_stop() on the target CPU using
4868 * stop_one_cpu().
4869 * 2) stopper starts to run (implicitly forcing the migrated thread
4870 * off the CPU)
4871 * 3) it checks whether the migrated task is still in the wrong runqueue.
4872 * 4) if it's in the wrong runqueue then the migration thread removes
1da177e4 4873 * it and puts it into the right queue.
969c7921
TH
4874 * 5) stopper completes and stop_one_cpu() returns and the migration
4875 * is done.
1da177e4
LT
4876 */
4877
4878/*
4879 * Change a given task's CPU affinity. Migrate the thread to a
4880 * proper CPU and schedule it away if the CPU it's executing on
4881 * is removed from the allowed bitmask.
4882 *
4883 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 4884 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
4885 * call is not atomic; no spinlocks may be held.
4886 */
96f874e2 4887int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4
LT
4888{
4889 unsigned long flags;
70b97a7f 4890 struct rq *rq;
969c7921 4891 unsigned int dest_cpu;
48f24c4d 4892 int ret = 0;
1da177e4
LT
4893
4894 rq = task_rq_lock(p, &flags);
e2912009 4895
db44fc01
YZ
4896 if (cpumask_equal(&p->cpus_allowed, new_mask))
4897 goto out;
4898
6ad4c188 4899 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1da177e4
LT
4900 ret = -EINVAL;
4901 goto out;
4902 }
4903
db44fc01 4904 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
9985b0ba
DR
4905 ret = -EINVAL;
4906 goto out;
4907 }
4908
1e1b6c51 4909 do_set_cpus_allowed(p, new_mask);
73fe6aae 4910
1da177e4 4911 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 4912 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
4913 goto out;
4914
969c7921 4915 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
bd8e7dde 4916 if (p->on_rq) {
969c7921 4917 struct migration_arg arg = { p, dest_cpu };
1da177e4 4918 /* Need help from migration thread: drop lock and wait. */
0122ec5b 4919 task_rq_unlock(rq, p, &flags);
969c7921 4920 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
4921 tlb_migrate_finish(p->mm);
4922 return 0;
4923 }
4924out:
0122ec5b 4925 task_rq_unlock(rq, p, &flags);
48f24c4d 4926
1da177e4
LT
4927 return ret;
4928}
cd8ba7cd 4929EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
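/*
 * Hedged sketch: restricting an already-running kthread to one CPU.
 * Per-CPU kernel threads normally call kthread_bind() before the thread
 * first runs; set_cpus_allowed_ptr() is the variant that also migrates a
 * task that is already executing.  The wrapper name is illustrative.
 */
static int my_pin_worker(struct task_struct *tsk, int cpu)
{
	return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}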
1da177e4
LT
4930
4931/*
41a2d6cf 4932 * Move (not current) task off this cpu, onto dest cpu. We're doing
1da177e4
LT
4933 * this because either it can't run here any more (set_cpus_allowed()
4934 * away from this CPU, or CPU going down), or because we're
4935 * attempting to rebalance this task on exec (sched_exec).
4936 *
4937 * So we race with normal scheduler movements, but that's OK, as long
4938 * as the task is no longer on this CPU.
efc30814
KK
4939 *
4940 * Returns non-zero if task was successfully migrated.
1da177e4 4941 */
efc30814 4942static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 4943{
70b97a7f 4944 struct rq *rq_dest, *rq_src;
e2912009 4945 int ret = 0;
1da177e4 4946
e761b772 4947 if (unlikely(!cpu_active(dest_cpu)))
efc30814 4948 return ret;
1da177e4
LT
4949
4950 rq_src = cpu_rq(src_cpu);
4951 rq_dest = cpu_rq(dest_cpu);
4952
0122ec5b 4953 raw_spin_lock(&p->pi_lock);
1da177e4
LT
4954 double_rq_lock(rq_src, rq_dest);
4955 /* Already moved. */
4956 if (task_cpu(p) != src_cpu)
b1e38734 4957 goto done;
1da177e4 4958 /* Affinity changed (again). */
fa17b507 4959 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
b1e38734 4960 goto fail;
1da177e4 4961
e2912009
PZ
4962 /*
4963 * If we're not on a rq, the next wake-up will ensure we're
4964 * placed properly.
4965 */
fd2f4419 4966 if (p->on_rq) {
2e1cb74a 4967 deactivate_task(rq_src, p, 0);
e2912009 4968 set_task_cpu(p, dest_cpu);
dd41f596 4969 activate_task(rq_dest, p, 0);
15afe09b 4970 check_preempt_curr(rq_dest, p, 0);
1da177e4 4971 }
b1e38734 4972done:
efc30814 4973 ret = 1;
b1e38734 4974fail:
1da177e4 4975 double_rq_unlock(rq_src, rq_dest);
0122ec5b 4976 raw_spin_unlock(&p->pi_lock);
efc30814 4977 return ret;
1da177e4
LT
4978}
4979
4980/*
969c7921
TH
4981 * migration_cpu_stop - this will be executed by a highprio stopper thread
4982 * and performs thread migration by bumping thread off CPU then
4983 * 'pushing' onto another runqueue.
1da177e4 4984 */
969c7921 4985static int migration_cpu_stop(void *data)
1da177e4 4986{
969c7921 4987 struct migration_arg *arg = data;
f7b4cddc 4988
969c7921
TH
4989 /*
4990 * The original target cpu might have gone down and we might
4991 * be on another cpu but it doesn't matter.
4992 */
f7b4cddc 4993 local_irq_disable();
969c7921 4994 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
f7b4cddc 4995 local_irq_enable();
1da177e4 4996 return 0;
f7b4cddc
ON
4997}
4998
1da177e4 4999#ifdef CONFIG_HOTPLUG_CPU
48c5ccae 5000
054b9108 5001/*
48c5ccae
PZ
5002 * Ensures that the idle task is using init_mm right before its cpu goes
5003 * offline.
054b9108 5004 */
48c5ccae 5005void idle_task_exit(void)
1da177e4 5006{
48c5ccae 5007 struct mm_struct *mm = current->active_mm;
e76bd8d9 5008
48c5ccae 5009 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 5010
48c5ccae
PZ
5011 if (mm != &init_mm)
5012 switch_mm(mm, &init_mm, current);
5013 mmdrop(mm);
1da177e4
LT
5014}
5015
5016/*
5017 * While a dead CPU has no uninterruptible tasks queued at this point,
5018 * it might still have a nonzero ->nr_uninterruptible counter, because
5019 * for performance reasons the counter is not strictly tracking tasks to
5020 * their home CPUs. So we just add the counter to another CPU's counter,
5021 * to keep the global sum constant after CPU-down:
5022 */
70b97a7f 5023static void migrate_nr_uninterruptible(struct rq *rq_src)
1da177e4 5024{
6ad4c188 5025 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
1da177e4 5026
1da177e4
LT
5027 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5028 rq_src->nr_uninterruptible = 0;
1da177e4
LT
5029}
5030
dd41f596 5031/*
48c5ccae 5032 * remove the tasks which were accounted by rq from calc_load_tasks.
1da177e4 5033 */
48c5ccae 5034static void calc_global_load_remove(struct rq *rq)
1da177e4 5035{
48c5ccae
PZ
5036 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
5037 rq->calc_load_active = 0;
1da177e4
LT
5038}
5039
48f24c4d 5040/*
48c5ccae
PZ
5041 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5042 * try_to_wake_up()->select_task_rq().
5043 *
5044 * Called with rq->lock held even though we're in stop_machine() and
5045 * there's no concurrency possible; we hold the required locks anyway
5046 * because of lock validation efforts.
1da177e4 5047 */
48c5ccae 5048static void migrate_tasks(unsigned int dead_cpu)
1da177e4 5049{
70b97a7f 5050 struct rq *rq = cpu_rq(dead_cpu);
48c5ccae
PZ
5051 struct task_struct *next, *stop = rq->stop;
5052 int dest_cpu;
1da177e4
LT
5053
5054 /*
48c5ccae
PZ
5055 * Fudge the rq selection such that the below task selection loop
5056 * doesn't get stuck on the currently eligible stop task.
5057 *
5058 * We're currently inside stop_machine() and the rq is either stuck
5059 * in the stop_machine_cpu_stop() loop, or we're executing this code;
5060 * either way we should never end up calling schedule() until we're
5061 * done here.
1da177e4 5062 */
48c5ccae 5063 rq->stop = NULL;
48f24c4d 5064
8cb120d3
PT
5065 /* Ensure any throttled groups are reachable by pick_next_task */
5066 unthrottle_offline_cfs_rqs(rq);
5067
dd41f596 5068 for ( ; ; ) {
48c5ccae
PZ
5069 /*
5070 * There's this thread running, bail when that's the only
5071 * remaining thread.
5072 */
5073 if (rq->nr_running == 1)
dd41f596 5074 break;
48c5ccae 5075
b67802ea 5076 next = pick_next_task(rq);
48c5ccae 5077 BUG_ON(!next);
79c53799 5078 next->sched_class->put_prev_task(rq, next);
e692ab53 5079
48c5ccae
PZ
5080 /* Find suitable destination for @next, with force if needed. */
5081 dest_cpu = select_fallback_rq(dead_cpu, next);
5082 raw_spin_unlock(&rq->lock);
5083
5084 __migrate_task(next, dead_cpu, dest_cpu);
5085
5086 raw_spin_lock(&rq->lock);
1da177e4 5087 }
dce48a84 5088
48c5ccae 5089 rq->stop = stop;
dce48a84 5090}
48c5ccae 5091
1da177e4
LT
5092#endif /* CONFIG_HOTPLUG_CPU */
5093
e692ab53
NP
5094#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5095
5096static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5097 {
5098 .procname = "sched_domain",
c57baf1e 5099 .mode = 0555,
e0361851 5100 },
56992309 5101 {}
e692ab53
NP
5102};
5103
5104static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5105 {
5106 .procname = "kernel",
c57baf1e 5107 .mode = 0555,
e0361851
AD
5108 .child = sd_ctl_dir,
5109 },
56992309 5110 {}
e692ab53
NP
5111};
5112
5113static struct ctl_table *sd_alloc_ctl_entry(int n)
5114{
5115 struct ctl_table *entry =
5cf9f062 5116 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5117
e692ab53
NP
5118 return entry;
5119}
5120
6382bc90
MM
5121static void sd_free_ctl_entry(struct ctl_table **tablep)
5122{
cd790076 5123 struct ctl_table *entry;
6382bc90 5124
cd790076
MM
5125 /*
5126 * In the intermediate directories, both the child directory and
5127 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5128 * will always be set. In the lowest directory the names are
cd790076
MM
5129 * static strings and all have proc handlers.
5130 */
5131 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5132 if (entry->child)
5133 sd_free_ctl_entry(&entry->child);
cd790076
MM
5134 if (entry->proc_handler == NULL)
5135 kfree(entry->procname);
5136 }
6382bc90
MM
5137
5138 kfree(*tablep);
5139 *tablep = NULL;
5140}
5141
e692ab53 5142static void
e0361851 5143set_table_entry(struct ctl_table *entry,
e692ab53
NP
5144 const char *procname, void *data, int maxlen,
5145 mode_t mode, proc_handler *proc_handler)
5146{
e692ab53
NP
5147 entry->procname = procname;
5148 entry->data = data;
5149 entry->maxlen = maxlen;
5150 entry->mode = mode;
5151 entry->proc_handler = proc_handler;
5152}
5153
5154static struct ctl_table *
5155sd_alloc_ctl_domain_table(struct sched_domain *sd)
5156{
a5d8c348 5157 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 5158
ad1cdc1d
MM
5159 if (table == NULL)
5160 return NULL;
5161
e0361851 5162 set_table_entry(&table[0], "min_interval", &sd->min_interval,
e692ab53 5163 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5164 set_table_entry(&table[1], "max_interval", &sd->max_interval,
e692ab53 5165 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5166 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
e692ab53 5167 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5168 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
e692ab53 5169 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5170 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
e692ab53 5171 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5172 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
e692ab53 5173 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5174 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
e692ab53 5175 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5176 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
e692ab53 5177 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5178 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
e692ab53 5179 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5180 set_table_entry(&table[9], "cache_nice_tries",
e692ab53
NP
5181 &sd->cache_nice_tries,
5182 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5183 set_table_entry(&table[10], "flags", &sd->flags,
e692ab53 5184 sizeof(int), 0644, proc_dointvec_minmax);
a5d8c348
IM
5185 set_table_entry(&table[11], "name", sd->name,
5186 CORENAME_MAX_SIZE, 0444, proc_dostring);
5187 /* &table[12] is terminator */
e692ab53
NP
5188
5189 return table;
5190}
5191
9a4e7159 5192static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5193{
5194 struct ctl_table *entry, *table;
5195 struct sched_domain *sd;
5196 int domain_num = 0, i;
5197 char buf[32];
5198
5199 for_each_domain(cpu, sd)
5200 domain_num++;
5201 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5202 if (table == NULL)
5203 return NULL;
e692ab53
NP
5204
5205 i = 0;
5206 for_each_domain(cpu, sd) {
5207 snprintf(buf, 32, "domain%d", i);
e692ab53 5208 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5209 entry->mode = 0555;
e692ab53
NP
5210 entry->child = sd_alloc_ctl_domain_table(sd);
5211 entry++;
5212 i++;
5213 }
5214 return table;
5215}
5216
5217static struct ctl_table_header *sd_sysctl_header;
6382bc90 5218static void register_sched_domain_sysctl(void)
e692ab53 5219{
6ad4c188 5220 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5221 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5222 char buf[32];
5223
7378547f
MM
5224 WARN_ON(sd_ctl_dir[0].child);
5225 sd_ctl_dir[0].child = entry;
5226
ad1cdc1d
MM
5227 if (entry == NULL)
5228 return;
5229
6ad4c188 5230 for_each_possible_cpu(i) {
e692ab53 5231 snprintf(buf, 32, "cpu%d", i);
e692ab53 5232 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5233 entry->mode = 0555;
e692ab53 5234 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5235 entry++;
e692ab53 5236 }
7378547f
MM
5237
5238 WARN_ON(sd_sysctl_header);
e692ab53
NP
5239 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5240}
6382bc90 5241
7378547f 5242/* may be called multiple times per register */
6382bc90
MM
5243static void unregister_sched_domain_sysctl(void)
5244{
7378547f
MM
5245 if (sd_sysctl_header)
5246 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5247 sd_sysctl_header = NULL;
7378547f
MM
5248 if (sd_ctl_dir[0].child)
5249 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5250}
e692ab53 5251#else
6382bc90
MM
5252static void register_sched_domain_sysctl(void)
5253{
5254}
5255static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5256{
5257}
5258#endif
5259
1f11eb6a
GH
5260static void set_rq_online(struct rq *rq)
5261{
5262 if (!rq->online) {
5263 const struct sched_class *class;
5264
c6c4927b 5265 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5266 rq->online = 1;
5267
5268 for_each_class(class) {
5269 if (class->rq_online)
5270 class->rq_online(rq);
5271 }
5272 }
5273}
5274
5275static void set_rq_offline(struct rq *rq)
5276{
5277 if (rq->online) {
5278 const struct sched_class *class;
5279
5280 for_each_class(class) {
5281 if (class->rq_offline)
5282 class->rq_offline(rq);
5283 }
5284
c6c4927b 5285 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5286 rq->online = 0;
5287 }
5288}
5289
1da177e4
LT
5290/*
5291 * migration_call - callback that gets triggered when a CPU is added.
5292 * Here we can start up the necessary migration thread for the new CPU.
5293 */
48f24c4d
IM
5294static int __cpuinit
5295migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5296{
48f24c4d 5297 int cpu = (long)hcpu;
1da177e4 5298 unsigned long flags;
969c7921 5299 struct rq *rq = cpu_rq(cpu);
1da177e4 5300
48c5ccae 5301 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5302
1da177e4 5303 case CPU_UP_PREPARE:
a468d389 5304 rq->calc_load_update = calc_load_update;
1da177e4 5305 break;
48f24c4d 5306
1da177e4 5307 case CPU_ONLINE:
1f94ef59 5308 /* Update our root-domain */
05fa785c 5309 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5310 if (rq->rd) {
c6c4927b 5311 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5312
5313 set_rq_online(rq);
1f94ef59 5314 }
05fa785c 5315 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5316 break;
48f24c4d 5317
1da177e4 5318#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5319 case CPU_DYING:
317f3941 5320 sched_ttwu_pending();
57d885fe 5321 /* Update our root-domain */
05fa785c 5322 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5323 if (rq->rd) {
c6c4927b 5324 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5325 set_rq_offline(rq);
57d885fe 5326 }
48c5ccae
PZ
5327 migrate_tasks(cpu);
5328 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5329 raw_spin_unlock_irqrestore(&rq->lock, flags);
48c5ccae
PZ
5330
5331 migrate_nr_uninterruptible(rq);
5332 calc_global_load_remove(rq);
57d885fe 5333 break;
1da177e4
LT
5334#endif
5335 }
49c022e6
PZ
5336
5337 update_max_interval();
5338
1da177e4
LT
5339 return NOTIFY_OK;
5340}
5341
f38b0820
PM
5342/*
5343 * Register at high priority so that task migration (migrate_all_tasks)
5344 * happens before everything else. This has to be lower priority than
cdd6c482 5345 * the notifier in the perf_event subsystem, though.
1da177e4 5346 */
26c2143b 5347static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 5348 .notifier_call = migration_call,
50a323b7 5349 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5350};
5351
3a101d05
TH
5352static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5353 unsigned long action, void *hcpu)
5354{
5355 switch (action & ~CPU_TASKS_FROZEN) {
5356 case CPU_ONLINE:
5357 case CPU_DOWN_FAILED:
5358 set_cpu_active((long)hcpu, true);
5359 return NOTIFY_OK;
5360 default:
5361 return NOTIFY_DONE;
5362 }
5363}
5364
5365static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5366 unsigned long action, void *hcpu)
5367{
5368 switch (action & ~CPU_TASKS_FROZEN) {
5369 case CPU_DOWN_PREPARE:
5370 set_cpu_active((long)hcpu, false);
5371 return NOTIFY_OK;
5372 default:
5373 return NOTIFY_DONE;
5374 }
5375}
5376
7babe8db 5377static int __init migration_init(void)
1da177e4
LT
5378{
5379 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5380 int err;
48f24c4d 5381
3a101d05 5382 /* Initialize migration for the boot CPU */
07dccf33
AM
5383 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5384 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5385 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5386 register_cpu_notifier(&migration_notifier);
7babe8db 5387
3a101d05
TH
5388 /* Register cpu active notifiers */
5389 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5390 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5391
a004cd42 5392 return 0;
1da177e4 5393}
7babe8db 5394early_initcall(migration_init);
1da177e4
LT
5395#endif
5396
5397#ifdef CONFIG_SMP
476f3534 5398
4cb98839
PZ
5399static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5400
3e9830dc 5401#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5402
f6630114
MT
5403static __read_mostly int sched_domain_debug_enabled;
5404
5405static int __init sched_domain_debug_setup(char *str)
5406{
5407 sched_domain_debug_enabled = 1;
5408
5409 return 0;
5410}
5411early_param("sched_debug", sched_domain_debug_setup);
5412
7c16ec58 5413static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5414 struct cpumask *groupmask)
1da177e4 5415{
4dcf6aff 5416 struct sched_group *group = sd->groups;
434d53b0 5417 char str[256];
1da177e4 5418
968ea6d8 5419 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 5420 cpumask_clear(groupmask);
4dcf6aff
IM
5421
5422 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5423
5424 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5425 printk("does not load-balance\n");
4dcf6aff 5426 if (sd->parent)
3df0fc5b
PZ
5427 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5428 " has parent");
4dcf6aff 5429 return -1;
41c7ce9a
NP
5430 }
5431
3df0fc5b 5432 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 5433
758b2cdc 5434 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5435 printk(KERN_ERR "ERROR: domain->span does not contain "
5436 "CPU%d\n", cpu);
4dcf6aff 5437 }
758b2cdc 5438 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5439 printk(KERN_ERR "ERROR: domain->groups does not contain"
5440 " CPU%d\n", cpu);
4dcf6aff 5441 }
1da177e4 5442
4dcf6aff 5443 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5444 do {
4dcf6aff 5445 if (!group) {
3df0fc5b
PZ
5446 printk("\n");
5447 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5448 break;
5449 }
5450
9c3f75cb 5451 if (!group->sgp->power) {
3df0fc5b
PZ
5452 printk(KERN_CONT "\n");
5453 printk(KERN_ERR "ERROR: domain->cpu_power not "
5454 "set\n");
4dcf6aff
IM
5455 break;
5456 }
1da177e4 5457
758b2cdc 5458 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5459 printk(KERN_CONT "\n");
5460 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5461 break;
5462 }
1da177e4 5463
758b2cdc 5464 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5465 printk(KERN_CONT "\n");
5466 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5467 break;
5468 }
1da177e4 5469
758b2cdc 5470 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5471
968ea6d8 5472 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 5473
3df0fc5b 5474 printk(KERN_CONT " %s", str);
9c3f75cb 5475 if (group->sgp->power != SCHED_POWER_SCALE) {
3df0fc5b 5476 printk(KERN_CONT " (cpu_power = %d)",
9c3f75cb 5477 group->sgp->power);
381512cf 5478 }
1da177e4 5479
4dcf6aff
IM
5480 group = group->next;
5481 } while (group != sd->groups);
3df0fc5b 5482 printk(KERN_CONT "\n");
1da177e4 5483
758b2cdc 5484 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5485 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5486
758b2cdc
RR
5487 if (sd->parent &&
5488 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5489 printk(KERN_ERR "ERROR: parent span is not a superset "
5490 "of domain->span\n");
4dcf6aff
IM
5491 return 0;
5492}
1da177e4 5493
4dcf6aff
IM
5494static void sched_domain_debug(struct sched_domain *sd, int cpu)
5495{
5496 int level = 0;
1da177e4 5497
f6630114
MT
5498 if (!sched_domain_debug_enabled)
5499 return;
5500
4dcf6aff
IM
5501 if (!sd) {
5502 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5503 return;
5504 }
1da177e4 5505
4dcf6aff
IM
5506 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5507
5508 for (;;) {
4cb98839 5509 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5510 break;
1da177e4
LT
5511 level++;
5512 sd = sd->parent;
33859f7f 5513 if (!sd)
4dcf6aff
IM
5514 break;
5515 }
1da177e4 5516}
6d6bc0ad 5517#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5518# define sched_domain_debug(sd, cpu) do { } while (0)
6d6bc0ad 5519#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5520
1a20ff27 5521static int sd_degenerate(struct sched_domain *sd)
245af2c7 5522{
758b2cdc 5523 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5524 return 1;
5525
5526 /* Following flags need at least 2 groups */
5527 if (sd->flags & (SD_LOAD_BALANCE |
5528 SD_BALANCE_NEWIDLE |
5529 SD_BALANCE_FORK |
89c4710e
SS
5530 SD_BALANCE_EXEC |
5531 SD_SHARE_CPUPOWER |
5532 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
5533 if (sd->groups != sd->groups->next)
5534 return 0;
5535 }
5536
5537 /* Following flags don't use groups */
c88d5910 5538 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5539 return 0;
5540
5541 return 1;
5542}
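/*
 * Example: on a dual-core machine without SMT the SIBLING domain built
 * for each CPU spans only that CPU, so cpumask_weight(span) == 1 and
 * sd_degenerate() returns 1; cpu_attach_domain() then splices that level
 * out of the hierarchy. Similarly, a parent whose single group mirrors
 * its child contributes nothing and is dropped via
 * sd_parent_degenerate() below.
 */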
5543
48f24c4d
IM
5544static int
5545sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5546{
5547 unsigned long cflags = sd->flags, pflags = parent->flags;
5548
5549 if (sd_degenerate(parent))
5550 return 1;
5551
758b2cdc 5552 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5553 return 0;
5554
245af2c7
SS
5555 /* Flags needing groups don't count if only 1 group in parent */
5556 if (parent->groups == parent->groups->next) {
5557 pflags &= ~(SD_LOAD_BALANCE |
5558 SD_BALANCE_NEWIDLE |
5559 SD_BALANCE_FORK |
89c4710e
SS
5560 SD_BALANCE_EXEC |
5561 SD_SHARE_CPUPOWER |
5562 SD_SHARE_PKG_RESOURCES);
5436499e
KC
5563 if (nr_node_ids == 1)
5564 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5565 }
5566 if (~cflags & pflags)
5567 return 0;
5568
5569 return 1;
5570}
5571
dce840a0 5572static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5573{
dce840a0 5574 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5575
68e74568 5576 cpupri_cleanup(&rd->cpupri);
c6c4927b
RR
5577 free_cpumask_var(rd->rto_mask);
5578 free_cpumask_var(rd->online);
5579 free_cpumask_var(rd->span);
5580 kfree(rd);
5581}
5582
57d885fe
GH
5583static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5584{
a0490fa3 5585 struct root_domain *old_rd = NULL;
57d885fe 5586 unsigned long flags;
57d885fe 5587
05fa785c 5588 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5589
5590 if (rq->rd) {
a0490fa3 5591 old_rd = rq->rd;
57d885fe 5592
c6c4927b 5593 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5594 set_rq_offline(rq);
57d885fe 5595
c6c4927b 5596 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5597
a0490fa3
IM
5598 /*
 5599 * If we don't want to free the old_rd yet then
5600 * set old_rd to NULL to skip the freeing later
5601 * in this function:
5602 */
5603 if (!atomic_dec_and_test(&old_rd->refcount))
5604 old_rd = NULL;
57d885fe
GH
5605 }
5606
5607 atomic_inc(&rd->refcount);
5608 rq->rd = rd;
5609
c6c4927b 5610 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5611 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5612 set_rq_online(rq);
57d885fe 5613
05fa785c 5614 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5615
5616 if (old_rd)
dce840a0 5617 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5618}
5619
68c38fc3 5620static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5621{
5622 memset(rd, 0, sizeof(*rd));
5623
68c38fc3 5624 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5625 goto out;
68c38fc3 5626 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5627 goto free_span;
68c38fc3 5628 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 5629 goto free_online;
6e0534f2 5630
68c38fc3 5631 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5632 goto free_rto_mask;
c6c4927b 5633 return 0;
6e0534f2 5634
68e74568
RR
5635free_rto_mask:
5636 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
5637free_online:
5638 free_cpumask_var(rd->online);
5639free_span:
5640 free_cpumask_var(rd->span);
0c910d28 5641out:
c6c4927b 5642 return -ENOMEM;
57d885fe
GH
5643}
5644
029632fb
PZ
5645/*
5646 * By default the system creates a single root-domain with all cpus as
5647 * members (mimicking the global state we have today).
5648 */
5649struct root_domain def_root_domain;
5650
57d885fe
GH
5651static void init_defrootdomain(void)
5652{
68c38fc3 5653 init_rootdomain(&def_root_domain);
c6c4927b 5654
57d885fe
GH
5655 atomic_set(&def_root_domain.refcount, 1);
5656}
5657
dc938520 5658static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5659{
5660 struct root_domain *rd;
5661
5662 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5663 if (!rd)
5664 return NULL;
5665
68c38fc3 5666 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5667 kfree(rd);
5668 return NULL;
5669 }
57d885fe
GH
5670
5671 return rd;
5672}
5673
e3589f6c
PZ
5674static void free_sched_groups(struct sched_group *sg, int free_sgp)
5675{
5676 struct sched_group *tmp, *first;
5677
5678 if (!sg)
5679 return;
5680
5681 first = sg;
5682 do {
5683 tmp = sg->next;
5684
5685 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5686 kfree(sg->sgp);
5687
5688 kfree(sg);
5689 sg = tmp;
5690 } while (sg != first);
5691}
5692
dce840a0
PZ
5693static void free_sched_domain(struct rcu_head *rcu)
5694{
5695 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5696
5697 /*
 5698 * If it's an overlapping domain it has private groups; iterate and
5699 * nuke them all.
5700 */
5701 if (sd->flags & SD_OVERLAP) {
5702 free_sched_groups(sd->groups, 1);
5703 } else if (atomic_dec_and_test(&sd->groups->ref)) {
9c3f75cb 5704 kfree(sd->groups->sgp);
dce840a0 5705 kfree(sd->groups);
9c3f75cb 5706 }
dce840a0
PZ
5707 kfree(sd);
5708}
5709
5710static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5711{
5712 call_rcu(&sd->rcu, free_sched_domain);
5713}
5714
5715static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5716{
5717 for (; sd; sd = sd->parent)
5718 destroy_sched_domain(sd, cpu);
5719}
5720
1da177e4 5721/*
0eab9146 5722 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5723 * hold the hotplug lock.
5724 */
0eab9146
IM
5725static void
5726cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5727{
70b97a7f 5728 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5729 struct sched_domain *tmp;
5730
5731 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5732 for (tmp = sd; tmp; ) {
245af2c7
SS
5733 struct sched_domain *parent = tmp->parent;
5734 if (!parent)
5735 break;
f29c9b1c 5736
1a848870 5737 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5738 tmp->parent = parent->parent;
1a848870
SS
5739 if (parent->parent)
5740 parent->parent->child = tmp;
dce840a0 5741 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
5742 } else
5743 tmp = tmp->parent;
245af2c7
SS
5744 }
5745
1a848870 5746 if (sd && sd_degenerate(sd)) {
dce840a0 5747 tmp = sd;
245af2c7 5748 sd = sd->parent;
dce840a0 5749 destroy_sched_domain(tmp, cpu);
1a848870
SS
5750 if (sd)
5751 sd->child = NULL;
5752 }
1da177e4 5753
4cb98839 5754 sched_domain_debug(sd, cpu);
1da177e4 5755
57d885fe 5756 rq_attach_root(rq, rd);
dce840a0 5757 tmp = rq->sd;
674311d5 5758 rcu_assign_pointer(rq->sd, sd);
dce840a0 5759 destroy_sched_domains(tmp, cpu);
1da177e4
LT
5760}
5761
5762/* cpus with isolated domains */
dcc30a35 5763static cpumask_var_t cpu_isolated_map;
1da177e4
LT
5764
5765/* Setup the mask of cpus configured for isolated domains */
5766static int __init isolated_cpu_setup(char *str)
5767{
bdddd296 5768 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 5769 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
5770 return 1;
5771}
5772
8927f494 5773__setup("isolcpus=", isolated_cpu_setup);
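/*
 * Example (boot command line): on a machine with at least four CPUs,
 *
 *	isolcpus=2,3
 *
 * puts CPUs 2 and 3 into cpu_isolated_map. init_sched_domains() below
 * then excludes them from every sched domain (see the cpumask_andnot()
 * against cpu_isolated_map), so the load balancer never moves work onto
 * or off them; tasks reach those CPUs only through explicit affinity,
 * e.g. sched_setaffinity() or taskset.
 */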
1da177e4 5774
9c1cfda2 5775#ifdef CONFIG_NUMA
198e2f18 5776
9c1cfda2
JH
5777/**
5778 * find_next_best_node - find the next node to include in a sched_domain
5779 * @node: node whose sched_domain we're building
5780 * @used_nodes: nodes already in the sched_domain
5781 *
41a2d6cf 5782 * Find the next node to include in a given scheduling domain. Simply
9c1cfda2
JH
5783 * finds the closest node not already in the @used_nodes map.
5784 *
5785 * Should use nodemask_t.
5786 */
c5f59f08 5787static int find_next_best_node(int node, nodemask_t *used_nodes)
9c1cfda2 5788{
7142d17e 5789 int i, n, val, min_val, best_node = -1;
9c1cfda2
JH
5790
5791 min_val = INT_MAX;
5792
076ac2af 5793 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2 5794 /* Start at @node */
076ac2af 5795 n = (node + i) % nr_node_ids;
9c1cfda2
JH
5796
5797 if (!nr_cpus_node(n))
5798 continue;
5799
5800 /* Skip already used nodes */
c5f59f08 5801 if (node_isset(n, *used_nodes))
9c1cfda2
JH
5802 continue;
5803
5804 /* Simple min distance search */
5805 val = node_distance(node, n);
5806
5807 if (val < min_val) {
5808 min_val = val;
5809 best_node = n;
5810 }
5811 }
5812
7142d17e
HD
5813 if (best_node != -1)
5814 node_set(best_node, *used_nodes);
9c1cfda2
JH
5815 return best_node;
5816}
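/*
 * Worked example with hypothetical distances (assuming four nodes that
 * all have online CPUs): for @node = 0, *used_nodes = {0} and
 * node_distance(0, n) = 10, 20, 40, 20 for n = 0..3, the scan skips
 * node 0, records node 1 (distance 20), ignores node 2 (40) and keeps
 * node 1 over node 3 because ties go to the node scanned first; node 1
 * is added to @used_nodes and returned. A second call with
 * *used_nodes = {0, 1} would return node 3.
 */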
5817
5818/**
5819 * sched_domain_node_span - get a cpumask for a node's sched_domain
5820 * @node: node whose cpumask we're constructing
73486722 5821 * @span: resulting cpumask
9c1cfda2 5822 *
41a2d6cf 5823 * Given a node, construct a good cpumask for its sched_domain to span. It
9c1cfda2
JH
5824 * should be one that prevents unnecessary balancing, but also spreads tasks
5825 * out optimally.
5826 */
96f874e2 5827static void sched_domain_node_span(int node, struct cpumask *span)
9c1cfda2 5828{
c5f59f08 5829 nodemask_t used_nodes;
48f24c4d 5830 int i;
9c1cfda2 5831
6ca09dfc 5832 cpumask_clear(span);
c5f59f08 5833 nodes_clear(used_nodes);
9c1cfda2 5834
6ca09dfc 5835 cpumask_or(span, span, cpumask_of_node(node));
c5f59f08 5836 node_set(node, used_nodes);
9c1cfda2
JH
5837
5838 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
c5f59f08 5839 int next_node = find_next_best_node(node, &used_nodes);
7142d17e
HD
5840 if (next_node < 0)
5841 break;
6ca09dfc 5842 cpumask_or(span, span, cpumask_of_node(next_node));
9c1cfda2 5843 }
9c1cfda2 5844}
d3081f52
PZ
5845
5846static const struct cpumask *cpu_node_mask(int cpu)
5847{
5848 lockdep_assert_held(&sched_domains_mutex);
5849
5850 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
5851
5852 return sched_domains_tmpmask;
5853}
2c402dc3
PZ
5854
5855static const struct cpumask *cpu_allnodes_mask(int cpu)
5856{
5857 return cpu_possible_mask;
5858}
6d6bc0ad 5859#endif /* CONFIG_NUMA */
9c1cfda2 5860
d3081f52
PZ
5861static const struct cpumask *cpu_cpu_mask(int cpu)
5862{
5863 return cpumask_of_node(cpu_to_node(cpu));
5864}
5865
5c45bf27 5866int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
48f24c4d 5867
dce840a0
PZ
5868struct sd_data {
5869 struct sched_domain **__percpu sd;
5870 struct sched_group **__percpu sg;
9c3f75cb 5871 struct sched_group_power **__percpu sgp;
dce840a0
PZ
5872};
5873
49a02c51 5874struct s_data {
21d42ccf 5875 struct sched_domain ** __percpu sd;
49a02c51
AH
5876 struct root_domain *rd;
5877};
5878
2109b99e 5879enum s_alloc {
2109b99e 5880 sa_rootdomain,
21d42ccf 5881 sa_sd,
dce840a0 5882 sa_sd_storage,
2109b99e
AH
5883 sa_none,
5884};
5885
54ab4ff4
PZ
5886struct sched_domain_topology_level;
5887
5888typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
eb7a74e6
PZ
5889typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5890
e3589f6c
PZ
5891#define SDTL_OVERLAP 0x01
5892
eb7a74e6 5893struct sched_domain_topology_level {
2c402dc3
PZ
5894 sched_domain_init_f init;
5895 sched_domain_mask_f mask;
e3589f6c 5896 int flags;
54ab4ff4 5897 struct sd_data data;
eb7a74e6
PZ
5898};
5899
e3589f6c
PZ
5900static int
5901build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5902{
5903 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5904 const struct cpumask *span = sched_domain_span(sd);
5905 struct cpumask *covered = sched_domains_tmpmask;
5906 struct sd_data *sdd = sd->private;
5907 struct sched_domain *child;
5908 int i;
5909
5910 cpumask_clear(covered);
5911
5912 for_each_cpu(i, span) {
5913 struct cpumask *sg_span;
5914
5915 if (cpumask_test_cpu(i, covered))
5916 continue;
5917
5918 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 5919 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
5920
5921 if (!sg)
5922 goto fail;
5923
5924 sg_span = sched_group_cpus(sg);
5925
5926 child = *per_cpu_ptr(sdd->sd, i);
5927 if (child->child) {
5928 child = child->child;
5929 cpumask_copy(sg_span, sched_domain_span(child));
5930 } else
5931 cpumask_set_cpu(i, sg_span);
5932
5933 cpumask_or(covered, covered, sg_span);
5934
5935 sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
5936 atomic_inc(&sg->sgp->ref);
5937
5938 if (cpumask_test_cpu(cpu, sg_span))
5939 groups = sg;
5940
5941 if (!first)
5942 first = sg;
5943 if (last)
5944 last->next = sg;
5945 last = sg;
5946 last->next = first;
5947 }
5948 sd->groups = groups;
5949
5950 return 0;
5951
5952fail:
5953 free_sched_groups(first, 0);
5954
5955 return -ENOMEM;
5956}
5957
dce840a0 5958static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 5959{
dce840a0
PZ
5960 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5961 struct sched_domain *child = sd->child;
1da177e4 5962
dce840a0
PZ
5963 if (child)
5964 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 5965
9c3f75cb 5966 if (sg) {
dce840a0 5967 *sg = *per_cpu_ptr(sdd->sg, cpu);
9c3f75cb 5968 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
e3589f6c 5969 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
9c3f75cb 5970 }
dce840a0
PZ
5971
5972 return cpu;
1e9f28fa 5973}
1e9f28fa 5974
01a08546 5975/*
dce840a0
PZ
5976 * build_sched_groups will build a circular linked list of the groups
 5977 * covered by the given span, set each group's ->cpumask correctly,
 5978 * and initialize each group's ->cpu_power to 0.
e3589f6c
PZ
5979 *
5980 * Assumes the sched_domain tree is fully constructed
01a08546 5981 */
e3589f6c
PZ
5982static int
5983build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 5984{
dce840a0
PZ
5985 struct sched_group *first = NULL, *last = NULL;
5986 struct sd_data *sdd = sd->private;
5987 const struct cpumask *span = sched_domain_span(sd);
f96225fd 5988 struct cpumask *covered;
dce840a0 5989 int i;
9c1cfda2 5990
e3589f6c
PZ
5991 get_group(cpu, sdd, &sd->groups);
5992 atomic_inc(&sd->groups->ref);
5993
5994 if (cpu != cpumask_first(sched_domain_span(sd)))
5995 return 0;
5996
f96225fd
PZ
5997 lockdep_assert_held(&sched_domains_mutex);
5998 covered = sched_domains_tmpmask;
5999
dce840a0 6000 cpumask_clear(covered);
6711cab4 6001
dce840a0
PZ
6002 for_each_cpu(i, span) {
6003 struct sched_group *sg;
6004 int group = get_group(i, sdd, &sg);
6005 int j;
6711cab4 6006
dce840a0
PZ
6007 if (cpumask_test_cpu(i, covered))
6008 continue;
6711cab4 6009
dce840a0 6010 cpumask_clear(sched_group_cpus(sg));
9c3f75cb 6011 sg->sgp->power = 0;
0601a88d 6012
dce840a0
PZ
6013 for_each_cpu(j, span) {
6014 if (get_group(j, sdd, NULL) != group)
6015 continue;
0601a88d 6016
dce840a0
PZ
6017 cpumask_set_cpu(j, covered);
6018 cpumask_set_cpu(j, sched_group_cpus(sg));
6019 }
0601a88d 6020
dce840a0
PZ
6021 if (!first)
6022 first = sg;
6023 if (last)
6024 last->next = sg;
6025 last = sg;
6026 }
6027 last->next = first;
e3589f6c
PZ
6028
6029 return 0;
0601a88d 6030}
51888ca2 6031
89c4710e
SS
6032/*
6033 * Initialize sched groups cpu_power.
6034 *
 6035 * cpu_power indicates the capacity of a sched group, which is used while
 6036 * distributing the load between different sched groups in a sched domain.
 6037 * Typically cpu_power for all the groups in a sched domain will be the same
 6038 * unless there are asymmetries in the topology. If there are asymmetries, the
 6039 * group having more cpu_power will pick up more load compared to the group
 6040 * having less cpu_power.
89c4710e
SS
6041 */
6042static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6043{
e3589f6c 6044 struct sched_group *sg = sd->groups;
89c4710e 6045
e3589f6c
PZ
6046 WARN_ON(!sd || !sg);
6047
6048 do {
6049 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6050 sg = sg->next;
6051 } while (sg != sd->groups);
89c4710e 6052
e3589f6c
PZ
6053 if (cpu != group_first_cpu(sg))
6054 return;
aae6d3dd 6055
d274cb30 6056 update_group_power(sd, cpu);
69e1e811 6057 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6058}
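/*
 * Rough numbers, for illustration: SCHED_POWER_SCALE is 1024, so a
 * group of two unremarkable CPUs typically ends up with cpu_power of
 * about 2048 after update_group_power(). If one group in a domain has
 * cpu_power 2048 and its sibling only 1024 (asymmetric hardware, or
 * capacity eaten by RT tasks), the load balancer aims to leave roughly
 * two thirds of the load on the first group and one third on the
 * second, i.e. in proportion to cpu_power as described above.
 */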
6059
029632fb
PZ
6060int __weak arch_sd_sibling_asym_packing(void)
6061{
6062 return 0*SD_ASYM_PACKING;
6063}
6064
7c16ec58
MT
6065/*
6066 * Initializers for schedule domains
6067 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6068 */
6069
a5d8c348
IM
6070#ifdef CONFIG_SCHED_DEBUG
6071# define SD_INIT_NAME(sd, type) sd->name = #type
6072#else
6073# define SD_INIT_NAME(sd, type) do { } while (0)
6074#endif
6075
54ab4ff4
PZ
6076#define SD_INIT_FUNC(type) \
6077static noinline struct sched_domain * \
6078sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
6079{ \
6080 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
6081 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
6082 SD_INIT_NAME(sd, type); \
6083 sd->private = &tl->data; \
6084 return sd; \
7c16ec58
MT
6085}
6086
6087SD_INIT_FUNC(CPU)
6088#ifdef CONFIG_NUMA
6089 SD_INIT_FUNC(ALLNODES)
6090 SD_INIT_FUNC(NODE)
6091#endif
6092#ifdef CONFIG_SCHED_SMT
6093 SD_INIT_FUNC(SIBLING)
6094#endif
6095#ifdef CONFIG_SCHED_MC
6096 SD_INIT_FUNC(MC)
6097#endif
01a08546
HC
6098#ifdef CONFIG_SCHED_BOOK
6099 SD_INIT_FUNC(BOOK)
6100#endif
7c16ec58 6101
1d3504fc 6102static int default_relax_domain_level = -1;
60495e77 6103int sched_domain_level_max;
1d3504fc
HS
6104
6105static int __init setup_relax_domain_level(char *str)
6106{
30e0e178
LZ
6107 unsigned long val;
6108
6109 val = simple_strtoul(str, NULL, 0);
60495e77 6110 if (val < sched_domain_level_max)
30e0e178
LZ
6111 default_relax_domain_level = val;
6112
1d3504fc
HS
6113 return 1;
6114}
6115__setup("relax_domain_level=", setup_relax_domain_level);
6116
6117static void set_domain_attribute(struct sched_domain *sd,
6118 struct sched_domain_attr *attr)
6119{
6120 int request;
6121
6122 if (!attr || attr->relax_domain_level < 0) {
6123 if (default_relax_domain_level < 0)
6124 return;
6125 else
6126 request = default_relax_domain_level;
6127 } else
6128 request = attr->relax_domain_level;
6129 if (request < sd->level) {
6130 /* turn off idle balance on this domain */
c88d5910 6131 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6132 } else {
6133 /* turn on idle balance on this domain */
c88d5910 6134 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6135 }
6136}
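/*
 * Example: a small relax_domain_level (boot parameter above, or a
 * cpuset's sched_relax_domain_level) clears SD_BALANCE_WAKE and
 * SD_BALANCE_NEWIDLE on every domain whose level exceeds the request,
 * so wakeup and newly-idle balancing only searches the nearby, lower
 * levels of the hierarchy; a larger value lets that search range over
 * progressively wider spans of the machine.
 */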
6137
54ab4ff4
PZ
6138static void __sdt_free(const struct cpumask *cpu_map);
6139static int __sdt_alloc(const struct cpumask *cpu_map);
6140
2109b99e
AH
6141static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6142 const struct cpumask *cpu_map)
6143{
6144 switch (what) {
2109b99e 6145 case sa_rootdomain:
822ff793
PZ
6146 if (!atomic_read(&d->rd->refcount))
6147 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6148 case sa_sd:
6149 free_percpu(d->sd); /* fall through */
dce840a0 6150 case sa_sd_storage:
54ab4ff4 6151 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6152 case sa_none:
6153 break;
6154 }
6155}
3404c8d9 6156
2109b99e
AH
6157static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6158 const struct cpumask *cpu_map)
6159{
dce840a0
PZ
6160 memset(d, 0, sizeof(*d));
6161
54ab4ff4
PZ
6162 if (__sdt_alloc(cpu_map))
6163 return sa_sd_storage;
dce840a0
PZ
6164 d->sd = alloc_percpu(struct sched_domain *);
6165 if (!d->sd)
6166 return sa_sd_storage;
2109b99e 6167 d->rd = alloc_rootdomain();
dce840a0 6168 if (!d->rd)
21d42ccf 6169 return sa_sd;
2109b99e
AH
6170 return sa_rootdomain;
6171}
57d885fe 6172
dce840a0
PZ
6173/*
6174 * NULL the sd_data elements we've used to build the sched_domain and
6175 * sched_group structure so that the subsequent __free_domain_allocs()
6176 * will not free the data we're using.
6177 */
6178static void claim_allocations(int cpu, struct sched_domain *sd)
6179{
6180 struct sd_data *sdd = sd->private;
dce840a0
PZ
6181
6182 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6183 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6184
e3589f6c 6185 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6186 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c
PZ
6187
6188 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
9c3f75cb 6189 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
dce840a0
PZ
6190}
6191
2c402dc3
PZ
6192#ifdef CONFIG_SCHED_SMT
6193static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 6194{
2c402dc3 6195 return topology_thread_cpumask(cpu);
3bd65a80 6196}
2c402dc3 6197#endif
7f4588f3 6198
d069b916
PZ
6199/*
6200 * Topology list, bottom-up.
6201 */
2c402dc3 6202static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
6203#ifdef CONFIG_SCHED_SMT
6204 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 6205#endif
1e9f28fa 6206#ifdef CONFIG_SCHED_MC
2c402dc3 6207 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 6208#endif
d069b916
PZ
6209#ifdef CONFIG_SCHED_BOOK
6210 { sd_init_BOOK, cpu_book_mask, },
6211#endif
6212 { sd_init_CPU, cpu_cpu_mask, },
6213#ifdef CONFIG_NUMA
e3589f6c 6214 { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
d069b916 6215 { sd_init_ALLNODES, cpu_allnodes_mask, },
1da177e4 6216#endif
eb7a74e6
PZ
6217 { NULL, },
6218};
6219
6220static struct sched_domain_topology_level *sched_domain_topology = default_topology;
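/*
 * Read bottom-up: on a NUMA machine with SMT, multi-core and (on s390)
 * book scheduling all enabled, build_sched_domains() below stacks one
 * domain per level for every CPU, roughly:
 *
 *	domain0: SIBLING  - hardware threads of one core
 *	domain1: MC       - cores of one package/cache domain
 *	domain2: BOOK     - books (s390 only)
 *	domain3: CPU      - all CPUs of one NUMA node
 *	domain4: NODE     - a group of nearby nodes (SD_OVERLAP)
 *	domain5: ALLNODES - the whole machine
 *
 * A level whose span already covers the entire cpu_map ends the walk
 * early (see the cpumask_equal() check in build_sched_domains()), so
 * most systems only instantiate a subset of these levels.
 */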
6221
54ab4ff4
PZ
6222static int __sdt_alloc(const struct cpumask *cpu_map)
6223{
6224 struct sched_domain_topology_level *tl;
6225 int j;
6226
6227 for (tl = sched_domain_topology; tl->init; tl++) {
6228 struct sd_data *sdd = &tl->data;
6229
6230 sdd->sd = alloc_percpu(struct sched_domain *);
6231 if (!sdd->sd)
6232 return -ENOMEM;
6233
6234 sdd->sg = alloc_percpu(struct sched_group *);
6235 if (!sdd->sg)
6236 return -ENOMEM;
6237
9c3f75cb
PZ
6238 sdd->sgp = alloc_percpu(struct sched_group_power *);
6239 if (!sdd->sgp)
6240 return -ENOMEM;
6241
54ab4ff4
PZ
6242 for_each_cpu(j, cpu_map) {
6243 struct sched_domain *sd;
6244 struct sched_group *sg;
9c3f75cb 6245 struct sched_group_power *sgp;
54ab4ff4
PZ
6246
6247 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6248 GFP_KERNEL, cpu_to_node(j));
6249 if (!sd)
6250 return -ENOMEM;
6251
6252 *per_cpu_ptr(sdd->sd, j) = sd;
6253
6254 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6255 GFP_KERNEL, cpu_to_node(j));
6256 if (!sg)
6257 return -ENOMEM;
6258
6259 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb
PZ
6260
6261 sgp = kzalloc_node(sizeof(struct sched_group_power),
6262 GFP_KERNEL, cpu_to_node(j));
6263 if (!sgp)
6264 return -ENOMEM;
6265
6266 *per_cpu_ptr(sdd->sgp, j) = sgp;
54ab4ff4
PZ
6267 }
6268 }
6269
6270 return 0;
6271}
6272
6273static void __sdt_free(const struct cpumask *cpu_map)
6274{
6275 struct sched_domain_topology_level *tl;
6276 int j;
6277
6278 for (tl = sched_domain_topology; tl->init; tl++) {
6279 struct sd_data *sdd = &tl->data;
6280
6281 for_each_cpu(j, cpu_map) {
e3589f6c
PZ
6282 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
6283 if (sd && (sd->flags & SD_OVERLAP))
6284 free_sched_groups(sd->groups, 0);
feff8fa0 6285 kfree(*per_cpu_ptr(sdd->sd, j));
54ab4ff4 6286 kfree(*per_cpu_ptr(sdd->sg, j));
9c3f75cb 6287 kfree(*per_cpu_ptr(sdd->sgp, j));
54ab4ff4
PZ
6288 }
6289 free_percpu(sdd->sd);
6290 free_percpu(sdd->sg);
9c3f75cb 6291 free_percpu(sdd->sgp);
54ab4ff4
PZ
6292 }
6293}
6294
2c402dc3
PZ
6295struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6296 struct s_data *d, const struct cpumask *cpu_map,
d069b916 6297 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
6298 int cpu)
6299{
54ab4ff4 6300 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 6301 if (!sd)
d069b916 6302 return child;
2c402dc3
PZ
6303
6304 set_domain_attribute(sd, attr);
6305 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6306 if (child) {
6307 sd->level = child->level + 1;
6308 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6309 child->parent = sd;
60495e77 6310 }
d069b916 6311 sd->child = child;
2c402dc3
PZ
6312
6313 return sd;
6314}
6315
2109b99e
AH
6316/*
6317 * Build sched domains for a given set of cpus and attach the sched domains
6318 * to the individual cpus
6319 */
dce840a0
PZ
6320static int build_sched_domains(const struct cpumask *cpu_map,
6321 struct sched_domain_attr *attr)
2109b99e
AH
6322{
6323 enum s_alloc alloc_state = sa_none;
dce840a0 6324 struct sched_domain *sd;
2109b99e 6325 struct s_data d;
822ff793 6326 int i, ret = -ENOMEM;
9c1cfda2 6327
2109b99e
AH
6328 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6329 if (alloc_state != sa_rootdomain)
6330 goto error;
9c1cfda2 6331
dce840a0 6332 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6333 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6334 struct sched_domain_topology_level *tl;
6335
3bd65a80 6336 sd = NULL;
e3589f6c 6337 for (tl = sched_domain_topology; tl->init; tl++) {
2c402dc3 6338 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
e3589f6c
PZ
6339 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6340 sd->flags |= SD_OVERLAP;
d110235d
PZ
6341 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6342 break;
e3589f6c 6343 }
d274cb30 6344
d069b916
PZ
6345 while (sd->child)
6346 sd = sd->child;
6347
21d42ccf 6348 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
6349 }
6350
6351 /* Build the groups for the domains */
6352 for_each_cpu(i, cpu_map) {
6353 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6354 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6355 if (sd->flags & SD_OVERLAP) {
6356 if (build_overlap_sched_groups(sd, i))
6357 goto error;
6358 } else {
6359 if (build_sched_groups(sd, i))
6360 goto error;
6361 }
1cf51902 6362 }
a06dadbe 6363 }
9c1cfda2 6364
1da177e4 6365 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
6366 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6367 if (!cpumask_test_cpu(i, cpu_map))
6368 continue;
9c1cfda2 6369
dce840a0
PZ
6370 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6371 claim_allocations(i, sd);
cd4ea6ae 6372 init_sched_groups_power(i, sd);
dce840a0 6373 }
f712c0c7 6374 }
9c1cfda2 6375
1da177e4 6376 /* Attach the domains */
dce840a0 6377 rcu_read_lock();
abcd083a 6378 for_each_cpu(i, cpu_map) {
21d42ccf 6379 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6380 cpu_attach_domain(sd, d.rd, i);
1da177e4 6381 }
dce840a0 6382 rcu_read_unlock();
51888ca2 6383
822ff793 6384 ret = 0;
51888ca2 6385error:
2109b99e 6386 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6387 return ret;
1da177e4 6388}
029190c5 6389
acc3f5d7 6390static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6391static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6392static struct sched_domain_attr *dattr_cur;
 6393 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6394
6395/*
6396 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
6397 * cpumask) fails, then fallback to a single sched domain,
6398 * as determined by the single cpumask fallback_doms.
029190c5 6399 */
4212823f 6400static cpumask_var_t fallback_doms;
029190c5 6401
ee79d1bd
HC
6402/*
6403 * arch_update_cpu_topology lets virtualized architectures update the
6404 * cpu core maps. It is supposed to return 1 if the topology changed
6405 * or 0 if it stayed the same.
6406 */
6407int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 6408{
ee79d1bd 6409 return 0;
22e52b07
HC
6410}
6411
acc3f5d7
RR
6412cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6413{
6414 int i;
6415 cpumask_var_t *doms;
6416
6417 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6418 if (!doms)
6419 return NULL;
6420 for (i = 0; i < ndoms; i++) {
6421 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6422 free_sched_domains(doms, i);
6423 return NULL;
6424 }
6425 }
6426 return doms;
6427}
6428
6429void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6430{
6431 unsigned int i;
6432 for (i = 0; i < ndoms; i++)
6433 free_cpumask_var(doms[i]);
6434 kfree(doms);
6435}
6436
1a20ff27 6437/*
41a2d6cf 6438 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
6439 * For now this just excludes isolated cpus, but could be used to
6440 * exclude other special cases in the future.
1a20ff27 6441 */
c4a8849a 6442static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 6443{
7378547f
MM
6444 int err;
6445
22e52b07 6446 arch_update_cpu_topology();
029190c5 6447 ndoms_cur = 1;
acc3f5d7 6448 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 6449 if (!doms_cur)
acc3f5d7
RR
6450 doms_cur = &fallback_doms;
6451 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
1d3504fc 6452 dattr_cur = NULL;
dce840a0 6453 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 6454 register_sched_domain_sysctl();
7378547f
MM
6455
6456 return err;
1a20ff27
DG
6457}
6458
1a20ff27
DG
6459/*
6460 * Detach sched domains from a group of cpus specified in cpu_map
6461 * These cpus will now be attached to the NULL domain
6462 */
96f874e2 6463static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
6464{
6465 int i;
6466
dce840a0 6467 rcu_read_lock();
abcd083a 6468 for_each_cpu(i, cpu_map)
57d885fe 6469 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 6470 rcu_read_unlock();
1a20ff27
DG
6471}
6472
1d3504fc
HS
6473/* handle null as "default" */
6474static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6475 struct sched_domain_attr *new, int idx_new)
6476{
6477 struct sched_domain_attr tmp;
6478
6479 /* fast path */
6480 if (!new && !cur)
6481 return 1;
6482
6483 tmp = SD_ATTR_INIT;
6484 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6485 new ? (new + idx_new) : &tmp,
6486 sizeof(struct sched_domain_attr));
6487}
6488
029190c5
PJ
6489/*
6490 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 6491 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
6492 * doms_new[] to the current sched domain partitioning, doms_cur[].
6493 * It destroys each deleted domain and builds each new domain.
6494 *
acc3f5d7 6495 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
 6496 * The masks don't intersect (don't overlap). We should set up one
6497 * sched domain for each mask. CPUs not in any of the cpumasks will
6498 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
6499 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6500 * it as it is.
6501 *
acc3f5d7
RR
 6502 * The passed-in 'doms_new' should be allocated using
 6503 * alloc_sched_domains. This routine takes ownership of it and will
 6504 * free_sched_domains() it when done with it. If the caller failed the
 6505 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 6506 * and partition_sched_domains() will fall back to the single partition
 6507 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 6508 *
96f874e2 6509 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
6510 * ndoms_new == 0 is a special case for destroying existing domains,
6511 * and it will not create the default domain.
dfb512ec 6512 *
029190c5
PJ
6513 * Call with hotplug lock held
6514 */
acc3f5d7 6515void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 6516 struct sched_domain_attr *dattr_new)
029190c5 6517{
dfb512ec 6518 int i, j, n;
d65bd5ec 6519 int new_topology;
029190c5 6520
712555ee 6521 mutex_lock(&sched_domains_mutex);
a1835615 6522
7378547f
MM
6523 /* always unregister in case we don't destroy any domains */
6524 unregister_sched_domain_sysctl();
6525
d65bd5ec
HC
6526 /* Let architecture update cpu core mappings. */
6527 new_topology = arch_update_cpu_topology();
6528
dfb512ec 6529 n = doms_new ? ndoms_new : 0;
029190c5
PJ
6530
6531 /* Destroy deleted domains */
6532 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 6533 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 6534 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 6535 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
6536 goto match1;
6537 }
6538 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 6539 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
6540match1:
6541 ;
6542 }
6543
e761b772
MK
6544 if (doms_new == NULL) {
6545 ndoms_cur = 0;
acc3f5d7 6546 doms_new = &fallback_doms;
6ad4c188 6547 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 6548 WARN_ON_ONCE(dattr_new);
e761b772
MK
6549 }
6550
029190c5
PJ
6551 /* Build new domains */
6552 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 6553 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 6554 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 6555 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
6556 goto match2;
6557 }
6558 /* no match - add a new doms_new */
dce840a0 6559 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
6560match2:
6561 ;
6562 }
6563
6564 /* Remember the new sched domains */
acc3f5d7
RR
6565 if (doms_cur != &fallback_doms)
6566 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 6567 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 6568 doms_cur = doms_new;
1d3504fc 6569 dattr_cur = dattr_new;
029190c5 6570 ndoms_cur = ndoms_new;
7378547f
MM
6571
6572 register_sched_domain_sysctl();
a1835615 6573
712555ee 6574 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
6575}
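/*
 * A minimal usage sketch (hypothetical caller; the real users are the
 * cpuset code and the power-savings knobs below). Splitting an 8-CPU
 * box into two independent balance domains, CPUs 0-3 and CPUs 4-7,
 * with error handling trimmed:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *	int cpu;
 *
 *	cpumask_clear(doms[0]);
 *	cpumask_clear(doms[1]);
 *	for (cpu = 0; cpu < 8; cpu++)
 *		cpumask_set_cpu(cpu, cpu < 4 ? doms[0] : doms[1]);
 *
 *	get_online_cpus();
 *	partition_sched_domains(2, doms, NULL);
 *	put_online_cpus();
 *
 * partition_sched_domains() takes ownership of 'doms' and will
 * free_sched_domains() it on the next repartition; afterwards tasks are
 * only balanced within their own partition, and CPUs outside both
 * masks see no load balancing at all.
 */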
6576
5c45bf27 6577#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
c4a8849a 6578static void reinit_sched_domains(void)
5c45bf27 6579{
95402b38 6580 get_online_cpus();
dfb512ec
MK
6581
6582 /* Destroy domains first to force the rebuild */
6583 partition_sched_domains(0, NULL, NULL);
6584
e761b772 6585 rebuild_sched_domains();
95402b38 6586 put_online_cpus();
5c45bf27
SS
6587}
6588
6589static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6590{
afb8a9b7 6591 unsigned int level = 0;
5c45bf27 6592
afb8a9b7
GS
6593 if (sscanf(buf, "%u", &level) != 1)
6594 return -EINVAL;
6595
6596 /*
 6597 * level is always positive, so don't check for
 6598 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 6599 * What happens on a 0 or 1 byte write?
 6600 * Do we need to check count as well?
6601 */
6602
6603 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5c45bf27
SS
6604 return -EINVAL;
6605
6606 if (smt)
afb8a9b7 6607 sched_smt_power_savings = level;
5c45bf27 6608 else
afb8a9b7 6609 sched_mc_power_savings = level;
5c45bf27 6610
c4a8849a 6611 reinit_sched_domains();
5c45bf27 6612
c70f22d2 6613 return count;
5c45bf27
SS
6614}
6615
5c45bf27 6616#ifdef CONFIG_SCHED_MC
f718cd4a 6617static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
c9be0a36 6618 struct sysdev_class_attribute *attr,
f718cd4a 6619 char *page)
5c45bf27
SS
6620{
6621 return sprintf(page, "%u\n", sched_mc_power_savings);
6622}
f718cd4a 6623static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
c9be0a36 6624 struct sysdev_class_attribute *attr,
48f24c4d 6625 const char *buf, size_t count)
5c45bf27
SS
6626{
6627 return sched_power_savings_store(buf, count, 0);
6628}
f718cd4a
AK
6629static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
6630 sched_mc_power_savings_show,
6631 sched_mc_power_savings_store);
5c45bf27
SS
6632#endif
6633
6634#ifdef CONFIG_SCHED_SMT
f718cd4a 6635static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
c9be0a36 6636 struct sysdev_class_attribute *attr,
f718cd4a 6637 char *page)
5c45bf27
SS
6638{
6639 return sprintf(page, "%u\n", sched_smt_power_savings);
6640}
f718cd4a 6641static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
c9be0a36 6642 struct sysdev_class_attribute *attr,
48f24c4d 6643 const char *buf, size_t count)
5c45bf27
SS
6644{
6645 return sched_power_savings_store(buf, count, 1);
6646}
f718cd4a
AK
6647static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
6648 sched_smt_power_savings_show,
6707de00
AB
6649 sched_smt_power_savings_store);
6650#endif
6651
39aac648 6652int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6707de00
AB
6653{
6654 int err = 0;
6655
6656#ifdef CONFIG_SCHED_SMT
6657 if (smt_capable())
6658 err = sysfs_create_file(&cls->kset.kobj,
6659 &attr_sched_smt_power_savings.attr);
6660#endif
6661#ifdef CONFIG_SCHED_MC
6662 if (!err && mc_capable())
6663 err = sysfs_create_file(&cls->kset.kobj,
6664 &attr_sched_mc_power_savings.attr);
6665#endif
6666 return err;
6667}
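/*
 * With the attributes created above, the policy is driven entirely from
 * userspace, e.g. (paths assume the usual sysdev layout of the cpu
 * class):
 *
 *	echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *	echo 0 > /sys/devices/system/cpu/sched_smt_power_savings
 *
 * Every write funnels through sched_power_savings_store(), which
 * updates sched_mc_power_savings/sched_smt_power_savings and calls
 * reinit_sched_domains() to tear down and rebuild all sched domains so
 * the new level takes effect.
 */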
6d6bc0ad 6668#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5c45bf27 6669
1da177e4 6670/*
3a101d05
TH
6671 * Update cpusets according to cpu_active mask. If cpusets are
6672 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6673 * around partition_sched_domains().
1da177e4 6674 */
0b2e918a
TH
6675static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6676 void *hcpu)
e761b772 6677{
3a101d05 6678 switch (action & ~CPU_TASKS_FROZEN) {
e761b772 6679 case CPU_ONLINE:
6ad4c188 6680 case CPU_DOWN_FAILED:
3a101d05 6681 cpuset_update_active_cpus();
e761b772 6682 return NOTIFY_OK;
3a101d05
TH
6683 default:
6684 return NOTIFY_DONE;
6685 }
6686}
e761b772 6687
0b2e918a
TH
6688static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6689 void *hcpu)
3a101d05
TH
6690{
6691 switch (action & ~CPU_TASKS_FROZEN) {
6692 case CPU_DOWN_PREPARE:
6693 cpuset_update_active_cpus();
6694 return NOTIFY_OK;
e761b772
MK
6695 default:
6696 return NOTIFY_DONE;
6697 }
6698}
e761b772 6699
1da177e4
LT
6700void __init sched_init_smp(void)
6701{
dcc30a35
RR
6702 cpumask_var_t non_isolated_cpus;
6703
6704 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 6705 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 6706
95402b38 6707 get_online_cpus();
712555ee 6708 mutex_lock(&sched_domains_mutex);
c4a8849a 6709 init_sched_domains(cpu_active_mask);
dcc30a35
RR
6710 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6711 if (cpumask_empty(non_isolated_cpus))
6712 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 6713 mutex_unlock(&sched_domains_mutex);
95402b38 6714 put_online_cpus();
e761b772 6715
3a101d05
TH
6716 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6717 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
6718
6719 /* RT runtime code needs to handle some hotplug events */
6720 hotcpu_notifier(update_runtime, 0);
6721
b328ca18 6722 init_hrtick();
5c1e1767
NP
6723
6724 /* Move init over to a non-isolated CPU */
dcc30a35 6725 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 6726 BUG();
19978ca6 6727 sched_init_granularity();
dcc30a35 6728 free_cpumask_var(non_isolated_cpus);
4212823f 6729
0e3900e6 6730 init_sched_rt_class();
1da177e4
LT
6731}
6732#else
6733void __init sched_init_smp(void)
6734{
19978ca6 6735 sched_init_granularity();
1da177e4
LT
6736}
6737#endif /* CONFIG_SMP */
6738
cd1bb94b
AB
6739const_debug unsigned int sysctl_timer_migration = 1;
6740
1da177e4
LT
6741int in_sched_functions(unsigned long addr)
6742{
1da177e4
LT
6743 return in_lock_functions(addr) ||
6744 (addr >= (unsigned long)__sched_text_start
6745 && addr < (unsigned long)__sched_text_end);
6746}
6747
029632fb
PZ
6748#ifdef CONFIG_CGROUP_SCHED
6749struct task_group root_task_group;
052f1dc7 6750#endif
6f505b16 6751
029632fb 6752DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
6f505b16 6753
1da177e4
LT
6754void __init sched_init(void)
6755{
dd41f596 6756 int i, j;
434d53b0
MT
6757 unsigned long alloc_size = 0, ptr;
6758
6759#ifdef CONFIG_FAIR_GROUP_SCHED
6760 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6761#endif
6762#ifdef CONFIG_RT_GROUP_SCHED
6763 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 6764#endif
df7c8e84 6765#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 6766 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 6767#endif
434d53b0 6768 if (alloc_size) {
36b7b6d4 6769 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
6770
6771#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 6772 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
6773 ptr += nr_cpu_ids * sizeof(void **);
6774
07e06b01 6775 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 6776 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 6777
6d6bc0ad 6778#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 6779#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6780 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
6781 ptr += nr_cpu_ids * sizeof(void **);
6782
07e06b01 6783 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
6784 ptr += nr_cpu_ids * sizeof(void **);
6785
6d6bc0ad 6786#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
6787#ifdef CONFIG_CPUMASK_OFFSTACK
6788 for_each_possible_cpu(i) {
6789 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6790 ptr += cpumask_size();
6791 }
6792#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 6793 }
dd41f596 6794
57d885fe
GH
6795#ifdef CONFIG_SMP
6796 init_defrootdomain();
6797#endif
6798
d0b27fa7
PZ
6799 init_rt_bandwidth(&def_rt_bandwidth,
6800 global_rt_period(), global_rt_runtime());
6801
6802#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6803 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 6804 global_rt_period(), global_rt_runtime());
6d6bc0ad 6805#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 6806
7c941438 6807#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
6808 list_add(&root_task_group.list, &task_groups);
6809 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 6810 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 6811 autogroup_init(&init_task);
54c707e9 6812
7c941438 6813#endif /* CONFIG_CGROUP_SCHED */
6f505b16 6814
54c707e9
GC
6815#ifdef CONFIG_CGROUP_CPUACCT
6816 root_cpuacct.cpustat = &kernel_cpustat;
6817 root_cpuacct.cpuusage = alloc_percpu(u64);
6818 /* Too early, not expected to fail */
6819 BUG_ON(!root_cpuacct.cpuusage);
6820#endif
0a945022 6821 for_each_possible_cpu(i) {
70b97a7f 6822 struct rq *rq;
1da177e4
LT
6823
6824 rq = cpu_rq(i);
05fa785c 6825 raw_spin_lock_init(&rq->lock);
7897986b 6826 rq->nr_running = 0;
dce48a84
TG
6827 rq->calc_load_active = 0;
6828 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 6829 init_cfs_rq(&rq->cfs);
6f505b16 6830 init_rt_rq(&rq->rt, rq);
dd41f596 6831#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 6832 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 6833 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 6834 /*
07e06b01 6835 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
6836 *
6837 * In case of task-groups formed thr' the cgroup filesystem, it
6838 * gets 100% of the cpu resources in the system. This overall
6839 * system cpu resource is divided among the tasks of
07e06b01 6840 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
6841 * based on each entity's (task or task-group's) weight
6842 * (se->load.weight).
6843 *
07e06b01 6844 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
6845 * 1024) and two child groups A0 and A1 (of weight 1024 each),
6846 * then A0's share of the cpu resource is:
6847 *
0d905bca 6848 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 6849 *
07e06b01
YZ
6850 * We achieve this by letting root_task_group's tasks sit
6851 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 6852 */
ab84d31e 6853 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 6854 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
6855#endif /* CONFIG_FAIR_GROUP_SCHED */
6856
6857 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 6858#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 6859 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 6860 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 6861#endif
1da177e4 6862
dd41f596
IM
6863 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6864 rq->cpu_load[j] = 0;
fdf3e95d
VP
6865
6866 rq->last_load_update_tick = jiffies;
6867
1da177e4 6868#ifdef CONFIG_SMP
41c7ce9a 6869 rq->sd = NULL;
57d885fe 6870 rq->rd = NULL;
1399fa78 6871 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 6872 rq->post_schedule = 0;
1da177e4 6873 rq->active_balance = 0;
dd41f596 6874 rq->next_balance = jiffies;
1da177e4 6875 rq->push_cpu = 0;
0a2966b4 6876 rq->cpu = i;
1f11eb6a 6877 rq->online = 0;
eae0c9df
MG
6878 rq->idle_stamp = 0;
6879 rq->avg_idle = 2*sysctl_sched_migration_cost;
dc938520 6880 rq_attach_root(rq, &def_root_domain);
83cd4fe2 6881#ifdef CONFIG_NO_HZ
1c792db7 6882 rq->nohz_flags = 0;
83cd4fe2 6883#endif
1da177e4 6884#endif
8f4d37ec 6885 init_rq_hrtick(rq);
1da177e4 6886 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
6887 }
6888
2dd73a4f 6889 set_load_weight(&init_task);
b50f60ce 6890
e107be36
AK
6891#ifdef CONFIG_PREEMPT_NOTIFIERS
6892 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6893#endif
6894
b50f60ce 6895#ifdef CONFIG_RT_MUTEXES
732375c6 6896 plist_head_init(&init_task.pi_waiters);
b50f60ce
HC
6897#endif
6898
1da177e4
LT
6899 /*
6900 * The boot idle thread does lazy MMU switching as well:
6901 */
6902 atomic_inc(&init_mm.mm_count);
6903 enter_lazy_tlb(&init_mm, current);
6904
6905 /*
6906 * Make us the idle thread. Technically, schedule() should not be
 6907 * called from this thread; however, somewhere below it might be.
 6908 * But because we are the idle thread, we just pick up running again
6909 * when this runqueue becomes "idle".
6910 */
6911 init_idle(current, smp_processor_id());
dce48a84
TG
6912
6913 calc_load_update = jiffies + LOAD_FREQ;
6914
dd41f596
IM
6915 /*
6916 * During early bootup we pretend to be a normal task:
6917 */
6918 current->sched_class = &fair_sched_class;
6892b75e 6919
bf4d83f6 6920#ifdef CONFIG_SMP
4cb98839 6921 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
6922 /* May be allocated at isolcpus cmdline parse time */
6923 if (cpu_isolated_map == NULL)
6924 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
029632fb
PZ
6925#endif
6926 init_sched_fair_class();
6a7b3dc3 6927
6892b75e 6928 scheduler_running = 1;
1da177e4
LT
6929}
6930
d902db1e 6931#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
6932static inline int preempt_count_equals(int preempt_offset)
6933{
234da7bc 6934 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 6935
4ba8216c 6936 return (nested == preempt_offset);
e4aafea2
FW
6937}
6938
d894837f 6939void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6940{
1da177e4
LT
6941 static unsigned long prev_jiffy; /* ratelimiting */
6942
b3fbab05 6943 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
e4aafea2
FW
6944 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6945 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
6946 return;
6947 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6948 return;
6949 prev_jiffy = jiffies;
6950
3df0fc5b
PZ
6951 printk(KERN_ERR
6952 "BUG: sleeping function called from invalid context at %s:%d\n",
6953 file, line);
6954 printk(KERN_ERR
6955 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6956 in_atomic(), irqs_disabled(),
6957 current->pid, current->comm);
aef745fc
IM
6958
6959 debug_show_held_locks(current);
6960 if (irqs_disabled())
6961 print_irqtrace_events(current);
6962 dump_stack();
1da177e4
LT
6963}
6964EXPORT_SYMBOL(__might_sleep);
6965#endif
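/*
 * Typical use is through the might_sleep() wrapper placed at the top of
 * functions that may block, so that a caller holding a spinlock or
 * running with interrupts disabled gets a loud, rate-limited warning.
 * A sketch of a hypothetical caller:
 *
 *	void my_driver_helper(struct my_dev *dev)
 *	{
 *		might_sleep();
 *		mutex_lock(&dev->lock);
 *		...
 *	}
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP enabled, might_sleep() roughly expands
 * to __might_sleep(__FILE__, __LINE__, 0) plus a voluntary-preemption
 * point; otherwise it is only the preemption hint.
 */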
6966
6967#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
6968static void normalize_task(struct rq *rq, struct task_struct *p)
6969{
da7a735e
PZ
6970 const struct sched_class *prev_class = p->sched_class;
6971 int old_prio = p->prio;
3a5e4dc1 6972 int on_rq;
3e51f33f 6973
fd2f4419 6974 on_rq = p->on_rq;
3a5e4dc1
AK
6975 if (on_rq)
6976 deactivate_task(rq, p, 0);
6977 __setscheduler(rq, p, SCHED_NORMAL, 0);
6978 if (on_rq) {
6979 activate_task(rq, p, 0);
6980 resched_task(rq->curr);
6981 }
da7a735e
PZ
6982
6983 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
6984}
6985
1da177e4
LT
6986void normalize_rt_tasks(void)
6987{
a0f98a1c 6988 struct task_struct *g, *p;
1da177e4 6989 unsigned long flags;
70b97a7f 6990 struct rq *rq;
1da177e4 6991
4cf5d77a 6992 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 6993 do_each_thread(g, p) {
178be793
IM
6994 /*
6995 * Only normalize user tasks:
6996 */
6997 if (!p->mm)
6998 continue;
6999
6cfb0d5d 7000 p->se.exec_start = 0;
6cfb0d5d 7001#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7002 p->se.statistics.wait_start = 0;
7003 p->se.statistics.sleep_start = 0;
7004 p->se.statistics.block_start = 0;
6cfb0d5d 7005#endif
dd41f596
IM
7006
7007 if (!rt_task(p)) {
7008 /*
7009 * Renice negative nice level userspace
7010 * tasks back to 0:
7011 */
7012 if (TASK_NICE(p) < 0 && p->mm)
7013 set_user_nice(p, 0);
1da177e4 7014 continue;
dd41f596 7015 }
1da177e4 7016
1d615482 7017 raw_spin_lock(&p->pi_lock);
b29739f9 7018 rq = __task_rq_lock(p);
1da177e4 7019
178be793 7020 normalize_task(rq, p);
3a5e4dc1 7021
b29739f9 7022 __task_rq_unlock(rq);
1d615482 7023 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
7024 } while_each_thread(g, p);
7025
4cf5d77a 7026 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
7027}
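/*
 * Illustrative note, not part of the original source: normalize_rt_tasks() is
 * wired up as the magic SysRq 'n' handler (e.g. "echo n > /proc/sysrq-trigger"
 * from userspace), which demotes all user RT tasks back to SCHED_NORMAL when
 * runaway RT tasks have wedged the box.
 */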
7028
7029#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7030
67fc4e0c 7031#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7032/*
67fc4e0c 7033 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7034 *
7035 * They can only be called when the whole system has been
7036 * stopped - every CPU needs to be quiescent, and no scheduling
7037 * activity can take place. Using them for anything else would
7038 * be a serious bug, and as a result, they aren't even visible
7039 * under any other configuration.
7040 */
7041
7042/**
7043 * curr_task - return the current task for a given cpu.
7044 * @cpu: the processor in question.
7045 *
7046 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7047 */
36c8b586 7048struct task_struct *curr_task(int cpu)
1df5c10a
LT
7049{
7050 return cpu_curr(cpu);
7051}
7052
67fc4e0c
JW
7053#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7054
7055#ifdef CONFIG_IA64
1df5c10a
LT
7056/**
7057 * set_curr_task - set the current task for a given cpu.
7058 * @cpu: the processor in question.
7059 * @p: the task pointer to set.
7060 *
7061 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7062 * are serviced on a separate stack. It allows the architecture to switch the
7063 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7064 * must be called with all CPUs synchronized and interrupts disabled; the
7065 * caller must save the original value of the current task (see
7066 * curr_task() above) and restore that value before re-enabling interrupts and
7067 * restarting the system.
7068 *
7069 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7070 */
36c8b586 7071void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7072{
7073 cpu_curr(cpu) = p;
7074}
7075
7076#endif
29f59db3 7077
052f1dc7 7078#ifdef CONFIG_RT_GROUP_SCHED
6d6bc0ad 7079#else /* !CONFIG_RT_GROUP_SCHED */
6d6bc0ad 7080#endif /* CONFIG_RT_GROUP_SCHED */
bccbe08a 7081
7c941438 7082#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7083/* task_group_lock serializes the addition/removal of task groups */
7084static DEFINE_SPINLOCK(task_group_lock);
7085
bccbe08a
PZ
7086static void free_sched_group(struct task_group *tg)
7087{
7088 free_fair_sched_group(tg);
7089 free_rt_sched_group(tg);
e9aa1dd1 7090 autogroup_free(tg);
bccbe08a
PZ
7091 kfree(tg);
7092}
7093
7094/* allocate runqueue etc for a new task group */
ec7dc8ac 7095struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7096{
7097 struct task_group *tg;
7098 unsigned long flags;
bccbe08a
PZ
7099
7100 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7101 if (!tg)
7102 return ERR_PTR(-ENOMEM);
7103
ec7dc8ac 7104 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7105 goto err;
7106
ec7dc8ac 7107 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7108 goto err;
7109
8ed36996 7110 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7111 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7112
7113 WARN_ON(!parent); /* root should already exist */
7114
7115 tg->parent = parent;
f473aa5e 7116 INIT_LIST_HEAD(&tg->children);
09f2724a 7117 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7118 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 7119
9b5b7751 7120 return tg;
29f59db3
SV
7121
7122err:
6f505b16 7123 free_sched_group(tg);
29f59db3
SV
7124 return ERR_PTR(-ENOMEM);
7125}
7126
9b5b7751 7127/* rcu callback to free various structures associated with a task group */
6f505b16 7128static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7129{
29f59db3 7130 /* now it should be safe to free those cfs_rqs */
6f505b16 7131 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7132}
7133
9b5b7751 7134/* Destroy runqueue etc associated with a task group */
4cf86d77 7135void sched_destroy_group(struct task_group *tg)
29f59db3 7136{
8ed36996 7137 unsigned long flags;
9b5b7751 7138 int i;
29f59db3 7139
3d4b47b4
PZ
7140 /* end participation in shares distribution */
7141 for_each_possible_cpu(i)
bccbe08a 7142 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7143
7144 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7145 list_del_rcu(&tg->list);
f473aa5e 7146 list_del_rcu(&tg->siblings);
8ed36996 7147 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 7148
9b5b7751 7149 /* wait for possible concurrent references to cfs_rqs to complete */
6f505b16 7150 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
7151}
7152
9b5b7751 7153/* Change a task's runqueue when it moves between groups.
3a252015
IM
7154 * The caller of this function should have put the task in its new group
7155 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7156 * reflect its new group.
9b5b7751
SV
7157 */
7158void sched_move_task(struct task_struct *tsk)
29f59db3
SV
7159{
7160 int on_rq, running;
7161 unsigned long flags;
7162 struct rq *rq;
7163
7164 rq = task_rq_lock(tsk, &flags);
7165
051a1d1a 7166 running = task_current(rq, tsk);
fd2f4419 7167 on_rq = tsk->on_rq;
29f59db3 7168
0e1f3483 7169 if (on_rq)
29f59db3 7170 dequeue_task(rq, tsk, 0);
0e1f3483
HS
7171 if (unlikely(running))
7172 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 7173
810b3817 7174#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
7175 if (tsk->sched_class->task_move_group)
7176 tsk->sched_class->task_move_group(tsk, on_rq);
7177 else
810b3817 7178#endif
b2b5ce02 7179 set_task_rq(tsk, task_cpu(tsk));
810b3817 7180
0e1f3483
HS
7181 if (unlikely(running))
7182 tsk->sched_class->set_curr_task(rq);
7183 if (on_rq)
371fd7e7 7184 enqueue_task(rq, tsk, 0);
29f59db3 7185
0122ec5b 7186 task_rq_unlock(rq, tsk, &flags);
29f59db3 7187}
7c941438 7188#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7189
052f1dc7 7190#ifdef CONFIG_FAIR_GROUP_SCHED
052f1dc7 7191#endif
5cb350ba 7192
a790de99 7193#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
9f0c1e56
PZ
7194static unsigned long to_ratio(u64 period, u64 runtime)
7195{
7196 if (runtime == RUNTIME_INF)
9a7e0b18 7197 return 1ULL << 20;
9f0c1e56 7198
9a7e0b18 7199 return div64_u64(runtime << 20, period);
9f0c1e56 7200}
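/*
 * Worked example, not part of the original source: to_ratio() expresses
 * runtime/period as a 20-bit fixed-point fraction. With the default global
 * limits (rt_period = 1s, rt_runtime = 0.95s, both in nanoseconds here):
 *
 *	to_ratio(1000000000ULL, 950000000ULL)
 *		== div64_u64(950000000ULL << 20, 1000000000ULL)
 *		== 996147			(~ 0.95 * 2^20)
 *
 * RUNTIME_INF maps to a full 1 << 20, i.e. runtime equal to the whole period.
 */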
a790de99
PT
7201#endif
7202
7203#ifdef CONFIG_RT_GROUP_SCHED
7204/*
7205 * Ensure that the real time constraints are schedulable.
7206 */
7207static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7208
9a7e0b18
PZ
7209/* Must be called with tasklist_lock held */
7210static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7211{
9a7e0b18 7212 struct task_struct *g, *p;
b40b2e8e 7213
9a7e0b18 7214 do_each_thread(g, p) {
029632fb 7215 if (rt_task(p) && task_rq(p)->rt.tg == tg)
9a7e0b18
PZ
7216 return 1;
7217 } while_each_thread(g, p);
b40b2e8e 7218
9a7e0b18
PZ
7219 return 0;
7220}
b40b2e8e 7221
9a7e0b18
PZ
7222struct rt_schedulable_data {
7223 struct task_group *tg;
7224 u64 rt_period;
7225 u64 rt_runtime;
7226};
b40b2e8e 7227
a790de99 7228static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7229{
7230 struct rt_schedulable_data *d = data;
7231 struct task_group *child;
7232 unsigned long total, sum = 0;
7233 u64 period, runtime;
b40b2e8e 7234
9a7e0b18
PZ
7235 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7236 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7237
9a7e0b18
PZ
7238 if (tg == d->tg) {
7239 period = d->rt_period;
7240 runtime = d->rt_runtime;
b40b2e8e 7241 }
b40b2e8e 7242
4653f803
PZ
7243 /*
7244 * Cannot have more runtime than the period.
7245 */
7246 if (runtime > period && runtime != RUNTIME_INF)
7247 return -EINVAL;
6f505b16 7248
4653f803
PZ
7249 /*
7250 * Ensure we don't starve existing RT tasks.
7251 */
9a7e0b18
PZ
7252 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7253 return -EBUSY;
6f505b16 7254
9a7e0b18 7255 total = to_ratio(period, runtime);
6f505b16 7256
4653f803
PZ
7257 /*
7258 * Nobody can have more than the global setting allows.
7259 */
7260 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7261 return -EINVAL;
6f505b16 7262
4653f803
PZ
7263 /*
7264 * The sum of our children's runtime should not exceed our own.
7265 */
9a7e0b18
PZ
7266 list_for_each_entry_rcu(child, &tg->children, siblings) {
7267 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7268 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7269
9a7e0b18
PZ
7270 if (child == d->tg) {
7271 period = d->rt_period;
7272 runtime = d->rt_runtime;
7273 }
6f505b16 7274
9a7e0b18 7275 sum += to_ratio(period, runtime);
9f0c1e56 7276 }
6f505b16 7277
9a7e0b18
PZ
7278 if (sum > total)
7279 return -EINVAL;
7280
7281 return 0;
6f505b16
PZ
7282}
7283
9a7e0b18 7284static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7285{
8277434e
PT
7286 int ret;
7287
9a7e0b18
PZ
7288 struct rt_schedulable_data data = {
7289 .tg = tg,
7290 .rt_period = period,
7291 .rt_runtime = runtime,
7292 };
7293
8277434e
PT
7294 rcu_read_lock();
7295 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7296 rcu_read_unlock();
7297
7298 return ret;
521f1a24
DG
7299}
7300
ab84d31e 7301static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7302 u64 rt_period, u64 rt_runtime)
6f505b16 7303{
ac086bc2 7304 int i, err = 0;
9f0c1e56 7305
9f0c1e56 7306 mutex_lock(&rt_constraints_mutex);
521f1a24 7307 read_lock(&tasklist_lock);
9a7e0b18
PZ
7308 err = __rt_schedulable(tg, rt_period, rt_runtime);
7309 if (err)
9f0c1e56 7310 goto unlock;
ac086bc2 7311
0986b11b 7312 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7313 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7314 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7315
7316 for_each_possible_cpu(i) {
7317 struct rt_rq *rt_rq = tg->rt_rq[i];
7318
0986b11b 7319 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7320 rt_rq->rt_runtime = rt_runtime;
0986b11b 7321 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7322 }
0986b11b 7323 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7324unlock:
521f1a24 7325 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7326 mutex_unlock(&rt_constraints_mutex);
7327
7328 return err;
6f505b16
PZ
7329}
7330
d0b27fa7
PZ
7331int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7332{
7333 u64 rt_runtime, rt_period;
7334
7335 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7336 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7337 if (rt_runtime_us < 0)
7338 rt_runtime = RUNTIME_INF;
7339
ab84d31e 7340 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7341}
7342
9f0c1e56
PZ
7343long sched_group_rt_runtime(struct task_group *tg)
7344{
7345 u64 rt_runtime_us;
7346
d0b27fa7 7347 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7348 return -1;
7349
d0b27fa7 7350 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7351 do_div(rt_runtime_us, NSEC_PER_USEC);
7352 return rt_runtime_us;
7353}
d0b27fa7
PZ
7354
7355int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7356{
7357 u64 rt_runtime, rt_period;
7358
7359 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7360 rt_runtime = tg->rt_bandwidth.rt_runtime;
7361
619b0488
R
7362 if (rt_period == 0)
7363 return -EINVAL;
7364
ab84d31e 7365 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7366}
7367
7368long sched_group_rt_period(struct task_group *tg)
7369{
7370 u64 rt_period_us;
7371
7372 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7373 do_div(rt_period_us, NSEC_PER_USEC);
7374 return rt_period_us;
7375}
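/*
 * Illustrative sketch, not part of the original source: how the setters above
 * would be driven to give a task group 5ms of RT runtime per 100ms period
 * (hypothetical helper). Both interfaces take microseconds and convert to
 * nanoseconds internally.
 */
#if 0
static int example_limit_rt_group(struct task_group *tg)
{
	int ret;

	ret = sched_group_set_rt_period(tg, 100000);	/* 100ms period */
	if (ret)
		return ret;

	return sched_group_set_rt_runtime(tg, 5000);	/* 5ms runtime */
}
#endif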
7376
7377static int sched_rt_global_constraints(void)
7378{
4653f803 7379 u64 runtime, period;
d0b27fa7
PZ
7380 int ret = 0;
7381
ec5d4989
HS
7382 if (sysctl_sched_rt_period <= 0)
7383 return -EINVAL;
7384
4653f803
PZ
7385 runtime = global_rt_runtime();
7386 period = global_rt_period();
7387
7388 /*
7389 * Sanity check on the sysctl variables.
7390 */
7391 if (runtime > period && runtime != RUNTIME_INF)
7392 return -EINVAL;
10b612f4 7393
d0b27fa7 7394 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7395 read_lock(&tasklist_lock);
4653f803 7396 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7397 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7398 mutex_unlock(&rt_constraints_mutex);
7399
7400 return ret;
7401}
54e99124
DG
7402
7403int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7404{
7405 /* Don't accept realtime tasks when there is no way for them to run */
7406 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7407 return 0;
7408
7409 return 1;
7410}
7411
6d6bc0ad 7412#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7413static int sched_rt_global_constraints(void)
7414{
ac086bc2
PZ
7415 unsigned long flags;
7416 int i;
7417
ec5d4989
HS
7418 if (sysctl_sched_rt_period <= 0)
7419 return -EINVAL;
7420
60aa605d
PZ
7421 /*
7422 * There are always some RT tasks in the root group
7423 * -- migration, kstopmachine etc.
7424 */
7425 if (sysctl_sched_rt_runtime == 0)
7426 return -EBUSY;
7427
0986b11b 7428 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7429 for_each_possible_cpu(i) {
7430 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7431
0986b11b 7432 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7433 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7434 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7435 }
0986b11b 7436 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7437
d0b27fa7
PZ
7438 return 0;
7439}
6d6bc0ad 7440#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7441
7442int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 7443 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
7444 loff_t *ppos)
7445{
7446 int ret;
7447 int old_period, old_runtime;
7448 static DEFINE_MUTEX(mutex);
7449
7450 mutex_lock(&mutex);
7451 old_period = sysctl_sched_rt_period;
7452 old_runtime = sysctl_sched_rt_runtime;
7453
8d65af78 7454 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
7455
7456 if (!ret && write) {
7457 ret = sched_rt_global_constraints();
7458 if (ret) {
7459 sysctl_sched_rt_period = old_period;
7460 sysctl_sched_rt_runtime = old_runtime;
7461 } else {
7462 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7463 def_rt_bandwidth.rt_period =
7464 ns_to_ktime(global_rt_period());
7465 }
7466 }
7467 mutex_unlock(&mutex);
7468
7469 return ret;
7470}
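/*
 * Illustrative note, not part of the original source: this handler sits behind
 * the sched_rt_period_us/sched_rt_runtime_us sysctls, e.g.
 *
 *	echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * A new value only sticks if sched_rt_global_constraints() accepts it;
 * otherwise the old period/runtime pair is restored.
 */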
68318b8e 7471
052f1dc7 7472#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
7473
7474/* return corresponding task_group object of a cgroup */
2b01dfe3 7475static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 7476{
2b01dfe3
PM
7477 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7478 struct task_group, css);
68318b8e
SV
7479}
7480
7481static struct cgroup_subsys_state *
2b01dfe3 7482cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 7483{
ec7dc8ac 7484 struct task_group *tg, *parent;
68318b8e 7485
2b01dfe3 7486 if (!cgrp->parent) {
68318b8e 7487 /* This is early initialization for the top cgroup */
07e06b01 7488 return &root_task_group.css;
68318b8e
SV
7489 }
7490
ec7dc8ac
DG
7491 parent = cgroup_tg(cgrp->parent);
7492 tg = sched_create_group(parent);
68318b8e
SV
7493 if (IS_ERR(tg))
7494 return ERR_PTR(-ENOMEM);
7495
68318b8e
SV
7496 return &tg->css;
7497}
7498
41a2d6cf
IM
7499static void
7500cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 7501{
2b01dfe3 7502 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
7503
7504 sched_destroy_group(tg);
7505}
7506
41a2d6cf 7507static int
be367d09 7508cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e 7509{
b68aa230 7510#ifdef CONFIG_RT_GROUP_SCHED
54e99124 7511 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
b68aa230
PZ
7512 return -EINVAL;
7513#else
68318b8e
SV
7514 /* We don't support RT-tasks being in separate groups */
7515 if (tsk->sched_class != &fair_sched_class)
7516 return -EINVAL;
b68aa230 7517#endif
be367d09
BB
7518 return 0;
7519}
68318b8e 7520
68318b8e 7521static void
f780bdb7 7522cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e
SV
7523{
7524 sched_move_task(tsk);
7525}
7526
068c5cc5 7527static void
d41d5a01
PZ
7528cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7529 struct cgroup *old_cgrp, struct task_struct *task)
068c5cc5
PZ
7530{
7531 /*
7532 * cgroup_exit() is called in the copy_process() failure path.
7533 * Ignore this case since the task hasn't run yet; this avoids
7534 * trying to poke a half freed task state from generic code.
7535 */
7536 if (!(task->flags & PF_EXITING))
7537 return;
7538
7539 sched_move_task(task);
7540}
7541
052f1dc7 7542#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 7543static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 7544 u64 shareval)
68318b8e 7545{
c8b28116 7546 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
68318b8e
SV
7547}
7548
f4c753b7 7549static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 7550{
2b01dfe3 7551 struct task_group *tg = cgroup_tg(cgrp);
68318b8e 7552
c8b28116 7553 return (u64) scale_load_down(tg->shares);
68318b8e 7554}
ab84d31e
PT
7555
7556#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
7557static DEFINE_MUTEX(cfs_constraints_mutex);
7558
ab84d31e
PT
7559const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7560const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7561
a790de99
PT
7562static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7563
ab84d31e
PT
7564static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7565{
56f570e5 7566 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 7567 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
7568
7569 if (tg == &root_task_group)
7570 return -EINVAL;
7571
7572 /*
7573 * Ensure we have at least some amount of bandwidth every period. This is
7574 * to prevent reaching a state of large arrears when throttled via
7575 * entity_tick() resulting in prolonged exit starvation.
7576 */
7577 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7578 return -EINVAL;
7579
7580 /*
7581 * Likewise, bound things on the other side by preventing insane quota
7582 * periods. This also allows us to normalize in computing quota
7583 * feasibility.
7584 */
7585 if (period > max_cfs_quota_period)
7586 return -EINVAL;
7587
a790de99
PT
7588 mutex_lock(&cfs_constraints_mutex);
7589 ret = __cfs_schedulable(tg, period, quota);
7590 if (ret)
7591 goto out_unlock;
7592
58088ad0 7593 runtime_enabled = quota != RUNTIME_INF;
56f570e5
PT
7594 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7595 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
ab84d31e
PT
7596 raw_spin_lock_irq(&cfs_b->lock);
7597 cfs_b->period = ns_to_ktime(period);
7598 cfs_b->quota = quota;
58088ad0 7599
a9cf55b2 7600 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0
PT
7601 /* restart the period timer (if active) to handle new period expiry */
7602 if (runtime_enabled && cfs_b->timer_active) {
7603 /* force a reprogram */
7604 cfs_b->timer_active = 0;
7605 __start_cfs_bandwidth(cfs_b);
7606 }
ab84d31e
PT
7607 raw_spin_unlock_irq(&cfs_b->lock);
7608
7609 for_each_possible_cpu(i) {
7610 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 7611 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
7612
7613 raw_spin_lock_irq(&rq->lock);
58088ad0 7614 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 7615 cfs_rq->runtime_remaining = 0;
671fd9da 7616
029632fb 7617 if (cfs_rq->throttled)
671fd9da 7618 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
7619 raw_spin_unlock_irq(&rq->lock);
7620 }
a790de99
PT
7621out_unlock:
7622 mutex_unlock(&cfs_constraints_mutex);
ab84d31e 7623
a790de99 7624 return ret;
ab84d31e
PT
7625}
7626
7627int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7628{
7629 u64 quota, period;
7630
029632fb 7631 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7632 if (cfs_quota_us < 0)
7633 quota = RUNTIME_INF;
7634 else
7635 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7636
7637 return tg_set_cfs_bandwidth(tg, period, quota);
7638}
7639
7640long tg_get_cfs_quota(struct task_group *tg)
7641{
7642 u64 quota_us;
7643
029632fb 7644 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
7645 return -1;
7646
029632fb 7647 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
7648 do_div(quota_us, NSEC_PER_USEC);
7649
7650 return quota_us;
7651}
7652
7653int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7654{
7655 u64 quota, period;
7656
7657 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 7658 quota = tg->cfs_bandwidth.quota;
ab84d31e
PT
7659
7660 if (period <= 0)
7661 return -EINVAL;
7662
7663 return tg_set_cfs_bandwidth(tg, period, quota);
7664}
7665
7666long tg_get_cfs_period(struct task_group *tg)
7667{
7668 u64 cfs_period_us;
7669
029632fb 7670 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7671 do_div(cfs_period_us, NSEC_PER_USEC);
7672
7673 return cfs_period_us;
7674}
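/*
 * Illustrative sketch, not part of the original source: capping a group at
 * roughly half a CPU with the helpers above means giving it 50ms of quota
 * every 100ms period; the cgroup cpu.cfs_quota_us/cpu.cfs_period_us files
 * further down end up in these same helpers (hypothetical helper below).
 */
#if 0
static int example_cap_group_to_half_cpu(struct task_group *tg)
{
	int ret;

	ret = tg_set_cfs_period(tg, 100000);	/* period: 100000us = 100ms */
	if (ret)
		return ret;

	return tg_set_cfs_quota(tg, 50000);	/* quota: 50000us = 50ms */
}
#endif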
7675
7676static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7677{
7678 return tg_get_cfs_quota(cgroup_tg(cgrp));
7679}
7680
7681static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7682 s64 cfs_quota_us)
7683{
7684 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7685}
7686
7687static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7688{
7689 return tg_get_cfs_period(cgroup_tg(cgrp));
7690}
7691
7692static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7693 u64 cfs_period_us)
7694{
7695 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7696}
7697
a790de99
PT
7698struct cfs_schedulable_data {
7699 struct task_group *tg;
7700 u64 period, quota;
7701};
7702
7703/*
7704 * Normalize a group's quota/period pair into a fixed-point quota/period
7705 * ratio so it can be compared across the hierarchy; units are usecs.
7706 */
7707static u64 normalize_cfs_quota(struct task_group *tg,
7708 struct cfs_schedulable_data *d)
7709{
7710 u64 quota, period;
7711
7712 if (tg == d->tg) {
7713 period = d->period;
7714 quota = d->quota;
7715 } else {
7716 period = tg_get_cfs_period(tg);
7717 quota = tg_get_cfs_quota(tg);
7718 }
7719
7720 /* note: these should typically be equivalent */
7721 if (quota == RUNTIME_INF || quota == -1)
7722 return RUNTIME_INF;
7723
7724 return to_ratio(period, quota);
7725}
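/*
 * Worked example, not part of the original source: a group with a 100ms period
 * and 50ms quota (100000us and 50000us after the do_div() in
 * __cfs_schedulable()) normalizes to
 *
 *	to_ratio(100000, 50000) == (50000 << 20) / 100000 == 524288
 *
 * i.e. exactly 0.5 in 20-bit fixed point, which is then compared against the
 * parent's ratio in tg_cfs_schedulable_down().
 */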
7726
7727static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7728{
7729 struct cfs_schedulable_data *d = data;
029632fb 7730 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
7731 s64 quota = 0, parent_quota = -1;
7732
7733 if (!tg->parent) {
7734 quota = RUNTIME_INF;
7735 } else {
029632fb 7736 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
7737
7738 quota = normalize_cfs_quota(tg, d);
7739 parent_quota = parent_b->hierarchal_quota;
7740
7741 /*
7742 * ensure max(child_quota) <= parent_quota, inherit when no
7743 * limit is set
7744 */
7745 if (quota == RUNTIME_INF)
7746 quota = parent_quota;
7747 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7748 return -EINVAL;
7749 }
7750 cfs_b->hierarchal_quota = quota;
7751
7752 return 0;
7753}
7754
7755static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7756{
8277434e 7757 int ret;
a790de99
PT
7758 struct cfs_schedulable_data data = {
7759 .tg = tg,
7760 .period = period,
7761 .quota = quota,
7762 };
7763
7764 if (quota != RUNTIME_INF) {
7765 do_div(data.period, NSEC_PER_USEC);
7766 do_div(data.quota, NSEC_PER_USEC);
7767 }
7768
8277434e
PT
7769 rcu_read_lock();
7770 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7771 rcu_read_unlock();
7772
7773 return ret;
a790de99 7774}
e8da1b18
NR
7775
7776static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7777 struct cgroup_map_cb *cb)
7778{
7779 struct task_group *tg = cgroup_tg(cgrp);
029632fb 7780 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18
NR
7781
7782 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7783 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7784 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7785
7786 return 0;
7787}
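/*
 * Illustrative note, not part of the original source: via the "stat" cftype
 * below, this map surfaces to userspace as the cgroup cpu.stat file, roughly:
 *
 *	nr_periods 1523
 *	nr_throttled 12
 *	throttled_time 98765432
 *
 * (throttled_time is in nanoseconds; the numbers above are made up.)
 */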
ab84d31e 7788#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 7789#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 7790
052f1dc7 7791#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 7792static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 7793 s64 val)
6f505b16 7794{
06ecb27c 7795 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
7796}
7797
06ecb27c 7798static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 7799{
06ecb27c 7800 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 7801}
d0b27fa7
PZ
7802
7803static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7804 u64 rt_period_us)
7805{
7806 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7807}
7808
7809static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7810{
7811 return sched_group_rt_period(cgroup_tg(cgrp));
7812}
6d6bc0ad 7813#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 7814
fe5c7cc2 7815static struct cftype cpu_files[] = {
052f1dc7 7816#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
7817 {
7818 .name = "shares",
f4c753b7
PM
7819 .read_u64 = cpu_shares_read_u64,
7820 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 7821 },
052f1dc7 7822#endif
ab84d31e
PT
7823#ifdef CONFIG_CFS_BANDWIDTH
7824 {
7825 .name = "cfs_quota_us",
7826 .read_s64 = cpu_cfs_quota_read_s64,
7827 .write_s64 = cpu_cfs_quota_write_s64,
7828 },
7829 {
7830 .name = "cfs_period_us",
7831 .read_u64 = cpu_cfs_period_read_u64,
7832 .write_u64 = cpu_cfs_period_write_u64,
7833 },
e8da1b18
NR
7834 {
7835 .name = "stat",
7836 .read_map = cpu_stats_show,
7837 },
ab84d31e 7838#endif
052f1dc7 7839#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7840 {
9f0c1e56 7841 .name = "rt_runtime_us",
06ecb27c
PM
7842 .read_s64 = cpu_rt_runtime_read,
7843 .write_s64 = cpu_rt_runtime_write,
6f505b16 7844 },
d0b27fa7
PZ
7845 {
7846 .name = "rt_period_us",
f4c753b7
PM
7847 .read_u64 = cpu_rt_period_read_uint,
7848 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 7849 },
052f1dc7 7850#endif
68318b8e
SV
7851};
7852
7853static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
7854{
fe5c7cc2 7855 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
68318b8e
SV
7856}
7857
7858struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
7859 .name = "cpu",
7860 .create = cpu_cgroup_create,
7861 .destroy = cpu_cgroup_destroy,
f780bdb7
BB
7862 .can_attach_task = cpu_cgroup_can_attach_task,
7863 .attach_task = cpu_cgroup_attach_task,
068c5cc5 7864 .exit = cpu_cgroup_exit,
38605cae
IM
7865 .populate = cpu_cgroup_populate,
7866 .subsys_id = cpu_cgroup_subsys_id,
68318b8e
SV
7867 .early_init = 1,
7868};
7869
052f1dc7 7870#endif /* CONFIG_CGROUP_SCHED */
d842de87
SV
7871
7872#ifdef CONFIG_CGROUP_CPUACCT
7873
7874/*
7875 * CPU accounting code for task groups.
7876 *
7877 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7878 * (balbir@in.ibm.com).
7879 */
7880
d842de87
SV
7881/* create a new cpu accounting group */
7882static struct cgroup_subsys_state *cpuacct_create(
32cd756a 7883 struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 7884{
54c707e9 7885 struct cpuacct *ca;
d842de87 7886
54c707e9
GC
7887 if (!cgrp->parent)
7888 return &root_cpuacct.css;
7889
7890 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
d842de87 7891 if (!ca)
ef12fefa 7892 goto out;
d842de87
SV
7893
7894 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
7895 if (!ca->cpuusage)
7896 goto out_free_ca;
7897
54c707e9
GC
7898 ca->cpustat = alloc_percpu(struct kernel_cpustat);
7899 if (!ca->cpustat)
7900 goto out_free_cpuusage;
d842de87
SV
7901
7902 return &ca->css;
ef12fefa 7903
54c707e9 7904out_free_cpuusage:
ef12fefa
BR
7905 free_percpu(ca->cpuusage);
7906out_free_ca:
7907 kfree(ca);
7908out:
7909 return ERR_PTR(-ENOMEM);
d842de87
SV
7910}
7911
7912/* destroy an existing cpu accounting group */
41a2d6cf 7913static void
32cd756a 7914cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 7915{
32cd756a 7916 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87 7917
54c707e9 7918 free_percpu(ca->cpustat);
d842de87
SV
7919 free_percpu(ca->cpuusage);
7920 kfree(ca);
7921}
7922
720f5498
KC
7923static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
7924{
b36128c8 7925 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7926 u64 data;
7927
7928#ifndef CONFIG_64BIT
7929 /*
7930 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
7931 */
05fa785c 7932 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7933 data = *cpuusage;
05fa785c 7934 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7935#else
7936 data = *cpuusage;
7937#endif
7938
7939 return data;
7940}
7941
7942static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
7943{
b36128c8 7944 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7945
7946#ifndef CONFIG_64BIT
7947 /*
7948 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
7949 */
05fa785c 7950 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7951 *cpuusage = val;
05fa785c 7952 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7953#else
7954 *cpuusage = val;
7955#endif
7956}
7957
d842de87 7958/* return total cpu usage (in nanoseconds) of a group */
32cd756a 7959static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 7960{
32cd756a 7961 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
7962 u64 totalcpuusage = 0;
7963 int i;
7964
720f5498
KC
7965 for_each_present_cpu(i)
7966 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
7967
7968 return totalcpuusage;
7969}
7970
0297b803
DG
7971static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
7972 u64 reset)
7973{
7974 struct cpuacct *ca = cgroup_ca(cgrp);
7975 int err = 0;
7976 int i;
7977
7978 if (reset) {
7979 err = -EINVAL;
7980 goto out;
7981 }
7982
720f5498
KC
7983 for_each_present_cpu(i)
7984 cpuacct_cpuusage_write(ca, i, 0);
0297b803 7985
0297b803
DG
7986out:
7987 return err;
7988}
7989
e9515c3c
KC
7990static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
7991 struct seq_file *m)
7992{
7993 struct cpuacct *ca = cgroup_ca(cgroup);
7994 u64 percpu;
7995 int i;
7996
7997 for_each_present_cpu(i) {
7998 percpu = cpuacct_cpuusage_read(ca, i);
7999 seq_printf(m, "%llu ", (unsigned long long) percpu);
8000 }
8001 seq_printf(m, "\n");
8002 return 0;
8003}
8004
ef12fefa
BR
8005static const char *cpuacct_stat_desc[] = {
8006 [CPUACCT_STAT_USER] = "user",
8007 [CPUACCT_STAT_SYSTEM] = "system",
8008};
8009
8010static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
54c707e9 8011 struct cgroup_map_cb *cb)
ef12fefa
BR
8012{
8013 struct cpuacct *ca = cgroup_ca(cgrp);
54c707e9
GC
8014 int cpu;
8015 s64 val = 0;
ef12fefa 8016
54c707e9
GC
8017 for_each_online_cpu(cpu) {
8018 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8019 val += kcpustat->cpustat[CPUTIME_USER];
8020 val += kcpustat->cpustat[CPUTIME_NICE];
ef12fefa 8021 }
54c707e9
GC
8022 val = cputime64_to_clock_t(val);
8023 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
8024
8025 val = 0;
8026 for_each_online_cpu(cpu) {
8027 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8028 val += kcpustat->cpustat[CPUTIME_SYSTEM];
8029 val += kcpustat->cpustat[CPUTIME_IRQ];
8030 val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
8031 }
8032
8033 val = cputime64_to_clock_t(val);
8034 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
8035
ef12fefa
BR
8036 return 0;
8037}
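/*
 * Illustrative note, not part of the original source: this backs the cgroup
 * cpuacct.stat file, which reads roughly as:
 *
 *	user 4321
 *	system 1234
 *
 * with both values converted by cputime64_to_clock_t() into USER_HZ ticks
 * (the numbers above are made up).
 */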
8038
d842de87
SV
8039static struct cftype files[] = {
8040 {
8041 .name = "usage",
f4c753b7
PM
8042 .read_u64 = cpuusage_read,
8043 .write_u64 = cpuusage_write,
d842de87 8044 },
e9515c3c
KC
8045 {
8046 .name = "usage_percpu",
8047 .read_seq_string = cpuacct_percpu_seq_read,
8048 },
ef12fefa
BR
8049 {
8050 .name = "stat",
8051 .read_map = cpuacct_stats_show,
8052 },
d842de87
SV
8053};
8054
32cd756a 8055static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 8056{
32cd756a 8057 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
d842de87
SV
8058}
8059
8060/*
8061 * charge this task's execution time to its accounting group.
8062 *
8063 * called with rq->lock held.
8064 */
029632fb 8065void cpuacct_charge(struct task_struct *tsk, u64 cputime)
d842de87
SV
8066{
8067 struct cpuacct *ca;
934352f2 8068 int cpu;
d842de87 8069
c40c6f85 8070 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
8071 return;
8072
934352f2 8073 cpu = task_cpu(tsk);
a18b83b7
BR
8074
8075 rcu_read_lock();
8076
d842de87 8077 ca = task_ca(tsk);
d842de87 8078
44252e42 8079 for (; ca; ca = parent_ca(ca)) {
b36128c8 8080 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
8081 *cpuusage += cputime;
8082 }
a18b83b7
BR
8083
8084 rcu_read_unlock();
d842de87
SV
8085}
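/*
 * Illustrative sketch, not part of the original source: the scheduler classes
 * call cpuacct_charge() from their runtime-accounting paths (cf. update_curr()
 * in sched/fair.c), roughly like this, with the delta in nanoseconds and
 * rq->lock already held (hypothetical helper):
 */
#if 0
static void example_account_exec_delta(struct task_struct *curr, u64 delta_ns)
{
	cpuacct_charge(curr, delta_ns);	/* walks ca and all its ancestors */
}
#endif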
8086
8087struct cgroup_subsys cpuacct_subsys = {
8088 .name = "cpuacct",
8089 .create = cpuacct_create,
8090 .destroy = cpuacct_destroy,
8091 .populate = cpuacct_populate,
8092 .subsys_id = cpuacct_subsys_id,
8093};
8094#endif /* CONFIG_CGROUP_CPUACCT */