/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
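
/*
 * Usage sketch (illustrative, assuming CONFIG_SCHED_DEBUG is set and debugfs
 * is mounted at its usual /sys/kernel/debug location): the feature bits can
 * be inspected and toggled from userspace, e.g.
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
 *
 * Writing "NO_<feature>" clears a bit and "<feature>" sets it, exactly as
 * parsed by sched_feat_write() above.
 */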

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
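
/*
 * Together, sysctl_sched_rt_period and sysctl_sched_rt_runtime implement RT
 * throttling: by default realtime tasks may consume at most 0.95s of CPU out
 * of every 1s period. Both knobs are exported through /proc/sys/kernel/ as
 * sched_rt_period_us and sched_rt_runtime_us; writing -1 to the latter
 * disables the throttling.
 */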


/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */
8f4d37ec 372
8f4d37ec
PZ
373static void hrtick_clear(struct rq *rq)
374{
375 if (hrtimer_active(&rq->hrtick_timer))
376 hrtimer_cancel(&rq->hrtick_timer);
377}
378
8f4d37ec
PZ
379/*
380 * High-resolution timer tick.
381 * Runs from hardirq context with interrupts disabled.
382 */
383static enum hrtimer_restart hrtick(struct hrtimer *timer)
384{
385 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
386
387 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
388
05fa785c 389 raw_spin_lock(&rq->lock);
3e51f33f 390 update_rq_clock(rq);
8f4d37ec 391 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
05fa785c 392 raw_spin_unlock(&rq->lock);
8f4d37ec
PZ
393
394 return HRTIMER_NORESTART;
395}
396
95e904c7 397#ifdef CONFIG_SMP
31656519
PZ
398/*
399 * called from hardirq (IPI) context
400 */
401static void __hrtick_start(void *arg)
b328ca18 402{
31656519 403 struct rq *rq = arg;
b328ca18 404
05fa785c 405 raw_spin_lock(&rq->lock);
31656519
PZ
406 hrtimer_restart(&rq->hrtick_timer);
407 rq->hrtick_csd_pending = 0;
05fa785c 408 raw_spin_unlock(&rq->lock);
b328ca18
PZ
409}
410
31656519
PZ
411/*
412 * Called to set the hrtick timer state.
413 *
414 * called with rq->lock held and irqs disabled
415 */
029632fb 416void hrtick_start(struct rq *rq, u64 delay)
b328ca18 417{
31656519
PZ
418 struct hrtimer *timer = &rq->hrtick_timer;
419 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
b328ca18 420
cc584b21 421 hrtimer_set_expires(timer, time);
31656519
PZ
422
423 if (rq == this_rq()) {
424 hrtimer_restart(timer);
425 } else if (!rq->hrtick_csd_pending) {
6e275637 426 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
31656519
PZ
427 rq->hrtick_csd_pending = 1;
428 }
b328ca18
PZ
429}
430
431static int
432hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
433{
434 int cpu = (int)(long)hcpu;
435
436 switch (action) {
437 case CPU_UP_CANCELED:
438 case CPU_UP_CANCELED_FROZEN:
439 case CPU_DOWN_PREPARE:
440 case CPU_DOWN_PREPARE_FROZEN:
441 case CPU_DEAD:
442 case CPU_DEAD_FROZEN:
31656519 443 hrtick_clear(cpu_rq(cpu));
b328ca18
PZ
444 return NOTIFY_OK;
445 }
446
447 return NOTIFY_DONE;
448}
449
fa748203 450static __init void init_hrtick(void)
b328ca18
PZ
451{
452 hotcpu_notifier(hotplug_hrtick, 0);
453}
31656519
PZ
454#else
455/*
456 * Called to set the hrtick timer state.
457 *
458 * called with rq->lock held and irqs disabled
459 */
029632fb 460void hrtick_start(struct rq *rq, u64 delay)
31656519 461{
7f1e2ca9 462 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
5c333864 463 HRTIMER_MODE_REL_PINNED, 0);
31656519 464}
b328ca18 465
006c75f1 466static inline void init_hrtick(void)
8f4d37ec 467{
8f4d37ec 468}
31656519 469#endif /* CONFIG_SMP */
8f4d37ec 470
31656519 471static void init_rq_hrtick(struct rq *rq)
8f4d37ec 472{
31656519
PZ
473#ifdef CONFIG_SMP
474 rq->hrtick_csd_pending = 0;
8f4d37ec 475
31656519
PZ
476 rq->hrtick_csd.flags = 0;
477 rq->hrtick_csd.func = __hrtick_start;
478 rq->hrtick_csd.info = rq;
479#endif
8f4d37ec 480
31656519
PZ
481 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
482 rq->hrtick_timer.function = hrtick;
8f4d37ec 483}
006c75f1 484#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
485static inline void hrtick_clear(struct rq *rq)
486{
487}
488
8f4d37ec
PZ
489static inline void init_rq_hrtick(struct rq *rq)
490{
491}
492
b328ca18
PZ
493static inline void init_hrtick(void)
494{
495}
006c75f1 496#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 497
c24d20db
IM
498/*
499 * resched_task - mark a task 'to be rescheduled now'.
500 *
501 * On UP this means the setting of the need_resched flag, on SMP it
502 * might also involve a cross-CPU call to trigger the scheduler on
503 * the target CPU.
504 */
505#ifdef CONFIG_SMP
506
507#ifndef tsk_is_polling
16a80163 508#define tsk_is_polling(t) 0
c24d20db
IM
509#endif
510
029632fb 511void resched_task(struct task_struct *p)
c24d20db
IM
512{
513 int cpu;
514
05fa785c 515 assert_raw_spin_locked(&task_rq(p)->lock);
c24d20db 516
5ed0cec0 517 if (test_tsk_need_resched(p))
c24d20db
IM
518 return;
519
5ed0cec0 520 set_tsk_need_resched(p);
c24d20db
IM
521
522 cpu = task_cpu(p);
523 if (cpu == smp_processor_id())
524 return;
525
526 /* NEED_RESCHED must be visible before we test polling */
527 smp_mb();
528 if (!tsk_is_polling(p))
529 smp_send_reschedule(cpu);
530}
531
029632fb 532void resched_cpu(int cpu)
c24d20db
IM
533{
534 struct rq *rq = cpu_rq(cpu);
535 unsigned long flags;
536
05fa785c 537 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
c24d20db
IM
538 return;
539 resched_task(cpu_curr(cpu));
05fa785c 540 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 541}
06d8308c
TG
542
543#ifdef CONFIG_NO_HZ
83cd4fe2
VP
544/*
545 * In the semi idle case, use the nearest busy cpu for migrating timers
546 * from an idle cpu. This is good for power-savings.
547 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date with respect to jiffies etc).
551 */
552int get_nohz_timer_target(void)
553{
554 int cpu = smp_processor_id();
555 int i;
556 struct sched_domain *sd;
557
057f3fad 558 rcu_read_lock();
83cd4fe2 559 for_each_domain(cpu, sd) {
057f3fad
PZ
560 for_each_cpu(i, sched_domain_span(sd)) {
561 if (!idle_cpu(i)) {
562 cpu = i;
563 goto unlock;
564 }
565 }
83cd4fe2 566 }
057f3fad
PZ
567unlock:
568 rcu_read_unlock();
83cd4fe2
VP
569 return cpu;
570}
06d8308c
TG
571/*
572 * When add_timer_on() enqueues a timer into the timer wheel of an
573 * idle CPU then this timer might expire before the next timer event
574 * which is scheduled to wake up that CPU. In case of a completely
575 * idle system the next event might even be infinite time into the
576 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
577 * leaves the inner idle loop so the newly added timer is taken into
578 * account when the CPU goes back to idle and evaluates the timer
579 * wheel for the next timer event.
580 */
581void wake_up_idle_cpu(int cpu)
582{
583 struct rq *rq = cpu_rq(cpu);
584
585 if (cpu == smp_processor_id())
586 return;
587
588 /*
589 * This is safe, as this function is called with the timer
590 * wheel base lock of (cpu) held. When the CPU is on the way
591 * to idle and has not yet set rq->curr to idle then it will
592 * be serialized on the timer wheel base lock and take the new
593 * timer into account automatically.
594 */
595 if (rq->curr != rq->idle)
596 return;
45bf76df 597
45bf76df 598 /*
06d8308c
TG
599 * We can set TIF_RESCHED on the idle task of the other CPU
600 * lockless. The worst case is that the other CPU runs the
601 * idle task through an additional NOOP schedule()
45bf76df 602 */
5ed0cec0 603 set_tsk_need_resched(rq->idle);
45bf76df 604
06d8308c
TG
605 /* NEED_RESCHED must be visible before we test polling */
606 smp_mb();
607 if (!tsk_is_polling(rq->idle))
608 smp_send_reschedule(cpu);
45bf76df
IM
609}
610
ca38062e 611static inline bool got_nohz_idle_kick(void)
45bf76df 612{
1c792db7
SS
613 int cpu = smp_processor_id();
614 return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
45bf76df
IM
615}
616
ca38062e 617#else /* CONFIG_NO_HZ */
45bf76df 618
ca38062e 619static inline bool got_nohz_idle_kick(void)
2069dd75 620{
ca38062e 621 return false;
2069dd75
PZ
622}
623
6d6bc0ad 624#endif /* CONFIG_NO_HZ */
d842de87 625
029632fb 626void sched_avg_update(struct rq *rq)
18d95a28 627{
e9e9250b
PZ
628 s64 period = sched_avg_period();
629
630 while ((s64)(rq->clock - rq->age_stamp) > period) {
0d98bb26
WD
631 /*
632 * Inline assembly required to prevent the compiler
633 * optimising this loop into a divmod call.
634 * See __iter_div_u64_rem() for another example of this.
635 */
636 asm("" : "+rm" (rq->age_stamp));
e9e9250b
PZ
637 rq->age_stamp += period;
638 rq->rt_avg /= 2;
639 }
18d95a28
PZ
640}
641
6d6bc0ad 642#else /* !CONFIG_SMP */
029632fb 643void resched_task(struct task_struct *p)
18d95a28 644{
05fa785c 645 assert_raw_spin_locked(&task_rq(p)->lock);
31656519 646 set_tsk_need_resched(p);
18d95a28 647}
6d6bc0ad 648#endif /* CONFIG_SMP */
18d95a28 649
a790de99
PT
650#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
651 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 652/*
8277434e
PT
653 * Iterate task_group tree rooted at *from, calling @down when first entering a
654 * node and @up when leaving it for the final time.
655 *
656 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 657 */
029632fb 658int walk_tg_tree_from(struct task_group *from,
8277434e 659 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
660{
661 struct task_group *parent, *child;
eb755805 662 int ret;
c09595f6 663
8277434e
PT
664 parent = from;
665
c09595f6 666down:
eb755805
PZ
667 ret = (*down)(parent, data);
668 if (ret)
8277434e 669 goto out;
c09595f6
PZ
670 list_for_each_entry_rcu(child, &parent->children, siblings) {
671 parent = child;
672 goto down;
673
674up:
675 continue;
676 }
eb755805 677 ret = (*up)(parent, data);
8277434e
PT
678 if (ret || parent == from)
679 goto out;
c09595f6
PZ
680
681 child = parent;
682 parent = parent->parent;
683 if (parent)
684 goto up;
out:
	return ret;
}
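
/*
 * Typical usage (illustrative): walk every group below the root with only a
 * "down" callback, under RCU; "my_down_fn" is a placeholder name and tg_nop()
 * below is the stock no-op visitor:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_down_fn, tg_nop, data);
 *	rcu_read_unlock();
 */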

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}
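
/*
 * prio_to_weight[] maps each nice level to a load weight (nice 0 == 1024),
 * with neighbouring levels roughly 25% apart so that one nice step is worth
 * about 10% of CPU; prio_to_wmult[] holds the matching precomputed
 * 2^32/weight inverses used by the fair-class arithmetic.
 */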
712
371fd7e7 713static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 714{
a64692a3 715 update_rq_clock(rq);
dd41f596 716 sched_info_queued(p);
371fd7e7 717 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
718}
719
371fd7e7 720static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 721{
a64692a3 722 update_rq_clock(rq);
46ac22ba 723 sched_info_dequeued(p);
371fd7e7 724 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
725}
726
029632fb 727void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
728{
729 if (task_contributes_to_load(p))
730 rq->nr_uninterruptible--;
731
371fd7e7 732 enqueue_task(rq, p, flags);
1e3c88bd
PZ
733}
734
029632fb 735void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
736{
737 if (task_contributes_to_load(p))
738 rq->nr_uninterruptible++;
739
371fd7e7 740 dequeue_task(rq, p, flags);
1e3c88bd
PZ
741}
742
fe44d621 743static void update_rq_clock_task(struct rq *rq, s64 delta)
aa483808 744{
095c0aa8
GC
745/*
 * In theory, the compiler should just see 0 here, and optimize out the call
747 * to sched_rt_avg_update. But I don't trust it...
748 */
749#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
750 s64 steal = 0, irq_delta = 0;
751#endif
752#ifdef CONFIG_IRQ_TIME_ACCOUNTING
8e92c201 753 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
fe44d621
PZ
754
755 /*
756 * Since irq_time is only updated on {soft,}irq_exit, we might run into
757 * this case when a previous update_rq_clock() happened inside a
758 * {soft,}irq region.
759 *
760 * When this happens, we stop ->clock_task and only update the
761 * prev_irq_time stamp to account for the part that fit, so that a next
762 * update will consume the rest. This ensures ->clock_task is
763 * monotonic.
764 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
767 * the current rq->clock timestamp, except that would require using
768 * atomic ops.
769 */
770 if (irq_delta > delta)
771 irq_delta = delta;
772
773 rq->prev_irq_time += irq_delta;
774 delta -= irq_delta;
095c0aa8
GC
775#endif
776#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
c5905afb 777 if (static_key_false((&paravirt_steal_rq_enabled))) {
095c0aa8
GC
778 u64 st;
779
780 steal = paravirt_steal_clock(cpu_of(rq));
781 steal -= rq->prev_steal_time_rq;
782
783 if (unlikely(steal > delta))
784 steal = delta;
785
786 st = steal_ticks(steal);
787 steal = st * TICK_NSEC;
788
789 rq->prev_steal_time_rq += steal;
790
791 delta -= steal;
792 }
793#endif
794
fe44d621
PZ
795 rq->clock_task += delta;
796
095c0aa8
GC
797#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
798 if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
799 sched_rt_avg_update(rq, irq_delta + steal);
800#endif
aa483808
VP
801}
802
34f971f6
PZ
803void sched_set_stop_task(int cpu, struct task_struct *stop)
804{
805 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
806 struct task_struct *old_stop = cpu_rq(cpu)->stop;
807
808 if (stop) {
809 /*
		 * Make it appear like a SCHED_FIFO task; it's something
		 * userspace knows about and won't get confused by.
812 *
813 * Also, it will make PI more or less work without too
814 * much confusion -- but then, stop work should not
815 * rely on PI working anyway.
816 */
817 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
818
819 stop->sched_class = &stop_sched_class;
820 }
821
822 cpu_rq(cpu)->stop = stop;
823
824 if (old_stop) {
825 /*
826 * Reset it back to a normal scheduling class so that
827 * it can die in pieces.
828 */
829 old_stop->sched_class = &rt_sched_class;
830 }
831}
832
14531189 833/*
dd41f596 834 * __normal_prio - return the priority that is based on the static prio
14531189 835 */
14531189
IM
836static inline int __normal_prio(struct task_struct *p)
837{
dd41f596 838 return p->static_prio;
14531189
IM
839}
840
b29739f9
IM
841/*
842 * Calculate the expected normal priority: i.e. priority
843 * without taking RT-inheritance into account. Might be
844 * boosted by interactivity modifiers. Changes upon fork,
845 * setprio syscalls, and whenever the interactivity
846 * estimator recalculates.
847 */
36c8b586 848static inline int normal_prio(struct task_struct *p)
b29739f9
IM
849{
850 int prio;
851
e05606d3 852 if (task_has_rt_policy(p))
b29739f9
IM
853 prio = MAX_RT_PRIO-1 - p->rt_priority;
854 else
855 prio = __normal_prio(p);
856 return prio;
857}
858
859/*
860 * Calculate the current priority, i.e. the priority
861 * taken into account by the scheduler. This value might
862 * be boosted by RT tasks, or might be boosted by
863 * interactivity modifiers. Will be RT if the task got
864 * RT-boosted. If not then it returns p->normal_prio.
865 */
36c8b586 866static int effective_prio(struct task_struct *p)
b29739f9
IM
867{
868 p->normal_prio = normal_prio(p);
869 /*
870 * If we are RT tasks or we were boosted to RT priority,
871 * keep the priority unchanged. Otherwise, update priority
872 * to the normal priority:
873 */
874 if (!rt_prio(p->prio))
875 return p->normal_prio;
876 return p->prio;
877}
878
1da177e4
LT
879/**
880 * task_curr - is this task currently executing on a CPU?
881 * @p: the task in question.
882 */
36c8b586 883inline int task_curr(const struct task_struct *p)
1da177e4
LT
884{
885 return cpu_curr(task_cpu(p)) == p;
886}
887
cb469845
SR
888static inline void check_class_changed(struct rq *rq, struct task_struct *p,
889 const struct sched_class *prev_class,
da7a735e 890 int oldprio)
cb469845
SR
891{
892 if (prev_class != p->sched_class) {
893 if (prev_class->switched_from)
da7a735e
PZ
894 prev_class->switched_from(rq, p);
895 p->sched_class->switched_to(rq, p);
896 } else if (oldprio != p->prio)
897 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
898}
899
029632fb 900void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405
PZ
901{
902 const struct sched_class *class;
903
904 if (p->sched_class == rq->curr->sched_class) {
905 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
906 } else {
907 for_each_class(class) {
908 if (class == rq->curr->sched_class)
909 break;
910 if (class == p->sched_class) {
911 resched_task(rq->curr);
912 break;
913 }
914 }
915 }
916
917 /*
918 * A queue event has occurred, and we're going to schedule. In
919 * this case, we can save a useless back to back clock update.
920 */
fd2f4419 921 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1e5a7405
PZ
922 rq->skip_clock_update = 1;
923}
924
1da177e4 925#ifdef CONFIG_SMP
dd41f596 926void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 927{
e2912009
PZ
928#ifdef CONFIG_SCHED_DEBUG
929 /*
930 * We should never call set_task_cpu() on a blocked task,
931 * ttwu() will sort out the placement.
932 */
077614ee
PZ
933 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
934 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
0122ec5b
PZ
935
936#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
937 /*
938 * The caller should hold either p->pi_lock or rq->lock, when changing
939 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
940 *
941 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 942 * see task_group().
6c6c54e1
PZ
943 *
944 * Furthermore, all task_rq users should acquire both locks, see
945 * task_rq_lock().
946 */
0122ec5b
PZ
947 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
948 lockdep_is_held(&task_rq(p)->lock)));
949#endif
e2912009
PZ
950#endif
951
de1d7286 952 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 953
0c69774e
PZ
954 if (task_cpu(p) != new_cpu) {
955 p->se.nr_migrations++;
a8b0ca17 956 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
0c69774e 957 }
dd41f596
IM
958
959 __set_task_cpu(p, new_cpu);
c65cc870
IM
960}
961
969c7921 962struct migration_arg {
36c8b586 963 struct task_struct *task;
1da177e4 964 int dest_cpu;
70b97a7f 965};
1da177e4 966
969c7921
TH
967static int migration_cpu_stop(void *data);
968
1da177e4
LT
969/*
970 * wait_task_inactive - wait for a thread to unschedule.
971 *
85ba2d86
RM
972 * If @match_state is nonzero, it's the @p->state value just checked and
973 * not expected to change. If it changes, i.e. @p might have woken up,
974 * then return zero. When we succeed in waiting for @p to be off its CPU,
975 * we return a positive number (its total switch count). If a second call
976 * a short while later returns the same number, the caller can be sure that
977 * @p has remained unscheduled the whole time.
978 *
1da177e4
LT
979 * The caller must ensure that the task *will* unschedule sometime soon,
980 * else this function might spin for a *long* time. This function can't
981 * be called with interrupts off, or it may introduce deadlock with
982 * smp_call_function() if an IPI is sent by the same process we are
983 * waiting to become inactive.
984 */
85ba2d86 985unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
986{
987 unsigned long flags;
dd41f596 988 int running, on_rq;
85ba2d86 989 unsigned long ncsw;
70b97a7f 990 struct rq *rq;
1da177e4 991
3a5c359a
AK
992 for (;;) {
993 /*
994 * We do the initial early heuristics without holding
995 * any task-queue locks at all. We'll only try to get
996 * the runqueue lock when things look like they will
997 * work out!
998 */
999 rq = task_rq(p);
fa490cfd 1000
3a5c359a
AK
1001 /*
1002 * If the task is actively running on another CPU
1003 * still, just relax and busy-wait without holding
1004 * any locks.
1005 *
1006 * NOTE! Since we don't hold any locks, it's not
1007 * even sure that "rq" stays as the right runqueue!
1008 * But we don't care, since "task_running()" will
1009 * return false if the runqueue has changed and p
1010 * is actually now running somewhere else!
1011 */
85ba2d86
RM
1012 while (task_running(rq, p)) {
1013 if (match_state && unlikely(p->state != match_state))
1014 return 0;
3a5c359a 1015 cpu_relax();
85ba2d86 1016 }
fa490cfd 1017
3a5c359a
AK
1018 /*
1019 * Ok, time to look more closely! We need the rq
1020 * lock now, to be *sure*. If we're wrong, we'll
1021 * just go back and repeat.
1022 */
1023 rq = task_rq_lock(p, &flags);
27a9da65 1024 trace_sched_wait_task(p);
3a5c359a 1025 running = task_running(rq, p);
fd2f4419 1026 on_rq = p->on_rq;
85ba2d86 1027 ncsw = 0;
f31e11d8 1028 if (!match_state || p->state == match_state)
93dcf55f 1029 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
0122ec5b 1030 task_rq_unlock(rq, p, &flags);
fa490cfd 1031
85ba2d86
RM
1032 /*
1033 * If it changed from the expected state, bail out now.
1034 */
1035 if (unlikely(!ncsw))
1036 break;
1037
3a5c359a
AK
1038 /*
1039 * Was it really running after all now that we
1040 * checked with the proper locks actually held?
1041 *
1042 * Oops. Go back and try again..
1043 */
1044 if (unlikely(running)) {
1045 cpu_relax();
1046 continue;
1047 }
fa490cfd 1048
3a5c359a
AK
1049 /*
1050 * It's not enough that it's not actively running,
1051 * it must be off the runqueue _entirely_, and not
1052 * preempted!
1053 *
80dd99b3 1054 * So if it was still runnable (but just not actively
3a5c359a
AK
1055 * running right now), it's preempted, and we should
1056 * yield - it could be a while.
1057 */
1058 if (unlikely(on_rq)) {
8eb90c30
TG
1059 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1060
1061 set_current_state(TASK_UNINTERRUPTIBLE);
1062 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
1063 continue;
1064 }
fa490cfd 1065
3a5c359a
AK
1066 /*
1067 * Ahh, all good. It wasn't running, and it wasn't
1068 * runnable, which means that it will never become
1069 * running in the future either. We're all done!
1070 */
1071 break;
1072 }
85ba2d86
RM
1073
1074 return ncsw;
1da177e4
LT
1075}
1076
1077/***
1078 * kick_process - kick a running thread to enter/exit the kernel
1079 * @p: the to-be-kicked thread
1080 *
1081 * Cause a process which is running on another CPU to enter
1082 * kernel-mode, without any delay. (to get signals handled.)
1083 *
25985edc 1084 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
1085 * because all it wants to ensure is that the remote task enters
1086 * the kernel. If the IPI races and the task has been migrated
1087 * to another CPU then no harm is done and the purpose has been
1088 * achieved as well.
1089 */
36c8b586 1090void kick_process(struct task_struct *p)
1da177e4
LT
1091{
1092 int cpu;
1093
1094 preempt_disable();
1095 cpu = task_cpu(p);
1096 if ((cpu != smp_processor_id()) && task_curr(p))
1097 smp_send_reschedule(cpu);
1098 preempt_enable();
1099}
b43e3521 1100EXPORT_SYMBOL_GPL(kick_process);
476d139c 1101#endif /* CONFIG_SMP */
1da177e4 1102
970b13ba 1103#ifdef CONFIG_SMP
30da688e 1104/*
013fdb80 1105 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
30da688e 1106 */
5da9a0fb
PZ
1107static int select_fallback_rq(int cpu, struct task_struct *p)
1108{
5da9a0fb 1109 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2baab4e9
PZ
1110 enum { cpuset, possible, fail } state = cpuset;
1111 int dest_cpu;
5da9a0fb
PZ
1112
1113 /* Look for allowed, online CPU in same node. */
e3831edd 1114 for_each_cpu(dest_cpu, nodemask) {
2baab4e9
PZ
1115 if (!cpu_online(dest_cpu))
1116 continue;
1117 if (!cpu_active(dest_cpu))
1118 continue;
fa17b507 1119 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
5da9a0fb 1120 return dest_cpu;
2baab4e9 1121 }
5da9a0fb 1122
2baab4e9
PZ
1123 for (;;) {
1124 /* Any allowed, online CPU? */
e3831edd 1125 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
2baab4e9
PZ
1126 if (!cpu_online(dest_cpu))
1127 continue;
1128 if (!cpu_active(dest_cpu))
1129 continue;
1130 goto out;
1131 }
5da9a0fb 1132
2baab4e9
PZ
1133 switch (state) {
1134 case cpuset:
1135 /* No more Mr. Nice Guy. */
1136 cpuset_cpus_allowed_fallback(p);
1137 state = possible;
1138 break;
1139
1140 case possible:
1141 do_set_cpus_allowed(p, cpu_possible_mask);
1142 state = fail;
1143 break;
1144
1145 case fail:
1146 BUG();
1147 break;
1148 }
1149 }
1150
1151out:
1152 if (state != cpuset) {
1153 /*
1154 * Don't tell them about moving exiting tasks or
1155 * kernel threads (both mm NULL), since they never
		 * leave the kernel.
1157 */
1158 if (p->mm && printk_ratelimit()) {
1159 printk_sched("process %d (%s) no longer affine to cpu%d\n",
1160 task_pid_nr(p), p->comm, cpu);
1161 }
5da9a0fb
PZ
1162 }
1163
1164 return dest_cpu;
1165}
1166
e2912009 1167/*
013fdb80 1168 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
e2912009 1169 */
970b13ba 1170static inline
7608dec2 1171int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
970b13ba 1172{
7608dec2 1173 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
e2912009
PZ
1174
1175 /*
1176 * In order not to call set_task_cpu() on a blocking task we need
1177 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1178 * cpu.
1179 *
1180 * Since this is common to all placement strategies, this lives here.
1181 *
1182 * [ this allows ->select_task() to simply return task_cpu(p) and
1183 * not worry about this generic constraint ]
1184 */
fa17b507 1185 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
70f11205 1186 !cpu_online(cpu)))
5da9a0fb 1187 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
1188
1189 return cpu;
970b13ba 1190}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
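
/*
 * update_avg() is an exponentially weighted moving average: each new sample
 * contributes 1/8 of its difference from the running value. It is used by
 * ttwu_do_wakeup() below to smooth rq->avg_idle.
 */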
970b13ba
PZ
1197#endif
1198
d7c01d27 1199static void
b84cb5df 1200ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 1201{
d7c01d27 1202#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
1203 struct rq *rq = this_rq();
1204
d7c01d27
PZ
1205#ifdef CONFIG_SMP
1206 int this_cpu = smp_processor_id();
1207
1208 if (cpu == this_cpu) {
1209 schedstat_inc(rq, ttwu_local);
1210 schedstat_inc(p, se.statistics.nr_wakeups_local);
1211 } else {
1212 struct sched_domain *sd;
1213
1214 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 1215 rcu_read_lock();
d7c01d27
PZ
1216 for_each_domain(this_cpu, sd) {
1217 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1218 schedstat_inc(sd, ttwu_wake_remote);
1219 break;
1220 }
1221 }
057f3fad 1222 rcu_read_unlock();
d7c01d27 1223 }
f339b9dc
PZ
1224
1225 if (wake_flags & WF_MIGRATED)
1226 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1227
d7c01d27
PZ
1228#endif /* CONFIG_SMP */
1229
1230 schedstat_inc(rq, ttwu_count);
9ed3811a 1231 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
1232
1233 if (wake_flags & WF_SYNC)
9ed3811a 1234 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27 1235
d7c01d27
PZ
1236#endif /* CONFIG_SCHEDSTATS */
1237}
1238
1239static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1240{
9ed3811a 1241 activate_task(rq, p, en_flags);
fd2f4419 1242 p->on_rq = 1;
c2f7115e
PZ
1243
1244 /* if a worker is waking up, notify workqueue */
1245 if (p->flags & PF_WQ_WORKER)
1246 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
1247}
1248
23f41eeb
PZ
1249/*
1250 * Mark the task runnable and perform wakeup-preemption.
1251 */
89363381 1252static void
23f41eeb 1253ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
9ed3811a 1254{
89363381 1255 trace_sched_wakeup(p, true);
9ed3811a
TH
1256 check_preempt_curr(rq, p, wake_flags);
1257
1258 p->state = TASK_RUNNING;
1259#ifdef CONFIG_SMP
1260 if (p->sched_class->task_woken)
1261 p->sched_class->task_woken(rq, p);
1262
e69c6341 1263 if (rq->idle_stamp) {
9ed3811a
TH
1264 u64 delta = rq->clock - rq->idle_stamp;
1265 u64 max = 2*sysctl_sched_migration_cost;
1266
1267 if (delta > max)
1268 rq->avg_idle = max;
1269 else
1270 update_avg(&rq->avg_idle, delta);
1271 rq->idle_stamp = 0;
1272 }
1273#endif
1274}
1275
c05fbafb
PZ
1276static void
1277ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1278{
1279#ifdef CONFIG_SMP
1280 if (p->sched_contributes_to_load)
1281 rq->nr_uninterruptible--;
1282#endif
1283
1284 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1285 ttwu_do_wakeup(rq, p, wake_flags);
1286}
1287
1288/*
1289 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1291 * since all we need to do is flip p->state to TASK_RUNNING, since
1292 * the task is still ->on_rq.
1293 */
1294static int ttwu_remote(struct task_struct *p, int wake_flags)
1295{
1296 struct rq *rq;
1297 int ret = 0;
1298
1299 rq = __task_rq_lock(p);
1300 if (p->on_rq) {
1301 ttwu_do_wakeup(rq, p, wake_flags);
1302 ret = 1;
1303 }
1304 __task_rq_unlock(rq);
1305
1306 return ret;
1307}
1308
317f3941 1309#ifdef CONFIG_SMP
fa14ff4a 1310static void sched_ttwu_pending(void)
317f3941
PZ
1311{
1312 struct rq *rq = this_rq();
fa14ff4a
PZ
1313 struct llist_node *llist = llist_del_all(&rq->wake_list);
1314 struct task_struct *p;
317f3941
PZ
1315
1316 raw_spin_lock(&rq->lock);
1317
fa14ff4a
PZ
1318 while (llist) {
1319 p = llist_entry(llist, struct task_struct, wake_entry);
1320 llist = llist_next(llist);
317f3941
PZ
1321 ttwu_do_activate(rq, p, 0);
1322 }
1323
1324 raw_spin_unlock(&rq->lock);
1325}
1326
1327void scheduler_ipi(void)
1328{
ca38062e 1329 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
1330 return;
1331
1332 /*
1333 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1334 * traditionally all their work was done from the interrupt return
1335 * path. Now that we actually do some work, we need to make sure
1336 * we do call them.
1337 *
1338 * Some archs already do call them, luckily irq_enter/exit nest
1339 * properly.
1340 *
1341 * Arguably we should visit all archs and update all handlers,
1342 * however a fair share of IPIs are still resched only so this would
1343 * somewhat pessimize the simple resched case.
1344 */
1345 irq_enter();
fa14ff4a 1346 sched_ttwu_pending();
ca38062e
SS
1347
1348 /*
1349 * Check if someone kicked us for doing the nohz idle load balance.
1350 */
6eb57e0d
SS
1351 if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1352 this_rq()->idle_balance = 1;
ca38062e 1353 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 1354 }
c5d753a5 1355 irq_exit();
317f3941
PZ
1356}
1357
1358static void ttwu_queue_remote(struct task_struct *p, int cpu)
1359{
fa14ff4a 1360 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
317f3941
PZ
1361 smp_send_reschedule(cpu);
1362}
d6aa8f85 1363
39be3501 1364bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
1365{
1366 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1367}
d6aa8f85 1368#endif /* CONFIG_SMP */
317f3941 1369
c05fbafb
PZ
1370static void ttwu_queue(struct task_struct *p, int cpu)
1371{
1372 struct rq *rq = cpu_rq(cpu);
1373
17d9f311 1374#if defined(CONFIG_SMP)
39be3501 1375 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
f01114cb 1376 sched_clock_cpu(cpu); /* sync clocks x-cpu */
317f3941
PZ
1377 ttwu_queue_remote(p, cpu);
1378 return;
1379 }
1380#endif
1381
c05fbafb
PZ
1382 raw_spin_lock(&rq->lock);
1383 ttwu_do_activate(rq, p, 0);
1384 raw_spin_unlock(&rq->lock);
9ed3811a
TH
1385}
1386
1387/**
1da177e4 1388 * try_to_wake_up - wake up a thread
9ed3811a 1389 * @p: the thread to be awakened
1da177e4 1390 * @state: the mask of task states that can be woken
9ed3811a 1391 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
1392 *
1393 * Put it on the run-queue if it's not already there. The "current"
1394 * thread is always on the run-queue (except when the actual
1395 * re-schedule is in progress), and as such you're allowed to do
1396 * the simpler "current->state = TASK_RUNNING" to mark yourself
1397 * runnable without the overhead of this.
1398 *
9ed3811a
TH
1399 * Returns %true if @p was woken up, %false if it was already running
1400 * or @state didn't match @p's state.
1da177e4 1401 */
e4a52bcb
PZ
1402static int
1403try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1404{
1da177e4 1405 unsigned long flags;
c05fbafb 1406 int cpu, success = 0;
2398f2c6 1407
04e2f174 1408 smp_wmb();
013fdb80 1409 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 1410 if (!(p->state & state))
1da177e4
LT
1411 goto out;
1412
c05fbafb 1413 success = 1; /* we're going to change ->state */
1da177e4 1414 cpu = task_cpu(p);
1da177e4 1415
c05fbafb
PZ
1416 if (p->on_rq && ttwu_remote(p, wake_flags))
1417 goto stat;
1da177e4 1418
1da177e4 1419#ifdef CONFIG_SMP
e9c84311 1420 /*
c05fbafb
PZ
1421 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
e9c84311 1423 */
f3e94786 1424 while (p->on_cpu)
e4a52bcb 1425 cpu_relax();
0970d299 1426 /*
e4a52bcb 1427 * Pairs with the smp_wmb() in finish_lock_switch().
0970d299 1428 */
e4a52bcb 1429 smp_rmb();
1da177e4 1430
a8e4f2ea 1431 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 1432 p->state = TASK_WAKING;
e7693a36 1433
e4a52bcb 1434 if (p->sched_class->task_waking)
74f8e4b2 1435 p->sched_class->task_waking(p);
efbbd05a 1436
7608dec2 1437 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
1438 if (task_cpu(p) != cpu) {
1439 wake_flags |= WF_MIGRATED;
e4a52bcb 1440 set_task_cpu(p, cpu);
f339b9dc 1441 }
1da177e4 1442#endif /* CONFIG_SMP */
1da177e4 1443
c05fbafb
PZ
1444 ttwu_queue(p, cpu);
1445stat:
b84cb5df 1446 ttwu_stat(p, cpu, wake_flags);
1da177e4 1447out:
013fdb80 1448 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
1449
1450 return success;
1451}
1452
21aa9af0
TH
1453/**
1454 * try_to_wake_up_local - try to wake up a local task with rq lock held
1455 * @p: the thread to be awakened
1456 *
2acca55e 1457 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 1458 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 1459 * the current task.
21aa9af0
TH
1460 */
1461static void try_to_wake_up_local(struct task_struct *p)
1462{
1463 struct rq *rq = task_rq(p);
21aa9af0
TH
1464
1465 BUG_ON(rq != this_rq());
1466 BUG_ON(p == current);
1467 lockdep_assert_held(&rq->lock);
1468
2acca55e
PZ
1469 if (!raw_spin_trylock(&p->pi_lock)) {
1470 raw_spin_unlock(&rq->lock);
1471 raw_spin_lock(&p->pi_lock);
1472 raw_spin_lock(&rq->lock);
1473 }
1474
21aa9af0 1475 if (!(p->state & TASK_NORMAL))
2acca55e 1476 goto out;
21aa9af0 1477
fd2f4419 1478 if (!p->on_rq)
d7c01d27
PZ
1479 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1480
23f41eeb 1481 ttwu_do_wakeup(rq, p, 0);
b84cb5df 1482 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
1483out:
1484 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
1485}
1486
50fa610a
DH
1487/**
1488 * wake_up_process - Wake up a specific process
1489 * @p: The process to be woken up.
1490 *
1491 * Attempt to wake up the nominated process and move it to the set of runnable
1492 * processes. Returns 1 if the process was woken up, 0 if it was already
1493 * running.
1494 *
1495 * It may be assumed that this function implies a write memory barrier before
1496 * changing the task state if and only if any tasks are woken up.
1497 */
7ad5b3a5 1498int wake_up_process(struct task_struct *p)
1da177e4 1499{
d9514f6c 1500 return try_to_wake_up(p, TASK_ALL, 0);
1da177e4 1501}
1da177e4
LT
1502EXPORT_SYMBOL(wake_up_process);
1503
7ad5b3a5 1504int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
1505{
1506 return try_to_wake_up(p, state, 0);
1507}
1508
1da177e4
LT
1509/*
1510 * Perform scheduler related setup for a newly forked process p.
1511 * p is forked by current.
dd41f596
IM
1512 *
1513 * __sched_fork() is basic setup used by init_idle() too:
1514 */
1515static void __sched_fork(struct task_struct *p)
1516{
fd2f4419
PZ
1517 p->on_rq = 0;
1518
1519 p->se.on_rq = 0;
dd41f596
IM
1520 p->se.exec_start = 0;
1521 p->se.sum_exec_runtime = 0;
f6cf891c 1522 p->se.prev_sum_exec_runtime = 0;
6c594c21 1523 p->se.nr_migrations = 0;
da7a735e 1524 p->se.vruntime = 0;
fd2f4419 1525 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d
IM
1526
1527#ifdef CONFIG_SCHEDSTATS
41acab88 1528 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 1529#endif
476d139c 1530
fa717060 1531 INIT_LIST_HEAD(&p->rt.run_list);
476d139c 1532
e107be36
AK
1533#ifdef CONFIG_PREEMPT_NOTIFIERS
1534 INIT_HLIST_HEAD(&p->preempt_notifiers);
1535#endif
cbee9f88
PZ
1536
1537#ifdef CONFIG_NUMA_BALANCING
1538 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
1539 p->mm->numa_next_scan = jiffies;
b8593bfd 1540 p->mm->numa_next_reset = jiffies;
cbee9f88
PZ
1541 p->mm->numa_scan_seq = 0;
1542 }
1543
1544 p->node_stamp = 0ULL;
1545 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
1546 p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
4b96a29b 1547 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
cbee9f88
PZ
1548 p->numa_work.next = &p->numa_work;
1549#endif /* CONFIG_NUMA_BALANCING */
dd41f596
IM
1550}
1551
1552/*
1553 * fork()/clone()-time setup:
1554 */
3e51e3ed 1555void sched_fork(struct task_struct *p)
dd41f596 1556{
0122ec5b 1557 unsigned long flags;
dd41f596
IM
1558 int cpu = get_cpu();
1559
1560 __sched_fork(p);
06b83b5f 1561 /*
0017d735 1562 * We mark the process as running here. This guarantees that
06b83b5f
PZ
1563 * nobody will actually run it, and a signal or other external
1564 * event cannot wake it up and insert it on the runqueue either.
1565 */
0017d735 1566 p->state = TASK_RUNNING;
dd41f596 1567
c350a04e
MG
1568 /*
1569 * Make sure we do not leak PI boosting priority to the child.
1570 */
1571 p->prio = current->normal_prio;
1572
b9dc29e7
MG
1573 /*
1574 * Revert to default priority/policy on fork if requested.
1575 */
1576 if (unlikely(p->sched_reset_on_fork)) {
c350a04e 1577 if (task_has_rt_policy(p)) {
b9dc29e7 1578 p->policy = SCHED_NORMAL;
6c697bdf 1579 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
1580 p->rt_priority = 0;
1581 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1582 p->static_prio = NICE_TO_PRIO(0);
1583
1584 p->prio = p->normal_prio = __normal_prio(p);
1585 set_load_weight(p);
6c697bdf 1586
b9dc29e7
MG
1587 /*
1588 * We don't need the reset flag anymore after the fork. It has
1589 * fulfilled its duty:
1590 */
1591 p->sched_reset_on_fork = 0;
1592 }
ca94c442 1593
2ddbf952
HS
1594 if (!rt_prio(p->prio))
1595 p->sched_class = &fair_sched_class;
b29739f9 1596
cd29fe6f
PZ
1597 if (p->sched_class->task_fork)
1598 p->sched_class->task_fork(p);
1599
86951599
PZ
1600 /*
1601 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
1604 *
1605 * Silence PROVE_RCU.
1606 */
0122ec5b 1607 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 1608 set_task_cpu(p, cpu);
0122ec5b 1609 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 1610
52f17b6c 1611#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 1612 if (likely(sched_info_on()))
52f17b6c 1613 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 1614#endif
3ca7a440
PZ
1615#if defined(CONFIG_SMP)
1616 p->on_cpu = 0;
4866cde0 1617#endif
bdd4e85d 1618#ifdef CONFIG_PREEMPT_COUNT
4866cde0 1619 /* Want to start with kernel preemption disabled. */
a1261f54 1620 task_thread_info(p)->preempt_count = 1;
1da177e4 1621#endif
806c09a7 1622#ifdef CONFIG_SMP
917b627d 1623 plist_node_init(&p->pushable_tasks, MAX_PRIO);
806c09a7 1624#endif
917b627d 1625
476d139c 1626 put_cpu();
1da177e4
LT
1627}
1628
1629/*
1630 * wake_up_new_task - wake up a newly created task for the first time.
1631 *
1632 * This function will do some initial scheduler statistics housekeeping
1633 * that must be done for every newly created context, then puts the task
1634 * on the runqueue and wakes it.
1635 */
3e51e3ed 1636void wake_up_new_task(struct task_struct *p)
1da177e4
LT
1637{
1638 unsigned long flags;
dd41f596 1639 struct rq *rq;
fabf318e 1640
ab2515c4 1641 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
1642#ifdef CONFIG_SMP
1643 /*
1644 * Fork balancing, do it here and not earlier because:
1645 * - cpus_allowed can change in the fork path
1646 * - any previously selected cpu might disappear through hotplug
fabf318e 1647 */
ab2515c4 1648 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
0017d735
PZ
1649#endif
1650
ab2515c4 1651 rq = __task_rq_lock(p);
cd29fe6f 1652 activate_task(rq, p, 0);
fd2f4419 1653 p->on_rq = 1;
89363381 1654 trace_sched_wakeup_new(p, true);
a7558e01 1655 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 1656#ifdef CONFIG_SMP
efbbd05a
PZ
1657 if (p->sched_class->task_woken)
1658 p->sched_class->task_woken(rq, p);
9a897c5a 1659#endif
0122ec5b 1660 task_rq_unlock(rq, p, &flags);
1da177e4
LT
1661}
1662
e107be36
AK
1663#ifdef CONFIG_PREEMPT_NOTIFIERS
1664
1665/**
80dd99b3 1666 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 1667 * @notifier: notifier struct to register
e107be36
AK
1668 */
1669void preempt_notifier_register(struct preempt_notifier *notifier)
1670{
1671 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1672}
1673EXPORT_SYMBOL_GPL(preempt_notifier_register);
1674
1675/**
1676 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 1677 * @notifier: notifier struct to unregister
e107be36
AK
1678 *
1679 * This is safe to call from within a preemption notifier.
1680 */
1681void preempt_notifier_unregister(struct preempt_notifier *notifier)
1682{
1683 hlist_del(&notifier->link);
1684}
1685EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1686
1687static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1688{
1689 struct preempt_notifier *notifier;
1690 struct hlist_node *node;
1691
1692 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1693 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1694}
1695
1696static void
1697fire_sched_out_preempt_notifiers(struct task_struct *curr,
1698 struct task_struct *next)
1699{
1700 struct preempt_notifier *notifier;
1701 struct hlist_node *node;
1702
1703 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1704 notifier->ops->sched_out(notifier, next);
1705}
1706
6d6bc0ad 1707#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
1708
1709static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1710{
1711}
1712
1713static void
1714fire_sched_out_preempt_notifiers(struct task_struct *curr,
1715 struct task_struct *next)
1716{
1717}
1718
6d6bc0ad 1719#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 1720
4866cde0
NP
1721/**
1722 * prepare_task_switch - prepare to switch tasks
1723 * @rq: the runqueue preparing to switch
421cee29 1724 * @prev: the current task that is being switched out
4866cde0
NP
1725 * @next: the task we are going to switch to.
1726 *
1727 * This is called with the rq lock held and interrupts off. It must
1728 * be paired with a subsequent finish_task_switch after the context
1729 * switch.
1730 *
1731 * prepare_task_switch sets up locking and calls architecture specific
1732 * hooks.
1733 */
e107be36
AK
1734static inline void
1735prepare_task_switch(struct rq *rq, struct task_struct *prev,
1736 struct task_struct *next)
4866cde0 1737{
895dd92c 1738 trace_sched_switch(prev, next);
fe4b04fa
PZ
1739 sched_info_switch(prev, next);
1740 perf_event_task_sched_out(prev, next);
e107be36 1741 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
1742 prepare_lock_switch(rq, next);
1743 prepare_arch_switch(next);
1744}
1745
1da177e4
LT
1746/**
1747 * finish_task_switch - clean up after a task-switch
344babaa 1748 * @rq: runqueue associated with task-switch
1da177e4
LT
1749 * @prev: the thread we just switched away from.
1750 *
4866cde0
NP
1751 * finish_task_switch must be called after the context switch, paired
1752 * with a prepare_task_switch call before the context switch.
1753 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1754 * and do any other architecture-specific cleanup actions.
1da177e4
LT
1755 *
1756 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 1757 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
1758 * with the lock held can cause deadlocks; see schedule() for
1759 * details.)
1760 */
a9957449 1761static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
1762 __releases(rq->lock)
1763{
1da177e4 1764 struct mm_struct *mm = rq->prev_mm;
55a101f8 1765 long prev_state;
1da177e4
LT
1766
1767 rq->prev_mm = NULL;
1768
1769 /*
1770 * A task struct has one reference for the use as "current".
c394cc9f 1771 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
1772 * schedule one last time. The schedule call will never return, and
1773 * the scheduled task must drop that reference.
c394cc9f 1774 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
1775 * still held, otherwise prev could be scheduled on another cpu, die
1776 * there before we look at prev->state, and then the reference would
1777 * be dropped twice.
1778 * Manfred Spraul <manfred@colorfullife.com>
1779 */
55a101f8 1780 prev_state = prev->state;
bf9fae9f 1781 vtime_task_switch(prev);
4866cde0 1782 finish_arch_switch(prev);
a8d757ef 1783 perf_event_task_sched_in(prev, current);
4866cde0 1784 finish_lock_switch(rq, prev);
01f23e16 1785 finish_arch_post_lock_switch();
e8fa1362 1786
e107be36 1787 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
1788 if (mm)
1789 mmdrop(mm);
c394cc9f 1790 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 1791 /*
1792 * Remove function-return probe instances associated with this
1793 * task and put them back on the free list.
9761eea8 1794 */
c6fd91f0 1795 kprobe_flush_task(prev);
1da177e4 1796 put_task_struct(prev);
c6fd91f0 1797 }
1da177e4
LT
1798}
1799
3f029d3c
GH
1800#ifdef CONFIG_SMP
1801
1802/* assumes rq->lock is held */
1803static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1804{
1805 if (prev->sched_class->pre_schedule)
1806 prev->sched_class->pre_schedule(rq, prev);
1807}
1808
1809/* rq->lock is NOT held, but preemption is disabled */
1810static inline void post_schedule(struct rq *rq)
1811{
1812 if (rq->post_schedule) {
1813 unsigned long flags;
1814
05fa785c 1815 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
1816 if (rq->curr->sched_class->post_schedule)
1817 rq->curr->sched_class->post_schedule(rq);
05fa785c 1818 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
1819
1820 rq->post_schedule = 0;
1821 }
1822}
1823
1824#else
da19ab51 1825
3f029d3c
GH
1826static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1827{
1828}
1829
1830static inline void post_schedule(struct rq *rq)
1831{
1da177e4
LT
1832}
1833
3f029d3c
GH
1834#endif
1835
1da177e4
LT
1836/**
1837 * schedule_tail - first thing a freshly forked thread must call.
1838 * @prev: the thread we just switched away from.
1839 */
36c8b586 1840asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
1841 __releases(rq->lock)
1842{
70b97a7f
IM
1843 struct rq *rq = this_rq();
1844
4866cde0 1845 finish_task_switch(rq, prev);
da19ab51 1846
3f029d3c
GH
1847 /*
1848 * FIXME: do we need to worry about rq being invalidated by the
1849 * task_switch?
1850 */
1851 post_schedule(rq);
70b97a7f 1852
4866cde0
NP
1853#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1854 /* In this case, finish_task_switch does not reenable preemption */
1855 preempt_enable();
1856#endif
1da177e4 1857 if (current->set_child_tid)
b488893a 1858 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
1859}
1860
1861/*
1862 * context_switch - switch to the new MM and the new
1863 * thread's register state.
1864 */
dd41f596 1865static inline void
70b97a7f 1866context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 1867 struct task_struct *next)
1da177e4 1868{
dd41f596 1869 struct mm_struct *mm, *oldmm;
1da177e4 1870
e107be36 1871 prepare_task_switch(rq, prev, next);
fe4b04fa 1872
dd41f596
IM
1873 mm = next->mm;
1874 oldmm = prev->active_mm;
9226d125
ZA
1875 /*
1876 * For paravirt, this is coupled with an exit in switch_to to
1877 * combine the page table reload and the switch backend into
1878 * one hypercall.
1879 */
224101ed 1880 arch_start_context_switch(prev);
9226d125 1881
31915ab4 1882 if (!mm) {
1da177e4
LT
1883 next->active_mm = oldmm;
1884 atomic_inc(&oldmm->mm_count);
1885 enter_lazy_tlb(oldmm, next);
1886 } else
1887 switch_mm(oldmm, mm, next);
1888
31915ab4 1889 if (!prev->mm) {
1da177e4 1890 prev->active_mm = NULL;
1da177e4
LT
1891 rq->prev_mm = oldmm;
1892 }
3a5f5e48
IM
1893 /*
1894 * Since the runqueue lock will be released by the next
1895 * task (which is an invalid locking op but in the case
 1896 * of the scheduler it's an obvious special-case), we
1897 * do an early lockdep release here:
1898 */
1899#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 1900 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 1901#endif
1da177e4
LT
1902
1903 /* Here we just switch the register state and the stack. */
04e7e951 1904 rcu_switch(prev, next);
1da177e4
LT
1905 switch_to(prev, next, prev);
1906
dd41f596
IM
1907 barrier();
1908 /*
1909 * this_rq must be evaluated again because prev may have moved
1910 * CPUs since it called schedule(), thus the 'rq' on its stack
1911 * frame will be invalid.
1912 */
1913 finish_task_switch(this_rq(), prev);
1da177e4
LT
1914}
1915
1916/*
1917 * nr_running, nr_uninterruptible and nr_context_switches:
1918 *
1919 * externally visible scheduler statistics: current number of runnable
1920 * threads, current number of uninterruptible-sleeping threads, total
1921 * number of context switches performed since bootup.
1922 */
1923unsigned long nr_running(void)
1924{
1925 unsigned long i, sum = 0;
1926
1927 for_each_online_cpu(i)
1928 sum += cpu_rq(i)->nr_running;
1929
1930 return sum;
f711f609 1931}
1da177e4
LT
1932
1933unsigned long nr_uninterruptible(void)
f711f609 1934{
1da177e4 1935 unsigned long i, sum = 0;
f711f609 1936
0a945022 1937 for_each_possible_cpu(i)
1da177e4 1938 sum += cpu_rq(i)->nr_uninterruptible;
f711f609
GS
1939
1940 /*
1da177e4
LT
1941 * Since we read the counters lockless, it might be slightly
1942 * inaccurate. Do not allow it to go below zero though:
f711f609 1943 */
1da177e4
LT
1944 if (unlikely((long)sum < 0))
1945 sum = 0;
f711f609 1946
1da177e4 1947 return sum;
f711f609 1948}
f711f609 1949
1da177e4 1950unsigned long long nr_context_switches(void)
46cb4b7c 1951{
cc94abfc
SR
1952 int i;
1953 unsigned long long sum = 0;
46cb4b7c 1954
0a945022 1955 for_each_possible_cpu(i)
1da177e4 1956 sum += cpu_rq(i)->nr_switches;
46cb4b7c 1957
1da177e4
LT
1958 return sum;
1959}
483b4ee6 1960
1da177e4
LT
1961unsigned long nr_iowait(void)
1962{
1963 unsigned long i, sum = 0;
483b4ee6 1964
0a945022 1965 for_each_possible_cpu(i)
1da177e4 1966 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 1967
1da177e4
LT
1968 return sum;
1969}
483b4ee6 1970
8c215bd3 1971unsigned long nr_iowait_cpu(int cpu)
69d25870 1972{
8c215bd3 1973 struct rq *this = cpu_rq(cpu);
69d25870
AV
1974 return atomic_read(&this->nr_iowait);
1975}
46cb4b7c 1976
69d25870
AV
1977unsigned long this_cpu_load(void)
1978{
1979 struct rq *this = this_rq();
1980 return this->cpu_load[0];
1981}
e790fb0b 1982
46cb4b7c 1983
5167e8d5
PZ
1984/*
1985 * Global load-average calculations
1986 *
1987 * We take a distributed and async approach to calculating the global load-avg
1988 * in order to minimize overhead.
1989 *
1990 * The global load average is an exponentially decaying average of nr_running +
1991 * nr_uninterruptible.
1992 *
1993 * Once every LOAD_FREQ:
1994 *
1995 * nr_active = 0;
1996 * for_each_possible_cpu(cpu)
1997 * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
1998 *
1999 * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
2000 *
2001 * Due to a number of reasons the above turns in the mess below:
2002 *
2003 * - for_each_possible_cpu() is prohibitively expensive on machines with
2004 * serious number of cpus, therefore we need to take a distributed approach
2005 * to calculating nr_active.
2006 *
2007 * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
2008 * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
2009 *
2010 * So assuming nr_active := 0 when we start out -- true per definition, we
2011 * can simply take per-cpu deltas and fold those into a global accumulate
2012 * to obtain the same result. See calc_load_fold_active().
2013 *
2014 * Furthermore, in order to avoid synchronizing all per-cpu delta folding
2015 * across the machine, we assume 10 ticks is sufficient time for every
2016 * cpu to have completed this task.
2017 *
2018 * This places an upper-bound on the IRQ-off latency of the machine. Then
 2019 * again, being late doesn't lose the delta, just wrecks the sample.
2020 *
2021 * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
2022 * this would add another cross-cpu cacheline miss and atomic operation
2023 * to the wakeup path. Instead we increment on whatever cpu the task ran
2024 * when it went into uninterruptible state and decrement on whatever cpu
2025 * did the wakeup. This means that only the sum of nr_uninterruptible over
2026 * all cpus yields the correct result.
2027 *
2028 * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
2029 */
2030
dce48a84
TG
2031/* Variables and functions for calc_load */
2032static atomic_long_t calc_load_tasks;
2033static unsigned long calc_load_update;
2034unsigned long avenrun[3];
5167e8d5
PZ
2035EXPORT_SYMBOL(avenrun); /* should be removed */
2036
2037/**
2038 * get_avenrun - get the load average array
2039 * @loads: pointer to dest load array
2040 * @offset: offset to add
2041 * @shift: shift count to shift the result left
2042 *
2043 * These values are estimates at best, so no need for locking.
2044 */
2045void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2046{
2047 loads[0] = (avenrun[0] + offset) << shift;
2048 loads[1] = (avenrun[1] + offset) << shift;
2049 loads[2] = (avenrun[2] + offset) << shift;
2050}
46cb4b7c 2051
74f5187a
PZ
2052static long calc_load_fold_active(struct rq *this_rq)
2053{
2054 long nr_active, delta = 0;
2055
2056 nr_active = this_rq->nr_running;
2057 nr_active += (long) this_rq->nr_uninterruptible;
2058
2059 if (nr_active != this_rq->calc_load_active) {
2060 delta = nr_active - this_rq->calc_load_active;
2061 this_rq->calc_load_active = nr_active;
2062 }
2063
2064 return delta;
2065}
2066
5167e8d5
PZ
2067/*
2068 * a1 = a0 * e + a * (1 - e)
2069 */
0f004f5a
PZ
2070static unsigned long
2071calc_load(unsigned long load, unsigned long exp, unsigned long active)
2072{
2073 load *= exp;
2074 load += active * (FIXED_1 - exp);
2075 load += 1UL << (FSHIFT - 1);
2076 return load >> FSHIFT;
2077}
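
Worked example (a userspace sketch, not kernel code): the constants below mirror the well-known FSHIFT/FIXED_1/EXP_1 values and the LOAD_INT/LOAD_FRAC macros that /proc/loadavg uses for printing, and calc_load_step() reimplements the single decay step above. It shows a 1-minute average climbing toward a constant nr_active of 3.

#include <stdio.h>

#define FSHIFT	11			/* bits of fixed-point precision */
#define FIXED_1	(1UL << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5sec/1min) in fixed point */

#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

/* Same recurrence as calc_load() above: a1 = a0 * e + a * (1 - e). */
static unsigned long calc_load_step(unsigned long load, unsigned long exp,
				    unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);	/* round to nearest */
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun_1 = 0;		/* starts idle */
	unsigned long active = 3 * FIXED_1;	/* 3 runnable tasks, constant */
	int i;

	for (i = 1; i <= 24; i++) {		/* 24 samples ~= 2 minutes */
		avenrun_1 = calc_load_step(avenrun_1, EXP_1, active);
		if (i % 6 == 0)
			printf("after ~%2d s: %lu.%02lu\n", i * 5,
			       LOAD_INT(avenrun_1), LOAD_FRAC(avenrun_1));
	}
	return 0;
}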
2078
74f5187a
PZ
2079#ifdef CONFIG_NO_HZ
2080/*
5167e8d5
PZ
2081 * Handle NO_HZ for the global load-average.
2082 *
2083 * Since the above described distributed algorithm to compute the global
2084 * load-average relies on per-cpu sampling from the tick, it is affected by
2085 * NO_HZ.
2086 *
2087 * The basic idea is to fold the nr_active delta into a global idle-delta upon
2088 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
2089 * when we read the global state.
2090 *
2091 * Obviously reality has to ruin such a delightfully simple scheme:
2092 *
2093 * - When we go NO_HZ idle during the window, we can negate our sample
2094 * contribution, causing under-accounting.
2095 *
2096 * We avoid this by keeping two idle-delta counters and flipping them
2097 * when the window starts, thus separating old and new NO_HZ load.
2098 *
2099 * The only trick is the slight shift in index flip for read vs write.
2100 *
2101 * 0s 5s 10s 15s
2102 * +10 +10 +10 +10
2103 * |-|-----------|-|-----------|-|-----------|-|
2104 * r:0 0 1 1 0 0 1 1 0
2105 * w:0 1 1 0 0 1 1 0 0
2106 *
2107 * This ensures we'll fold the old idle contribution in this window while
 2108 * accumulating the new one.
2109 *
2110 * - When we wake up from NO_HZ idle during the window, we push up our
2111 * contribution, since we effectively move our sample point to a known
2112 * busy state.
2113 *
2114 * This is solved by pushing the window forward, and thus skipping the
2115 * sample, for this cpu (effectively using the idle-delta for this cpu which
2116 * was in effect at the time the window opened). This also solves the issue
2117 * of having to deal with a cpu having been in NOHZ idle for multiple
2118 * LOAD_FREQ intervals.
74f5187a
PZ
2119 *
2120 * When making the ILB scale, we should try to pull this in as well.
2121 */
5167e8d5
PZ
2122static atomic_long_t calc_load_idle[2];
2123static int calc_load_idx;
74f5187a 2124
5167e8d5 2125static inline int calc_load_write_idx(void)
74f5187a 2126{
5167e8d5
PZ
2127 int idx = calc_load_idx;
2128
2129 /*
2130 * See calc_global_nohz(), if we observe the new index, we also
2131 * need to observe the new update time.
2132 */
2133 smp_rmb();
2134
2135 /*
2136 * If the folding window started, make sure we start writing in the
2137 * next idle-delta.
2138 */
2139 if (!time_before(jiffies, calc_load_update))
2140 idx++;
2141
2142 return idx & 1;
2143}
2144
2145static inline int calc_load_read_idx(void)
2146{
2147 return calc_load_idx & 1;
2148}
2149
2150void calc_load_enter_idle(void)
2151{
2152 struct rq *this_rq = this_rq();
74f5187a
PZ
2153 long delta;
2154
5167e8d5
PZ
2155 /*
2156 * We're going into NOHZ mode, if there's any pending delta, fold it
2157 * into the pending idle delta.
2158 */
74f5187a 2159 delta = calc_load_fold_active(this_rq);
5167e8d5
PZ
2160 if (delta) {
2161 int idx = calc_load_write_idx();
2162 atomic_long_add(delta, &calc_load_idle[idx]);
2163 }
74f5187a
PZ
2164}
2165
5167e8d5 2166void calc_load_exit_idle(void)
74f5187a 2167{
5167e8d5
PZ
2168 struct rq *this_rq = this_rq();
2169
2170 /*
2171 * If we're still before the sample window, we're done.
2172 */
2173 if (time_before(jiffies, this_rq->calc_load_update))
2174 return;
74f5187a
PZ
2175
2176 /*
5167e8d5
PZ
 2177 * We woke inside or after the sample window; this means we're already
2178 * accounted through the nohz accounting, so skip the entire deal and
2179 * sync up for the next window.
74f5187a 2180 */
5167e8d5
PZ
2181 this_rq->calc_load_update = calc_load_update;
2182 if (time_before(jiffies, this_rq->calc_load_update + 10))
2183 this_rq->calc_load_update += LOAD_FREQ;
2184}
2185
2186static long calc_load_fold_idle(void)
2187{
2188 int idx = calc_load_read_idx();
2189 long delta = 0;
2190
2191 if (atomic_long_read(&calc_load_idle[idx]))
2192 delta = atomic_long_xchg(&calc_load_idle[idx], 0);
74f5187a
PZ
2193
2194 return delta;
2195}
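
The read/write index dance above is easier to see with a toy model. The sketch below is plain userspace C, with "now"/"window" standing in for jiffies and calc_load_update; it only illustrates the slot selection and is not the kernel code.

#include <stdio.h>

/* Models calc_load_write_idx(): once the fold window has opened, writers
 * move on to the next slot so the reader cannot see their delta twice. */
static int write_idx(unsigned long now, unsigned long window, int idx)
{
	if (now >= window)
		idx++;
	return idx & 1;
}

int main(void)
{
	int calc_load_idx = 0;		/* flipped by calc_global_nohz() */
	unsigned long window = 100;	/* models calc_load_update */

	printf("idle at t=90 : writes slot %d, reader uses slot %d\n",
	       write_idx(90, window, calc_load_idx), calc_load_idx & 1);
	printf("idle at t=110: writes slot %d, reader uses slot %d\n",
	       write_idx(110, window, calc_load_idx), calc_load_idx & 1);
	return 0;
}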
0f004f5a
PZ
2196
2197/**
2198 * fixed_power_int - compute: x^n, in O(log n) time
2199 *
2200 * @x: base of the power
2201 * @frac_bits: fractional bits of @x
2202 * @n: power to raise @x to.
2203 *
2204 * By exploiting the relation between the definition of the natural power
2205 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2206 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2207 * (where: n_i \elem {0, 1}, the binary vector representing n),
2208 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2209 * of course trivially computable in O(log_2 n), the length of our binary
2210 * vector.
2211 */
2212static unsigned long
2213fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2214{
2215 unsigned long result = 1UL << frac_bits;
2216
2217 if (n) for (;;) {
2218 if (n & 1) {
2219 result *= x;
2220 result += 1UL << (frac_bits - 1);
2221 result >>= frac_bits;
2222 }
2223 n >>= 1;
2224 if (!n)
2225 break;
2226 x *= x;
2227 x += 1UL << (frac_bits - 1);
2228 x >>= frac_bits;
2229 }
2230
2231 return result;
2232}
2233
2234/*
2235 * a1 = a0 * e + a * (1 - e)
2236 *
2237 * a2 = a1 * e + a * (1 - e)
2238 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2239 * = a0 * e^2 + a * (1 - e) * (1 + e)
2240 *
2241 * a3 = a2 * e + a * (1 - e)
2242 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2243 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2244 *
2245 * ...
2246 *
2247 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2248 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2249 * = a0 * e^n + a * (1 - e^n)
2250 *
2251 * [1] application of the geometric series:
2252 *
2253 * n 1 - x^(n+1)
2254 * S_n := \Sum x^i = -------------
2255 * i=0 1 - x
2256 */
2257static unsigned long
2258calc_load_n(unsigned long load, unsigned long exp,
2259 unsigned long active, unsigned int n)
2260{
2261
2262 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2263}
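
To make the derivation concrete, the userspace sketch below applies the one-step recurrence n times and compares it against the closed form a_n = a_0 * e^n + a * (1 - e^n); the two agree up to a little fixed-point rounding. The constants mirror FSHIFT/EXP_1; the helper names (step, fixed_power_naive) are made up for the example, with the naive O(n) power standing in for the kernel's O(log n) fixed_power_int().

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884

/* One decay step, same recurrence as calc_load() above. */
static unsigned long step(unsigned long load, unsigned long exp,
			  unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);
	return load >> FSHIFT;
}

/* Naive O(n) fixed-point power, for checking the closed form. */
static unsigned long fixed_power_naive(unsigned long x, unsigned int frac_bits,
				       unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	while (n--) {
		result *= x;
		result += 1UL << (frac_bits - 1);
		result >>= frac_bits;
	}
	return result;
}

int main(void)
{
	unsigned long a0 = 2 * FIXED_1, active = 5 * FIXED_1;
	unsigned long iterative = a0, en, closed;
	unsigned int i, n = 7;

	for (i = 0; i < n; i++)
		iterative = step(iterative, EXP_1, active);

	/* a_n = a_0 * e^n + a * (1 - e^n), cf. the derivation above. */
	en = fixed_power_naive(EXP_1, FSHIFT, n);
	closed = (a0 * en + active * (FIXED_1 - en) + (1UL << (FSHIFT - 1)))
		 >> FSHIFT;

	printf("iterative: %lu  closed form: %lu\n", iterative, closed);
	return 0;
}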
2264
2265/*
2266 * NO_HZ can leave us missing all per-cpu ticks calling
2267 * calc_load_account_active(), but since an idle CPU folds its delta into
 2268 * calc_load_idle[] per calc_load_enter_idle(), all we need to do is fold
2269 * in the pending idle delta if our idle period crossed a load cycle boundary.
2270 *
2271 * Once we've updated the global active value, we need to apply the exponential
2272 * weights adjusted to the number of cycles missed.
2273 */
c308b56b 2274static void calc_global_nohz(void)
0f004f5a
PZ
2275{
2276 long delta, active, n;
2277
5167e8d5
PZ
2278 if (!time_before(jiffies, calc_load_update + 10)) {
2279 /*
2280 * Catch-up, fold however many we are behind still
2281 */
2282 delta = jiffies - calc_load_update - 10;
2283 n = 1 + (delta / LOAD_FREQ);
0f004f5a 2284
5167e8d5
PZ
2285 active = atomic_long_read(&calc_load_tasks);
2286 active = active > 0 ? active * FIXED_1 : 0;
0f004f5a 2287
5167e8d5
PZ
2288 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2289 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2290 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
0f004f5a 2291
5167e8d5
PZ
2292 calc_load_update += n * LOAD_FREQ;
2293 }
74f5187a 2294
5167e8d5
PZ
2295 /*
2296 * Flip the idle index...
2297 *
2298 * Make sure we first write the new time then flip the index, so that
2299 * calc_load_write_idx() will see the new time when it reads the new
2300 * index, this avoids a double flip messing things up.
2301 */
2302 smp_wmb();
2303 calc_load_idx++;
74f5187a 2304}
5167e8d5 2305#else /* !CONFIG_NO_HZ */
0f004f5a 2306
5167e8d5
PZ
2307static inline long calc_load_fold_idle(void) { return 0; }
2308static inline void calc_global_nohz(void) { }
74f5187a 2309
5167e8d5 2310#endif /* CONFIG_NO_HZ */
46cb4b7c 2311
46cb4b7c 2312/*
dce48a84
TG
 2313 * calc_global_load - update the avenrun load estimates 10 ticks after the
2314 * CPUs have updated calc_load_tasks.
7835b98b 2315 */
0f004f5a 2316void calc_global_load(unsigned long ticks)
7835b98b 2317{
5167e8d5 2318 long active, delta;
1da177e4 2319
0f004f5a 2320 if (time_before(jiffies, calc_load_update + 10))
dce48a84 2321 return;
1da177e4 2322
5167e8d5
PZ
2323 /*
2324 * Fold the 'old' idle-delta to include all NO_HZ cpus.
2325 */
2326 delta = calc_load_fold_idle();
2327 if (delta)
2328 atomic_long_add(delta, &calc_load_tasks);
2329
dce48a84
TG
2330 active = atomic_long_read(&calc_load_tasks);
2331 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 2332
dce48a84
TG
2333 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2334 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2335 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 2336
dce48a84 2337 calc_load_update += LOAD_FREQ;
c308b56b
PZ
2338
2339 /*
5167e8d5 2340 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
c308b56b
PZ
2341 */
2342 calc_global_nohz();
dce48a84 2343}
1da177e4 2344
dce48a84 2345/*
74f5187a
PZ
 2346 * Called from update_cpu_load_active() to periodically update this CPU's
2347 * active count.
dce48a84
TG
2348 */
2349static void calc_load_account_active(struct rq *this_rq)
2350{
74f5187a 2351 long delta;
08c183f3 2352
74f5187a
PZ
2353 if (time_before(jiffies, this_rq->calc_load_update))
2354 return;
783609c6 2355
74f5187a 2356 delta = calc_load_fold_active(this_rq);
74f5187a 2357 if (delta)
dce48a84 2358 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
2359
2360 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
2361}
2362
5167e8d5
PZ
2363/*
2364 * End of global load-average stuff
2365 */
2366
fdf3e95d
VP
2367/*
2368 * The exact cpuload at various idx values, calculated at every tick would be
2369 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2370 *
2371 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
2372 * on nth tick when cpu may be busy, then we have:
2373 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 2375 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2375 *
2376 * decay_load_missed() below does efficient calculation of
2377 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2378 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2379 *
2380 * The calculation is approximated on a 128 point scale.
2381 * degrade_zero_ticks is the number of ticks after which load at any
2382 * particular idx is approximated to be zero.
2383 * degrade_factor is a precomputed table, a row for each load idx.
2384 * Each column corresponds to degradation factor for a power of two ticks,
2385 * based on 128 point scale.
2386 * Example:
2387 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2388 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2389 *
2390 * With this power of 2 load factors, we can degrade the load n times
2391 * by looking at 1 bits in n and doing as many mult/shift instead of
2392 * n mult/shifts needed by the exact degradation.
2393 */
2394#define DEGRADE_SHIFT 7
2395static const unsigned char
2396 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2397static const unsigned char
2398 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2399 {0, 0, 0, 0, 0, 0, 0, 0},
2400 {64, 32, 8, 0, 0, 0, 0, 0},
2401 {96, 72, 40, 12, 1, 0, 0},
2402 {112, 98, 75, 43, 15, 1, 0},
2403 {120, 112, 98, 76, 45, 16, 2} };
2404
2405/*
2406 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 2407 * accrues while the CPU is idle, so we just decay the old load without
2408 * adding any new load.
2409 */
2410static unsigned long
2411decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2412{
2413 int j = 0;
2414
2415 if (!missed_updates)
2416 return load;
2417
2418 if (missed_updates >= degrade_zero_ticks[idx])
2419 return 0;
2420
2421 if (idx == 1)
2422 return load >> missed_updates;
2423
2424 while (missed_updates) {
2425 if (missed_updates % 2)
2426 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2427
2428 missed_updates >>= 1;
2429 j++;
2430 }
2431 return load;
2432}
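
For a feel of the approximation, the sketch below compares the table-driven shortcut against the exact per-tick decay for idx 2 (factor 3/4 per missed tick). degrade_factor_2[] is the idx-2 row copied (zero-padded) from the table above; everything else is illustrative userspace code. With 8 missed ticks it prints 99 (exact, with integer division) vs 93 (table), matching the "12/128 approximates 3^8/4^8" note above.

#include <stdio.h>

#define DEGRADE_SHIFT	7
static const unsigned char degrade_factor_2[DEGRADE_SHIFT + 1] =
	{96, 72, 40, 12, 1, 0, 0, 0};	/* row for idx == 2 */

/* Table-driven decay, same idea as decay_load_missed() for idx == 2. */
static unsigned long decay_missed_idx2(unsigned long load,
				       unsigned long missed)
{
	int j = 0;

	while (missed) {
		if (missed & 1)
			load = (load * degrade_factor_2[j]) >> DEGRADE_SHIFT;
		missed >>= 1;
		j++;
	}
	return load;
}

int main(void)
{
	unsigned long load = 1000, exact = 1000, missed = 8;
	unsigned long i;

	for (i = 0; i < missed; i++)	/* exact: load *= 3/4 per tick */
		exact = exact * 3 / 4;

	printf("exact: %lu  table: %lu\n", exact,
	       decay_missed_idx2(load, missed));
	return 0;
}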
2433
46cb4b7c 2434/*
dd41f596 2435 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
2436 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2437 * every tick. We fix it up based on jiffies.
46cb4b7c 2438 */
556061b0
PZ
2439static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2440 unsigned long pending_updates)
46cb4b7c 2441{
dd41f596 2442 int i, scale;
46cb4b7c 2443
dd41f596 2444 this_rq->nr_load_updates++;
46cb4b7c 2445
dd41f596 2446 /* Update our load: */
fdf3e95d
VP
2447 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2448 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 2449 unsigned long old_load, new_load;
7d1e6a9b 2450
dd41f596 2451 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 2452
dd41f596 2453 old_load = this_rq->cpu_load[i];
fdf3e95d 2454 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 2455 new_load = this_load;
a25707f3
IM
2456 /*
2457 * Round up the averaging division if load is increasing. This
2458 * prevents us from getting stuck on 9 if the load is 10, for
2459 * example.
2460 */
2461 if (new_load > old_load)
fdf3e95d
VP
2462 new_load += scale - 1;
2463
2464 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 2465 }
da2b71ed
SS
2466
2467 sched_avg_update(this_rq);
fdf3e95d
VP
2468}
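
The "+ scale - 1" rounding above is what the comment about "getting stuck on 9 if the load is 10" refers to. A tiny userspace illustration for idx == 1 (scale == 2), not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long trunc = 0, rounded = 0, target = 10;
	int i, scale = 2;			/* idx == 1: scale = 2^1 */

	for (i = 0; i < 12; i++) {
		/* plain truncating average stalls one below the target */
		trunc = (trunc * (scale - 1) + target) >> 1;
		/* rounding up while increasing lets it reach the target */
		rounded = (rounded * (scale - 1) +
			   (target > rounded ? target + scale - 1 : target)) >> 1;
	}
	printf("truncating: %lu  rounding up: %lu\n", trunc, rounded);
	return 0;
}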
2469
5aaa0b7a
PZ
2470#ifdef CONFIG_NO_HZ
2471/*
2472 * There is no sane way to deal with nohz on smp when using jiffies because the
2473 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
2474 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
2475 *
2476 * Therefore we cannot use the delta approach from the regular tick since that
2477 * would seriously skew the load calculation. However we'll make do for those
2478 * updates happening while idle (nohz_idle_balance) or coming out of idle
2479 * (tick_nohz_idle_exit).
2480 *
2481 * This means we might still be one tick off for nohz periods.
2482 */
2483
556061b0
PZ
2484/*
2485 * Called from nohz_idle_balance() to update the load ratings before doing the
2486 * idle balance.
2487 */
2488void update_idle_cpu_load(struct rq *this_rq)
2489{
5aaa0b7a 2490 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
556061b0
PZ
2491 unsigned long load = this_rq->load.weight;
2492 unsigned long pending_updates;
2493
2494 /*
5aaa0b7a 2495 * bail if there's load or we're actually up-to-date.
556061b0
PZ
2496 */
2497 if (load || curr_jiffies == this_rq->last_load_update_tick)
2498 return;
2499
2500 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2501 this_rq->last_load_update_tick = curr_jiffies;
2502
2503 __update_cpu_load(this_rq, load, pending_updates);
2504}
2505
5aaa0b7a
PZ
2506/*
2507 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
2508 */
2509void update_cpu_load_nohz(void)
2510{
2511 struct rq *this_rq = this_rq();
2512 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2513 unsigned long pending_updates;
2514
2515 if (curr_jiffies == this_rq->last_load_update_tick)
2516 return;
2517
2518 raw_spin_lock(&this_rq->lock);
2519 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2520 if (pending_updates) {
2521 this_rq->last_load_update_tick = curr_jiffies;
2522 /*
 2523 * We were idle, which means a load of 0; the current load might be
 2524 * !0 due to remote wakeups and the like.
2525 */
2526 __update_cpu_load(this_rq, 0, pending_updates);
2527 }
2528 raw_spin_unlock(&this_rq->lock);
2529}
2530#endif /* CONFIG_NO_HZ */
2531
556061b0
PZ
2532/*
2533 * Called from scheduler_tick()
2534 */
fdf3e95d
VP
2535static void update_cpu_load_active(struct rq *this_rq)
2536{
556061b0 2537 /*
5aaa0b7a 2538 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
556061b0
PZ
2539 */
2540 this_rq->last_load_update_tick = jiffies;
2541 __update_cpu_load(this_rq, this_rq->load.weight, 1);
46cb4b7c 2542
74f5187a 2543 calc_load_account_active(this_rq);
46cb4b7c
SS
2544}
2545
dd41f596 2546#ifdef CONFIG_SMP
8a0be9ef 2547
46cb4b7c 2548/*
38022906
PZ
2549 * sched_exec - execve() is a valuable balancing opportunity, because at
2550 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2551 */
38022906 2552void sched_exec(void)
46cb4b7c 2553{
38022906 2554 struct task_struct *p = current;
1da177e4 2555 unsigned long flags;
0017d735 2556 int dest_cpu;
46cb4b7c 2557
8f42ced9 2558 raw_spin_lock_irqsave(&p->pi_lock, flags);
7608dec2 2559 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
0017d735
PZ
2560 if (dest_cpu == smp_processor_id())
2561 goto unlock;
38022906 2562
8f42ced9 2563 if (likely(cpu_active(dest_cpu))) {
969c7921 2564 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2565
8f42ced9
PZ
2566 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2567 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2568 return;
2569 }
0017d735 2570unlock:
8f42ced9 2571 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2572}
dd41f596 2573
1da177e4
LT
2574#endif
2575
1da177e4 2576DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2577DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2578
2579EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2580EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4
LT
2581
2582/*
c5f8d995 2583 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 2584 * @p in case that task is currently running.
c5f8d995
HS
2585 *
2586 * Called with task_rq_lock() held on @rq.
1da177e4 2587 */
c5f8d995
HS
2588static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2589{
2590 u64 ns = 0;
2591
2592 if (task_current(rq, p)) {
2593 update_rq_clock(rq);
305e6835 2594 ns = rq->clock_task - p->se.exec_start;
c5f8d995
HS
2595 if ((s64)ns < 0)
2596 ns = 0;
2597 }
2598
2599 return ns;
2600}
2601
bb34d92f 2602unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 2603{
1da177e4 2604 unsigned long flags;
41b86e9c 2605 struct rq *rq;
bb34d92f 2606 u64 ns = 0;
48f24c4d 2607
41b86e9c 2608 rq = task_rq_lock(p, &flags);
c5f8d995 2609 ns = do_task_delta_exec(p, rq);
0122ec5b 2610 task_rq_unlock(rq, p, &flags);
1508487e 2611
c5f8d995
HS
2612 return ns;
2613}
f06febc9 2614
c5f8d995
HS
2615/*
2616 * Return accounted runtime for the task.
2617 * In case the task is currently running, return the runtime plus current's
2618 * pending runtime that have not been accounted yet.
2619 */
2620unsigned long long task_sched_runtime(struct task_struct *p)
2621{
2622 unsigned long flags;
2623 struct rq *rq;
2624 u64 ns = 0;
2625
2626 rq = task_rq_lock(p, &flags);
2627 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 2628 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2629
2630 return ns;
2631}
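
task_sched_runtime() is what ultimately backs the per-thread POSIX CPU clock, so its effect is easy to observe from userspace. A minimal example, assuming a Linux/glibc environment (link with -lrt on older toolchains):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	volatile unsigned long x = 0;
	unsigned long i;

	for (i = 0; i < 50000000UL; i++)	/* burn some CPU time */
		x += i;

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
		return 1;

	printf("thread cpu time: %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}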
48f24c4d 2632
7835b98b
CL
2633/*
2634 * This function gets called by the timer code, with HZ frequency.
2635 * We call it with interrupts disabled.
7835b98b
CL
2636 */
2637void scheduler_tick(void)
2638{
7835b98b
CL
2639 int cpu = smp_processor_id();
2640 struct rq *rq = cpu_rq(cpu);
dd41f596 2641 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2642
2643 sched_clock_tick();
dd41f596 2644
05fa785c 2645 raw_spin_lock(&rq->lock);
3e51f33f 2646 update_rq_clock(rq);
fdf3e95d 2647 update_cpu_load_active(rq);
fa85ae24 2648 curr->sched_class->task_tick(rq, curr, 0);
05fa785c 2649 raw_spin_unlock(&rq->lock);
7835b98b 2650
e9d2b064 2651 perf_event_task_tick();
e220d2dc 2652
e418e1c2 2653#ifdef CONFIG_SMP
6eb57e0d 2654 rq->idle_balance = idle_cpu(cpu);
dd41f596 2655 trigger_load_balance(rq, cpu);
e418e1c2 2656#endif
1da177e4
LT
2657}
2658
132380a0 2659notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2660{
2661 if (in_lock_functions(addr)) {
2662 addr = CALLER_ADDR2;
2663 if (in_lock_functions(addr))
2664 addr = CALLER_ADDR3;
2665 }
2666 return addr;
2667}
1da177e4 2668
7e49fcce
SR
2669#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2670 defined(CONFIG_PREEMPT_TRACER))
2671
43627582 2672void __kprobes add_preempt_count(int val)
1da177e4 2673{
6cd8a4bb 2674#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2675 /*
2676 * Underflow?
2677 */
9a11b49a
IM
2678 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2679 return;
6cd8a4bb 2680#endif
1da177e4 2681 preempt_count() += val;
6cd8a4bb 2682#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2683 /*
2684 * Spinlock count overflowing soon?
2685 */
33859f7f
MOS
2686 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2687 PREEMPT_MASK - 10);
6cd8a4bb
SR
2688#endif
2689 if (preempt_count() == val)
2690 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
2691}
2692EXPORT_SYMBOL(add_preempt_count);
2693
43627582 2694void __kprobes sub_preempt_count(int val)
1da177e4 2695{
6cd8a4bb 2696#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2697 /*
2698 * Underflow?
2699 */
01e3eb82 2700 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 2701 return;
1da177e4
LT
2702 /*
2703 * Is the spinlock portion underflowing?
2704 */
9a11b49a
IM
2705 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2706 !(preempt_count() & PREEMPT_MASK)))
2707 return;
6cd8a4bb 2708#endif
9a11b49a 2709
6cd8a4bb
SR
2710 if (preempt_count() == val)
2711 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
2712 preempt_count() -= val;
2713}
2714EXPORT_SYMBOL(sub_preempt_count);
2715
2716#endif
2717
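
add_preempt_count()/sub_preempt_count() are normally reached through the preempt_disable()/preempt_enable() macros, which nest. A kernel-style sketch of the usual pairing (illustrative only, not taken from this file):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_percpu_section(void)
{
	preempt_disable();		/* bumps preempt_count() */

	/* Safe to rely on staying on this CPU in here. */
	pr_debug("running on cpu %d, preempt_count=%d\n",
		 smp_processor_id(), preempt_count());

	preempt_disable();		/* nesting is fine, count goes to 2 */
	preempt_enable();		/* back to 1 */

	preempt_enable();		/* count reaches 0: may reschedule here */
}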
2718/*
dd41f596 2719 * Print scheduling while atomic bug:
1da177e4 2720 */
dd41f596 2721static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 2722{
664dfa65
DJ
2723 if (oops_in_progress)
2724 return;
2725
3df0fc5b
PZ
2726 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2727 prev->comm, prev->pid, preempt_count());
838225b4 2728
dd41f596 2729 debug_show_held_locks(prev);
e21f5b15 2730 print_modules();
dd41f596
IM
2731 if (irqs_disabled())
2732 print_irqtrace_events(prev);
6135fc1e 2733 dump_stack();
1c2927f1 2734 add_taint(TAINT_WARN);
dd41f596 2735}
1da177e4 2736
dd41f596
IM
2737/*
2738 * Various schedule()-time debugging checks and statistics:
2739 */
2740static inline void schedule_debug(struct task_struct *prev)
2741{
1da177e4 2742 /*
41a2d6cf 2743 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
2744 * schedule() atomically, we ignore that path for now.
2745 * Otherwise, whine if we are scheduling when we should not be.
2746 */
3f33a7ce 2747 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596 2748 __schedule_bug(prev);
b3fbab05 2749 rcu_sleep_check();
dd41f596 2750
1da177e4
LT
2751 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2752
2d72376b 2753 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
2754}
2755
6cecd084 2756static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 2757{
61eadef6 2758 if (prev->on_rq || rq->skip_clock_update < 0)
a64692a3 2759 update_rq_clock(rq);
6cecd084 2760 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
2761}
2762
dd41f596
IM
2763/*
2764 * Pick up the highest-prio task:
2765 */
2766static inline struct task_struct *
b67802ea 2767pick_next_task(struct rq *rq)
dd41f596 2768{
5522d5d5 2769 const struct sched_class *class;
dd41f596 2770 struct task_struct *p;
1da177e4
LT
2771
2772 /*
dd41f596
IM
2773 * Optimization: we know that if all tasks are in
2774 * the fair class we can call that function directly:
1da177e4 2775 */
953bfcd1 2776 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
fb8d4724 2777 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
2778 if (likely(p))
2779 return p;
1da177e4
LT
2780 }
2781
34f971f6 2782 for_each_class(class) {
fb8d4724 2783 p = class->pick_next_task(rq);
dd41f596
IM
2784 if (p)
2785 return p;
dd41f596 2786 }
34f971f6
PZ
2787
2788 BUG(); /* the idle class will always have a runnable task */
dd41f596 2789}
1da177e4 2790
dd41f596 2791/*
c259e01a 2792 * __schedule() is the main scheduler function.
edde96ea
PE
2793 *
2794 * The main means of driving the scheduler and thus entering this function are:
2795 *
2796 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
2797 *
2798 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
2799 * paths. For example, see arch/x86/entry_64.S.
2800 *
2801 * To drive preemption between tasks, the scheduler sets the flag in timer
2802 * interrupt handler scheduler_tick().
2803 *
2804 * 3. Wakeups don't really cause entry into schedule(). They add a
2805 * task to the run-queue and that's it.
2806 *
2807 * Now, if the new task added to the run-queue preempts the current
2808 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
2809 * called on the nearest possible occasion:
2810 *
2811 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
2812 *
 2813 * - in syscall or exception context, at the next outermost
2814 * preempt_enable(). (this might be as soon as the wake_up()'s
2815 * spin_unlock()!)
2816 *
2817 * - in IRQ context, return from interrupt-handler to
2818 * preemptible context
2819 *
2820 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
2821 * then at the next:
2822 *
2823 * - cond_resched() call
2824 * - explicit schedule() call
2825 * - return from syscall or exception to user-space
2826 * - return from interrupt-handler to user-space
dd41f596 2827 */
c259e01a 2828static void __sched __schedule(void)
dd41f596
IM
2829{
2830 struct task_struct *prev, *next;
67ca7bde 2831 unsigned long *switch_count;
dd41f596 2832 struct rq *rq;
31656519 2833 int cpu;
dd41f596 2834
ff743345
PZ
2835need_resched:
2836 preempt_disable();
dd41f596
IM
2837 cpu = smp_processor_id();
2838 rq = cpu_rq(cpu);
25502a6c 2839 rcu_note_context_switch(cpu);
dd41f596 2840 prev = rq->curr;
dd41f596 2841
dd41f596 2842 schedule_debug(prev);
1da177e4 2843
31656519 2844 if (sched_feat(HRTICK))
f333fdc9 2845 hrtick_clear(rq);
8f4d37ec 2846
05fa785c 2847 raw_spin_lock_irq(&rq->lock);
1da177e4 2848
246d86b5 2849 switch_count = &prev->nivcsw;
1da177e4 2850 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 2851 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 2852 prev->state = TASK_RUNNING;
21aa9af0 2853 } else {
2acca55e
PZ
2854 deactivate_task(rq, prev, DEQUEUE_SLEEP);
2855 prev->on_rq = 0;
2856
21aa9af0 2857 /*
2acca55e
PZ
2858 * If a worker went to sleep, notify and ask workqueue
2859 * whether it wants to wake up a task to maintain
2860 * concurrency.
21aa9af0
TH
2861 */
2862 if (prev->flags & PF_WQ_WORKER) {
2863 struct task_struct *to_wakeup;
2864
2865 to_wakeup = wq_worker_sleeping(prev, cpu);
2866 if (to_wakeup)
2867 try_to_wake_up_local(to_wakeup);
2868 }
21aa9af0 2869 }
dd41f596 2870 switch_count = &prev->nvcsw;
1da177e4
LT
2871 }
2872
3f029d3c 2873 pre_schedule(rq, prev);
f65eda4f 2874
dd41f596 2875 if (unlikely(!rq->nr_running))
1da177e4 2876 idle_balance(cpu, rq);
1da177e4 2877
df1c99d4 2878 put_prev_task(rq, prev);
b67802ea 2879 next = pick_next_task(rq);
f26f9aff
MG
2880 clear_tsk_need_resched(prev);
2881 rq->skip_clock_update = 0;
1da177e4 2882
1da177e4 2883 if (likely(prev != next)) {
1da177e4
LT
2884 rq->nr_switches++;
2885 rq->curr = next;
2886 ++*switch_count;
2887
dd41f596 2888 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 2889 /*
246d86b5
ON
 2890 * The context switch has flipped the stack from under us
2891 * and restored the local variables which were saved when
2892 * this task called schedule() in the past. prev == current
2893 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
2894 */
2895 cpu = smp_processor_id();
2896 rq = cpu_rq(cpu);
1da177e4 2897 } else
05fa785c 2898 raw_spin_unlock_irq(&rq->lock);
1da177e4 2899
3f029d3c 2900 post_schedule(rq);
1da177e4 2901
ba74c144 2902 sched_preempt_enable_no_resched();
ff743345 2903 if (need_resched())
1da177e4
LT
2904 goto need_resched;
2905}
c259e01a 2906
9c40cef2
TG
2907static inline void sched_submit_work(struct task_struct *tsk)
2908{
3c7d5184 2909 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
2910 return;
2911 /*
2912 * If we are going to sleep and we have plugged IO queued,
2913 * make sure to submit it to avoid deadlocks.
2914 */
2915 if (blk_needs_flush_plug(tsk))
2916 blk_schedule_flush_plug(tsk);
2917}
2918
6ebbe7a0 2919asmlinkage void __sched schedule(void)
c259e01a 2920{
9c40cef2
TG
2921 struct task_struct *tsk = current;
2922
2923 sched_submit_work(tsk);
c259e01a
TG
2924 __schedule();
2925}
1da177e4
LT
2926EXPORT_SYMBOL(schedule);
2927
20ab65e3
FW
2928#ifdef CONFIG_RCU_USER_QS
2929asmlinkage void __sched schedule_user(void)
2930{
2931 /*
2932 * If we come here after a random call to set_need_resched(),
2933 * or we have been woken up remotely but the IPI has not yet arrived,
2934 * we haven't yet exited the RCU idle mode. Do it here manually until
2935 * we find a better solution.
2936 */
2937 rcu_user_exit();
2938 schedule();
2939 rcu_user_enter();
2940}
2941#endif
2942
c5491ea7
TG
2943/**
2944 * schedule_preempt_disabled - called with preemption disabled
2945 *
2946 * Returns with preemption disabled. Note: preempt_count must be 1
2947 */
2948void __sched schedule_preempt_disabled(void)
2949{
ba74c144 2950 sched_preempt_enable_no_resched();
c5491ea7
TG
2951 schedule();
2952 preempt_disable();
2953}
2954
c08f7829 2955#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
0d66bf6d 2956
c6eb3dda
PZ
2957static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
2958{
c6eb3dda 2959 if (lock->owner != owner)
307bf980 2960 return false;
0d66bf6d
PZ
2961
2962 /*
c6eb3dda
PZ
 2963 * Ensure we emit the owner->on_cpu dereference _after_ checking that
 2964 * lock->owner still matches owner. If that fails, owner might
 2965 * point to free()d memory; if it still matches, the rcu_read_lock()
 2966 * ensures the memory stays valid.
0d66bf6d 2967 */
c6eb3dda 2968 barrier();
0d66bf6d 2969
307bf980 2970 return owner->on_cpu;
c6eb3dda 2971}
0d66bf6d 2972
c6eb3dda
PZ
2973/*
2974 * Look out! "owner" is an entirely speculative pointer
2975 * access and not reliable.
2976 */
2977int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
2978{
2979 if (!sched_feat(OWNER_SPIN))
2980 return 0;
0d66bf6d 2981
307bf980 2982 rcu_read_lock();
c6eb3dda
PZ
2983 while (owner_running(lock, owner)) {
2984 if (need_resched())
307bf980 2985 break;
0d66bf6d 2986
335d7afb 2987 arch_mutex_cpu_relax();
0d66bf6d 2988 }
307bf980 2989 rcu_read_unlock();
4b402210 2990
c6eb3dda 2991 /*
307bf980
TG
 2992 * We break out of the loop above on need_resched() and when the
 2993 * owner changes, which is a sign of heavy contention. Return
2994 * success only when lock->owner is NULL.
c6eb3dda 2995 */
307bf980 2996 return lock->owner == NULL;
0d66bf6d
PZ
2997}
2998#endif
2999
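
mutex_spin_on_owner() is used by the mutex slowpath to spin optimistically while the lock owner is still running on another CPU, and to give up (and sleep) once the owner blocks or we need to reschedule. A simplified, illustrative caller loop is sketched below, assuming CONFIG_MUTEX_SPIN_ON_OWNER; the real logic lives in __mutex_lock_common() in kernel/mutex.c and handles far more cases.

#include <linux/compiler.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static bool try_optimistic_spin(struct mutex *lock)
{
	for (;;) {
		struct task_struct *owner;

		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			return false;	/* owner blocked: take the sleep path */

		if (mutex_trylock(lock))
			return true;	/* got it without sleeping */

		if (need_resched())
			return false;	/* be nice, take the sleep path */

		arch_mutex_cpu_relax();
	}
}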
1da177e4
LT
3000#ifdef CONFIG_PREEMPT
3001/*
2ed6e34f 3002 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3003 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3004 * occur there and call schedule directly.
3005 */
d1f74e20 3006asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
3007{
3008 struct thread_info *ti = current_thread_info();
6478d880 3009
1da177e4
LT
3010 /*
3011 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3012 * we do not want to preempt the current task. Just return..
1da177e4 3013 */
beed33a8 3014 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
3015 return;
3016
3a5c359a 3017 do {
d1f74e20 3018 add_preempt_count_notrace(PREEMPT_ACTIVE);
c259e01a 3019 __schedule();
d1f74e20 3020 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 3021
3a5c359a
AK
3022 /*
3023 * Check again in case we missed a preemption opportunity
3024 * between schedule and now.
3025 */
3026 barrier();
5ed0cec0 3027 } while (need_resched());
1da177e4 3028}
1da177e4
LT
3029EXPORT_SYMBOL(preempt_schedule);
3030
3031/*
2ed6e34f 3032 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3033 * off of irq context.
3034 * Note, that this is called and return with irqs disabled. This will
3035 * protect us against recursive calling from irq.
3036 */
3037asmlinkage void __sched preempt_schedule_irq(void)
3038{
3039 struct thread_info *ti = current_thread_info();
6478d880 3040
2ed6e34f 3041 /* Catch callers which need to be fixed */
1da177e4
LT
3042 BUG_ON(ti->preempt_count || !irqs_disabled());
3043
90a340ed 3044 rcu_user_exit();
3a5c359a
AK
3045 do {
3046 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a 3047 local_irq_enable();
c259e01a 3048 __schedule();
3a5c359a 3049 local_irq_disable();
3a5c359a 3050 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 3051
3a5c359a
AK
3052 /*
3053 * Check again in case we missed a preemption opportunity
3054 * between schedule and now.
3055 */
3056 barrier();
5ed0cec0 3057 } while (need_resched());
1da177e4
LT
3058}
3059
3060#endif /* CONFIG_PREEMPT */
3061
63859d4f 3062int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3063 void *key)
1da177e4 3064{
63859d4f 3065 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3066}
1da177e4
LT
3067EXPORT_SYMBOL(default_wake_function);
3068
3069/*
41a2d6cf
IM
3070 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3071 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
3072 * number) then we wake all the non-exclusive tasks and one exclusive task.
3073 *
3074 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 3075 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
3076 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3077 */
78ddb08f 3078static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 3079 int nr_exclusive, int wake_flags, void *key)
1da177e4 3080{
2e45874c 3081 wait_queue_t *curr, *next;
1da177e4 3082
2e45874c 3083 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
3084 unsigned flags = curr->flags;
3085
63859d4f 3086 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 3087 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
3088 break;
3089 }
3090}
3091
3092/**
3093 * __wake_up - wake up threads blocked on a waitqueue.
3094 * @q: the waitqueue
3095 * @mode: which threads
3096 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 3097 * @key: is directly passed to the wakeup function
50fa610a
DH
3098 *
3099 * It may be assumed that this function implies a write memory barrier before
3100 * changing the task state if and only if any tasks are woken up.
1da177e4 3101 */
7ad5b3a5 3102void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 3103 int nr_exclusive, void *key)
1da177e4
LT
3104{
3105 unsigned long flags;
3106
3107 spin_lock_irqsave(&q->lock, flags);
3108 __wake_up_common(q, mode, nr_exclusive, 0, key);
3109 spin_unlock_irqrestore(&q->lock, flags);
3110}
1da177e4
LT
3111EXPORT_SYMBOL(__wake_up);
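
For context, these wakeup primitives are usually consumed through a wait queue plus a condition, with the producer calling wake_up() and the consumer sleeping in wait_event*(). A kernel-style sketch (names are illustrative):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

/* Consumer: sleeps until my_condition becomes true (or a signal arrives). */
static int consumer(void)
{
	return wait_event_interruptible(my_wq, my_condition != 0);
}

/* Producer: publish the condition, then wake any sleepers on the queue. */
static void producer(void)
{
	my_condition = 1;
	wake_up(&my_wq);	/* ends up in __wake_up(..., TASK_NORMAL, 1, NULL) */
}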
3112
3113/*
3114 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3115 */
63b20011 3116void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
1da177e4 3117{
63b20011 3118 __wake_up_common(q, mode, nr, 0, NULL);
1da177e4 3119}
22c43c81 3120EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 3121
4ede816a
DL
3122void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3123{
3124 __wake_up_common(q, mode, 1, 0, key);
3125}
bf294b41 3126EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4ede816a 3127
1da177e4 3128/**
4ede816a 3129 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
3130 * @q: the waitqueue
3131 * @mode: which threads
3132 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 3133 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
3134 *
 3135 * The sync wakeup differs in that the waker knows that it will schedule
3136 * away soon, so while the target thread will be woken up, it will not
3137 * be migrated to another CPU - ie. the two threads are 'synchronized'
3138 * with each other. This can prevent needless bouncing between CPUs.
3139 *
3140 * On UP it can prevent extra preemption.
50fa610a
DH
3141 *
3142 * It may be assumed that this function implies a write memory barrier before
3143 * changing the task state if and only if any tasks are woken up.
1da177e4 3144 */
4ede816a
DL
3145void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3146 int nr_exclusive, void *key)
1da177e4
LT
3147{
3148 unsigned long flags;
7d478721 3149 int wake_flags = WF_SYNC;
1da177e4
LT
3150
3151 if (unlikely(!q))
3152 return;
3153
3154 if (unlikely(!nr_exclusive))
7d478721 3155 wake_flags = 0;
1da177e4
LT
3156
3157 spin_lock_irqsave(&q->lock, flags);
7d478721 3158 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
3159 spin_unlock_irqrestore(&q->lock, flags);
3160}
4ede816a
DL
3161EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3162
3163/*
3164 * __wake_up_sync - see __wake_up_sync_key()
3165 */
3166void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3167{
3168 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
3169}
1da177e4
LT
3170EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
3171
65eb3dc6
KD
3172/**
3173 * complete: - signals a single thread waiting on this completion
3174 * @x: holds the state of this particular completion
3175 *
3176 * This will wake up a single thread waiting on this completion. Threads will be
3177 * awakened in the same order in which they were queued.
3178 *
3179 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
3180 *
3181 * It may be assumed that this function implies a write memory barrier before
3182 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3183 */
b15136e9 3184void complete(struct completion *x)
1da177e4
LT
3185{
3186 unsigned long flags;
3187
3188 spin_lock_irqsave(&x->wait.lock, flags);
3189 x->done++;
d9514f6c 3190 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
3191 spin_unlock_irqrestore(&x->wait.lock, flags);
3192}
3193EXPORT_SYMBOL(complete);
3194
65eb3dc6
KD
3195/**
3196 * complete_all: - signals all threads waiting on this completion
3197 * @x: holds the state of this particular completion
3198 *
3199 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
3200 *
3201 * It may be assumed that this function implies a write memory barrier before
3202 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3203 */
b15136e9 3204void complete_all(struct completion *x)
1da177e4
LT
3205{
3206 unsigned long flags;
3207
3208 spin_lock_irqsave(&x->wait.lock, flags);
3209 x->done += UINT_MAX/2;
d9514f6c 3210 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
3211 spin_unlock_irqrestore(&x->wait.lock, flags);
3212}
3213EXPORT_SYMBOL(complete_all);
3214
8cbbe86d
AK
3215static inline long __sched
3216do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3217{
1da177e4
LT
3218 if (!x->done) {
3219 DECLARE_WAITQUEUE(wait, current);
3220
a93d2f17 3221 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 3222 do {
94d3d824 3223 if (signal_pending_state(state, current)) {
ea71a546
ON
3224 timeout = -ERESTARTSYS;
3225 break;
8cbbe86d
AK
3226 }
3227 __set_current_state(state);
1da177e4
LT
3228 spin_unlock_irq(&x->wait.lock);
3229 timeout = schedule_timeout(timeout);
3230 spin_lock_irq(&x->wait.lock);
ea71a546 3231 } while (!x->done && timeout);
1da177e4 3232 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
3233 if (!x->done)
3234 return timeout;
1da177e4
LT
3235 }
3236 x->done--;
ea71a546 3237 return timeout ?: 1;
1da177e4 3238}
1da177e4 3239
8cbbe86d
AK
3240static long __sched
3241wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3242{
1da177e4
LT
3243 might_sleep();
3244
3245 spin_lock_irq(&x->wait.lock);
8cbbe86d 3246 timeout = do_wait_for_common(x, timeout, state);
1da177e4 3247 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
3248 return timeout;
3249}
1da177e4 3250
65eb3dc6
KD
3251/**
3252 * wait_for_completion: - waits for completion of a task
3253 * @x: holds the state of this particular completion
3254 *
3255 * This waits to be signaled for completion of a specific task. It is NOT
3256 * interruptible and there is no timeout.
3257 *
3258 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3259 * and interrupt capability. Also see complete().
3260 */
b15136e9 3261void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
3262{
3263 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 3264}
8cbbe86d 3265EXPORT_SYMBOL(wait_for_completion);
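
A typical use of the completion API documented above: one context declares and waits, another signals. Kernel-style sketch with illustrative names:

#include <linux/completion.h>

static DECLARE_COMPLETION(setup_done);

/* Waiting side: blocks (uninterruptibly) until the other context signals. */
static void wait_for_setup(void)
{
	wait_for_completion(&setup_done);
}

/* Signalling side, e.g. from a workqueue or interrupt-driven path. */
static void setup_finished(void)
{
	complete(&setup_done);	/* wakes exactly one waiter */
}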
1da177e4 3266
65eb3dc6
KD
3267/**
3268 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3269 * @x: holds the state of this particular completion
3270 * @timeout: timeout value in jiffies
3271 *
3272 * This waits for either a completion of a specific task to be signaled or for a
3273 * specified timeout to expire. The timeout is in jiffies. It is not
3274 * interruptible.
c6dc7f05
BF
3275 *
3276 * The return value is 0 if timed out, and positive (at least 1, or number of
3277 * jiffies left till timeout) if completed.
65eb3dc6 3278 */
b15136e9 3279unsigned long __sched
8cbbe86d 3280wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 3281{
8cbbe86d 3282 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 3283}
8cbbe86d 3284EXPORT_SYMBOL(wait_for_completion_timeout);
1da177e4 3285
65eb3dc6
KD
3286/**
3287 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3288 * @x: holds the state of this particular completion
3289 *
3290 * This waits for completion of a specific task to be signaled. It is
3291 * interruptible.
c6dc7f05
BF
3292 *
3293 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3294 */
8cbbe86d 3295int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 3296{
51e97990
AK
3297 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3298 if (t == -ERESTARTSYS)
3299 return t;
3300 return 0;
0fec171c 3301}
8cbbe86d 3302EXPORT_SYMBOL(wait_for_completion_interruptible);
1da177e4 3303
65eb3dc6
KD
3304/**
3305 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3306 * @x: holds the state of this particular completion
3307 * @timeout: timeout value in jiffies
3308 *
3309 * This waits for either a completion of a specific task to be signaled or for a
3310 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
c6dc7f05
BF
3311 *
3312 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3313 * positive (at least 1, or number of jiffies left till timeout) if completed.
65eb3dc6 3314 */
6bf41237 3315long __sched
8cbbe86d
AK
3316wait_for_completion_interruptible_timeout(struct completion *x,
3317 unsigned long timeout)
0fec171c 3318{
8cbbe86d 3319 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 3320}
8cbbe86d 3321EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 3322
65eb3dc6
KD
3323/**
3324 * wait_for_completion_killable: - waits for completion of a task (killable)
3325 * @x: holds the state of this particular completion
3326 *
3327 * This waits to be signaled for completion of a specific task. It can be
3328 * interrupted by a kill signal.
c6dc7f05
BF
3329 *
3330 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3331 */
009e577e
MW
3332int __sched wait_for_completion_killable(struct completion *x)
3333{
3334 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3335 if (t == -ERESTARTSYS)
3336 return t;
3337 return 0;
3338}
3339EXPORT_SYMBOL(wait_for_completion_killable);
3340
0aa12fb4
SW
3341/**
3342 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3343 * @x: holds the state of this particular completion
3344 * @timeout: timeout value in jiffies
3345 *
3346 * This waits for either a completion of a specific task to be
3347 * signaled or for a specified timeout to expire. It can be
3348 * interrupted by a kill signal. The timeout is in jiffies.
c6dc7f05
BF
3349 *
3350 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3351 * positive (at least 1, or number of jiffies left till timeout) if completed.
0aa12fb4 3352 */
6bf41237 3353long __sched
0aa12fb4
SW
3354wait_for_completion_killable_timeout(struct completion *x,
3355 unsigned long timeout)
3356{
3357 return wait_for_common(x, timeout, TASK_KILLABLE);
3358}
3359EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3360
be4de352
DC
3361/**
3362 * try_wait_for_completion - try to decrement a completion without blocking
3363 * @x: completion structure
3364 *
3365 * Returns: 0 if a decrement cannot be done without blocking
3366 * 1 if a decrement succeeded.
3367 *
3368 * If a completion is being used as a counting completion,
3369 * attempt to decrement the counter without blocking. This
3370 * enables us to avoid waiting if the resource the completion
3371 * is protecting is not available.
3372 */
3373bool try_wait_for_completion(struct completion *x)
3374{
7539a3b3 3375 unsigned long flags;
be4de352
DC
3376 int ret = 1;
3377
7539a3b3 3378 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3379 if (!x->done)
3380 ret = 0;
3381 else
3382 x->done--;
7539a3b3 3383 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3384 return ret;
3385}
3386EXPORT_SYMBOL(try_wait_for_completion);
3387
3388/**
3389 * completion_done - Test to see if a completion has any waiters
3390 * @x: completion structure
3391 *
3392 * Returns: 0 if there are waiters (wait_for_completion() in progress)
3393 * 1 if there are no waiters.
3394 *
3395 */
3396bool completion_done(struct completion *x)
3397{
7539a3b3 3398 unsigned long flags;
be4de352
DC
3399 int ret = 1;
3400
7539a3b3 3401 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3402 if (!x->done)
3403 ret = 0;
7539a3b3 3404 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3405 return ret;
3406}
3407EXPORT_SYMBOL(completion_done);
3408
8cbbe86d
AK
3409static long __sched
3410sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 3411{
0fec171c
IM
3412 unsigned long flags;
3413 wait_queue_t wait;
3414
3415 init_waitqueue_entry(&wait, current);
1da177e4 3416
8cbbe86d 3417 __set_current_state(state);
1da177e4 3418
8cbbe86d
AK
3419 spin_lock_irqsave(&q->lock, flags);
3420 __add_wait_queue(q, &wait);
3421 spin_unlock(&q->lock);
3422 timeout = schedule_timeout(timeout);
3423 spin_lock_irq(&q->lock);
3424 __remove_wait_queue(q, &wait);
3425 spin_unlock_irqrestore(&q->lock, flags);
3426
3427 return timeout;
3428}
3429
3430void __sched interruptible_sleep_on(wait_queue_head_t *q)
3431{
3432 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3433}
1da177e4
LT
3434EXPORT_SYMBOL(interruptible_sleep_on);
3435
0fec171c 3436long __sched
95cdf3b7 3437interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3438{
8cbbe86d 3439 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 3440}
1da177e4
LT
3441EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3442
0fec171c 3443void __sched sleep_on(wait_queue_head_t *q)
1da177e4 3444{
8cbbe86d 3445 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3446}
1da177e4
LT
3447EXPORT_SYMBOL(sleep_on);
3448
0fec171c 3449long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3450{
8cbbe86d 3451 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 3452}
1da177e4
LT
3453EXPORT_SYMBOL(sleep_on_timeout);
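/*
 * The sleep_on() family above checks no condition between queueing the
 * waiter and scheduling, so a wakeup that arrives before the caller
 * actually sleeps is lost. New code should state the condition
 * explicitly, e.g. (a sketch, with 'q' and 'flag' as placeholders):
 *
 *	wait_event(q, flag != 0);
 *
 * rather than testing 'flag' and then calling sleep_on(&q).
 */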
3454
b29739f9
IM
3455#ifdef CONFIG_RT_MUTEXES
3456
3457/*
3458 * rt_mutex_setprio - set the current priority of a task
3459 * @p: task
3460 * @prio: prio value (kernel-internal form)
3461 *
3462 * This function changes the 'effective' priority of a task. It does
3463 * not touch ->normal_prio like __setscheduler().
3464 *
3465 * Used by the rt_mutex code to implement priority inheritance logic.
3466 */
36c8b586 3467void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3468{
83b699ed 3469 int oldprio, on_rq, running;
70b97a7f 3470 struct rq *rq;
83ab0aa0 3471 const struct sched_class *prev_class;
b29739f9
IM
3472
3473 BUG_ON(prio < 0 || prio > MAX_PRIO);
3474
0122ec5b 3475 rq = __task_rq_lock(p);
b29739f9 3476
1c4dd99b
TG
3477 /*
3478 * Idle task boosting is a no-no in general. There is one
3479 * exception, when PREEMPT_RT and NOHZ is active:
3480 *
3481 * The idle task calls get_next_timer_interrupt() and holds
3482 * the timer wheel base->lock on the CPU and another CPU wants
3483 * to access the timer (probably to cancel it). We can safely
3484 * ignore the boosting request, as the idle CPU runs this code
3485 * with interrupts disabled and will complete the lock
3486 * protected section without being interrupted. So there is no
3487 * real need to boost.
3488 */
3489 if (unlikely(p == rq->idle)) {
3490 WARN_ON(p != rq->curr);
3491 WARN_ON(p->pi_blocked_on);
3492 goto out_unlock;
3493 }
3494
a8027073 3495 trace_sched_pi_setprio(p, prio);
d5f9f942 3496 oldprio = p->prio;
83ab0aa0 3497 prev_class = p->sched_class;
fd2f4419 3498 on_rq = p->on_rq;
051a1d1a 3499 running = task_current(rq, p);
0e1f3483 3500 if (on_rq)
69be72c1 3501 dequeue_task(rq, p, 0);
0e1f3483
HS
3502 if (running)
3503 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
3504
3505 if (rt_prio(prio))
3506 p->sched_class = &rt_sched_class;
3507 else
3508 p->sched_class = &fair_sched_class;
3509
b29739f9
IM
3510 p->prio = prio;
3511
0e1f3483
HS
3512 if (running)
3513 p->sched_class->set_curr_task(rq);
da7a735e 3514 if (on_rq)
371fd7e7 3515 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845 3516
da7a735e 3517 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3518out_unlock:
0122ec5b 3519 __task_rq_unlock(rq);
b29739f9 3520}
b29739f9 3521#endif
36c8b586 3522void set_user_nice(struct task_struct *p, long nice)
1da177e4 3523{
dd41f596 3524 int old_prio, delta, on_rq;
1da177e4 3525 unsigned long flags;
70b97a7f 3526 struct rq *rq;
1da177e4
LT
3527
3528 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3529 return;
3530 /*
3531 * We have to be careful, if called from sys_setpriority(),
3532 * the task might be in the middle of scheduling on another CPU.
3533 */
3534 rq = task_rq_lock(p, &flags);
3535 /*
3536 * The RT priorities are set via sched_setscheduler(), but we still
3537 * allow the 'normal' nice value to be set - but as expected
3538 * it won't have any effect on scheduling until the task is
dd41f596 3539 * SCHED_FIFO/SCHED_RR:
1da177e4 3540 */
e05606d3 3541 if (task_has_rt_policy(p)) {
1da177e4
LT
3542 p->static_prio = NICE_TO_PRIO(nice);
3543 goto out_unlock;
3544 }
fd2f4419 3545 on_rq = p->on_rq;
c09595f6 3546 if (on_rq)
69be72c1 3547 dequeue_task(rq, p, 0);
1da177e4 3548
1da177e4 3549 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3550 set_load_weight(p);
b29739f9
IM
3551 old_prio = p->prio;
3552 p->prio = effective_prio(p);
3553 delta = p->prio - old_prio;
1da177e4 3554
dd41f596 3555 if (on_rq) {
371fd7e7 3556 enqueue_task(rq, p, 0);
1da177e4 3557 /*
d5f9f942
AM
3558 * If the task increased its priority or is running and
3559 * lowered its priority, then reschedule its CPU:
1da177e4 3560 */
d5f9f942 3561 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
3562 resched_task(rq->curr);
3563 }
3564out_unlock:
0122ec5b 3565 task_rq_unlock(rq, p, &flags);
1da177e4 3566}
1da177e4
LT
3567EXPORT_SYMBOL(set_user_nice);
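/*
 * Example of a kernel-internal caller (an illustrative sketch; the
 * 'my_worker' task pointer is a placeholder): deprioritize a background
 * kthread by giving it a positive nice value.
 *
 *	set_user_nice(my_worker, 10);
 *
 * Values outside -20..19 are ignored, and for SCHED_FIFO/SCHED_RR tasks
 * only ->static_prio is updated, as the function above notes.
 */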
3568
e43379f1
MM
3569/*
3570 * can_nice - check if a task can reduce its nice value
3571 * @p: task
3572 * @nice: nice value
3573 */
36c8b586 3574int can_nice(const struct task_struct *p, const int nice)
e43379f1 3575{
024f4747
MM
3576 /* convert nice value [19,-20] to rlimit style value [1,40] */
3577 int nice_rlim = 20 - nice;
48f24c4d 3578
78d7d407 3579 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3580 capable(CAP_SYS_NICE));
3581}
3582
1da177e4
LT
3583#ifdef __ARCH_WANT_SYS_NICE
3584
3585/*
3586 * sys_nice - change the priority of the current process.
3587 * @increment: priority increment
3588 *
3589 * sys_setpriority is a more generic, but much slower function that
3590 * does similar things.
3591 */
5add95d4 3592SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3593{
48f24c4d 3594 long nice, retval;
1da177e4
LT
3595
3596 /*
3597 * Setpriority might change our priority at the same moment.
3598 * We don't have to worry. Conceptually one call occurs first
3599 * and we have a single winner.
3600 */
e43379f1
MM
3601 if (increment < -40)
3602 increment = -40;
1da177e4
LT
3603 if (increment > 40)
3604 increment = 40;
3605
2b8f836f 3606 nice = TASK_NICE(current) + increment;
1da177e4
LT
3607 if (nice < -20)
3608 nice = -20;
3609 if (nice > 19)
3610 nice = 19;
3611
e43379f1
MM
3612 if (increment < 0 && !can_nice(current, nice))
3613 return -EPERM;
3614
1da177e4
LT
3615 retval = security_task_setnice(current, nice);
3616 if (retval)
3617 return retval;
3618
3619 set_user_nice(current, nice);
3620 return 0;
3621}
3622
3623#endif
3624
3625/**
3626 * task_prio - return the priority value of a given task.
3627 * @p: the task in question.
3628 *
3629 * This is the priority value as seen by users in /proc.
3630 * RT tasks show up as a negative value; normal tasks show their
3631 * nice value shifted into the range 0..39 (i.e. nice + 20).
3632 */
36c8b586 3633int task_prio(const struct task_struct *p)
1da177e4
LT
3634{
3635 return p->prio - MAX_RT_PRIO;
3636}
3637
3638/**
3639 * task_nice - return the nice value of a given task.
3640 * @p: the task in question.
3641 */
36c8b586 3642int task_nice(const struct task_struct *p)
1da177e4
LT
3643{
3644 return TASK_NICE(p);
3645}
150d8bed 3646EXPORT_SYMBOL(task_nice);
1da177e4
LT
3647
3648/**
3649 * idle_cpu - is a given cpu idle currently?
3650 * @cpu: the processor in question.
3651 */
3652int idle_cpu(int cpu)
3653{
908a3283
TG
3654 struct rq *rq = cpu_rq(cpu);
3655
3656 if (rq->curr != rq->idle)
3657 return 0;
3658
3659 if (rq->nr_running)
3660 return 0;
3661
3662#ifdef CONFIG_SMP
3663 if (!llist_empty(&rq->wake_list))
3664 return 0;
3665#endif
3666
3667 return 1;
1da177e4
LT
3668}
3669
1da177e4
LT
3670/**
3671 * idle_task - return the idle task for a given cpu.
3672 * @cpu: the processor in question.
3673 */
36c8b586 3674struct task_struct *idle_task(int cpu)
1da177e4
LT
3675{
3676 return cpu_rq(cpu)->idle;
3677}
3678
3679/**
3680 * find_process_by_pid - find a process with a matching PID value.
3681 * @pid: the pid in question.
3682 */
a9957449 3683static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3684{
228ebcbe 3685 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3686}
3687
3688/* Actually do priority change: must hold rq lock. */
dd41f596
IM
3689static void
3690__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 3691{
1da177e4
LT
3692 p->policy = policy;
3693 p->rt_priority = prio;
b29739f9
IM
3694 p->normal_prio = normal_prio(p);
3695 /* we are holding p->pi_lock already */
3696 p->prio = rt_mutex_getprio(p);
ffd44db5
PZ
3697 if (rt_prio(p->prio))
3698 p->sched_class = &rt_sched_class;
3699 else
3700 p->sched_class = &fair_sched_class;
2dd73a4f 3701 set_load_weight(p);
1da177e4
LT
3702}
3703
c69e8d9c
DH
3704/*
3705 * check the target process has a UID that matches the current process's
3706 */
3707static bool check_same_owner(struct task_struct *p)
3708{
3709 const struct cred *cred = current_cred(), *pcred;
3710 bool match;
3711
3712 rcu_read_lock();
3713 pcred = __task_cred(p);
9c806aa0
EB
3714 match = (uid_eq(cred->euid, pcred->euid) ||
3715 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
3716 rcu_read_unlock();
3717 return match;
3718}
3719
961ccddd 3720static int __sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 3721 const struct sched_param *param, bool user)
1da177e4 3722{
83b699ed 3723 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 3724 unsigned long flags;
83ab0aa0 3725 const struct sched_class *prev_class;
70b97a7f 3726 struct rq *rq;
ca94c442 3727 int reset_on_fork;
1da177e4 3728
66e5393a
SR
3729 /* may grab non-irq protected spin_locks */
3730 BUG_ON(in_interrupt());
1da177e4
LT
3731recheck:
3732 /* double check policy once rq lock held */
ca94c442
LP
3733 if (policy < 0) {
3734 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3735 policy = oldpolicy = p->policy;
ca94c442
LP
3736 } else {
3737 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3738 policy &= ~SCHED_RESET_ON_FORK;
3739
3740 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3741 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3742 policy != SCHED_IDLE)
3743 return -EINVAL;
3744 }
3745
1da177e4
LT
3746 /*
3747 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3748 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3749 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
3750 */
3751 if (param->sched_priority < 0 ||
95cdf3b7 3752 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 3753 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 3754 return -EINVAL;
e05606d3 3755 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
3756 return -EINVAL;
3757
37e4ab3f
OC
3758 /*
3759 * Allow unprivileged RT tasks to decrease priority:
3760 */
961ccddd 3761 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 3762 if (rt_policy(policy)) {
a44702e8
ON
3763 unsigned long rlim_rtprio =
3764 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
3765
3766 /* can't set/change the rt policy */
3767 if (policy != p->policy && !rlim_rtprio)
3768 return -EPERM;
3769
3770 /* can't increase priority */
3771 if (param->sched_priority > p->rt_priority &&
3772 param->sched_priority > rlim_rtprio)
3773 return -EPERM;
3774 }
c02aa73b 3775
dd41f596 3776 /*
c02aa73b
DH
3777 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3778 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 3779 */
c02aa73b
DH
3780 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3781 if (!can_nice(p, TASK_NICE(p)))
3782 return -EPERM;
3783 }
5fe1d75f 3784
37e4ab3f 3785 /* can't change other user's priorities */
c69e8d9c 3786 if (!check_same_owner(p))
37e4ab3f 3787 return -EPERM;
ca94c442
LP
3788
3789 /* Normal users shall not reset the sched_reset_on_fork flag */
3790 if (p->sched_reset_on_fork && !reset_on_fork)
3791 return -EPERM;
37e4ab3f 3792 }
1da177e4 3793
725aad24 3794 if (user) {
b0ae1981 3795 retval = security_task_setscheduler(p);
725aad24
JF
3796 if (retval)
3797 return retval;
3798 }
3799
b29739f9
IM
3800 /*
3801 * make sure no PI-waiters arrive (or leave) while we are
3802 * changing the priority of the task:
0122ec5b 3803 *
25985edc 3804 * To be able to change p->policy safely, the appropriate
1da177e4
LT
3805 * runqueue lock must be held.
3806 */
0122ec5b 3807 rq = task_rq_lock(p, &flags);
dc61b1d6 3808
34f971f6
PZ
3809 /*
3810 * Changing the policy of the stop threads is a very bad idea
3811 */
3812 if (p == rq->stop) {
0122ec5b 3813 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
3814 return -EINVAL;
3815 }
3816
a51e9198
DF
3817 /*
3818 * If not changing anything there's no need to proceed further:
3819 */
3820 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
3821 param->sched_priority == p->rt_priority))) {
45afb173 3822 task_rq_unlock(rq, p, &flags);
a51e9198
DF
3823 return 0;
3824 }
3825
dc61b1d6
PZ
3826#ifdef CONFIG_RT_GROUP_SCHED
3827 if (user) {
3828 /*
3829 * Do not allow realtime tasks into groups that have no runtime
3830 * assigned.
3831 */
3832 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
3833 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3834 !task_group_is_autogroup(task_group(p))) {
0122ec5b 3835 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
3836 return -EPERM;
3837 }
3838 }
3839#endif
3840
1da177e4
LT
3841 /* recheck policy now with rq lock held */
3842 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3843 policy = oldpolicy = -1;
0122ec5b 3844 task_rq_unlock(rq, p, &flags);
1da177e4
LT
3845 goto recheck;
3846 }
fd2f4419 3847 on_rq = p->on_rq;
051a1d1a 3848 running = task_current(rq, p);
0e1f3483 3849 if (on_rq)
4ca9b72b 3850 dequeue_task(rq, p, 0);
0e1f3483
HS
3851 if (running)
3852 p->sched_class->put_prev_task(rq, p);
f6b53205 3853
ca94c442
LP
3854 p->sched_reset_on_fork = reset_on_fork;
3855
1da177e4 3856 oldprio = p->prio;
83ab0aa0 3857 prev_class = p->sched_class;
dd41f596 3858 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 3859
0e1f3483
HS
3860 if (running)
3861 p->sched_class->set_curr_task(rq);
da7a735e 3862 if (on_rq)
4ca9b72b 3863 enqueue_task(rq, p, 0);
cb469845 3864
da7a735e 3865 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 3866 task_rq_unlock(rq, p, &flags);
b29739f9 3867
95e02ca9
TG
3868 rt_mutex_adjust_pi(p);
3869
1da177e4
LT
3870 return 0;
3871}
961ccddd
RR
3872
3873/**
3874 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3875 * @p: the task in question.
3876 * @policy: new policy.
3877 * @param: structure containing the new RT priority.
3878 *
3879 * NOTE that the task may be already dead.
3880 */
3881int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 3882 const struct sched_param *param)
961ccddd
RR
3883{
3884 return __sched_setscheduler(p, policy, param, true);
3885}
1da177e4
LT
3886EXPORT_SYMBOL_GPL(sched_setscheduler);
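/*
 * Example of an in-kernel caller (an illustrative sketch; 'irq_thread'
 * is a placeholder task pointer): make a kthread run as SCHED_FIFO with
 * priority 50.
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	int err = sched_setscheduler(irq_thread, SCHED_FIFO, &param);
 *	if (err)
 *		pr_warn("could not set FIFO priority: %d\n", err);
 *
 * Callers that must bypass the permission and RLIMIT checks (e.g. during
 * early boot) use sched_setscheduler_nocheck() below instead.
 */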
3887
961ccddd
RR
3888/**
3889 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3890 * @p: the task in question.
3891 * @policy: new policy.
3892 * @param: structure containing the new RT priority.
3893 *
3894 * Just like sched_setscheduler, only don't bother checking if the
3895 * current context has permission. For example, this is needed in
3896 * stop_machine(): we create temporary high priority worker threads,
3897 * but our caller might not have that capability.
3898 */
3899int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 3900 const struct sched_param *param)
961ccddd
RR
3901{
3902 return __sched_setscheduler(p, policy, param, false);
3903}
3904
95cdf3b7
IM
3905static int
3906do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 3907{
1da177e4
LT
3908 struct sched_param lparam;
3909 struct task_struct *p;
36c8b586 3910 int retval;
1da177e4
LT
3911
3912 if (!param || pid < 0)
3913 return -EINVAL;
3914 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3915 return -EFAULT;
5fe1d75f
ON
3916
3917 rcu_read_lock();
3918 retval = -ESRCH;
1da177e4 3919 p = find_process_by_pid(pid);
5fe1d75f
ON
3920 if (p != NULL)
3921 retval = sched_setscheduler(p, policy, &lparam);
3922 rcu_read_unlock();
36c8b586 3923
1da177e4
LT
3924 return retval;
3925}
3926
3927/**
3928 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3929 * @pid: the pid in question.
3930 * @policy: new policy.
3931 * @param: structure containing the new RT priority.
3932 */
5add95d4
HC
3933SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3934 struct sched_param __user *, param)
1da177e4 3935{
c21761f1
JB
3936 /* negative values for policy are not valid */
3937 if (policy < 0)
3938 return -EINVAL;
3939
1da177e4
LT
3940 return do_sched_setscheduler(pid, policy, param);
3941}
3942
3943/**
3944 * sys_sched_setparam - set/change the RT priority of a thread
3945 * @pid: the pid in question.
3946 * @param: structure containing the new RT priority.
3947 */
5add95d4 3948SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
3949{
3950 return do_sched_setscheduler(pid, -1, param);
3951}
3952
3953/**
3954 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3955 * @pid: the pid in question.
3956 */
5add95d4 3957SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 3958{
36c8b586 3959 struct task_struct *p;
3a5c359a 3960 int retval;
1da177e4
LT
3961
3962 if (pid < 0)
3a5c359a 3963 return -EINVAL;
1da177e4
LT
3964
3965 retval = -ESRCH;
5fe85be0 3966 rcu_read_lock();
1da177e4
LT
3967 p = find_process_by_pid(pid);
3968 if (p) {
3969 retval = security_task_getscheduler(p);
3970 if (!retval)
ca94c442
LP
3971 retval = p->policy
3972 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 3973 }
5fe85be0 3974 rcu_read_unlock();
1da177e4
LT
3975 return retval;
3976}
3977
3978/**
ca94c442 3979 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
3980 * @pid: the pid in question.
3981 * @param: structure containing the RT priority.
3982 */
5add95d4 3983SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
3984{
3985 struct sched_param lp;
36c8b586 3986 struct task_struct *p;
3a5c359a 3987 int retval;
1da177e4
LT
3988
3989 if (!param || pid < 0)
3a5c359a 3990 return -EINVAL;
1da177e4 3991
5fe85be0 3992 rcu_read_lock();
1da177e4
LT
3993 p = find_process_by_pid(pid);
3994 retval = -ESRCH;
3995 if (!p)
3996 goto out_unlock;
3997
3998 retval = security_task_getscheduler(p);
3999 if (retval)
4000 goto out_unlock;
4001
4002 lp.sched_priority = p->rt_priority;
5fe85be0 4003 rcu_read_unlock();
1da177e4
LT
4004
4005 /*
4006 * This one might sleep, we cannot do it with a spinlock held ...
4007 */
4008 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4009
1da177e4
LT
4010 return retval;
4011
4012out_unlock:
5fe85be0 4013 rcu_read_unlock();
1da177e4
LT
4014 return retval;
4015}
4016
96f874e2 4017long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4018{
5a16f3d3 4019 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4020 struct task_struct *p;
4021 int retval;
1da177e4 4022
95402b38 4023 get_online_cpus();
23f5d142 4024 rcu_read_lock();
1da177e4
LT
4025
4026 p = find_process_by_pid(pid);
4027 if (!p) {
23f5d142 4028 rcu_read_unlock();
95402b38 4029 put_online_cpus();
1da177e4
LT
4030 return -ESRCH;
4031 }
4032
23f5d142 4033 /* Prevent p going away */
1da177e4 4034 get_task_struct(p);
23f5d142 4035 rcu_read_unlock();
1da177e4 4036
5a16f3d3
RR
4037 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4038 retval = -ENOMEM;
4039 goto out_put_task;
4040 }
4041 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4042 retval = -ENOMEM;
4043 goto out_free_cpus_allowed;
4044 }
1da177e4 4045 retval = -EPERM;
f1c84dae 4046 if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
1da177e4
LT
4047 goto out_unlock;
4048
b0ae1981 4049 retval = security_task_setscheduler(p);
e7834f8f
DQ
4050 if (retval)
4051 goto out_unlock;
4052
5a16f3d3
RR
4053 cpuset_cpus_allowed(p, cpus_allowed);
4054 cpumask_and(new_mask, in_mask, cpus_allowed);
49246274 4055again:
5a16f3d3 4056 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 4057
8707d8b8 4058 if (!retval) {
5a16f3d3
RR
4059 cpuset_cpus_allowed(p, cpus_allowed);
4060 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4061 /*
4062 * We must have raced with a concurrent cpuset
4063 * update. Just reset the cpus_allowed to the
4064 * cpuset's cpus_allowed
4065 */
5a16f3d3 4066 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4067 goto again;
4068 }
4069 }
1da177e4 4070out_unlock:
5a16f3d3
RR
4071 free_cpumask_var(new_mask);
4072out_free_cpus_allowed:
4073 free_cpumask_var(cpus_allowed);
4074out_put_task:
1da177e4 4075 put_task_struct(p);
95402b38 4076 put_online_cpus();
1da177e4
LT
4077 return retval;
4078}
4079
4080static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4081 struct cpumask *new_mask)
1da177e4 4082{
96f874e2
RR
4083 if (len < cpumask_size())
4084 cpumask_clear(new_mask);
4085 else if (len > cpumask_size())
4086 len = cpumask_size();
4087
1da177e4
LT
4088 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4089}
4090
4091/**
4092 * sys_sched_setaffinity - set the cpu affinity of a process
4093 * @pid: pid of the process
4094 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4095 * @user_mask_ptr: user-space pointer to the new cpu mask
4096 */
5add95d4
HC
4097SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4098 unsigned long __user *, user_mask_ptr)
1da177e4 4099{
5a16f3d3 4100 cpumask_var_t new_mask;
1da177e4
LT
4101 int retval;
4102
5a16f3d3
RR
4103 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4104 return -ENOMEM;
1da177e4 4105
5a16f3d3
RR
4106 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4107 if (retval == 0)
4108 retval = sched_setaffinity(pid, new_mask);
4109 free_cpumask_var(new_mask);
4110 return retval;
1da177e4
LT
4111}
4112
96f874e2 4113long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4114{
36c8b586 4115 struct task_struct *p;
31605683 4116 unsigned long flags;
1da177e4 4117 int retval;
1da177e4 4118
95402b38 4119 get_online_cpus();
23f5d142 4120 rcu_read_lock();
1da177e4
LT
4121
4122 retval = -ESRCH;
4123 p = find_process_by_pid(pid);
4124 if (!p)
4125 goto out_unlock;
4126
e7834f8f
DQ
4127 retval = security_task_getscheduler(p);
4128 if (retval)
4129 goto out_unlock;
4130
013fdb80 4131 raw_spin_lock_irqsave(&p->pi_lock, flags);
96f874e2 4132 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
013fdb80 4133 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4134
4135out_unlock:
23f5d142 4136 rcu_read_unlock();
95402b38 4137 put_online_cpus();
1da177e4 4138
9531b62f 4139 return retval;
1da177e4
LT
4140}
4141
4142/**
4143 * sys_sched_getaffinity - get the cpu affinity of a process
4144 * @pid: pid of the process
4145 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4146 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4147 */
5add95d4
HC
4148SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4149 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4150{
4151 int ret;
f17c8607 4152 cpumask_var_t mask;
1da177e4 4153
84fba5ec 4154 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4155 return -EINVAL;
4156 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4157 return -EINVAL;
4158
f17c8607
RR
4159 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4160 return -ENOMEM;
1da177e4 4161
f17c8607
RR
4162 ret = sched_getaffinity(pid, mask);
4163 if (ret == 0) {
8bc037fb 4164 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4165
4166 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4167 ret = -EFAULT;
4168 else
cd3d8031 4169 ret = retlen;
f17c8607
RR
4170 }
4171 free_cpumask_var(mask);
1da177e4 4172
f17c8607 4173 return ret;
1da177e4
LT
4174}
4175
4176/**
4177 * sys_sched_yield - yield the current processor to other threads.
4178 *
dd41f596
IM
4179 * This function yields the current CPU to other tasks. If there are no
4180 * other threads running on this CPU then this function will return.
1da177e4 4181 */
5add95d4 4182SYSCALL_DEFINE0(sched_yield)
1da177e4 4183{
70b97a7f 4184 struct rq *rq = this_rq_lock();
1da177e4 4185
2d72376b 4186 schedstat_inc(rq, yld_count);
4530d7ab 4187 current->sched_class->yield_task(rq);
1da177e4
LT
4188
4189 /*
4190 * Since we are going to call schedule() anyway, there's
4191 * no need to preempt or enable interrupts:
4192 */
4193 __release(rq->lock);
8a25d5de 4194 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4195 do_raw_spin_unlock(&rq->lock);
ba74c144 4196 sched_preempt_enable_no_resched();
1da177e4
LT
4197
4198 schedule();
4199
4200 return 0;
4201}
4202
d86ee480
PZ
4203static inline int should_resched(void)
4204{
4205 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4206}
4207
e7b38404 4208static void __cond_resched(void)
1da177e4 4209{
e7aaaa69 4210 add_preempt_count(PREEMPT_ACTIVE);
c259e01a 4211 __schedule();
e7aaaa69 4212 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
4213}
4214
02b67cc3 4215int __sched _cond_resched(void)
1da177e4 4216{
d86ee480 4217 if (should_resched()) {
1da177e4
LT
4218 __cond_resched();
4219 return 1;
4220 }
4221 return 0;
4222}
02b67cc3 4223EXPORT_SYMBOL(_cond_resched);
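/*
 * Example of the usual calling pattern (an illustrative sketch;
 * 'nr_items' and process_item() are placeholders): a long kernel-side
 * loop offers to reschedule on every iteration so that it cannot hog
 * the CPU on !CONFIG_PREEMPT kernels.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */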
1da177e4
LT
4224
4225/*
613afbf8 4226 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4227 * call schedule, and on return reacquire the lock.
4228 *
41a2d6cf 4229 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4230 * operations here to prevent schedule() from being called twice (once via
4231 * spin_unlock(), once by hand).
4232 */
613afbf8 4233int __cond_resched_lock(spinlock_t *lock)
1da177e4 4234{
d86ee480 4235 int resched = should_resched();
6df3cecb
JK
4236 int ret = 0;
4237
f607c668
PZ
4238 lockdep_assert_held(lock);
4239
95c354fe 4240 if (spin_needbreak(lock) || resched) {
1da177e4 4241 spin_unlock(lock);
d86ee480 4242 if (resched)
95c354fe
NP
4243 __cond_resched();
4244 else
4245 cpu_relax();
6df3cecb 4246 ret = 1;
1da177e4 4247 spin_lock(lock);
1da177e4 4248 }
6df3cecb 4249 return ret;
1da177e4 4250}
613afbf8 4251EXPORT_SYMBOL(__cond_resched_lock);
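/*
 * Example (an illustrative sketch; 'hash_lock', nr_buckets and
 * scan_bucket() are placeholders): a long scan under a spinlock drops
 * the lock whenever a reschedule or lock break is due, via the
 * cond_resched_lock() wrapper.
 *
 *	spin_lock(&hash_lock);
 *	for (i = 0; i < nr_buckets; i++) {
 *		scan_bucket(i);
 *		cond_resched_lock(&hash_lock);
 *	}
 *	spin_unlock(&hash_lock);
 *
 * The scan must of course tolerate the lock being dropped and reacquired
 * between buckets.
 */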
1da177e4 4252
613afbf8 4253int __sched __cond_resched_softirq(void)
1da177e4
LT
4254{
4255 BUG_ON(!in_softirq());
4256
d86ee480 4257 if (should_resched()) {
98d82567 4258 local_bh_enable();
1da177e4
LT
4259 __cond_resched();
4260 local_bh_disable();
4261 return 1;
4262 }
4263 return 0;
4264}
613afbf8 4265EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4266
1da177e4
LT
4267/**
4268 * yield - yield the current processor to other threads.
4269 *
8e3fabfd
PZ
4270 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4271 *
4272 * The scheduler is at all times free to pick the calling task as the most
4273 * eligible task to run; if removing the yield() call from your code breaks
4274 * it, it's already broken.
4275 *
4276 * Typical broken usage is:
4277 *
4278 * while (!event)
4279 * yield();
4280 *
4281 * where one assumes that yield() will let 'the other' process run that will
4282 * make event true. If the current task is a SCHED_FIFO task that will never
4283 * happen. Never use yield() as a progress guarantee!!
4284 *
4285 * If you want to use yield() to wait for something, use wait_event().
4286 * If you want to use yield() to be 'nice' for others, use cond_resched().
4287 * If you still want to use yield(), do not!
1da177e4
LT
4288 */
4289void __sched yield(void)
4290{
4291 set_current_state(TASK_RUNNING);
4292 sys_sched_yield();
4293}
1da177e4
LT
4294EXPORT_SYMBOL(yield);
4295
d95f4122
MG
4296/**
4297 * yield_to - yield the current processor to another thread in
4298 * your thread group, or accelerate that thread toward the
4299 * processor it's on.
16addf95
RD
4300 * @p: target task
4301 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4302 *
4303 * It's the caller's job to ensure that the target task struct
4304 * can't go away on us before we can do any checks.
4305 *
4306 * Returns true if we indeed boosted the target task.
4307 */
4308bool __sched yield_to(struct task_struct *p, bool preempt)
4309{
4310 struct task_struct *curr = current;
4311 struct rq *rq, *p_rq;
4312 unsigned long flags;
4313 bool yielded = 0;
4314
4315 local_irq_save(flags);
4316 rq = this_rq();
4317
4318again:
4319 p_rq = task_rq(p);
4320 double_rq_lock(rq, p_rq);
4321 while (task_rq(p) != p_rq) {
4322 double_rq_unlock(rq, p_rq);
4323 goto again;
4324 }
4325
4326 if (!curr->sched_class->yield_to_task)
4327 goto out;
4328
4329 if (curr->sched_class != p->sched_class)
4330 goto out;
4331
4332 if (task_running(p_rq, p) || p->state)
4333 goto out;
4334
4335 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4336 if (yielded) {
d95f4122 4337 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4338 /*
4339 * Make p's CPU reschedule; pick_next_entity takes care of
4340 * fairness.
4341 */
4342 if (preempt && rq != p_rq)
4343 resched_task(p_rq->curr);
4344 }
d95f4122
MG
4345
4346out:
4347 double_rq_unlock(rq, p_rq);
4348 local_irq_restore(flags);
4349
4350 if (yielded)
4351 schedule();
4352
4353 return yielded;
4354}
4355EXPORT_SYMBOL_GPL(yield_to);
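/*
 * Example (an illustrative sketch; 'vcpu->task' is a placeholder): a
 * virtualization host that detects a guest vCPU spinning on a lock can
 * nudge the lock holder's thread to run instead:
 *
 *	boosted = yield_to(vcpu->task, true);
 *
 * On success the target is made more likely to run next and yield_to()
 * itself schedules; on failure the caller simply continues.
 */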
4356
1da177e4 4357/*
41a2d6cf 4358 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4359 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
4360 */
4361void __sched io_schedule(void)
4362{
54d35f29 4363 struct rq *rq = raw_rq();
1da177e4 4364
0ff92245 4365 delayacct_blkio_start();
1da177e4 4366 atomic_inc(&rq->nr_iowait);
73c10101 4367 blk_flush_plug(current);
8f0dfc34 4368 current->in_iowait = 1;
1da177e4 4369 schedule();
8f0dfc34 4370 current->in_iowait = 0;
1da177e4 4371 atomic_dec(&rq->nr_iowait);
0ff92245 4372 delayacct_blkio_end();
1da177e4 4373}
1da177e4
LT
4374EXPORT_SYMBOL(io_schedule);
4375
4376long __sched io_schedule_timeout(long timeout)
4377{
54d35f29 4378 struct rq *rq = raw_rq();
1da177e4
LT
4379 long ret;
4380
0ff92245 4381 delayacct_blkio_start();
1da177e4 4382 atomic_inc(&rq->nr_iowait);
73c10101 4383 blk_flush_plug(current);
8f0dfc34 4384 current->in_iowait = 1;
1da177e4 4385 ret = schedule_timeout(timeout);
8f0dfc34 4386 current->in_iowait = 0;
1da177e4 4387 atomic_dec(&rq->nr_iowait);
0ff92245 4388 delayacct_blkio_end();
1da177e4
LT
4389 return ret;
4390}
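/*
 * Example (an illustrative sketch): a caller that has already set its
 * state and queued itself on a waitqueue sleeps for at most one second
 * while being accounted as waiting on IO:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	remaining = io_schedule_timeout(HZ);
 *
 * This behaves like schedule_timeout() but bumps rq->nr_iowait and the
 * delayacct blkio statistics for the duration of the sleep, so the time
 * shows up as iowait rather than idle.
 */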
4391
4392/**
4393 * sys_sched_get_priority_max - return maximum RT priority.
4394 * @policy: scheduling class.
4395 *
4396 * this syscall returns the maximum rt_priority that can be used
4397 * by a given scheduling class.
4398 */
5add95d4 4399SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4400{
4401 int ret = -EINVAL;
4402
4403 switch (policy) {
4404 case SCHED_FIFO:
4405 case SCHED_RR:
4406 ret = MAX_USER_RT_PRIO-1;
4407 break;
4408 case SCHED_NORMAL:
b0a9499c 4409 case SCHED_BATCH:
dd41f596 4410 case SCHED_IDLE:
1da177e4
LT
4411 ret = 0;
4412 break;
4413 }
4414 return ret;
4415}
4416
4417/**
4418 * sys_sched_get_priority_min - return minimum RT priority.
4419 * @policy: scheduling class.
4420 *
4421 * this syscall returns the minimum rt_priority that can be used
4422 * by a given scheduling class.
4423 */
5add95d4 4424SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4425{
4426 int ret = -EINVAL;
4427
4428 switch (policy) {
4429 case SCHED_FIFO:
4430 case SCHED_RR:
4431 ret = 1;
4432 break;
4433 case SCHED_NORMAL:
b0a9499c 4434 case SCHED_BATCH:
dd41f596 4435 case SCHED_IDLE:
1da177e4
LT
4436 ret = 0;
4437 }
4438 return ret;
4439}
4440
4441/**
4442 * sys_sched_rr_get_interval - return the default timeslice of a process.
4443 * @pid: pid of the process.
4444 * @interval: userspace pointer to the timeslice value.
4445 *
4446 * this syscall writes the default timeslice value of a given process
4447 * into the user-space timespec buffer. A value of '0' means infinity.
4448 */
17da2bd9 4449SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4450 struct timespec __user *, interval)
1da177e4 4451{
36c8b586 4452 struct task_struct *p;
a4ec24b4 4453 unsigned int time_slice;
dba091b9
TG
4454 unsigned long flags;
4455 struct rq *rq;
3a5c359a 4456 int retval;
1da177e4 4457 struct timespec t;
1da177e4
LT
4458
4459 if (pid < 0)
3a5c359a 4460 return -EINVAL;
1da177e4
LT
4461
4462 retval = -ESRCH;
1a551ae7 4463 rcu_read_lock();
1da177e4
LT
4464 p = find_process_by_pid(pid);
4465 if (!p)
4466 goto out_unlock;
4467
4468 retval = security_task_getscheduler(p);
4469 if (retval)
4470 goto out_unlock;
4471
dba091b9
TG
4472 rq = task_rq_lock(p, &flags);
4473 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4474 task_rq_unlock(rq, p, &flags);
a4ec24b4 4475
1a551ae7 4476 rcu_read_unlock();
a4ec24b4 4477 jiffies_to_timespec(time_slice, &t);
1da177e4 4478 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4479 return retval;
3a5c359a 4480
1da177e4 4481out_unlock:
1a551ae7 4482 rcu_read_unlock();
1da177e4
LT
4483 return retval;
4484}
4485
7c731e0a 4486static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4487
82a1fcb9 4488void sched_show_task(struct task_struct *p)
1da177e4 4489{
1da177e4 4490 unsigned long free = 0;
36c8b586 4491 unsigned state;
1da177e4 4492
1da177e4 4493 state = p->state ? __ffs(p->state) + 1 : 0;
28d0686c 4494 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4495 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4496#if BITS_PER_LONG == 32
1da177e4 4497 if (state == TASK_RUNNING)
3df0fc5b 4498 printk(KERN_CONT " running ");
1da177e4 4499 else
3df0fc5b 4500 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4501#else
4502 if (state == TASK_RUNNING)
3df0fc5b 4503 printk(KERN_CONT " running task ");
1da177e4 4504 else
3df0fc5b 4505 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4506#endif
4507#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4508 free = stack_not_used(p);
1da177e4 4509#endif
3df0fc5b 4510 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
07cde260 4511 task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
aa47b7e0 4512 (unsigned long)task_thread_info(p)->flags);
1da177e4 4513
5fb5e6de 4514 show_stack(p, NULL);
1da177e4
LT
4515}
4516
e59e2ae2 4517void show_state_filter(unsigned long state_filter)
1da177e4 4518{
36c8b586 4519 struct task_struct *g, *p;
1da177e4 4520
4bd77321 4521#if BITS_PER_LONG == 32
3df0fc5b
PZ
4522 printk(KERN_INFO
4523 " task PC stack pid father\n");
1da177e4 4524#else
3df0fc5b
PZ
4525 printk(KERN_INFO
4526 " task PC stack pid father\n");
1da177e4 4527#endif
510f5acc 4528 rcu_read_lock();
1da177e4
LT
4529 do_each_thread(g, p) {
4530 /*
4531 * reset the NMI-timeout, listing all tasks on a slow
25985edc 4532 * console might take a lot of time:
1da177e4
LT
4533 */
4534 touch_nmi_watchdog();
39bc89fd 4535 if (!state_filter || (p->state & state_filter))
82a1fcb9 4536 sched_show_task(p);
1da177e4
LT
4537 } while_each_thread(g, p);
4538
04c9167f
JF
4539 touch_all_softlockup_watchdogs();
4540
dd41f596
IM
4541#ifdef CONFIG_SCHED_DEBUG
4542 sysrq_sched_debug_show();
4543#endif
510f5acc 4544 rcu_read_unlock();
e59e2ae2
IM
4545 /*
4546 * Only show locks if all tasks are dumped:
4547 */
93335a21 4548 if (!state_filter)
e59e2ae2 4549 debug_show_all_locks();
1da177e4
LT
4550}
4551
1df21055
IM
4552void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4553{
dd41f596 4554 idle->sched_class = &idle_sched_class;
1df21055
IM
4555}
4556
f340c0d1
IM
4557/**
4558 * init_idle - set up an idle thread for a given CPU
4559 * @idle: task in question
4560 * @cpu: cpu the idle task belongs to
4561 *
4562 * NOTE: this function does not set the idle thread's NEED_RESCHED
4563 * flag, to make booting more robust.
4564 */
5c1e1767 4565void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 4566{
70b97a7f 4567 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4568 unsigned long flags;
4569
05fa785c 4570 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 4571
dd41f596 4572 __sched_fork(idle);
06b83b5f 4573 idle->state = TASK_RUNNING;
dd41f596
IM
4574 idle->se.exec_start = sched_clock();
4575
1e1b6c51 4576 do_set_cpus_allowed(idle, cpumask_of(cpu));
6506cf6c
PZ
4577 /*
4578 * We're having a chicken and egg problem, even though we are
4579 * holding rq->lock, the cpu isn't yet set to this cpu so the
4580 * lockdep check in task_group() will fail.
4581 *
4582 * Similar case to sched_fork(). / Alternatively we could
4583 * use task_rq_lock() here and obtain the other rq->lock.
4584 *
4585 * Silence PROVE_RCU
4586 */
4587 rcu_read_lock();
dd41f596 4588 __set_task_cpu(idle, cpu);
6506cf6c 4589 rcu_read_unlock();
1da177e4 4590
1da177e4 4591 rq->curr = rq->idle = idle;
3ca7a440
PZ
4592#if defined(CONFIG_SMP)
4593 idle->on_cpu = 1;
4866cde0 4594#endif
05fa785c 4595 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
4596
4597 /* Set the preempt count _outside_ the spinlocks! */
a1261f54 4598 task_thread_info(idle)->preempt_count = 0;
55cd5340 4599
dd41f596
IM
4600 /*
4601 * The idle tasks have their own, simple scheduling class:
4602 */
4603 idle->sched_class = &idle_sched_class;
868baf07 4604 ftrace_graph_init_idle_task(idle, cpu);
f1c6f1a7
CE
4605#if defined(CONFIG_SMP)
4606 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4607#endif
19978ca6
IM
4608}
4609
1da177e4 4610#ifdef CONFIG_SMP
1e1b6c51
KM
4611void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4612{
4613 if (p->sched_class && p->sched_class->set_cpus_allowed)
4614 p->sched_class->set_cpus_allowed(p, new_mask);
4939602a
PZ
4615
4616 cpumask_copy(&p->cpus_allowed, new_mask);
29baa747 4617 p->nr_cpus_allowed = cpumask_weight(new_mask);
1e1b6c51
KM
4618}
4619
1da177e4
LT
4620/*
4621 * This is how migration works:
4622 *
969c7921
TH
4623 * 1) we invoke migration_cpu_stop() on the target CPU using
4624 * stop_one_cpu().
4625 * 2) stopper starts to run (implicitly forcing the migrated thread
4626 * off the CPU)
4627 * 3) it checks whether the migrated task is still in the wrong runqueue.
4628 * 4) if it's in the wrong runqueue then the migration thread removes
1da177e4 4629 * it and puts it into the right queue.
969c7921
TH
4630 * 5) stopper completes and stop_one_cpu() returns and the migration
4631 * is done.
1da177e4
LT
4632 */
4633
4634/*
4635 * Change a given task's CPU affinity. Migrate the thread to a
4636 * proper CPU and schedule it away if the CPU it's executing on
4637 * is removed from the allowed bitmask.
4638 *
4639 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 4640 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
4641 * call is not atomic; no spinlocks may be held.
4642 */
96f874e2 4643int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4
LT
4644{
4645 unsigned long flags;
70b97a7f 4646 struct rq *rq;
969c7921 4647 unsigned int dest_cpu;
48f24c4d 4648 int ret = 0;
1da177e4
LT
4649
4650 rq = task_rq_lock(p, &flags);
e2912009 4651
db44fc01
YZ
4652 if (cpumask_equal(&p->cpus_allowed, new_mask))
4653 goto out;
4654
6ad4c188 4655 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1da177e4
LT
4656 ret = -EINVAL;
4657 goto out;
4658 }
4659
db44fc01 4660 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
9985b0ba
DR
4661 ret = -EINVAL;
4662 goto out;
4663 }
4664
1e1b6c51 4665 do_set_cpus_allowed(p, new_mask);
73fe6aae 4666
1da177e4 4667 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 4668 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
4669 goto out;
4670
969c7921 4671 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
bd8e7dde 4672 if (p->on_rq) {
969c7921 4673 struct migration_arg arg = { p, dest_cpu };
1da177e4 4674 /* Need help from migration thread: drop lock and wait. */
0122ec5b 4675 task_rq_unlock(rq, p, &flags);
969c7921 4676 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
4677 tlb_migrate_finish(p->mm);
4678 return 0;
4679 }
4680out:
0122ec5b 4681 task_rq_unlock(rq, p, &flags);
48f24c4d 4682
1da177e4
LT
4683 return ret;
4684}
cd8ba7cd 4685EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
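/*
 * Example (an illustrative sketch; 'worker' and 'target_cpu' are
 * placeholders): restrict an already running kthread to a single CPU.
 *
 *	err = set_cpus_allowed_ptr(worker, cpumask_of(target_cpu));
 *	if (err)
 *		pr_warn("CPU %d not available: %d\n", target_cpu, err);
 *
 * If the task is currently running on a CPU outside the new mask, the
 * stopper thread migrates it away before the call returns.
 */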
1da177e4
LT
4686
4687/*
41a2d6cf 4688 * Move (not current) task off this cpu, onto dest cpu. We're doing
1da177e4
LT
4689 * this because either it can't run here any more (set_cpus_allowed()
4690 * away from this CPU, or CPU going down), or because we're
4691 * attempting to rebalance this task on exec (sched_exec).
4692 *
4693 * So we race with normal scheduler movements, but that's OK, as long
4694 * as the task is no longer on this CPU.
efc30814
KK
4695 *
4696 * Returns non-zero if task was successfully migrated.
1da177e4 4697 */
efc30814 4698static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 4699{
70b97a7f 4700 struct rq *rq_dest, *rq_src;
e2912009 4701 int ret = 0;
1da177e4 4702
e761b772 4703 if (unlikely(!cpu_active(dest_cpu)))
efc30814 4704 return ret;
1da177e4
LT
4705
4706 rq_src = cpu_rq(src_cpu);
4707 rq_dest = cpu_rq(dest_cpu);
4708
0122ec5b 4709 raw_spin_lock(&p->pi_lock);
1da177e4
LT
4710 double_rq_lock(rq_src, rq_dest);
4711 /* Already moved. */
4712 if (task_cpu(p) != src_cpu)
b1e38734 4713 goto done;
1da177e4 4714 /* Affinity changed (again). */
fa17b507 4715 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
b1e38734 4716 goto fail;
1da177e4 4717
e2912009
PZ
4718 /*
4719 * If we're not on a rq, the next wake-up will ensure we're
4720 * placed properly.
4721 */
fd2f4419 4722 if (p->on_rq) {
4ca9b72b 4723 dequeue_task(rq_src, p, 0);
e2912009 4724 set_task_cpu(p, dest_cpu);
4ca9b72b 4725 enqueue_task(rq_dest, p, 0);
15afe09b 4726 check_preempt_curr(rq_dest, p, 0);
1da177e4 4727 }
b1e38734 4728done:
efc30814 4729 ret = 1;
b1e38734 4730fail:
1da177e4 4731 double_rq_unlock(rq_src, rq_dest);
0122ec5b 4732 raw_spin_unlock(&p->pi_lock);
efc30814 4733 return ret;
1da177e4
LT
4734}
4735
4736/*
969c7921
TH
4737 * migration_cpu_stop - this will be executed by a highprio stopper thread
4738 * and performs thread migration by bumping thread off CPU then
4739 * 'pushing' onto another runqueue.
1da177e4 4740 */
969c7921 4741static int migration_cpu_stop(void *data)
1da177e4 4742{
969c7921 4743 struct migration_arg *arg = data;
f7b4cddc 4744
969c7921
TH
4745 /*
4746 * The original target cpu might have gone down and we might
4747 * be on another cpu but it doesn't matter.
4748 */
f7b4cddc 4749 local_irq_disable();
969c7921 4750 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
f7b4cddc 4751 local_irq_enable();
1da177e4 4752 return 0;
f7b4cddc
ON
4753}
4754
1da177e4 4755#ifdef CONFIG_HOTPLUG_CPU
48c5ccae 4756
054b9108 4757/*
48c5ccae
PZ
4758 * Ensures that the idle task is using init_mm right before its cpu goes
4759 * offline.
054b9108 4760 */
48c5ccae 4761void idle_task_exit(void)
1da177e4 4762{
48c5ccae 4763 struct mm_struct *mm = current->active_mm;
e76bd8d9 4764
48c5ccae 4765 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 4766
48c5ccae
PZ
4767 if (mm != &init_mm)
4768 switch_mm(mm, &init_mm, current);
4769 mmdrop(mm);
1da177e4
LT
4770}
4771
4772/*
5d180232
PZ
4773 * Since this CPU is going 'away' for a while, fold any nr_active delta
4774 * we might have. Assumes we're called after migrate_tasks() so that the
4775 * nr_active count is stable.
4776 *
4777 * Also see the comment "Global load-average calculations".
1da177e4 4778 */
5d180232 4779static void calc_load_migrate(struct rq *rq)
1da177e4 4780{
5d180232
PZ
4781 long delta = calc_load_fold_active(rq);
4782 if (delta)
4783 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
4784}
4785
48f24c4d 4786/*
48c5ccae
PZ
4787 * Migrate all tasks from the rq, sleeping tasks will be migrated by
4788 * try_to_wake_up()->select_task_rq().
4789 *
4790 * Called with rq->lock held even though we're in stop_machine() and
4791 * there's no concurrency possible, we hold the required locks anyway
4792 * because of lock validation efforts.
1da177e4 4793 */
48c5ccae 4794static void migrate_tasks(unsigned int dead_cpu)
1da177e4 4795{
70b97a7f 4796 struct rq *rq = cpu_rq(dead_cpu);
48c5ccae
PZ
4797 struct task_struct *next, *stop = rq->stop;
4798 int dest_cpu;
1da177e4
LT
4799
4800 /*
48c5ccae
PZ
4801 * Fudge the rq selection such that the below task selection loop
4802 * doesn't get stuck on the currently eligible stop task.
4803 *
4804 * We're currently inside stop_machine() and the rq is either stuck
4805 * in the stop_machine_cpu_stop() loop, or we're executing this code,
4806 * either way we should never end up calling schedule() until we're
4807 * done here.
1da177e4 4808 */
48c5ccae 4809 rq->stop = NULL;
48f24c4d 4810
dd41f596 4811 for ( ; ; ) {
48c5ccae
PZ
4812 /*
4813 * There's this thread running, bail when that's the only
4814 * remaining thread.
4815 */
4816 if (rq->nr_running == 1)
dd41f596 4817 break;
48c5ccae 4818
b67802ea 4819 next = pick_next_task(rq);
48c5ccae 4820 BUG_ON(!next);
79c53799 4821 next->sched_class->put_prev_task(rq, next);
e692ab53 4822
48c5ccae
PZ
4823 /* Find suitable destination for @next, with force if needed. */
4824 dest_cpu = select_fallback_rq(dead_cpu, next);
4825 raw_spin_unlock(&rq->lock);
4826
4827 __migrate_task(next, dead_cpu, dest_cpu);
4828
4829 raw_spin_lock(&rq->lock);
1da177e4 4830 }
dce48a84 4831
48c5ccae 4832 rq->stop = stop;
dce48a84 4833}
48c5ccae 4834
1da177e4
LT
4835#endif /* CONFIG_HOTPLUG_CPU */
4836
e692ab53
NP
4837#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4838
4839static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
4840 {
4841 .procname = "sched_domain",
c57baf1e 4842 .mode = 0555,
e0361851 4843 },
56992309 4844 {}
e692ab53
NP
4845};
4846
4847static struct ctl_table sd_ctl_root[] = {
e0361851
AD
4848 {
4849 .procname = "kernel",
c57baf1e 4850 .mode = 0555,
e0361851
AD
4851 .child = sd_ctl_dir,
4852 },
56992309 4853 {}
e692ab53
NP
4854};
4855
4856static struct ctl_table *sd_alloc_ctl_entry(int n)
4857{
4858 struct ctl_table *entry =
5cf9f062 4859 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 4860
e692ab53
NP
4861 return entry;
4862}
4863
6382bc90
MM
4864static void sd_free_ctl_entry(struct ctl_table **tablep)
4865{
cd790076 4866 struct ctl_table *entry;
6382bc90 4867
cd790076
MM
4868 /*
4869 * In the intermediate directories, both the child directory and
4870 * procname are dynamically allocated and could fail but the mode
41a2d6cf 4871 * will always be set. In the lowest directory the names are
cd790076
MM
4872 * static strings and all have proc handlers.
4873 */
4874 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
4875 if (entry->child)
4876 sd_free_ctl_entry(&entry->child);
cd790076
MM
4877 if (entry->proc_handler == NULL)
4878 kfree(entry->procname);
4879 }
6382bc90
MM
4880
4881 kfree(*tablep);
4882 *tablep = NULL;
4883}
4884
201c373e
NK
4885static int min_load_idx = 0;
4886static int max_load_idx = CPU_LOAD_IDX_MAX;
4887
e692ab53 4888static void
e0361851 4889set_table_entry(struct ctl_table *entry,
e692ab53 4890 const char *procname, void *data, int maxlen,
201c373e
NK
4891 umode_t mode, proc_handler *proc_handler,
4892 bool load_idx)
e692ab53 4893{
e692ab53
NP
4894 entry->procname = procname;
4895 entry->data = data;
4896 entry->maxlen = maxlen;
4897 entry->mode = mode;
4898 entry->proc_handler = proc_handler;
201c373e
NK
4899
4900 if (load_idx) {
4901 entry->extra1 = &min_load_idx;
4902 entry->extra2 = &max_load_idx;
4903 }
e692ab53
NP
4904}
4905
4906static struct ctl_table *
4907sd_alloc_ctl_domain_table(struct sched_domain *sd)
4908{
a5d8c348 4909 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 4910
ad1cdc1d
MM
4911 if (table == NULL)
4912 return NULL;
4913
e0361851 4914 set_table_entry(&table[0], "min_interval", &sd->min_interval,
201c373e 4915 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 4916 set_table_entry(&table[1], "max_interval", &sd->max_interval,
201c373e 4917 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 4918 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
201c373e 4919 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 4920 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
201c373e 4921 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 4922 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
201c373e 4923 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 4924 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
201c373e 4925 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 4926 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
201c373e 4927 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 4928 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
201c373e 4929 sizeof(int), 0644, proc_dointvec_minmax, false);
e0361851 4930 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
201c373e 4931 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 4932 set_table_entry(&table[9], "cache_nice_tries",
e692ab53 4933 &sd->cache_nice_tries,
201c373e 4934 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 4935 set_table_entry(&table[10], "flags", &sd->flags,
201c373e 4936 sizeof(int), 0644, proc_dointvec_minmax, false);
a5d8c348 4937 set_table_entry(&table[11], "name", sd->name,
201c373e 4938 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
a5d8c348 4939 /* &table[12] is terminator */
e692ab53
NP
4940
4941 return table;
4942}
4943
9a4e7159 4944static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
4945{
4946 struct ctl_table *entry, *table;
4947 struct sched_domain *sd;
4948 int domain_num = 0, i;
4949 char buf[32];
4950
4951 for_each_domain(cpu, sd)
4952 domain_num++;
4953 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
4954 if (table == NULL)
4955 return NULL;
e692ab53
NP
4956
4957 i = 0;
4958 for_each_domain(cpu, sd) {
4959 snprintf(buf, 32, "domain%d", i);
e692ab53 4960 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 4961 entry->mode = 0555;
e692ab53
NP
4962 entry->child = sd_alloc_ctl_domain_table(sd);
4963 entry++;
4964 i++;
4965 }
4966 return table;
4967}
4968
4969static struct ctl_table_header *sd_sysctl_header;
6382bc90 4970static void register_sched_domain_sysctl(void)
e692ab53 4971{
6ad4c188 4972 int i, cpu_num = num_possible_cpus();
e692ab53
NP
4973 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4974 char buf[32];
4975
7378547f
MM
4976 WARN_ON(sd_ctl_dir[0].child);
4977 sd_ctl_dir[0].child = entry;
4978
ad1cdc1d
MM
4979 if (entry == NULL)
4980 return;
4981
6ad4c188 4982 for_each_possible_cpu(i) {
e692ab53 4983 snprintf(buf, 32, "cpu%d", i);
e692ab53 4984 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 4985 entry->mode = 0555;
e692ab53 4986 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 4987 entry++;
e692ab53 4988 }
7378547f
MM
4989
4990 WARN_ON(sd_sysctl_header);
e692ab53
NP
4991 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4992}
6382bc90 4993
7378547f 4994/* may be called multiple times per register */
6382bc90
MM
4995static void unregister_sched_domain_sysctl(void)
4996{
7378547f
MM
4997 if (sd_sysctl_header)
4998 unregister_sysctl_table(sd_sysctl_header);
6382bc90 4999 sd_sysctl_header = NULL;
7378547f
MM
5000 if (sd_ctl_dir[0].child)
5001 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5002}
e692ab53 5003#else
6382bc90
MM
5004static void register_sched_domain_sysctl(void)
5005{
5006}
5007static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5008{
5009}
5010#endif
5011
1f11eb6a
GH
5012static void set_rq_online(struct rq *rq)
5013{
5014 if (!rq->online) {
5015 const struct sched_class *class;
5016
c6c4927b 5017 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5018 rq->online = 1;
5019
5020 for_each_class(class) {
5021 if (class->rq_online)
5022 class->rq_online(rq);
5023 }
5024 }
5025}
5026
5027static void set_rq_offline(struct rq *rq)
5028{
5029 if (rq->online) {
5030 const struct sched_class *class;
5031
5032 for_each_class(class) {
5033 if (class->rq_offline)
5034 class->rq_offline(rq);
5035 }
5036
c6c4927b 5037 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5038 rq->online = 0;
5039 }
5040}
5041
1da177e4
LT
5042/*
5043 * migration_call - callback that gets triggered when a CPU is added.
5044 * Here we can start up the necessary migration thread for the new CPU.
5045 */
48f24c4d
IM
5046static int __cpuinit
5047migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5048{
48f24c4d 5049 int cpu = (long)hcpu;
1da177e4 5050 unsigned long flags;
969c7921 5051 struct rq *rq = cpu_rq(cpu);
1da177e4 5052
48c5ccae 5053 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5054
1da177e4 5055 case CPU_UP_PREPARE:
a468d389 5056 rq->calc_load_update = calc_load_update;
1da177e4 5057 break;
48f24c4d 5058
1da177e4 5059 case CPU_ONLINE:
1f94ef59 5060 /* Update our root-domain */
05fa785c 5061 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5062 if (rq->rd) {
c6c4927b 5063 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5064
5065 set_rq_online(rq);
1f94ef59 5066 }
05fa785c 5067 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5068 break;
48f24c4d 5069
1da177e4 5070#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5071 case CPU_DYING:
317f3941 5072 sched_ttwu_pending();
57d885fe 5073 /* Update our root-domain */
05fa785c 5074 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5075 if (rq->rd) {
c6c4927b 5076 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5077 set_rq_offline(rq);
57d885fe 5078 }
48c5ccae
PZ
5079 migrate_tasks(cpu);
5080 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5081 raw_spin_unlock_irqrestore(&rq->lock, flags);
5d180232 5082 break;
48c5ccae 5083
5d180232 5084 case CPU_DEAD:
f319da0c 5085 calc_load_migrate(rq);
57d885fe 5086 break;
1da177e4
LT
5087#endif
5088 }
49c022e6
PZ
5089
5090 update_max_interval();
5091
1da177e4
LT
5092 return NOTIFY_OK;
5093}
5094
f38b0820
PM
5095/*
5096 * Register at high priority so that task migration (migrate_all_tasks)
5097 * happens before everything else. This has to be lower priority than
cdd6c482 5098 * the notifier in the perf_event subsystem, though.
1da177e4 5099 */
26c2143b 5100static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 5101 .notifier_call = migration_call,
50a323b7 5102 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5103};
5104
3a101d05
TH
5105static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5106 unsigned long action, void *hcpu)
5107{
5108 switch (action & ~CPU_TASKS_FROZEN) {
5fbd036b 5109 case CPU_STARTING:
3a101d05
TH
5110 case CPU_DOWN_FAILED:
5111 set_cpu_active((long)hcpu, true);
5112 return NOTIFY_OK;
5113 default:
5114 return NOTIFY_DONE;
5115 }
5116}
5117
5118static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5119 unsigned long action, void *hcpu)
5120{
5121 switch (action & ~CPU_TASKS_FROZEN) {
5122 case CPU_DOWN_PREPARE:
5123 set_cpu_active((long)hcpu, false);
5124 return NOTIFY_OK;
5125 default:
5126 return NOTIFY_DONE;
5127 }
5128}
5129
7babe8db 5130static int __init migration_init(void)
1da177e4
LT
5131{
5132 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5133 int err;
48f24c4d 5134
3a101d05 5135 /* Initialize migration for the boot CPU */
07dccf33
AM
5136 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5137 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5138 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5139 register_cpu_notifier(&migration_notifier);
7babe8db 5140
3a101d05
TH
5141 /* Register cpu active notifiers */
5142 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5143 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5144
a004cd42 5145 return 0;
1da177e4 5146}
7babe8db 5147early_initcall(migration_init);
1da177e4
LT
5148#endif
5149
5150#ifdef CONFIG_SMP
476f3534 5151
4cb98839
PZ
5152static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5153
3e9830dc 5154#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5155
d039ac60 5156static __read_mostly int sched_debug_enabled;
f6630114 5157
d039ac60 5158static int __init sched_debug_setup(char *str)
f6630114 5159{
d039ac60 5160 sched_debug_enabled = 1;
f6630114
MT
5161
5162 return 0;
5163}
d039ac60
PZ
5164early_param("sched_debug", sched_debug_setup);
5165
5166static inline bool sched_debug(void)
5167{
5168 return sched_debug_enabled;
5169}
f6630114 5170
7c16ec58 5171static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5172 struct cpumask *groupmask)
1da177e4 5173{
4dcf6aff 5174 struct sched_group *group = sd->groups;
434d53b0 5175 char str[256];
1da177e4 5176
968ea6d8 5177 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 5178 cpumask_clear(groupmask);
4dcf6aff
IM
5179
5180 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5181
5182 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5183 printk("does not load-balance\n");
4dcf6aff 5184 if (sd->parent)
3df0fc5b
PZ
5185 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5186 " has parent");
4dcf6aff 5187 return -1;
41c7ce9a
NP
5188 }
5189
3df0fc5b 5190 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 5191
758b2cdc 5192 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5193 printk(KERN_ERR "ERROR: domain->span does not contain "
5194 "CPU%d\n", cpu);
4dcf6aff 5195 }
758b2cdc 5196 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5197 printk(KERN_ERR "ERROR: domain->groups does not contain"
5198 " CPU%d\n", cpu);
4dcf6aff 5199 }
1da177e4 5200
4dcf6aff 5201 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5202 do {
4dcf6aff 5203 if (!group) {
3df0fc5b
PZ
5204 printk("\n");
5205 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5206 break;
5207 }
5208
c3decf0d
PZ
5209 /*
5210 * Even though we initialize ->power to something semi-sane,
5211 * we leave power_orig unset. This allows us to detect if
5212 * domain iteration is still funny without causing /0 traps.
5213 */
5214 if (!group->sgp->power_orig) {
3df0fc5b
PZ
5215 printk(KERN_CONT "\n");
5216 printk(KERN_ERR "ERROR: domain->cpu_power not "
5217 "set\n");
4dcf6aff
IM
5218 break;
5219 }
1da177e4 5220
758b2cdc 5221 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5222 printk(KERN_CONT "\n");
5223 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5224 break;
5225 }
1da177e4 5226
cb83b629
PZ
5227 if (!(sd->flags & SD_OVERLAP) &&
5228 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5229 printk(KERN_CONT "\n");
5230 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5231 break;
5232 }
1da177e4 5233
758b2cdc 5234 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5235
968ea6d8 5236 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 5237
3df0fc5b 5238 printk(KERN_CONT " %s", str);
9c3f75cb 5239 if (group->sgp->power != SCHED_POWER_SCALE) {
3df0fc5b 5240 printk(KERN_CONT " (cpu_power = %d)",
9c3f75cb 5241 group->sgp->power);
381512cf 5242 }
1da177e4 5243
4dcf6aff
IM
5244 group = group->next;
5245 } while (group != sd->groups);
3df0fc5b 5246 printk(KERN_CONT "\n");
1da177e4 5247
758b2cdc 5248 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5249 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5250
758b2cdc
RR
5251 if (sd->parent &&
5252 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5253 printk(KERN_ERR "ERROR: parent span is not a superset "
5254 "of domain->span\n");
4dcf6aff
IM
5255 return 0;
5256}
1da177e4 5257
4dcf6aff
IM
5258static void sched_domain_debug(struct sched_domain *sd, int cpu)
5259{
5260 int level = 0;
1da177e4 5261
d039ac60 5262 if (!sched_debug_enabled)
f6630114
MT
5263 return;
5264
4dcf6aff
IM
5265 if (!sd) {
5266 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5267 return;
5268 }
1da177e4 5269
4dcf6aff
IM
5270 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5271
5272 for (;;) {
4cb98839 5273 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5274 break;
1da177e4
LT
5275 level++;
5276 sd = sd->parent;
33859f7f 5277 if (!sd)
4dcf6aff
IM
5278 break;
5279 }
1da177e4 5280}
6d6bc0ad 5281#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5282# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5283static inline bool sched_debug(void)
5284{
5285 return false;
5286}
6d6bc0ad 5287#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5288
1a20ff27 5289static int sd_degenerate(struct sched_domain *sd)
245af2c7 5290{
758b2cdc 5291 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5292 return 1;
5293
5294 /* Following flags need at least 2 groups */
5295 if (sd->flags & (SD_LOAD_BALANCE |
5296 SD_BALANCE_NEWIDLE |
5297 SD_BALANCE_FORK |
89c4710e
SS
5298 SD_BALANCE_EXEC |
5299 SD_SHARE_CPUPOWER |
5300 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
5301 if (sd->groups != sd->groups->next)
5302 return 0;
5303 }
5304
5305 /* Following flags don't use groups */
c88d5910 5306 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5307 return 0;
5308
5309 return 1;
5310}
5311
48f24c4d
IM
5312static int
5313sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5314{
5315 unsigned long cflags = sd->flags, pflags = parent->flags;
5316
5317 if (sd_degenerate(parent))
5318 return 1;
5319
758b2cdc 5320 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5321 return 0;
5322
245af2c7
SS
5323 /* Flags needing groups don't count if only 1 group in parent */
5324 if (parent->groups == parent->groups->next) {
5325 pflags &= ~(SD_LOAD_BALANCE |
5326 SD_BALANCE_NEWIDLE |
5327 SD_BALANCE_FORK |
89c4710e
SS
5328 SD_BALANCE_EXEC |
5329 SD_SHARE_CPUPOWER |
5330 SD_SHARE_PKG_RESOURCES);
5436499e
KC
5331 if (nr_node_ids == 1)
5332 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5333 }
5334 if (~cflags & pflags)
5335 return 0;
5336
5337 return 1;
5338}
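
The '~cflags & pflags' test above is the heart of sd_parent_degenerate(): it is non-zero exactly when the parent carries a flag the child lacks, in which case the parent still adds something and must be kept. A minimal user-space sketch of that subset check, with made-up flag values standing in for the real SD_* bits:

#include <stdio.h>

/* Illustrative stand-ins only; the real SD_* flags live in the kernel headers. */
#define F_LOAD_BALANCE   0x01
#define F_BALANCE_FORK   0x02
#define F_WAKE_AFFINE    0x04
#define F_SERIALIZE      0x08

/* The parent adds nothing iff it offers no flag the child lacks. */
static int parent_adds_nothing(unsigned long cflags, unsigned long pflags)
{
    return (~cflags & pflags) == 0;
}

int main(void)
{
    unsigned long child  = F_LOAD_BALANCE | F_BALANCE_FORK | F_WAKE_AFFINE;
    unsigned long parent = F_LOAD_BALANCE | F_WAKE_AFFINE;

    printf("parent redundant: %d\n", parent_adds_nothing(child, parent)); /* 1 */

    parent |= F_SERIALIZE; /* now the parent has a flag the child lacks */
    printf("parent redundant: %d\n", parent_adds_nothing(child, parent)); /* 0 */
    return 0;
}
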
5339
dce840a0 5340static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5341{
dce840a0 5342 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5343
68e74568 5344 cpupri_cleanup(&rd->cpupri);
c6c4927b
RR
5345 free_cpumask_var(rd->rto_mask);
5346 free_cpumask_var(rd->online);
5347 free_cpumask_var(rd->span);
5348 kfree(rd);
5349}
5350
57d885fe
GH
5351static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5352{
a0490fa3 5353 struct root_domain *old_rd = NULL;
57d885fe 5354 unsigned long flags;
57d885fe 5355
05fa785c 5356 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5357
5358 if (rq->rd) {
a0490fa3 5359 old_rd = rq->rd;
57d885fe 5360
c6c4927b 5361 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5362 set_rq_offline(rq);
57d885fe 5363
c6c4927b 5364 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5365
a0490fa3
IM
5366 /*
5367 * If we don't want to free the old_rd yet then
5368 * set old_rd to NULL to skip the freeing later
5369 * in this function:
5370 */
5371 if (!atomic_dec_and_test(&old_rd->refcount))
5372 old_rd = NULL;
57d885fe
GH
5373 }
5374
5375 atomic_inc(&rd->refcount);
5376 rq->rd = rd;
5377
c6c4927b 5378 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5379 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5380 set_rq_online(rq);
57d885fe 5381
05fa785c 5382 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5383
5384 if (old_rd)
dce840a0 5385 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5386}
5387
68c38fc3 5388static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5389{
5390 memset(rd, 0, sizeof(*rd));
5391
68c38fc3 5392 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5393 goto out;
68c38fc3 5394 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5395 goto free_span;
68c38fc3 5396 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 5397 goto free_online;
6e0534f2 5398
68c38fc3 5399 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5400 goto free_rto_mask;
c6c4927b 5401 return 0;
6e0534f2 5402
68e74568
RR
5403free_rto_mask:
5404 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
5405free_online:
5406 free_cpumask_var(rd->online);
5407free_span:
5408 free_cpumask_var(rd->span);
0c910d28 5409out:
c6c4927b 5410 return -ENOMEM;
57d885fe
GH
5411}
5412
029632fb
PZ
5413/*
5414 * By default the system creates a single root-domain with all cpus as
5415 * members (mimicking the global state we have today).
5416 */
5417struct root_domain def_root_domain;
5418
57d885fe
GH
5419static void init_defrootdomain(void)
5420{
68c38fc3 5421 init_rootdomain(&def_root_domain);
c6c4927b 5422
57d885fe
GH
5423 atomic_set(&def_root_domain.refcount, 1);
5424}
5425
dc938520 5426static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5427{
5428 struct root_domain *rd;
5429
5430 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5431 if (!rd)
5432 return NULL;
5433
68c38fc3 5434 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5435 kfree(rd);
5436 return NULL;
5437 }
57d885fe
GH
5438
5439 return rd;
5440}
5441
e3589f6c
PZ
5442static void free_sched_groups(struct sched_group *sg, int free_sgp)
5443{
5444 struct sched_group *tmp, *first;
5445
5446 if (!sg)
5447 return;
5448
5449 first = sg;
5450 do {
5451 tmp = sg->next;
5452
5453 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5454 kfree(sg->sgp);
5455
5456 kfree(sg);
5457 sg = tmp;
5458 } while (sg != first);
5459}
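
free_sched_groups() walks a circular singly linked list: it remembers the head, reads ->next before freeing each node, and stops once the walk wraps around. A stand-alone sketch of the same pattern, with a toy node type in place of struct sched_group:

#include <stdlib.h>
#include <stdio.h>

struct node {
    int cpu;
    struct node *next;
};

/* Mirror of the free_sched_groups() walk: grab ->next before freeing,
 * stop when we are back at the saved head. */
static void free_ring(struct node *head)
{
    struct node *cur, *tmp;

    if (!head)
        return;

    cur = head;
    do {
        tmp = cur->next;
        free(cur);
        cur = tmp;
    } while (cur != head);
}

int main(void)
{
    struct node *head = NULL, *last = NULL;

    /* Build a 4-node ring 0 -> 1 -> 2 -> 3 -> 0. */
    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        n->cpu = i;
        n->next = head ? head : n;
        if (last)
            last->next = n;
        else
            head = n;
        last = n;
    }
    free_ring(head);
    printf("ring freed\n");
    return 0;
}
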
5460
dce840a0
PZ
5461static void free_sched_domain(struct rcu_head *rcu)
5462{
5463 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5464
5465 /*
5466 * If it's an overlapping domain it has private groups, iterate and
5467 * nuke them all.
5468 */
5469 if (sd->flags & SD_OVERLAP) {
5470 free_sched_groups(sd->groups, 1);
5471 } else if (atomic_dec_and_test(&sd->groups->ref)) {
9c3f75cb 5472 kfree(sd->groups->sgp);
dce840a0 5473 kfree(sd->groups);
9c3f75cb 5474 }
dce840a0
PZ
5475 kfree(sd);
5476}
5477
5478static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5479{
5480 call_rcu(&sd->rcu, free_sched_domain);
5481}
5482
5483static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5484{
5485 for (; sd; sd = sd->parent)
5486 destroy_sched_domain(sd, cpu);
5487}
5488
518cd623
PZ
5489/*
5490 * Keep a special pointer to the highest sched_domain that has
5491 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5492 * allows us to avoid some pointer chasing in select_idle_sibling().
5493 *
5494 * Also keep a unique ID per domain (we use the first cpu number in
5495 * the cpumask of the domain); this allows us to quickly tell if
39be3501 5496 * two cpus are in the same cache domain, see cpus_share_cache().
518cd623
PZ
5497 */
5498DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5499DEFINE_PER_CPU(int, sd_llc_id);
5500
5501static void update_top_cache_domain(int cpu)
5502{
5503 struct sched_domain *sd;
5504 int id = cpu;
5505
5506 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
37407ea7 5507 if (sd)
518cd623
PZ
5508 id = cpumask_first(sched_domain_span(sd));
5509
5510 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5511 per_cpu(sd_llc_id, cpu) = id;
5512}
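
Caching sd_llc_id per CPU is what turns the later "do these two CPUs share a cache?" question into a plain integer compare instead of a domain walk. A user-space sketch of that check, assuming a hypothetical 8-CPU machine with two last-level caches:

#include <stdio.h>

#define NR_CPUS 8

/* Toy analogue of per_cpu(sd_llc_id, cpu): the first CPU of each CPU's
 * last-level-cache domain.  Here CPUs 0-3 and 4-7 each share an L3. */
static const int llc_id[NR_CPUS] = { 0, 0, 0, 0, 4, 4, 4, 4 };

/* The cheap test cpus_share_cache() performs: compare the cached ids. */
static int cpus_share_cache(int this_cpu, int that_cpu)
{
    return llc_id[this_cpu] == llc_id[that_cpu];
}

int main(void)
{
    printf("cpu1 & cpu3: %d\n", cpus_share_cache(1, 3)); /* 1 - same L3 */
    printf("cpu1 & cpu6: %d\n", cpus_share_cache(1, 6)); /* 0 - different L3 */
    return 0;
}
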
5513
1da177e4 5514/*
0eab9146 5515 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5516 * hold the hotplug lock.
5517 */
0eab9146
IM
5518static void
5519cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5520{
70b97a7f 5521 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5522 struct sched_domain *tmp;
5523
5524 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5525 for (tmp = sd; tmp; ) {
245af2c7
SS
5526 struct sched_domain *parent = tmp->parent;
5527 if (!parent)
5528 break;
f29c9b1c 5529
1a848870 5530 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5531 tmp->parent = parent->parent;
1a848870
SS
5532 if (parent->parent)
5533 parent->parent->child = tmp;
dce840a0 5534 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
5535 } else
5536 tmp = tmp->parent;
245af2c7
SS
5537 }
5538
1a848870 5539 if (sd && sd_degenerate(sd)) {
dce840a0 5540 tmp = sd;
245af2c7 5541 sd = sd->parent;
dce840a0 5542 destroy_sched_domain(tmp, cpu);
1a848870
SS
5543 if (sd)
5544 sd->child = NULL;
5545 }
1da177e4 5546
4cb98839 5547 sched_domain_debug(sd, cpu);
1da177e4 5548
57d885fe 5549 rq_attach_root(rq, rd);
dce840a0 5550 tmp = rq->sd;
674311d5 5551 rcu_assign_pointer(rq->sd, sd);
dce840a0 5552 destroy_sched_domains(tmp, cpu);
518cd623
PZ
5553
5554 update_top_cache_domain(cpu);
1da177e4
LT
5555}
5556
5557/* cpus with isolated domains */
dcc30a35 5558static cpumask_var_t cpu_isolated_map;
1da177e4
LT
5559
5560/* Setup the mask of cpus configured for isolated domains */
5561static int __init isolated_cpu_setup(char *str)
5562{
bdddd296 5563 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 5564 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
5565 return 1;
5566}
5567
8927f494 5568__setup("isolcpus=", isolated_cpu_setup);
1da177e4 5569
d3081f52
PZ
5570static const struct cpumask *cpu_cpu_mask(int cpu)
5571{
5572 return cpumask_of_node(cpu_to_node(cpu));
5573}
5574
dce840a0
PZ
5575struct sd_data {
5576 struct sched_domain **__percpu sd;
5577 struct sched_group **__percpu sg;
9c3f75cb 5578 struct sched_group_power **__percpu sgp;
dce840a0
PZ
5579};
5580
49a02c51 5581struct s_data {
21d42ccf 5582 struct sched_domain ** __percpu sd;
49a02c51
AH
5583 struct root_domain *rd;
5584};
5585
2109b99e 5586enum s_alloc {
2109b99e 5587 sa_rootdomain,
21d42ccf 5588 sa_sd,
dce840a0 5589 sa_sd_storage,
2109b99e
AH
5590 sa_none,
5591};
5592
54ab4ff4
PZ
5593struct sched_domain_topology_level;
5594
5595typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
eb7a74e6
PZ
5596typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5597
e3589f6c
PZ
5598#define SDTL_OVERLAP 0x01
5599
eb7a74e6 5600struct sched_domain_topology_level {
2c402dc3
PZ
5601 sched_domain_init_f init;
5602 sched_domain_mask_f mask;
e3589f6c 5603 int flags;
cb83b629 5604 int numa_level;
54ab4ff4 5605 struct sd_data data;
eb7a74e6
PZ
5606};
5607
c1174876
PZ
5608/*
5609 * Build an iteration mask that can exclude certain CPUs from the upwards
5610 * domain traversal.
5611 *
5612 * Asymmetric node setups can result in situations where the domain tree is of
5613 * unequal depth; make sure to skip domains that already cover the entire
5614 * range.
5615 *
5616 * In that case build_sched_domains() will have terminated the iteration early
5617 * and our sibling sd spans will be empty. Domains should always include the
5618 * cpu they're built on, so check that.
5619 *
5620 */
5621static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5622{
5623 const struct cpumask *span = sched_domain_span(sd);
5624 struct sd_data *sdd = sd->private;
5625 struct sched_domain *sibling;
5626 int i;
5627
5628 for_each_cpu(i, span) {
5629 sibling = *per_cpu_ptr(sdd->sd, i);
5630 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5631 continue;
5632
5633 cpumask_set_cpu(i, sched_group_mask(sg));
5634 }
5635}
5636
5637/*
5638 * Return the canonical balance cpu for this group, this is the first cpu
5639 * of this group that's also in the iteration mask.
5640 */
5641int group_balance_cpu(struct sched_group *sg)
5642{
5643 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5644}
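
group_balance_cpu() boils down to "first CPU set in both the group's cpumask and its iteration mask". A bitmask sketch of the same operation; __builtin_ctz (GCC/Clang) stands in for cpumask_first_and():

#include <stdio.h>

/* First bit set in both masks, or -1 if the intersection is empty. */
static int first_and(unsigned int a, unsigned int b)
{
    unsigned int both = a & b;
    return both ? __builtin_ctz(both) : -1;
}

int main(void)
{
    unsigned int group_cpus = 0x0f; /* cpus 0-3 belong to the group      */
    unsigned int iter_mask  = 0x0c; /* only cpus 2-3 may act as balancer */

    printf("balance cpu: %d\n", first_and(group_cpus, iter_mask)); /* 2 */
    return 0;
}
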
5645
e3589f6c
PZ
5646static int
5647build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5648{
5649 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5650 const struct cpumask *span = sched_domain_span(sd);
5651 struct cpumask *covered = sched_domains_tmpmask;
5652 struct sd_data *sdd = sd->private;
5653 struct sched_domain *child;
5654 int i;
5655
5656 cpumask_clear(covered);
5657
5658 for_each_cpu(i, span) {
5659 struct cpumask *sg_span;
5660
5661 if (cpumask_test_cpu(i, covered))
5662 continue;
5663
c1174876
PZ
5664 child = *per_cpu_ptr(sdd->sd, i);
5665
5666 /* See the comment near build_group_mask(). */
5667 if (!cpumask_test_cpu(i, sched_domain_span(child)))
5668 continue;
5669
e3589f6c 5670 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 5671 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
5672
5673 if (!sg)
5674 goto fail;
5675
5676 sg_span = sched_group_cpus(sg);
e3589f6c
PZ
5677 if (child->child) {
5678 child = child->child;
5679 cpumask_copy(sg_span, sched_domain_span(child));
5680 } else
5681 cpumask_set_cpu(i, sg_span);
5682
5683 cpumask_or(covered, covered, sg_span);
5684
74a5ce20 5685 sg->sgp = *per_cpu_ptr(sdd->sgp, i);
c1174876
PZ
5686 if (atomic_inc_return(&sg->sgp->ref) == 1)
5687 build_group_mask(sd, sg);
5688
c3decf0d
PZ
5689 /*
5690 * Initialize sgp->power such that even if we mess up the
5691 * domains and no possible iteration will get us here, we won't
5692 * die on a /0 trap.
5693 */
5694 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
e3589f6c 5695
c1174876
PZ
5696 /*
5697 * Make sure the first group of this domain contains the
5698 * canonical balance cpu. Otherwise the sched_domain iteration
5699 * breaks. See update_sg_lb_stats().
5700 */
74a5ce20 5701 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 5702 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
5703 groups = sg;
5704
5705 if (!first)
5706 first = sg;
5707 if (last)
5708 last->next = sg;
5709 last = sg;
5710 last->next = first;
5711 }
5712 sd->groups = groups;
5713
5714 return 0;
5715
5716fail:
5717 free_sched_groups(first, 0);
5718
5719 return -ENOMEM;
5720}
5721
dce840a0 5722static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 5723{
dce840a0
PZ
5724 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5725 struct sched_domain *child = sd->child;
1da177e4 5726
dce840a0
PZ
5727 if (child)
5728 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 5729
9c3f75cb 5730 if (sg) {
dce840a0 5731 *sg = *per_cpu_ptr(sdd->sg, cpu);
9c3f75cb 5732 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
e3589f6c 5733 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
9c3f75cb 5734 }
dce840a0
PZ
5735
5736 return cpu;
1e9f28fa 5737}
1e9f28fa 5738
01a08546 5739/*
dce840a0
PZ
5740 * build_sched_groups will build a circular linked list of the groups
5741 * covered by the given span, set each group's ->cpumask correctly,
5742 * and initialize each group's ->cpu_power to 0.
e3589f6c
PZ
5743 *
5744 * Assumes the sched_domain tree is fully constructed
01a08546 5745 */
e3589f6c
PZ
5746static int
5747build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 5748{
dce840a0
PZ
5749 struct sched_group *first = NULL, *last = NULL;
5750 struct sd_data *sdd = sd->private;
5751 const struct cpumask *span = sched_domain_span(sd);
f96225fd 5752 struct cpumask *covered;
dce840a0 5753 int i;
9c1cfda2 5754
e3589f6c
PZ
5755 get_group(cpu, sdd, &sd->groups);
5756 atomic_inc(&sd->groups->ref);
5757
5758 if (cpu != cpumask_first(sched_domain_span(sd)))
5759 return 0;
5760
f96225fd
PZ
5761 lockdep_assert_held(&sched_domains_mutex);
5762 covered = sched_domains_tmpmask;
5763
dce840a0 5764 cpumask_clear(covered);
6711cab4 5765
dce840a0
PZ
5766 for_each_cpu(i, span) {
5767 struct sched_group *sg;
5768 int group = get_group(i, sdd, &sg);
5769 int j;
6711cab4 5770
dce840a0
PZ
5771 if (cpumask_test_cpu(i, covered))
5772 continue;
6711cab4 5773
dce840a0 5774 cpumask_clear(sched_group_cpus(sg));
9c3f75cb 5775 sg->sgp->power = 0;
c1174876 5776 cpumask_setall(sched_group_mask(sg));
0601a88d 5777
dce840a0
PZ
5778 for_each_cpu(j, span) {
5779 if (get_group(j, sdd, NULL) != group)
5780 continue;
0601a88d 5781
dce840a0
PZ
5782 cpumask_set_cpu(j, covered);
5783 cpumask_set_cpu(j, sched_group_cpus(sg));
5784 }
0601a88d 5785
dce840a0
PZ
5786 if (!first)
5787 first = sg;
5788 if (last)
5789 last->next = sg;
5790 last = sg;
5791 }
5792 last->next = first;
e3589f6c
PZ
5793
5794 return 0;
0601a88d 5795}
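
The loop above hinges on the "covered" mask: skip any CPU already placed in a group, otherwise sweep the span once more to collect every CPU that maps to the same group and mark them covered. A small stand-alone sketch of that covering pass, with a toy get_group() that pairs neighbouring CPUs as if they were SMT siblings:

#include <stdio.h>

#define NR_CPUS 8

/* Toy group id: pair up neighbours (0,1), (2,3), ...  The real code
 * derives this from the child sched_domain instead. */
static int get_group(int cpu)
{
    return cpu & ~1;
}

int main(void)
{
    unsigned int covered = 0; /* bitmask version of the 'covered' cpumask */

    for (int i = 0; i < NR_CPUS; i++) {
        if (covered & (1u << i))
            continue; /* already placed in a group */

        int group = get_group(i);
        printf("group led by cpu%d:", i);
        for (int j = 0; j < NR_CPUS; j++) {
            if (get_group(j) != group)
                continue;
            covered |= 1u << j;
            printf(" cpu%d", j);
        }
        printf("\n");
    }
    return 0;
}
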
51888ca2 5796
89c4710e
SS
5797/*
5798 * Initialize sched groups cpu_power.
5799 *
5800 * cpu_power indicates the capacity of a sched group, which is used while
5801 * distributing the load between different sched groups in a sched domain.
5802 * Typically cpu_power will be the same for all groups in a sched domain unless
5803 * there are asymmetries in the topology. If there are asymmetries, the group
5804 * with more cpu_power will pick up more load than the group with
5805 * less cpu_power.
89c4710e
SS
5806 */
5807static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5808{
e3589f6c 5809 struct sched_group *sg = sd->groups;
89c4710e 5810
e3589f6c
PZ
5811 WARN_ON(!sd || !sg);
5812
5813 do {
5814 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5815 sg = sg->next;
5816 } while (sg != sd->groups);
89c4710e 5817
c1174876 5818 if (cpu != group_balance_cpu(sg))
e3589f6c 5819 return;
aae6d3dd 5820
d274cb30 5821 update_group_power(sd, cpu);
69e1e811 5822 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
89c4710e
SS
5823}
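
To make the cpu_power comment concrete: load is distributed across groups in proportion to their power, with SCHED_POWER_SCALE (1024) representing one ordinary CPU. A quick arithmetic sketch with made-up group powers:

#include <stdio.h>

#define SCHED_POWER_SCALE 1024

int main(void)
{
    /* Group 0: two full cores.  Group 1: one core at ~70% capacity,
     * e.g. because it also handles interrupts.  Values are illustrative. */
    unsigned long power[2] = { 2 * SCHED_POWER_SCALE, 717 };
    unsigned long total = power[0] + power[1];
    unsigned long load = 100; /* arbitrary units of runnable load */

    for (int i = 0; i < 2; i++)
        printf("group %d gets ~%lu%% of the load\n",
               i, power[i] * load / total);
    return 0;
}
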
5824
029632fb
PZ
5825int __weak arch_sd_sibling_asym_packing(void)
5826{
5827 return 0*SD_ASYM_PACKING;
89c4710e
SS
5828}
5829
7c16ec58
MT
5830/*
5831 * Initializers for schedule domains
5832 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5833 */
5834
a5d8c348
IM
5835#ifdef CONFIG_SCHED_DEBUG
5836# define SD_INIT_NAME(sd, type) sd->name = #type
5837#else
5838# define SD_INIT_NAME(sd, type) do { } while (0)
5839#endif
5840
54ab4ff4
PZ
5841#define SD_INIT_FUNC(type) \
5842static noinline struct sched_domain * \
5843sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
5844{ \
5845 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
5846 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
5847 SD_INIT_NAME(sd, type); \
5848 sd->private = &tl->data; \
5849 return sd; \
7c16ec58
MT
5850}
5851
5852SD_INIT_FUNC(CPU)
7c16ec58
MT
5853#ifdef CONFIG_SCHED_SMT
5854 SD_INIT_FUNC(SIBLING)
5855#endif
5856#ifdef CONFIG_SCHED_MC
5857 SD_INIT_FUNC(MC)
5858#endif
01a08546
HC
5859#ifdef CONFIG_SCHED_BOOK
5860 SD_INIT_FUNC(BOOK)
5861#endif
7c16ec58 5862
1d3504fc 5863static int default_relax_domain_level = -1;
60495e77 5864int sched_domain_level_max;
1d3504fc
HS
5865
5866static int __init setup_relax_domain_level(char *str)
5867{
a841f8ce
DS
5868 if (kstrtoint(str, 0, &default_relax_domain_level))
5869 pr_warn("Unable to set relax_domain_level\n");
30e0e178 5870
1d3504fc
HS
5871 return 1;
5872}
5873__setup("relax_domain_level=", setup_relax_domain_level);
5874
5875static void set_domain_attribute(struct sched_domain *sd,
5876 struct sched_domain_attr *attr)
5877{
5878 int request;
5879
5880 if (!attr || attr->relax_domain_level < 0) {
5881 if (default_relax_domain_level < 0)
5882 return;
5883 else
5884 request = default_relax_domain_level;
5885 } else
5886 request = attr->relax_domain_level;
5887 if (request < sd->level) {
5888 /* turn off idle balance on this domain */
c88d5910 5889 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
5890 } else {
5891 /* turn on idle balance on this domain */
c88d5910 5892 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
5893 }
5894}
5895
54ab4ff4
PZ
5896static void __sdt_free(const struct cpumask *cpu_map);
5897static int __sdt_alloc(const struct cpumask *cpu_map);
5898
2109b99e
AH
5899static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
5900 const struct cpumask *cpu_map)
5901{
5902 switch (what) {
2109b99e 5903 case sa_rootdomain:
822ff793
PZ
5904 if (!atomic_read(&d->rd->refcount))
5905 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
5906 case sa_sd:
5907 free_percpu(d->sd); /* fall through */
dce840a0 5908 case sa_sd_storage:
54ab4ff4 5909 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
5910 case sa_none:
5911 break;
5912 }
5913}
3404c8d9 5914
2109b99e
AH
5915static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
5916 const struct cpumask *cpu_map)
5917{
dce840a0
PZ
5918 memset(d, 0, sizeof(*d));
5919
54ab4ff4
PZ
5920 if (__sdt_alloc(cpu_map))
5921 return sa_sd_storage;
dce840a0
PZ
5922 d->sd = alloc_percpu(struct sched_domain *);
5923 if (!d->sd)
5924 return sa_sd_storage;
2109b99e 5925 d->rd = alloc_rootdomain();
dce840a0 5926 if (!d->rd)
21d42ccf 5927 return sa_sd;
2109b99e
AH
5928 return sa_rootdomain;
5929}
57d885fe 5930
dce840a0
PZ
5931/*
5932 * NULL the sd_data elements we've used to build the sched_domain and
5933 * sched_group structure so that the subsequent __free_domain_allocs()
5934 * will not free the data we're using.
5935 */
5936static void claim_allocations(int cpu, struct sched_domain *sd)
5937{
5938 struct sd_data *sdd = sd->private;
dce840a0
PZ
5939
5940 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
5941 *per_cpu_ptr(sdd->sd, cpu) = NULL;
5942
e3589f6c 5943 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 5944 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c
PZ
5945
5946 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
9c3f75cb 5947 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
dce840a0
PZ
5948}
5949
2c402dc3
PZ
5950#ifdef CONFIG_SCHED_SMT
5951static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 5952{
2c402dc3 5953 return topology_thread_cpumask(cpu);
3bd65a80 5954}
2c402dc3 5955#endif
7f4588f3 5956
d069b916
PZ
5957/*
5958 * Topology list, bottom-up.
5959 */
2c402dc3 5960static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
5961#ifdef CONFIG_SCHED_SMT
5962 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 5963#endif
1e9f28fa 5964#ifdef CONFIG_SCHED_MC
2c402dc3 5965 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 5966#endif
d069b916
PZ
5967#ifdef CONFIG_SCHED_BOOK
5968 { sd_init_BOOK, cpu_book_mask, },
5969#endif
5970 { sd_init_CPU, cpu_cpu_mask, },
eb7a74e6
PZ
5971 { NULL, },
5972};
5973
5974static struct sched_domain_topology_level *sched_domain_topology = default_topology;
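
The topology table is NULL-terminated and walked bottom-up with "for (tl = sched_domain_topology; tl->init; tl++)". A compact sketch of that table-driven pattern; the level names and init functions below are purely illustrative:

#include <stdio.h>

struct topo_level {
    int (*init)(int cpu);
    const char *name;
};

static int init_smt(int cpu) { (void)cpu; return 0; } /* hyperthread siblings  */
static int init_mc(int cpu)  { (void)cpu; return 0; } /* cores sharing a cache */
static int init_cpu(int cpu) { (void)cpu; return 0; } /* whole package / node  */

static struct topo_level topology[] = {
    { init_smt, "SMT" },
    { init_mc,  "MC"  },
    { init_cpu, "CPU" },
    { NULL, },          /* terminator, like default_topology[] */
};

int main(void)
{
    /* Same walk that __sdt_alloc() and build_sched_domains() use. */
    for (struct topo_level *tl = topology; tl->init; tl++) {
        tl->init(0);
        printf("level: %s\n", tl->name);
    }
    return 0;
}
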
5975
cb83b629
PZ
5976#ifdef CONFIG_NUMA
5977
5978static int sched_domains_numa_levels;
cb83b629
PZ
5979static int *sched_domains_numa_distance;
5980static struct cpumask ***sched_domains_numa_masks;
5981static int sched_domains_curr_level;
5982
cb83b629
PZ
5983static inline int sd_local_flags(int level)
5984{
10717dcd 5985 if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
cb83b629
PZ
5986 return 0;
5987
5988 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
5989}
5990
5991static struct sched_domain *
5992sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
5993{
5994 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
5995 int level = tl->numa_level;
5996 int sd_weight = cpumask_weight(
5997 sched_domains_numa_masks[level][cpu_to_node(cpu)]);
5998
5999 *sd = (struct sched_domain){
6000 .min_interval = sd_weight,
6001 .max_interval = 2*sd_weight,
6002 .busy_factor = 32,
870a0bb5 6003 .imbalance_pct = 125,
cb83b629
PZ
6004 .cache_nice_tries = 2,
6005 .busy_idx = 3,
6006 .idle_idx = 2,
6007 .newidle_idx = 0,
6008 .wake_idx = 0,
6009 .forkexec_idx = 0,
6010
6011 .flags = 1*SD_LOAD_BALANCE
6012 | 1*SD_BALANCE_NEWIDLE
6013 | 0*SD_BALANCE_EXEC
6014 | 0*SD_BALANCE_FORK
6015 | 0*SD_BALANCE_WAKE
6016 | 0*SD_WAKE_AFFINE
cb83b629 6017 | 0*SD_SHARE_CPUPOWER
cb83b629
PZ
6018 | 0*SD_SHARE_PKG_RESOURCES
6019 | 1*SD_SERIALIZE
6020 | 0*SD_PREFER_SIBLING
6021 | sd_local_flags(level)
6022 ,
6023 .last_balance = jiffies,
6024 .balance_interval = sd_weight,
6025 };
6026 SD_INIT_NAME(sd, NUMA);
6027 sd->private = &tl->data;
6028
6029 /*
6030 * Ugly hack to pass state to sd_numa_mask()...
6031 */
6032 sched_domains_curr_level = tl->numa_level;
6033
6034 return sd;
6035}
6036
6037static const struct cpumask *sd_numa_mask(int cpu)
6038{
6039 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6040}
6041
d039ac60
PZ
6042static void sched_numa_warn(const char *str)
6043{
6044 static int done = false;
6045 int i,j;
6046
6047 if (done)
6048 return;
6049
6050 done = true;
6051
6052 printk(KERN_WARNING "ERROR: %s\n\n", str);
6053
6054 for (i = 0; i < nr_node_ids; i++) {
6055 printk(KERN_WARNING " ");
6056 for (j = 0; j < nr_node_ids; j++)
6057 printk(KERN_CONT "%02d ", node_distance(i,j));
6058 printk(KERN_CONT "\n");
6059 }
6060 printk(KERN_WARNING "\n");
6061}
6062
6063static bool find_numa_distance(int distance)
6064{
6065 int i;
6066
6067 if (distance == node_distance(0, 0))
6068 return true;
6069
6070 for (i = 0; i < sched_domains_numa_levels; i++) {
6071 if (sched_domains_numa_distance[i] == distance)
6072 return true;
6073 }
6074
6075 return false;
6076}
6077
cb83b629
PZ
6078static void sched_init_numa(void)
6079{
6080 int next_distance, curr_distance = node_distance(0, 0);
6081 struct sched_domain_topology_level *tl;
6082 int level = 0;
6083 int i, j, k;
6084
cb83b629
PZ
6085 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6086 if (!sched_domains_numa_distance)
6087 return;
6088
6089 /*
6090 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6091 * unique distances in the node_distance() table.
6092 *
6093 * Assumes node_distance(0,j) includes all distances in
6094 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6095 */
6096 next_distance = curr_distance;
6097 for (i = 0; i < nr_node_ids; i++) {
6098 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6099 for (k = 0; k < nr_node_ids; k++) {
6100 int distance = node_distance(i, k);
6101
6102 if (distance > curr_distance &&
6103 (distance < next_distance ||
6104 next_distance == curr_distance))
6105 next_distance = distance;
6106
6107 /*
6109 * While not a strong assumption, it would be nice to know
6110 * about cases where node A is connected to B but B is not
6111 * equally connected to A.
6111 */
6112 if (sched_debug() && node_distance(k, i) != distance)
6113 sched_numa_warn("Node-distance not symmetric");
6114
6115 if (sched_debug() && i && !find_numa_distance(distance))
6116 sched_numa_warn("Node-0 not representative");
6117 }
6118 if (next_distance != curr_distance) {
6119 sched_domains_numa_distance[level++] = next_distance;
6120 sched_domains_numa_levels = level;
6121 curr_distance = next_distance;
6122 } else break;
cb83b629 6123 }
d039ac60
PZ
6124
6125 /*
6126 * In case of sched_debug() we verify the above assumption.
6127 */
6128 if (!sched_debug())
6129 break;
cb83b629
PZ
6130 }
6131 /*
6132 * 'level' contains the number of unique distances, excluding the
6133 * identity distance node_distance(i,i).
6134 *
6135 * The sched_domains_numa_distance[] array includes the actual distance
6136 * numbers.
6137 */
6138
5f7865f3
TC
6139 /*
6140 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6141 * If allocating memory for the sched_domains_numa_masks[][] array fails,
6142 * the array will contain fewer than 'level' members. This could be
6143 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6144 * in other functions.
6145 *
6146 * We reset it to 'level' at the end of this function.
6147 */
6148 sched_domains_numa_levels = 0;
6149
cb83b629
PZ
6150 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6151 if (!sched_domains_numa_masks)
6152 return;
6153
6154 /*
6155 * Now for each level, construct a mask per node which contains all
6156 * cpus of nodes that are no more than that many hops away from us.
6157 */
6158 for (i = 0; i < level; i++) {
6159 sched_domains_numa_masks[i] =
6160 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6161 if (!sched_domains_numa_masks[i])
6162 return;
6163
6164 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6165 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6166 if (!mask)
6167 return;
6168
6169 sched_domains_numa_masks[i][j] = mask;
6170
6171 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 6172 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6173 continue;
6174
6175 cpumask_or(mask, mask, cpumask_of_node(k));
6176 }
6177 }
6178 }
6179
6180 tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
6181 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6182 if (!tl)
6183 return;
6184
6185 /*
6186 * Copy the default topology bits..
6187 */
6188 for (i = 0; default_topology[i].init; i++)
6189 tl[i] = default_topology[i];
6190
6191 /*
6192 * .. and append 'j' levels of NUMA goodness.
6193 */
6194 for (j = 0; j < level; i++, j++) {
6195 tl[i] = (struct sched_domain_topology_level){
6196 .init = sd_numa_init,
6197 .mask = sd_numa_mask,
6198 .flags = SDTL_OVERLAP,
6199 .numa_level = j,
6200 };
6201 }
6202
6203 sched_domain_topology = tl;
5f7865f3
TC
6204
6205 sched_domains_numa_levels = level;
cb83b629 6206}
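
The distance-deduplication pass can be exercised in isolation: starting from the local distance, repeatedly pick the smallest distance strictly greater than the current one until nothing larger remains. A stand-alone sketch over a made-up symmetric 4-node distance table, simplified to one full matrix scan per level:

#include <stdio.h>

#define NR_NODES 4

/* Made-up node_distance() table: local = 10, one hop = 20, two hops = 40. */
static const int dist[NR_NODES][NR_NODES] = {
    { 10, 20, 20, 40 },
    { 20, 10, 40, 20 },
    { 20, 40, 10, 20 },
    { 40, 20, 20, 10 },
};

int main(void)
{
    int curr = dist[0][0]; /* identity distance, not counted as a level */
    int levels = 0;

    for (;;) {
        int next = curr;

        /* Smallest distance strictly greater than 'curr'. */
        for (int i = 0; i < NR_NODES; i++)
            for (int k = 0; k < NR_NODES; k++) {
                int d = dist[i][k];
                if (d > curr && (d < next || next == curr))
                    next = d;
            }

        if (next == curr)
            break;
        printf("level %d: distance %d\n", levels++, next);
        curr = next;
    }
    printf("%d unique non-local distances\n", levels);
    return 0;
}
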
301a5cba
TC
6207
6208static void sched_domains_numa_masks_set(int cpu)
6209{
6210 int i, j;
6211 int node = cpu_to_node(cpu);
6212
6213 for (i = 0; i < sched_domains_numa_levels; i++) {
6214 for (j = 0; j < nr_node_ids; j++) {
6215 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6216 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6217 }
6218 }
6219}
6220
6221static void sched_domains_numa_masks_clear(int cpu)
6222{
6223 int i, j;
6224 for (i = 0; i < sched_domains_numa_levels; i++) {
6225 for (j = 0; j < nr_node_ids; j++)
6226 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6227 }
6228}
6229
6230/*
6231 * Update sched_domains_numa_masks[level][node] array when new cpus
6232 * are onlined.
6233 */
6234static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6235 unsigned long action,
6236 void *hcpu)
6237{
6238 int cpu = (long)hcpu;
6239
6240 switch (action & ~CPU_TASKS_FROZEN) {
6241 case CPU_ONLINE:
6242 sched_domains_numa_masks_set(cpu);
6243 break;
6244
6245 case CPU_DEAD:
6246 sched_domains_numa_masks_clear(cpu);
6247 break;
6248
6249 default:
6250 return NOTIFY_DONE;
6251 }
6252
6253 return NOTIFY_OK;
cb83b629
PZ
6254}
6255#else
6256static inline void sched_init_numa(void)
6257{
6258}
301a5cba
TC
6259
6260static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6261 unsigned long action,
6262 void *hcpu)
6263{
6264 return 0;
6265}
cb83b629
PZ
6266#endif /* CONFIG_NUMA */
6267
54ab4ff4
PZ
6268static int __sdt_alloc(const struct cpumask *cpu_map)
6269{
6270 struct sched_domain_topology_level *tl;
6271 int j;
6272
6273 for (tl = sched_domain_topology; tl->init; tl++) {
6274 struct sd_data *sdd = &tl->data;
6275
6276 sdd->sd = alloc_percpu(struct sched_domain *);
6277 if (!sdd->sd)
6278 return -ENOMEM;
6279
6280 sdd->sg = alloc_percpu(struct sched_group *);
6281 if (!sdd->sg)
6282 return -ENOMEM;
6283
9c3f75cb
PZ
6284 sdd->sgp = alloc_percpu(struct sched_group_power *);
6285 if (!sdd->sgp)
6286 return -ENOMEM;
6287
54ab4ff4
PZ
6288 for_each_cpu(j, cpu_map) {
6289 struct sched_domain *sd;
6290 struct sched_group *sg;
9c3f75cb 6291 struct sched_group_power *sgp;
54ab4ff4
PZ
6292
6293 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6294 GFP_KERNEL, cpu_to_node(j));
6295 if (!sd)
6296 return -ENOMEM;
6297
6298 *per_cpu_ptr(sdd->sd, j) = sd;
6299
6300 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6301 GFP_KERNEL, cpu_to_node(j));
6302 if (!sg)
6303 return -ENOMEM;
6304
30b4e9eb
IM
6305 sg->next = sg;
6306
54ab4ff4 6307 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6308
c1174876 6309 sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
9c3f75cb
PZ
6310 GFP_KERNEL, cpu_to_node(j));
6311 if (!sgp)
6312 return -ENOMEM;
6313
6314 *per_cpu_ptr(sdd->sgp, j) = sgp;
54ab4ff4
PZ
6315 }
6316 }
6317
6318 return 0;
6319}
6320
6321static void __sdt_free(const struct cpumask *cpu_map)
6322{
6323 struct sched_domain_topology_level *tl;
6324 int j;
6325
6326 for (tl = sched_domain_topology; tl->init; tl++) {
6327 struct sd_data *sdd = &tl->data;
6328
6329 for_each_cpu(j, cpu_map) {
fb2cf2c6 6330 struct sched_domain *sd;
6331
6332 if (sdd->sd) {
6333 sd = *per_cpu_ptr(sdd->sd, j);
6334 if (sd && (sd->flags & SD_OVERLAP))
6335 free_sched_groups(sd->groups, 0);
6336 kfree(*per_cpu_ptr(sdd->sd, j));
6337 }
6338
6339 if (sdd->sg)
6340 kfree(*per_cpu_ptr(sdd->sg, j));
6341 if (sdd->sgp)
6342 kfree(*per_cpu_ptr(sdd->sgp, j));
54ab4ff4
PZ
6343 }
6344 free_percpu(sdd->sd);
fb2cf2c6 6345 sdd->sd = NULL;
54ab4ff4 6346 free_percpu(sdd->sg);
fb2cf2c6 6347 sdd->sg = NULL;
9c3f75cb 6348 free_percpu(sdd->sgp);
fb2cf2c6 6349 sdd->sgp = NULL;
54ab4ff4
PZ
6350 }
6351}
6352
2c402dc3
PZ
6353struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6354 struct s_data *d, const struct cpumask *cpu_map,
d069b916 6355 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
6356 int cpu)
6357{
54ab4ff4 6358 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 6359 if (!sd)
d069b916 6360 return child;
2c402dc3 6361
2c402dc3 6362 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6363 if (child) {
6364 sd->level = child->level + 1;
6365 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6366 child->parent = sd;
60495e77 6367 }
d069b916 6368 sd->child = child;
a841f8ce 6369 set_domain_attribute(sd, attr);
2c402dc3
PZ
6370
6371 return sd;
6372}
6373
2109b99e
AH
6374/*
6375 * Build sched domains for a given set of cpus and attach the sched domains
6376 * to the individual cpus
6377 */
dce840a0
PZ
6378static int build_sched_domains(const struct cpumask *cpu_map,
6379 struct sched_domain_attr *attr)
2109b99e
AH
6380{
6381 enum s_alloc alloc_state = sa_none;
dce840a0 6382 struct sched_domain *sd;
2109b99e 6383 struct s_data d;
822ff793 6384 int i, ret = -ENOMEM;
9c1cfda2 6385
2109b99e
AH
6386 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6387 if (alloc_state != sa_rootdomain)
6388 goto error;
9c1cfda2 6389
dce840a0 6390 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6391 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6392 struct sched_domain_topology_level *tl;
6393
3bd65a80 6394 sd = NULL;
e3589f6c 6395 for (tl = sched_domain_topology; tl->init; tl++) {
2c402dc3 6396 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
e3589f6c
PZ
6397 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6398 sd->flags |= SD_OVERLAP;
d110235d
PZ
6399 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6400 break;
e3589f6c 6401 }
d274cb30 6402
d069b916
PZ
6403 while (sd->child)
6404 sd = sd->child;
6405
21d42ccf 6406 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
6407 }
6408
6409 /* Build the groups for the domains */
6410 for_each_cpu(i, cpu_map) {
6411 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6412 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6413 if (sd->flags & SD_OVERLAP) {
6414 if (build_overlap_sched_groups(sd, i))
6415 goto error;
6416 } else {
6417 if (build_sched_groups(sd, i))
6418 goto error;
6419 }
1cf51902 6420 }
a06dadbe 6421 }
9c1cfda2 6422
1da177e4 6423 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
6424 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6425 if (!cpumask_test_cpu(i, cpu_map))
6426 continue;
9c1cfda2 6427
dce840a0
PZ
6428 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6429 claim_allocations(i, sd);
cd4ea6ae 6430 init_sched_groups_power(i, sd);
dce840a0 6431 }
f712c0c7 6432 }
9c1cfda2 6433
1da177e4 6434 /* Attach the domains */
dce840a0 6435 rcu_read_lock();
abcd083a 6436 for_each_cpu(i, cpu_map) {
21d42ccf 6437 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6438 cpu_attach_domain(sd, d.rd, i);
1da177e4 6439 }
dce840a0 6440 rcu_read_unlock();
51888ca2 6441
822ff793 6442 ret = 0;
51888ca2 6443error:
2109b99e 6444 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6445 return ret;
1da177e4 6446}
029190c5 6447
acc3f5d7 6448static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6449static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6450static struct sched_domain_attr *dattr_cur;
6451 /* attribues of custom domains in 'doms_cur' */
029190c5
PJ
6452
6453/*
6454 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
6455 * cpumask) fails, then fallback to a single sched domain,
6456 * as determined by the single cpumask fallback_doms.
029190c5 6457 */
4212823f 6458static cpumask_var_t fallback_doms;
029190c5 6459
ee79d1bd
HC
6460/*
6461 * arch_update_cpu_topology lets virtualized architectures update the
6462 * cpu core maps. It is supposed to return 1 if the topology changed
6463 * or 0 if it stayed the same.
6464 */
6465int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 6466{
ee79d1bd 6467 return 0;
22e52b07
HC
6468}
6469
acc3f5d7
RR
6470cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6471{
6472 int i;
6473 cpumask_var_t *doms;
6474
6475 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6476 if (!doms)
6477 return NULL;
6478 for (i = 0; i < ndoms; i++) {
6479 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6480 free_sched_domains(doms, i);
6481 return NULL;
6482 }
6483 }
6484 return doms;
6485}
6486
6487void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6488{
6489 unsigned int i;
6490 for (i = 0; i < ndoms; i++)
6491 free_cpumask_var(doms[i]);
6492 kfree(doms);
6493}
6494
1a20ff27 6495/*
41a2d6cf 6496 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
6497 * For now this just excludes isolated cpus, but could be used to
6498 * exclude other special cases in the future.
1a20ff27 6499 */
c4a8849a 6500static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 6501{
7378547f
MM
6502 int err;
6503
22e52b07 6504 arch_update_cpu_topology();
029190c5 6505 ndoms_cur = 1;
acc3f5d7 6506 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 6507 if (!doms_cur)
acc3f5d7
RR
6508 doms_cur = &fallback_doms;
6509 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 6510 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 6511 register_sched_domain_sysctl();
7378547f
MM
6512
6513 return err;
1a20ff27
DG
6514}
6515
1a20ff27
DG
6516/*
6517 * Detach sched domains from a group of cpus specified in cpu_map
6518 * These cpus will now be attached to the NULL domain
6519 */
96f874e2 6520static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
6521{
6522 int i;
6523
dce840a0 6524 rcu_read_lock();
abcd083a 6525 for_each_cpu(i, cpu_map)
57d885fe 6526 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 6527 rcu_read_unlock();
1a20ff27
DG
6528}
6529
1d3504fc
HS
6530/* handle null as "default" */
6531static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6532 struct sched_domain_attr *new, int idx_new)
6533{
6534 struct sched_domain_attr tmp;
6535
6536 /* fast path */
6537 if (!new && !cur)
6538 return 1;
6539
6540 tmp = SD_ATTR_INIT;
6541 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6542 new ? (new + idx_new) : &tmp,
6543 sizeof(struct sched_domain_attr));
6544}
6545
029190c5
PJ
6546/*
6547 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 6548 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
6549 * doms_new[] to the current sched domain partitioning, doms_cur[].
6550 * It destroys each deleted domain and builds each new domain.
6551 *
acc3f5d7 6552 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
6553 * The masks don't intersect (don't overlap). We should set up one
6554 * sched domain for each mask. CPUs not in any of the cpumasks will
6555 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
6556 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6557 * it as it is.
6558 *
acc3f5d7
RR
6559 * The passed in 'doms_new' should be allocated using
6560 * alloc_sched_domains. This routine takes ownership of it and will
6561 * free_sched_domains it when done with it. If the caller failed the
6562 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6563 * and partition_sched_domains() will fall back to the single partition
6564 * 'fallback_doms'; it also forces the domains to be rebuilt.
029190c5 6565 *
96f874e2 6566 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
6567 * ndoms_new == 0 is a special case for destroying existing domains,
6568 * and it will not create the default domain.
dfb512ec 6569 *
029190c5
PJ
6570 * Call with hotplug lock held
6571 */
acc3f5d7 6572void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 6573 struct sched_domain_attr *dattr_new)
029190c5 6574{
dfb512ec 6575 int i, j, n;
d65bd5ec 6576 int new_topology;
029190c5 6577
712555ee 6578 mutex_lock(&sched_domains_mutex);
a1835615 6579
7378547f
MM
6580 /* always unregister in case we don't destroy any domains */
6581 unregister_sched_domain_sysctl();
6582
d65bd5ec
HC
6583 /* Let architecture update cpu core mappings. */
6584 new_topology = arch_update_cpu_topology();
6585
dfb512ec 6586 n = doms_new ? ndoms_new : 0;
029190c5
PJ
6587
6588 /* Destroy deleted domains */
6589 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 6590 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 6591 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 6592 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
6593 goto match1;
6594 }
6595 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 6596 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
6597match1:
6598 ;
6599 }
6600
e761b772
MK
6601 if (doms_new == NULL) {
6602 ndoms_cur = 0;
acc3f5d7 6603 doms_new = &fallback_doms;
6ad4c188 6604 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 6605 WARN_ON_ONCE(dattr_new);
e761b772
MK
6606 }
6607
029190c5
PJ
6608 /* Build new domains */
6609 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 6610 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 6611 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 6612 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
6613 goto match2;
6614 }
6615 /* no match - add a new doms_new */
dce840a0 6616 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
6617match2:
6618 ;
6619 }
6620
6621 /* Remember the new sched domains */
acc3f5d7
RR
6622 if (doms_cur != &fallback_doms)
6623 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 6624 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 6625 doms_cur = doms_new;
1d3504fc 6626 dattr_cur = dattr_new;
029190c5 6627 ndoms_cur = ndoms_new;
7378547f
MM
6628
6629 register_sched_domain_sysctl();
a1835615 6630
712555ee 6631 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
6632}
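
The match1/match2 logic is a set difference: destroy every current partition that has no equal in the new array, build every new partition that has no equal in the current array, and leave exact matches untouched. A toy sketch with integer bitmasks standing in for cpumasks (domain attributes and the new_topology short-circuit are ignored):

#include <stdio.h>

static int contains(const unsigned int *arr, int n, unsigned int mask)
{
    for (int i = 0; i < n; i++)
        if (arr[i] == mask)
            return 1;
    return 0;
}

int main(void)
{
    unsigned int cur[]  = { 0x0f, 0xf0 };       /* current partitions   */
    unsigned int new_[] = { 0x0f, 0xc0, 0x30 }; /* requested partitions */
    int ncur = 2, nnew = 3;

    for (int i = 0; i < ncur; i++)  /* match1: destroy deleted domains */
        if (!contains(new_, nnew, cur[i]))
            printf("destroy domains for mask 0x%02x\n", cur[i]);

    for (int i = 0; i < nnew; i++)  /* match2: build added domains */
        if (!contains(cur, ncur, new_[i]))
            printf("build   domains for mask 0x%02x\n", new_[i]);

    return 0;
}
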
6633
d35be8ba
SB
6634static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
6635
1da177e4 6636/*
3a101d05
TH
6637 * Update cpusets according to cpu_active mask. If cpusets are
6638 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6639 * around partition_sched_domains().
d35be8ba
SB
6640 *
6641 * If we come here as part of a suspend/resume, don't touch cpusets because we
6642 * want to restore it back to its original state upon resume anyway.
1da177e4 6643 */
0b2e918a
TH
6644static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6645 void *hcpu)
e761b772 6646{
d35be8ba
SB
6647 switch (action) {
6648 case CPU_ONLINE_FROZEN:
6649 case CPU_DOWN_FAILED_FROZEN:
6650
6651 /*
6652 * num_cpus_frozen tracks how many CPUs are involved in the suspend/
6653 * resume sequence. As long as this is not the last online
6654 * operation in the resume sequence, just build a single sched
6655 * domain, ignoring cpusets.
6656 */
6657 num_cpus_frozen--;
6658 if (likely(num_cpus_frozen)) {
6659 partition_sched_domains(1, NULL, NULL);
6660 break;
6661 }
6662
6663 /*
6664 * This is the last CPU online operation. So fall through and
6665 * restore the original sched domains by considering the
6666 * cpuset configurations.
6667 */
6668
e761b772 6669 case CPU_ONLINE:
6ad4c188 6670 case CPU_DOWN_FAILED:
7ddf96b0 6671 cpuset_update_active_cpus(true);
d35be8ba 6672 break;
3a101d05
TH
6673 default:
6674 return NOTIFY_DONE;
6675 }
d35be8ba 6676 return NOTIFY_OK;
3a101d05 6677}
e761b772 6678
0b2e918a
TH
6679static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6680 void *hcpu)
3a101d05 6681{
d35be8ba 6682 switch (action) {
3a101d05 6683 case CPU_DOWN_PREPARE:
7ddf96b0 6684 cpuset_update_active_cpus(false);
d35be8ba
SB
6685 break;
6686 case CPU_DOWN_PREPARE_FROZEN:
6687 num_cpus_frozen++;
6688 partition_sched_domains(1, NULL, NULL);
6689 break;
e761b772
MK
6690 default:
6691 return NOTIFY_DONE;
6692 }
d35be8ba 6693 return NOTIFY_OK;
e761b772 6694}
e761b772 6695
1da177e4
LT
6696void __init sched_init_smp(void)
6697{
dcc30a35
RR
6698 cpumask_var_t non_isolated_cpus;
6699
6700 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 6701 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 6702
cb83b629
PZ
6703 sched_init_numa();
6704
95402b38 6705 get_online_cpus();
712555ee 6706 mutex_lock(&sched_domains_mutex);
c4a8849a 6707 init_sched_domains(cpu_active_mask);
dcc30a35
RR
6708 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6709 if (cpumask_empty(non_isolated_cpus))
6710 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 6711 mutex_unlock(&sched_domains_mutex);
95402b38 6712 put_online_cpus();
e761b772 6713
301a5cba 6714 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
6715 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6716 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
6717
6718 /* RT runtime code needs to handle some hotplug events */
6719 hotcpu_notifier(update_runtime, 0);
6720
b328ca18 6721 init_hrtick();
5c1e1767
NP
6722
6723 /* Move init over to a non-isolated CPU */
dcc30a35 6724 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 6725 BUG();
19978ca6 6726 sched_init_granularity();
dcc30a35 6727 free_cpumask_var(non_isolated_cpus);
4212823f 6728
0e3900e6 6729 init_sched_rt_class();
1da177e4
LT
6730}
6731#else
6732void __init sched_init_smp(void)
6733{
19978ca6 6734 sched_init_granularity();
1da177e4
LT
6735}
6736#endif /* CONFIG_SMP */
6737
cd1bb94b
AB
6738const_debug unsigned int sysctl_timer_migration = 1;
6739
1da177e4
LT
6740int in_sched_functions(unsigned long addr)
6741{
1da177e4
LT
6742 return in_lock_functions(addr) ||
6743 (addr >= (unsigned long)__sched_text_start
6744 && addr < (unsigned long)__sched_text_end);
6745}
6746
029632fb
PZ
6747#ifdef CONFIG_CGROUP_SCHED
6748struct task_group root_task_group;
35cf4e50 6749LIST_HEAD(task_groups);
052f1dc7 6750#endif
6f505b16 6751
029632fb 6752DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
6f505b16 6753
1da177e4
LT
6754void __init sched_init(void)
6755{
dd41f596 6756 int i, j;
434d53b0
MT
6757 unsigned long alloc_size = 0, ptr;
6758
6759#ifdef CONFIG_FAIR_GROUP_SCHED
6760 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6761#endif
6762#ifdef CONFIG_RT_GROUP_SCHED
6763 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 6764#endif
df7c8e84 6765#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 6766 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 6767#endif
434d53b0 6768 if (alloc_size) {
36b7b6d4 6769 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
6770
6771#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 6772 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
6773 ptr += nr_cpu_ids * sizeof(void **);
6774
07e06b01 6775 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 6776 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 6777
6d6bc0ad 6778#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 6779#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6780 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
6781 ptr += nr_cpu_ids * sizeof(void **);
6782
07e06b01 6783 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
6784 ptr += nr_cpu_ids * sizeof(void **);
6785
6d6bc0ad 6786#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
6787#ifdef CONFIG_CPUMASK_OFFSTACK
6788 for_each_possible_cpu(i) {
6789 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6790 ptr += cpumask_size();
6791 }
6792#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 6793 }
dd41f596 6794
57d885fe
GH
6795#ifdef CONFIG_SMP
6796 init_defrootdomain();
6797#endif
6798
d0b27fa7
PZ
6799 init_rt_bandwidth(&def_rt_bandwidth,
6800 global_rt_period(), global_rt_runtime());
6801
6802#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6803 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 6804 global_rt_period(), global_rt_runtime());
6d6bc0ad 6805#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 6806
7c941438 6807#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
6808 list_add(&root_task_group.list, &task_groups);
6809 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 6810 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 6811 autogroup_init(&init_task);
54c707e9 6812
7c941438 6813#endif /* CONFIG_CGROUP_SCHED */
6f505b16 6814
54c707e9
GC
6815#ifdef CONFIG_CGROUP_CPUACCT
6816 root_cpuacct.cpustat = &kernel_cpustat;
6817 root_cpuacct.cpuusage = alloc_percpu(u64);
6818 /* Too early, not expected to fail */
6819 BUG_ON(!root_cpuacct.cpuusage);
6820#endif
0a945022 6821 for_each_possible_cpu(i) {
70b97a7f 6822 struct rq *rq;
1da177e4
LT
6823
6824 rq = cpu_rq(i);
05fa785c 6825 raw_spin_lock_init(&rq->lock);
7897986b 6826 rq->nr_running = 0;
dce48a84
TG
6827 rq->calc_load_active = 0;
6828 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 6829 init_cfs_rq(&rq->cfs);
6f505b16 6830 init_rt_rq(&rq->rt, rq);
dd41f596 6831#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 6832 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 6833 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 6834 /*
07e06b01 6835 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
6836 *
6837 * In case of task-groups formed through the cgroup filesystem, it
6838 * gets 100% of the cpu resources in the system. This overall
6839 * system cpu resource is divided among the tasks of
07e06b01 6840 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
6841 * based on each entity's (task or task-group's) weight
6842 * (se->load.weight).
6843 *
07e06b01 6844 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
6845 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6846 * then A0's share of the cpu resource is:
6847 *
0d905bca 6848 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 6849 *
07e06b01
YZ
6850 * We achieve this by letting root_task_group's tasks sit
6851 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 6852 */
ab84d31e 6853 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 6854 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
6855#endif /* CONFIG_FAIR_GROUP_SCHED */
6856
6857 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 6858#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 6859 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 6860 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 6861#endif
1da177e4 6862
dd41f596
IM
6863 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6864 rq->cpu_load[j] = 0;
fdf3e95d
VP
6865
6866 rq->last_load_update_tick = jiffies;
6867
1da177e4 6868#ifdef CONFIG_SMP
41c7ce9a 6869 rq->sd = NULL;
57d885fe 6870 rq->rd = NULL;
1399fa78 6871 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 6872 rq->post_schedule = 0;
1da177e4 6873 rq->active_balance = 0;
dd41f596 6874 rq->next_balance = jiffies;
1da177e4 6875 rq->push_cpu = 0;
0a2966b4 6876 rq->cpu = i;
1f11eb6a 6877 rq->online = 0;
eae0c9df
MG
6878 rq->idle_stamp = 0;
6879 rq->avg_idle = 2*sysctl_sched_migration_cost;
367456c7
PZ
6880
6881 INIT_LIST_HEAD(&rq->cfs_tasks);
6882
dc938520 6883 rq_attach_root(rq, &def_root_domain);
83cd4fe2 6884#ifdef CONFIG_NO_HZ
1c792db7 6885 rq->nohz_flags = 0;
83cd4fe2 6886#endif
1da177e4 6887#endif
8f4d37ec 6888 init_rq_hrtick(rq);
1da177e4 6889 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
6890 }
6891
2dd73a4f 6892 set_load_weight(&init_task);
b50f60ce 6893
e107be36
AK
6894#ifdef CONFIG_PREEMPT_NOTIFIERS
6895 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6896#endif
6897
b50f60ce 6898#ifdef CONFIG_RT_MUTEXES
732375c6 6899 plist_head_init(&init_task.pi_waiters);
b50f60ce
HC
6900#endif
6901
1da177e4
LT
6902 /*
6903 * The boot idle thread does lazy MMU switching as well:
6904 */
6905 atomic_inc(&init_mm.mm_count);
6906 enter_lazy_tlb(&init_mm, current);
6907
6908 /*
6909 * Make us the idle thread. Technically, schedule() should not be
6910 * called from this thread, however somewhere below it might be,
6911 * but because we are the idle thread, we just pick up running again
6912 * when this runqueue becomes "idle".
6913 */
6914 init_idle(current, smp_processor_id());
dce48a84
TG
6915
6916 calc_load_update = jiffies + LOAD_FREQ;
6917
dd41f596
IM
6918 /*
6919 * During early bootup we pretend to be a normal task:
6920 */
6921 current->sched_class = &fair_sched_class;
6892b75e 6922
bf4d83f6 6923#ifdef CONFIG_SMP
4cb98839 6924 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
6925 /* May be allocated at isolcpus cmdline parse time */
6926 if (cpu_isolated_map == NULL)
6927 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 6928 idle_thread_set_boot_cpu();
029632fb
PZ
6929#endif
6930 init_sched_fair_class();
6a7b3dc3 6931
6892b75e 6932 scheduler_running = 1;
1da177e4
LT
6933}
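
A quick check of the bandwidth arithmetic from the root_task_group comment in sched_init() above, with ten tasks of weight 1024 and the two child groups A0 and A1 of weight 1024 each all sitting at the root level:

#include <stdio.h>

int main(void)
{
    double task_w = 1024.0, group_w = 1024.0;
    double total = 10 * task_w + 2 * group_w;

    /* A0 competes against the ten tasks and A1 with its group weight. */
    printf("A0's bandwidth = %.2f%%\n", 100.0 * group_w / total); /* 8.33 */
    return 0;
}
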
6934
d902db1e 6935#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
6936static inline int preempt_count_equals(int preempt_offset)
6937{
234da7bc 6938 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 6939
4ba8216c 6940 return (nested == preempt_offset);
e4aafea2
FW
6941}
6942
d894837f 6943void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6944{
1da177e4
LT
6945 static unsigned long prev_jiffy; /* ratelimiting */
6946
b3fbab05 6947 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
e4aafea2
FW
6948 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6949 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
6950 return;
6951 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6952 return;
6953 prev_jiffy = jiffies;
6954
3df0fc5b
PZ
6955 printk(KERN_ERR
6956 "BUG: sleeping function called from invalid context at %s:%d\n",
6957 file, line);
6958 printk(KERN_ERR
6959 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6960 in_atomic(), irqs_disabled(),
6961 current->pid, current->comm);
aef745fc
IM
6962
6963 debug_show_held_locks(current);
6964 if (irqs_disabled())
6965 print_irqtrace_events(current);
6966 dump_stack();
1da177e4
LT
6967}
6968EXPORT_SYMBOL(__might_sleep);
6969#endif
6970
6971#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
6972static void normalize_task(struct rq *rq, struct task_struct *p)
6973{
da7a735e
PZ
6974 const struct sched_class *prev_class = p->sched_class;
6975 int old_prio = p->prio;
3a5e4dc1 6976 int on_rq;
3e51f33f 6977
fd2f4419 6978 on_rq = p->on_rq;
3a5e4dc1 6979 if (on_rq)
4ca9b72b 6980 dequeue_task(rq, p, 0);
3a5e4dc1
AK
6981 __setscheduler(rq, p, SCHED_NORMAL, 0);
6982 if (on_rq) {
4ca9b72b 6983 enqueue_task(rq, p, 0);
3a5e4dc1
AK
6984 resched_task(rq->curr);
6985 }
da7a735e
PZ
6986
6987 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
6988}
6989
1da177e4
LT
6990void normalize_rt_tasks(void)
6991{
a0f98a1c 6992 struct task_struct *g, *p;
1da177e4 6993 unsigned long flags;
70b97a7f 6994 struct rq *rq;
1da177e4 6995
4cf5d77a 6996 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 6997 do_each_thread(g, p) {
178be793
IM
6998 /*
6999 * Only normalize user tasks:
7000 */
7001 if (!p->mm)
7002 continue;
7003
6cfb0d5d 7004 p->se.exec_start = 0;
6cfb0d5d 7005#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7006 p->se.statistics.wait_start = 0;
7007 p->se.statistics.sleep_start = 0;
7008 p->se.statistics.block_start = 0;
6cfb0d5d 7009#endif
dd41f596
IM
7010
7011 if (!rt_task(p)) {
7012 /*
7013 * Renice negative nice level userspace
7014 * tasks back to 0:
7015 */
7016 if (TASK_NICE(p) < 0 && p->mm)
7017 set_user_nice(p, 0);
1da177e4 7018 continue;
dd41f596 7019 }
1da177e4 7020
1d615482 7021 raw_spin_lock(&p->pi_lock);
b29739f9 7022 rq = __task_rq_lock(p);
1da177e4 7023
178be793 7024 normalize_task(rq, p);
3a5e4dc1 7025
b29739f9 7026 __task_rq_unlock(rq);
1d615482 7027 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
7028 } while_each_thread(g, p);
7029
4cf5d77a 7030 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
7031}
7032
7033#endif /* CONFIG_MAGIC_SYSRQ */
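
For context, normalize_rt_tasks() is wired up to the magic SysRq 'n' key. A hedged approximation of the hook in drivers/tty/sysrq.c (field values abridged, so treat the details as illustrative):

static void sysrq_handle_unrt(int key)
{
	normalize_rt_tasks();
}

static struct sysrq_key_op sysrq_unrt_op = {
	.handler	= sysrq_handle_unrt,
	.help_msg	= "nice-all-RT-tasks(n)",
	.action_msg	= "Nice All RT Tasks",
};
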
1df5c10a 7034
67fc4e0c 7035#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7036/*
67fc4e0c 7037 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7038 *
7039 * They can only be called when the whole system has been
7040 * stopped - every CPU needs to be quiescent, and no scheduling
7041 * activity can take place. Using them for anything else would
7042 * be a serious bug, and as a result, they aren't even visible
7043 * under any other configuration.
7044 */
7045
7046/**
7047 * curr_task - return the current task for a given cpu.
7048 * @cpu: the processor in question.
7049 *
7050 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7051 */
36c8b586 7052struct task_struct *curr_task(int cpu)
1df5c10a
LT
7053{
7054 return cpu_curr(cpu);
7055}
7056
67fc4e0c
JW
7057#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7058
7059#ifdef CONFIG_IA64
1df5c10a
LT
7060/**
7061 * set_curr_task - set the current task for a given cpu.
7062 * @cpu: the processor in question.
7063 * @p: the task pointer to set.
7064 *
7065 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7066 * are serviced on a separate stack. It allows the architecture to switch the
7067 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7068 * must be called with all CPUs synchronized and interrupts disabled; the
7069 * caller must save the original value of the current task (see
7070 * curr_task() above) and restore that value before reenabling interrupts and
7071 * re-starting the system.
7072 *
7073 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7074 */
36c8b586 7075void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7076{
7077 cpu_curr(cpu) = p;
7078}
7079
7080#endif
29f59db3 7081
7c941438 7082#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7083/* task_group_lock serializes the addition/removal of task groups */
7084static DEFINE_SPINLOCK(task_group_lock);
7085
bccbe08a
PZ
7086static void free_sched_group(struct task_group *tg)
7087{
7088 free_fair_sched_group(tg);
7089 free_rt_sched_group(tg);
e9aa1dd1 7090 autogroup_free(tg);
bccbe08a
PZ
7091 kfree(tg);
7092}
7093
7094/* allocate runqueue etc for a new task group */
ec7dc8ac 7095struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7096{
7097 struct task_group *tg;
7098 unsigned long flags;
bccbe08a
PZ
7099
7100 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7101 if (!tg)
7102 return ERR_PTR(-ENOMEM);
7103
ec7dc8ac 7104 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7105 goto err;
7106
ec7dc8ac 7107 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7108 goto err;
7109
8ed36996 7110 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7111 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7112
7113 WARN_ON(!parent); /* root should already exist */
7114
7115 tg->parent = parent;
f473aa5e 7116 INIT_LIST_HEAD(&tg->children);
09f2724a 7117 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7118 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 7119
9b5b7751 7120 return tg;
29f59db3
SV
7121
7122err:
6f505b16 7123 free_sched_group(tg);
29f59db3
SV
7124 return ERR_PTR(-ENOMEM);
7125}
7126
9b5b7751 7127/* rcu callback to free various structures associated with a task group */
6f505b16 7128static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7129{
29f59db3 7130 /* now it should be safe to free those cfs_rqs */
6f505b16 7131 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7132}
7133
9b5b7751 7134/* Destroy runqueue etc associated with a task group */
4cf86d77 7135void sched_destroy_group(struct task_group *tg)
29f59db3 7136{
8ed36996 7137 unsigned long flags;
9b5b7751 7138 int i;
29f59db3 7139
3d4b47b4
PZ
7140 /* end participation in shares distribution */
7141 for_each_possible_cpu(i)
bccbe08a 7142 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7143
7144 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7145 list_del_rcu(&tg->list);
f473aa5e 7146 list_del_rcu(&tg->siblings);
8ed36996 7147 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 7148
9b5b7751 7149	/* wait for possible concurrent references to cfs_rqs to complete */
6f505b16 7150 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
7151}
7152
9b5b7751 7153/* Change a task's runqueue when it moves between groups.
3a252015
IM
7154 * The caller of this function should have put the task in its new group
7155 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7156 * reflect its new group.
9b5b7751
SV
7157 */
7158void sched_move_task(struct task_struct *tsk)
29f59db3 7159{
8323f26c 7160 struct task_group *tg;
29f59db3
SV
7161 int on_rq, running;
7162 unsigned long flags;
7163 struct rq *rq;
7164
7165 rq = task_rq_lock(tsk, &flags);
7166
051a1d1a 7167 running = task_current(rq, tsk);
fd2f4419 7168 on_rq = tsk->on_rq;
29f59db3 7169
0e1f3483 7170 if (on_rq)
29f59db3 7171 dequeue_task(rq, tsk, 0);
0e1f3483
HS
7172 if (unlikely(running))
7173 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 7174
8323f26c
PZ
7175 tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
7176 lockdep_is_held(&tsk->sighand->siglock)),
7177 struct task_group, css);
7178 tg = autogroup_task_group(tsk, tg);
7179 tsk->sched_task_group = tg;
7180
810b3817 7181#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
7182 if (tsk->sched_class->task_move_group)
7183 tsk->sched_class->task_move_group(tsk, on_rq);
7184 else
810b3817 7185#endif
b2b5ce02 7186 set_task_rq(tsk, task_cpu(tsk));
810b3817 7187
0e1f3483
HS
7188 if (unlikely(running))
7189 tsk->sched_class->set_curr_task(rq);
7190 if (on_rq)
371fd7e7 7191 enqueue_task(rq, tsk, 0);
29f59db3 7192
0122ec5b 7193 task_rq_unlock(rq, tsk, &flags);
29f59db3 7194}
7c941438 7195#endif /* CONFIG_CGROUP_SCHED */
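
sched_move_task() above is normally reached from the cgroup attach path defined later in this file (cpu_cgroup_attach()); from userspace the whole sequence is triggered by writing a PID into the destination group's tasks file. A hedged userspace sketch, assuming a cgroup-v1 cpu controller mounted at /sys/fs/cgroup/cpu and an existing group named mygroup:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: move the calling task into "mygroup". The kernel side
 * of this write ends up in cpu_cgroup_attach() -> sched_move_task(). */
static int move_self_to_mygroup(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/tasks", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", (int)getpid());
	fclose(f);
	return 0;
}
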
29f59db3 7196
a790de99 7197#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
9f0c1e56
PZ
7198static unsigned long to_ratio(u64 period, u64 runtime)
7199{
7200 if (runtime == RUNTIME_INF)
9a7e0b18 7201 return 1ULL << 20;
9f0c1e56 7202
9a7e0b18 7203 return div64_u64(runtime << 20, period);
9f0c1e56 7204}
a790de99
PT
7205#endif
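
to_ratio() expresses runtime/period as a fraction in 20-bit fixed point, so per-group ratios can be summed and compared with integer arithmetic only. A small worked example, assuming the default global RT settings of a 1 s period and 0.95 s runtime:

#include <linux/math64.h>

/* 0.95 of a CPU in 20-bit fixed point:
 *   (950000000 << 20) / 1000000000 = 996147  ~  0.95 * (1 << 20)
 * RUNTIME_INF is treated as the full 1 << 20. */
static u64 example_rt_ratio(void)
{
	return div64_u64(950000000ULL << 20, 1000000000ULL);
}
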
7206
7207#ifdef CONFIG_RT_GROUP_SCHED
7208/*
7209 * Ensure that the real time constraints are schedulable.
7210 */
7211static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7212
9a7e0b18
PZ
7213/* Must be called with tasklist_lock held */
7214static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7215{
9a7e0b18 7216 struct task_struct *g, *p;
b40b2e8e 7217
9a7e0b18 7218 do_each_thread(g, p) {
029632fb 7219 if (rt_task(p) && task_rq(p)->rt.tg == tg)
9a7e0b18
PZ
7220 return 1;
7221 } while_each_thread(g, p);
b40b2e8e 7222
9a7e0b18
PZ
7223 return 0;
7224}
b40b2e8e 7225
9a7e0b18
PZ
7226struct rt_schedulable_data {
7227 struct task_group *tg;
7228 u64 rt_period;
7229 u64 rt_runtime;
7230};
b40b2e8e 7231
a790de99 7232static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7233{
7234 struct rt_schedulable_data *d = data;
7235 struct task_group *child;
7236 unsigned long total, sum = 0;
7237 u64 period, runtime;
b40b2e8e 7238
9a7e0b18
PZ
7239 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7240 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7241
9a7e0b18
PZ
7242 if (tg == d->tg) {
7243 period = d->rt_period;
7244 runtime = d->rt_runtime;
b40b2e8e 7245 }
b40b2e8e 7246
4653f803
PZ
7247 /*
7248 * Cannot have more runtime than the period.
7249 */
7250 if (runtime > period && runtime != RUNTIME_INF)
7251 return -EINVAL;
6f505b16 7252
4653f803
PZ
7253 /*
7254 * Ensure we don't starve existing RT tasks.
7255 */
9a7e0b18
PZ
7256 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7257 return -EBUSY;
6f505b16 7258
9a7e0b18 7259 total = to_ratio(period, runtime);
6f505b16 7260
4653f803
PZ
7261 /*
7262 * Nobody can have more than the global setting allows.
7263 */
7264 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7265 return -EINVAL;
6f505b16 7266
4653f803
PZ
7267 /*
7268 * The sum of our children's runtime should not exceed our own.
7269 */
9a7e0b18
PZ
7270 list_for_each_entry_rcu(child, &tg->children, siblings) {
7271 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7272 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7273
9a7e0b18
PZ
7274 if (child == d->tg) {
7275 period = d->rt_period;
7276 runtime = d->rt_runtime;
7277 }
6f505b16 7278
9a7e0b18 7279 sum += to_ratio(period, runtime);
9f0c1e56 7280 }
6f505b16 7281
9a7e0b18
PZ
7282 if (sum > total)
7283 return -EINVAL;
7284
7285 return 0;
6f505b16
PZ
7286}
7287
9a7e0b18 7288static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7289{
8277434e
PT
7290 int ret;
7291
9a7e0b18
PZ
7292 struct rt_schedulable_data data = {
7293 .tg = tg,
7294 .rt_period = period,
7295 .rt_runtime = runtime,
7296 };
7297
8277434e
PT
7298 rcu_read_lock();
7299 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7300 rcu_read_unlock();
7301
7302 return ret;
521f1a24
DG
7303}
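
The walk_tg_tree() pass above applies tg_rt_schedulable() to every group, enforcing at each level that the children's combined ratio never exceeds the group's own and that nothing exceeds the global limit. A flattened, single-level sketch of that invariant; check_one_level() is a hypothetical helper, not kernel code:

#include <linux/errno.h>

/* Hypothetical single-level check: the children's ratios (as produced by
 * to_ratio()) must sum to no more than the parent's own ratio. */
static int check_one_level(unsigned long parent_ratio,
			   const unsigned long *child_ratio, int nr_children)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < nr_children; i++)
		sum += child_ratio[i];

	return sum > parent_ratio ? -EINVAL : 0;
}
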
7304
ab84d31e 7305static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7306 u64 rt_period, u64 rt_runtime)
6f505b16 7307{
ac086bc2 7308 int i, err = 0;
9f0c1e56 7309
9f0c1e56 7310 mutex_lock(&rt_constraints_mutex);
521f1a24 7311 read_lock(&tasklist_lock);
9a7e0b18
PZ
7312 err = __rt_schedulable(tg, rt_period, rt_runtime);
7313 if (err)
9f0c1e56 7314 goto unlock;
ac086bc2 7315
0986b11b 7316 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7317 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7318 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7319
7320 for_each_possible_cpu(i) {
7321 struct rt_rq *rt_rq = tg->rt_rq[i];
7322
0986b11b 7323 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7324 rt_rq->rt_runtime = rt_runtime;
0986b11b 7325 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7326 }
0986b11b 7327 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7328unlock:
521f1a24 7329 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7330 mutex_unlock(&rt_constraints_mutex);
7331
7332 return err;
6f505b16
PZ
7333}
7334
d0b27fa7
PZ
7335int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7336{
7337 u64 rt_runtime, rt_period;
7338
7339 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7340 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7341 if (rt_runtime_us < 0)
7342 rt_runtime = RUNTIME_INF;
7343
ab84d31e 7344 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7345}
7346
9f0c1e56
PZ
7347long sched_group_rt_runtime(struct task_group *tg)
7348{
7349 u64 rt_runtime_us;
7350
d0b27fa7 7351 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7352 return -1;
7353
d0b27fa7 7354 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7355 do_div(rt_runtime_us, NSEC_PER_USEC);
7356 return rt_runtime_us;
7357}
d0b27fa7
PZ
7358
7359int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7360{
7361 u64 rt_runtime, rt_period;
7362
7363 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7364 rt_runtime = tg->rt_bandwidth.rt_runtime;
7365
619b0488
R
7366 if (rt_period == 0)
7367 return -EINVAL;
7368
ab84d31e 7369 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7370}
7371
7372long sched_group_rt_period(struct task_group *tg)
7373{
7374 u64 rt_period_us;
7375
7376 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7377 do_div(rt_period_us, NSEC_PER_USEC);
7378 return rt_period_us;
7379}
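
Through the cftype table near the end of this file, the setters above appear to userspace as cpu.rt_period_us and cpu.rt_runtime_us. A hedged userspace sketch that caps a group at 100 ms of RT time per 1 s period; the mount point and group name are assumptions:

#include <stdio.h>

static int write_value(const char *path, unsigned long long val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%llu\n", val);
	fclose(f);
	return 0;
}

static int cap_rt_bandwidth(void)
{
	if (write_value("/sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us", 1000000ULL))
		return -1;	/* 1 s period */
	return write_value("/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us", 100000ULL);
}
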
7380
7381static int sched_rt_global_constraints(void)
7382{
4653f803 7383 u64 runtime, period;
d0b27fa7
PZ
7384 int ret = 0;
7385
ec5d4989
HS
7386 if (sysctl_sched_rt_period <= 0)
7387 return -EINVAL;
7388
4653f803
PZ
7389 runtime = global_rt_runtime();
7390 period = global_rt_period();
7391
7392 /*
7393 * Sanity check on the sysctl variables.
7394 */
7395 if (runtime > period && runtime != RUNTIME_INF)
7396 return -EINVAL;
10b612f4 7397
d0b27fa7 7398 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7399 read_lock(&tasklist_lock);
4653f803 7400 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7401 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7402 mutex_unlock(&rt_constraints_mutex);
7403
7404 return ret;
7405}
54e99124
DG
7406
7407int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7408{
7409 /* Don't accept realtime tasks when there is no way for them to run */
7410 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7411 return 0;
7412
7413 return 1;
7414}
7415
6d6bc0ad 7416#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7417static int sched_rt_global_constraints(void)
7418{
ac086bc2
PZ
7419 unsigned long flags;
7420 int i;
7421
ec5d4989
HS
7422 if (sysctl_sched_rt_period <= 0)
7423 return -EINVAL;
7424
60aa605d
PZ
7425 /*
7426	 * There are always some RT tasks in the root group
7427	 * -- migration, kstopmachine etc.
7428 */
7429 if (sysctl_sched_rt_runtime == 0)
7430 return -EBUSY;
7431
0986b11b 7432 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7433 for_each_possible_cpu(i) {
7434 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7435
0986b11b 7436 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7437 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7438 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7439 }
0986b11b 7440 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7441
d0b27fa7
PZ
7442 return 0;
7443}
6d6bc0ad 7444#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7445
7446int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 7447 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
7448 loff_t *ppos)
7449{
7450 int ret;
7451 int old_period, old_runtime;
7452 static DEFINE_MUTEX(mutex);
7453
7454 mutex_lock(&mutex);
7455 old_period = sysctl_sched_rt_period;
7456 old_runtime = sysctl_sched_rt_runtime;
7457
8d65af78 7458 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
7459
7460 if (!ret && write) {
7461 ret = sched_rt_global_constraints();
7462 if (ret) {
7463 sysctl_sched_rt_period = old_period;
7464 sysctl_sched_rt_runtime = old_runtime;
7465 } else {
7466 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7467 def_rt_bandwidth.rt_period =
7468 ns_to_ktime(global_rt_period());
7469 }
7470 }
7471 mutex_unlock(&mutex);
7472
7473 return ret;
7474}
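
sched_rt_handler() backs the kernel.sched_rt_period_us and kernel.sched_rt_runtime_us sysctls; with the defaults of 1000000 and 950000, realtime tasks may consume at most 95% of every second, leaving the remainder for normal tasks. A hedged userspace sketch reading the current runtime limit:

#include <stdio.h>

int main(void)
{
	int runtime_us;
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &runtime_us) == 1)
		printf("global RT runtime: %d us per period\n", runtime_us);
	fclose(f);
	return 0;
}
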
68318b8e 7475
052f1dc7 7476#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
7477
7478/* return corresponding task_group object of a cgroup */
2b01dfe3 7479static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 7480{
2b01dfe3
PM
7481 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7482 struct task_group, css);
68318b8e
SV
7483}
7484
761b3ef5 7485static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
68318b8e 7486{
ec7dc8ac 7487 struct task_group *tg, *parent;
68318b8e 7488
2b01dfe3 7489 if (!cgrp->parent) {
68318b8e 7490 /* This is early initialization for the top cgroup */
07e06b01 7491 return &root_task_group.css;
68318b8e
SV
7492 }
7493
ec7dc8ac
DG
7494 parent = cgroup_tg(cgrp->parent);
7495 tg = sched_create_group(parent);
68318b8e
SV
7496 if (IS_ERR(tg))
7497 return ERR_PTR(-ENOMEM);
7498
68318b8e
SV
7499 return &tg->css;
7500}
7501
761b3ef5 7502static void cpu_cgroup_destroy(struct cgroup *cgrp)
68318b8e 7503{
2b01dfe3 7504 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
7505
7506 sched_destroy_group(tg);
7507}
7508
761b3ef5 7509static int cpu_cgroup_can_attach(struct cgroup *cgrp,
bb9d97b6 7510 struct cgroup_taskset *tset)
68318b8e 7511{
bb9d97b6
TH
7512 struct task_struct *task;
7513
7514 cgroup_taskset_for_each(task, cgrp, tset) {
b68aa230 7515#ifdef CONFIG_RT_GROUP_SCHED
bb9d97b6
TH
7516 if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
7517 return -EINVAL;
b68aa230 7518#else
bb9d97b6
TH
7519 /* We don't support RT-tasks being in separate groups */
7520 if (task->sched_class != &fair_sched_class)
7521 return -EINVAL;
b68aa230 7522#endif
bb9d97b6 7523 }
be367d09
BB
7524 return 0;
7525}
68318b8e 7526
761b3ef5 7527static void cpu_cgroup_attach(struct cgroup *cgrp,
bb9d97b6 7528 struct cgroup_taskset *tset)
68318b8e 7529{
bb9d97b6
TH
7530 struct task_struct *task;
7531
7532 cgroup_taskset_for_each(task, cgrp, tset)
7533 sched_move_task(task);
68318b8e
SV
7534}
7535
068c5cc5 7536static void
761b3ef5
LZ
7537cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7538 struct task_struct *task)
068c5cc5
PZ
7539{
7540 /*
7541 * cgroup_exit() is called in the copy_process() failure path.
7542	 * Ignore this case since the task hasn't run yet; this avoids
7543 * trying to poke a half freed task state from generic code.
7544 */
7545 if (!(task->flags & PF_EXITING))
7546 return;
7547
7548 sched_move_task(task);
7549}
7550
052f1dc7 7551#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 7552static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 7553 u64 shareval)
68318b8e 7554{
c8b28116 7555 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
68318b8e
SV
7556}
7557
f4c753b7 7558static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 7559{
2b01dfe3 7560 struct task_group *tg = cgroup_tg(cgrp);
68318b8e 7561
c8b28116 7562 return (u64) scale_load_down(tg->shares);
68318b8e 7563}
ab84d31e
PT
7564
7565#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
7566static DEFINE_MUTEX(cfs_constraints_mutex);
7567
ab84d31e
PT
7568const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7569const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7570
a790de99
PT
7571static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7572
ab84d31e
PT
7573static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7574{
56f570e5 7575 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 7576 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
7577
7578 if (tg == &root_task_group)
7579 return -EINVAL;
7580
7581 /*
7582	 * Ensure we have at least some amount of bandwidth every period. This is
7583 * to prevent reaching a state of large arrears when throttled via
7584 * entity_tick() resulting in prolonged exit starvation.
7585 */
7586 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7587 return -EINVAL;
7588
7589 /*
7590	 * Likewise, bound things on the other side by preventing insane quota
7591 * periods. This also allows us to normalize in computing quota
7592 * feasibility.
7593 */
7594 if (period > max_cfs_quota_period)
7595 return -EINVAL;
7596
a790de99
PT
7597 mutex_lock(&cfs_constraints_mutex);
7598 ret = __cfs_schedulable(tg, period, quota);
7599 if (ret)
7600 goto out_unlock;
7601
58088ad0 7602 runtime_enabled = quota != RUNTIME_INF;
56f570e5
PT
7603 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7604 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
ab84d31e
PT
7605 raw_spin_lock_irq(&cfs_b->lock);
7606 cfs_b->period = ns_to_ktime(period);
7607 cfs_b->quota = quota;
58088ad0 7608
a9cf55b2 7609 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0
PT
7610 /* restart the period timer (if active) to handle new period expiry */
7611 if (runtime_enabled && cfs_b->timer_active) {
7612 /* force a reprogram */
7613 cfs_b->timer_active = 0;
7614 __start_cfs_bandwidth(cfs_b);
7615 }
ab84d31e
PT
7616 raw_spin_unlock_irq(&cfs_b->lock);
7617
7618 for_each_possible_cpu(i) {
7619 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 7620 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
7621
7622 raw_spin_lock_irq(&rq->lock);
58088ad0 7623 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 7624 cfs_rq->runtime_remaining = 0;
671fd9da 7625
029632fb 7626 if (cfs_rq->throttled)
671fd9da 7627 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
7628 raw_spin_unlock_irq(&rq->lock);
7629 }
a790de99
PT
7630out_unlock:
7631 mutex_unlock(&cfs_constraints_mutex);
ab84d31e 7632
a790de99 7633 return ret;
ab84d31e
PT
7634}
7635
7636int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7637{
7638 u64 quota, period;
7639
029632fb 7640 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7641 if (cfs_quota_us < 0)
7642 quota = RUNTIME_INF;
7643 else
7644 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7645
7646 return tg_set_cfs_bandwidth(tg, period, quota);
7647}
7648
7649long tg_get_cfs_quota(struct task_group *tg)
7650{
7651 u64 quota_us;
7652
029632fb 7653 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
7654 return -1;
7655
029632fb 7656 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
7657 do_div(quota_us, NSEC_PER_USEC);
7658
7659 return quota_us;
7660}
7661
7662int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7663{
7664 u64 quota, period;
7665
7666 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 7667 quota = tg->cfs_bandwidth.quota;
ab84d31e 7668
ab84d31e
PT
7669 return tg_set_cfs_bandwidth(tg, period, quota);
7670}
7671
7672long tg_get_cfs_period(struct task_group *tg)
7673{
7674 u64 cfs_period_us;
7675
029632fb 7676 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7677 do_div(cfs_period_us, NSEC_PER_USEC);
7678
7679 return cfs_period_us;
7680}
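
The quota/period pair above is exposed as cpu.cfs_quota_us and cpu.cfs_period_us; for instance, a quota of 50000 with a period of 100000 confines a group to roughly half of one CPU, while a quota of -1 removes the limit. A hedged userspace sketch (mount point and group name are assumptions):

#include <stdio.h>

/* Hypothetical: limit "mygroup" to about half a CPU (50 ms per 100 ms). */
static int limit_to_half_a_cpu(void)
{
	FILE *f;

	f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.cfs_period_us", "w");
	if (!f)
		return -1;
	fprintf(f, "100000\n");
	fclose(f);

	f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.cfs_quota_us", "w");
	if (!f)
		return -1;
	fprintf(f, "50000\n");
	fclose(f);
	return 0;
}
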
7681
7682static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7683{
7684 return tg_get_cfs_quota(cgroup_tg(cgrp));
7685}
7686
7687static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7688 s64 cfs_quota_us)
7689{
7690 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7691}
7692
7693static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7694{
7695 return tg_get_cfs_period(cgroup_tg(cgrp));
7696}
7697
7698static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7699 u64 cfs_period_us)
7700{
7701 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7702}
7703
a790de99
PT
7704struct cfs_schedulable_data {
7705 struct task_group *tg;
7706 u64 period, quota;
7707};
7708
7709/*
7710 * normalize group quota/period to be quota/max_period
7711 * note: units are usecs
7712 */
7713static u64 normalize_cfs_quota(struct task_group *tg,
7714 struct cfs_schedulable_data *d)
7715{
7716 u64 quota, period;
7717
7718 if (tg == d->tg) {
7719 period = d->period;
7720 quota = d->quota;
7721 } else {
7722 period = tg_get_cfs_period(tg);
7723 quota = tg_get_cfs_quota(tg);
7724 }
7725
7726 /* note: these should typically be equivalent */
7727 if (quota == RUNTIME_INF || quota == -1)
7728 return RUNTIME_INF;
7729
7730 return to_ratio(period, quota);
7731}
7732
7733static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7734{
7735 struct cfs_schedulable_data *d = data;
029632fb 7736 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
7737 s64 quota = 0, parent_quota = -1;
7738
7739 if (!tg->parent) {
7740 quota = RUNTIME_INF;
7741 } else {
029632fb 7742 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
7743
7744 quota = normalize_cfs_quota(tg, d);
7745 parent_quota = parent_b->hierarchal_quota;
7746
7747 /*
7748	 * Ensure max(child_quota) <= parent_quota; inherit when no
7749	 * limit is set.
7750 */
7751 if (quota == RUNTIME_INF)
7752 quota = parent_quota;
7753 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7754 return -EINVAL;
7755 }
7756 cfs_b->hierarchal_quota = quota;
7757
7758 return 0;
7759}
7760
7761static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7762{
8277434e 7763 int ret;
a790de99
PT
7764 struct cfs_schedulable_data data = {
7765 .tg = tg,
7766 .period = period,
7767 .quota = quota,
7768 };
7769
7770 if (quota != RUNTIME_INF) {
7771 do_div(data.period, NSEC_PER_USEC);
7772 do_div(data.quota, NSEC_PER_USEC);
7773 }
7774
8277434e
PT
7775 rcu_read_lock();
7776 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7777 rcu_read_unlock();
7778
7779 return ret;
a790de99 7780}
e8da1b18
NR
7781
7782static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7783 struct cgroup_map_cb *cb)
7784{
7785 struct task_group *tg = cgroup_tg(cgrp);
029632fb 7786 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18
NR
7787
7788 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7789 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7790 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7791
7792 return 0;
7793}
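
A read of the group's cpu.stat file lands in cpu_stats_show() above and reports how many enforcement periods have elapsed, how many of them throttled the group, and the cumulative throttled time in nanoseconds. A hedged userspace sketch that dumps the file; the path is an assumption:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.stat", "r");

	if (!f)
		return 1;
	/* Expect "nr_periods N", "nr_throttled N" and "throttled_time N". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
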
ab84d31e 7794#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 7795#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 7796
052f1dc7 7797#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 7798static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 7799 s64 val)
6f505b16 7800{
06ecb27c 7801 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
7802}
7803
06ecb27c 7804static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 7805{
06ecb27c 7806 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 7807}
d0b27fa7
PZ
7808
7809static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7810 u64 rt_period_us)
7811{
7812 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7813}
7814
7815static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7816{
7817 return sched_group_rt_period(cgroup_tg(cgrp));
7818}
6d6bc0ad 7819#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 7820
fe5c7cc2 7821static struct cftype cpu_files[] = {
052f1dc7 7822#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
7823 {
7824 .name = "shares",
f4c753b7
PM
7825 .read_u64 = cpu_shares_read_u64,
7826 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 7827 },
052f1dc7 7828#endif
ab84d31e
PT
7829#ifdef CONFIG_CFS_BANDWIDTH
7830 {
7831 .name = "cfs_quota_us",
7832 .read_s64 = cpu_cfs_quota_read_s64,
7833 .write_s64 = cpu_cfs_quota_write_s64,
7834 },
7835 {
7836 .name = "cfs_period_us",
7837 .read_u64 = cpu_cfs_period_read_u64,
7838 .write_u64 = cpu_cfs_period_write_u64,
7839 },
e8da1b18
NR
7840 {
7841 .name = "stat",
7842 .read_map = cpu_stats_show,
7843 },
ab84d31e 7844#endif
052f1dc7 7845#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7846 {
9f0c1e56 7847 .name = "rt_runtime_us",
06ecb27c
PM
7848 .read_s64 = cpu_rt_runtime_read,
7849 .write_s64 = cpu_rt_runtime_write,
6f505b16 7850 },
d0b27fa7
PZ
7851 {
7852 .name = "rt_period_us",
f4c753b7
PM
7853 .read_u64 = cpu_rt_period_read_uint,
7854 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 7855 },
052f1dc7 7856#endif
4baf6e33 7857 { } /* terminate */
68318b8e
SV
7858};
7859
68318b8e 7860struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
7861 .name = "cpu",
7862 .create = cpu_cgroup_create,
7863 .destroy = cpu_cgroup_destroy,
bb9d97b6
TH
7864 .can_attach = cpu_cgroup_can_attach,
7865 .attach = cpu_cgroup_attach,
068c5cc5 7866 .exit = cpu_cgroup_exit,
38605cae 7867 .subsys_id = cpu_cgroup_subsys_id,
4baf6e33 7868 .base_cftypes = cpu_files,
68318b8e
SV
7869 .early_init = 1,
7870};
7871
052f1dc7 7872#endif /* CONFIG_CGROUP_SCHED */
d842de87
SV
7873
7874#ifdef CONFIG_CGROUP_CPUACCT
7875
7876/*
7877 * CPU accounting code for task groups.
7878 *
7879 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7880 * (balbir@in.ibm.com).
7881 */
7882
73fbec60
FW
7883struct cpuacct root_cpuacct;
7884
d842de87 7885/* create a new cpu accounting group */
761b3ef5 7886static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
d842de87 7887{
54c707e9 7888 struct cpuacct *ca;
d842de87 7889
54c707e9
GC
7890 if (!cgrp->parent)
7891 return &root_cpuacct.css;
7892
7893 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
d842de87 7894 if (!ca)
ef12fefa 7895 goto out;
d842de87
SV
7896
7897 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
7898 if (!ca->cpuusage)
7899 goto out_free_ca;
7900
54c707e9
GC
7901 ca->cpustat = alloc_percpu(struct kernel_cpustat);
7902 if (!ca->cpustat)
7903 goto out_free_cpuusage;
934352f2 7904
d842de87 7905 return &ca->css;
ef12fefa 7906
54c707e9 7907out_free_cpuusage:
ef12fefa
BR
7908 free_percpu(ca->cpuusage);
7909out_free_ca:
7910 kfree(ca);
7911out:
7912 return ERR_PTR(-ENOMEM);
d842de87
SV
7913}
7914
7915/* destroy an existing cpu accounting group */
761b3ef5 7916static void cpuacct_destroy(struct cgroup *cgrp)
d842de87 7917{
32cd756a 7918 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87 7919
54c707e9 7920 free_percpu(ca->cpustat);
d842de87
SV
7921 free_percpu(ca->cpuusage);
7922 kfree(ca);
7923}
7924
720f5498
KC
7925static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
7926{
b36128c8 7927 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7928 u64 data;
7929
7930#ifndef CONFIG_64BIT
7931 /*
7932 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
7933 */
05fa785c 7934 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7935 data = *cpuusage;
05fa785c 7936 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7937#else
7938 data = *cpuusage;
7939#endif
7940
7941 return data;
7942}
7943
7944static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
7945{
b36128c8 7946 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7947
7948#ifndef CONFIG_64BIT
7949 /*
7950 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
7951 */
05fa785c 7952 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7953 *cpuusage = val;
05fa785c 7954 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7955#else
7956 *cpuusage = val;
7957#endif
7958}
7959
d842de87 7960/* return total cpu usage (in nanoseconds) of a group */
32cd756a 7961static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 7962{
32cd756a 7963 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
7964 u64 totalcpuusage = 0;
7965 int i;
7966
720f5498
KC
7967 for_each_present_cpu(i)
7968 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
7969
7970 return totalcpuusage;
7971}
7972
0297b803
DG
7973static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
7974 u64 reset)
7975{
7976 struct cpuacct *ca = cgroup_ca(cgrp);
7977 int err = 0;
7978 int i;
7979
7980 if (reset) {
7981 err = -EINVAL;
7982 goto out;
7983 }
7984
720f5498
KC
7985 for_each_present_cpu(i)
7986 cpuacct_cpuusage_write(ca, i, 0);
0297b803 7987
0297b803
DG
7988out:
7989 return err;
7990}
7991
e9515c3c
KC
7992static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
7993 struct seq_file *m)
7994{
7995 struct cpuacct *ca = cgroup_ca(cgroup);
7996 u64 percpu;
7997 int i;
7998
7999 for_each_present_cpu(i) {
8000 percpu = cpuacct_cpuusage_read(ca, i);
8001 seq_printf(m, "%llu ", (unsigned long long) percpu);
8002 }
8003 seq_printf(m, "\n");
8004 return 0;
8005}
8006
ef12fefa
BR
8007static const char *cpuacct_stat_desc[] = {
8008 [CPUACCT_STAT_USER] = "user",
8009 [CPUACCT_STAT_SYSTEM] = "system",
8010};
8011
8012static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
54c707e9 8013 struct cgroup_map_cb *cb)
ef12fefa
BR
8014{
8015 struct cpuacct *ca = cgroup_ca(cgrp);
54c707e9
GC
8016 int cpu;
8017 s64 val = 0;
ef12fefa 8018
54c707e9
GC
8019 for_each_online_cpu(cpu) {
8020 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8021 val += kcpustat->cpustat[CPUTIME_USER];
8022 val += kcpustat->cpustat[CPUTIME_NICE];
ef12fefa 8023 }
54c707e9
GC
8024 val = cputime64_to_clock_t(val);
8025 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
ef12fefa 8026
54c707e9
GC
8027 val = 0;
8028 for_each_online_cpu(cpu) {
8029 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8030 val += kcpustat->cpustat[CPUTIME_SYSTEM];
8031 val += kcpustat->cpustat[CPUTIME_IRQ];
8032 val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
ef12fefa 8033 }
54c707e9
GC
8034
8035 val = cputime64_to_clock_t(val);
8036 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
8037
ef12fefa
BR
8038 return 0;
8039}
8040
d842de87
SV
8041static struct cftype files[] = {
8042 {
8043 .name = "usage",
f4c753b7
PM
8044 .read_u64 = cpuusage_read,
8045 .write_u64 = cpuusage_write,
d842de87 8046 },
e9515c3c
KC
8047 {
8048 .name = "usage_percpu",
8049 .read_seq_string = cpuacct_percpu_seq_read,
8050 },
ef12fefa
BR
8051 {
8052 .name = "stat",
8053 .read_map = cpuacct_stats_show,
8054 },
4baf6e33 8055 { } /* terminate */
d842de87
SV
8056};
8057
d842de87
SV
8058/*
8059 * charge this task's execution time to its accounting group.
8060 *
8061 * called with rq->lock held.
8062 */
029632fb 8063void cpuacct_charge(struct task_struct *tsk, u64 cputime)
d842de87
SV
8064{
8065 struct cpuacct *ca;
934352f2 8066 int cpu;
d842de87 8067
c40c6f85 8068 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
8069 return;
8070
934352f2 8071 cpu = task_cpu(tsk);
a18b83b7
BR
8072
8073 rcu_read_lock();
8074
d842de87 8075 ca = task_ca(tsk);
d842de87 8076
44252e42 8077 for (; ca; ca = parent_ca(ca)) {
b36128c8 8078 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
8079 *cpuusage += cputime;
8080 }
a18b83b7
BR
8081
8082 rcu_read_unlock();
d842de87
SV
8083}
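
Because cpuacct_charge() walks from the task's group up to the root, a parent group's cpuacct.usage always includes the time accumulated by its descendants. A hedged userspace sketch reading the aggregate and per-CPU counters of one group; the mount point and group name are assumptions:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f;

	/* Total CPU time of the group and its descendants, in nanoseconds. */
	f = fopen("/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("usage: %s", buf);
		fclose(f);
	}

	/* The same counters broken down per CPU, on one space-separated line. */
	f = fopen("/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage_percpu", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("usage_percpu: %s", buf);
		fclose(f);
	}
	return 0;
}
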
8084
8085struct cgroup_subsys cpuacct_subsys = {
8086 .name = "cpuacct",
8087 .create = cpuacct_create,
8088 .destroy = cpuacct_destroy,
d842de87 8089 .subsys_id = cpuacct_subsys_id,
4baf6e33 8090 .base_cftypes = files,
d842de87
SV
8091};
8092#endif /* CONFIG_CGROUP_CPUACCT */