]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - kernel/sched.c
sched: Nominate idle load balancer from a semi-idle package.
[mirror_ubuntu-hirsute-kernel.git] / kernel / sched.c
CommitLineData
1da177e4
LT
1/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
c31f2e8a
IM
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
b9131769
IM
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
1da177e4
LT
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4
LT
34#include <linux/highmem.h>
35#include <linux/smp_lock.h>
36#include <asm/mmu_context.h>
37#include <linux/interrupt.h>
c59ede7b 38#include <linux/capability.h>
1da177e4
LT
39#include <linux/completion.h>
40#include <linux/kernel_stat.h>
9a11b49a 41#include <linux/debug_locks.h>
1da177e4
LT
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
7dfb7103 45#include <linux/freezer.h>
198e2f18 46#include <linux/vmalloc.h>
1da177e4
LT
47#include <linux/blkdev.h>
48#include <linux/delay.h>
b488893a 49#include <linux/pid_namespace.h>
1da177e4
LT
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/kthread.h>
b5aadf7f 58#include <linux/proc_fs.h>
1da177e4 59#include <linux/seq_file.h>
e692ab53 60#include <linux/sysctl.h>
1da177e4
LT
61#include <linux/syscalls.h>
62#include <linux/times.h>
8f0ab514 63#include <linux/tsacct_kern.h>
c6fd91f0 64#include <linux/kprobes.h>
0ff92245 65#include <linux/delayacct.h>
5517d86b 66#include <linux/reciprocal_div.h>
dff06c15 67#include <linux/unistd.h>
f5ff8422 68#include <linux/pagemap.h>
8f4d37ec 69#include <linux/hrtimer.h>
30914a58 70#include <linux/tick.h>
434d53b0 71#include <linux/bootmem.h>
f00b45c1
PZ
72#include <linux/debugfs.h>
73#include <linux/ctype.h>
6cd8a4bb 74#include <linux/ftrace.h>
0a16b607 75#include <trace/sched.h>
1da177e4 76
5517d86b 77#include <asm/tlb.h>
838225b4 78#include <asm/irq_regs.h>
1da177e4 79
6e0534f2
GH
80#include "sched_cpupri.h"
81
1da177e4
LT
82/*
83 * Convert user-nice values [ -20 ... 0 ... 19 ]
84 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
85 * and back.
86 */
87#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
88#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
89#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
90
91/*
92 * 'User priority' is the nice value converted to something we
93 * can work with better when scaling various scheduler parameters,
94 * it's a [ 0 ... 39 ] range.
95 */
96#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
97#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
98#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
99
100/*
d7876a08 101 * Helpers for converting nanosecond timing to jiffy resolution
1da177e4 102 */
d6322faf 103#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
1da177e4 104
6aa645ea
IM
105#define NICE_0_LOAD SCHED_LOAD_SCALE
106#define NICE_0_SHIFT SCHED_LOAD_SHIFT
107
1da177e4
LT
108/*
109 * These are the 'tuning knobs' of the scheduler:
110 *
a4ec24b4 111 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1da177e4
LT
112 * Timeslices get refilled after they expire.
113 */
1da177e4 114#define DEF_TIMESLICE (100 * HZ / 1000)
2dd73a4f 115
d0b27fa7
PZ
116/*
117 * single value that denotes runtime == period, ie unlimited time.
118 */
119#define RUNTIME_INF ((u64)~0ULL)
120
7e066fb8
MD
121DEFINE_TRACE(sched_wait_task);
122DEFINE_TRACE(sched_wakeup);
123DEFINE_TRACE(sched_wakeup_new);
124DEFINE_TRACE(sched_switch);
125DEFINE_TRACE(sched_migrate_task);
126
5517d86b 127#ifdef CONFIG_SMP
fd2ab30b
SN
128
129static void double_rq_lock(struct rq *rq1, struct rq *rq2);
130
5517d86b
ED
131/*
132 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
133 * Since cpu_power is a 'constant', we can use a reciprocal divide.
134 */
135static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
136{
137 return reciprocal_divide(load, sg->reciprocal_cpu_power);
138}
139
140/*
141 * Each time a sched group cpu_power is changed,
142 * we must compute its reciprocal value
143 */
144static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
145{
146 sg->__cpu_power += val;
147 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
148}
149#endif
150
e05606d3
IM
151static inline int rt_policy(int policy)
152{
3f33a7ce 153 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
e05606d3
IM
154 return 1;
155 return 0;
156}
157
158static inline int task_has_rt_policy(struct task_struct *p)
159{
160 return rt_policy(p->policy);
161}
162
1da177e4 163/*
6aa645ea 164 * This is the priority-queue data structure of the RT scheduling class:
1da177e4 165 */
6aa645ea
IM
166struct rt_prio_array {
167 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
168 struct list_head queue[MAX_RT_PRIO];
169};
170
d0b27fa7 171struct rt_bandwidth {
ea736ed5
IM
172 /* nests inside the rq lock: */
173 spinlock_t rt_runtime_lock;
174 ktime_t rt_period;
175 u64 rt_runtime;
176 struct hrtimer rt_period_timer;
d0b27fa7
PZ
177};
178
179static struct rt_bandwidth def_rt_bandwidth;
180
181static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
182
183static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
184{
185 struct rt_bandwidth *rt_b =
186 container_of(timer, struct rt_bandwidth, rt_period_timer);
187 ktime_t now;
188 int overrun;
189 int idle = 0;
190
191 for (;;) {
192 now = hrtimer_cb_get_time(timer);
193 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
194
195 if (!overrun)
196 break;
197
198 idle = do_sched_rt_period_timer(rt_b, overrun);
199 }
200
201 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
202}
203
204static
205void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
206{
207 rt_b->rt_period = ns_to_ktime(period);
208 rt_b->rt_runtime = runtime;
209
ac086bc2
PZ
210 spin_lock_init(&rt_b->rt_runtime_lock);
211
d0b27fa7
PZ
212 hrtimer_init(&rt_b->rt_period_timer,
213 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
214 rt_b->rt_period_timer.function = sched_rt_period_timer;
d0b27fa7
PZ
215}
216
c8bfff6d
KH
217static inline int rt_bandwidth_enabled(void)
218{
219 return sysctl_sched_rt_runtime >= 0;
d0b27fa7
PZ
220}
221
222static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
223{
224 ktime_t now;
225
cac64d00 226 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
d0b27fa7
PZ
227 return;
228
229 if (hrtimer_active(&rt_b->rt_period_timer))
230 return;
231
232 spin_lock(&rt_b->rt_runtime_lock);
233 for (;;) {
7f1e2ca9
PZ
234 unsigned long delta;
235 ktime_t soft, hard;
236
d0b27fa7
PZ
237 if (hrtimer_active(&rt_b->rt_period_timer))
238 break;
239
240 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
241 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
7f1e2ca9
PZ
242
243 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
244 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
245 delta = ktime_to_ns(ktime_sub(hard, soft));
246 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
247 HRTIMER_MODE_ABS, 0);
d0b27fa7
PZ
248 }
249 spin_unlock(&rt_b->rt_runtime_lock);
250}
251
252#ifdef CONFIG_RT_GROUP_SCHED
253static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
254{
255 hrtimer_cancel(&rt_b->rt_period_timer);
256}
257#endif
258
712555ee
HC
259/*
260 * sched_domains_mutex serializes calls to arch_init_sched_domains,
261 * detach_destroy_domains and partition_sched_domains.
262 */
263static DEFINE_MUTEX(sched_domains_mutex);
264
052f1dc7 265#ifdef CONFIG_GROUP_SCHED
29f59db3 266
68318b8e
SV
267#include <linux/cgroup.h>
268
29f59db3
SV
269struct cfs_rq;
270
6f505b16
PZ
271static LIST_HEAD(task_groups);
272
29f59db3 273/* task group related information */
4cf86d77 274struct task_group {
052f1dc7 275#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
276 struct cgroup_subsys_state css;
277#endif
052f1dc7 278
6c415b92
AB
279#ifdef CONFIG_USER_SCHED
280 uid_t uid;
281#endif
282
052f1dc7 283#ifdef CONFIG_FAIR_GROUP_SCHED
29f59db3
SV
284 /* schedulable entities of this group on each cpu */
285 struct sched_entity **se;
286 /* runqueue "owned" by this group on each cpu */
287 struct cfs_rq **cfs_rq;
288 unsigned long shares;
052f1dc7
PZ
289#endif
290
291#ifdef CONFIG_RT_GROUP_SCHED
292 struct sched_rt_entity **rt_se;
293 struct rt_rq **rt_rq;
294
d0b27fa7 295 struct rt_bandwidth rt_bandwidth;
052f1dc7 296#endif
6b2d7700 297
ae8393e5 298 struct rcu_head rcu;
6f505b16 299 struct list_head list;
f473aa5e
PZ
300
301 struct task_group *parent;
302 struct list_head siblings;
303 struct list_head children;
29f59db3
SV
304};
305
354d60c2 306#ifdef CONFIG_USER_SCHED
eff766a6 307
6c415b92
AB
308/* Helper function to pass uid information to create_sched_user() */
309void set_tg_uid(struct user_struct *user)
310{
311 user->tg->uid = user->uid;
312}
313
eff766a6
PZ
314/*
315 * Root task group.
316 * Every UID task group (including init_task_group aka UID-0) will
317 * be a child to this group.
318 */
319struct task_group root_task_group;
320
052f1dc7 321#ifdef CONFIG_FAIR_GROUP_SCHED
29f59db3
SV
322/* Default task group's sched entity on each cpu */
323static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
324/* Default task group's cfs_rq on each cpu */
325static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
6d6bc0ad 326#endif /* CONFIG_FAIR_GROUP_SCHED */
052f1dc7
PZ
327
328#ifdef CONFIG_RT_GROUP_SCHED
329static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
330static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
6d6bc0ad 331#endif /* CONFIG_RT_GROUP_SCHED */
9a7e0b18 332#else /* !CONFIG_USER_SCHED */
eff766a6 333#define root_task_group init_task_group
9a7e0b18 334#endif /* CONFIG_USER_SCHED */
6f505b16 335
8ed36996 336/* task_group_lock serializes add/remove of task groups and also changes to
ec2c507f
SV
337 * a task group's cpu shares.
338 */
8ed36996 339static DEFINE_SPINLOCK(task_group_lock);
ec2c507f 340
57310a98
PZ
341#ifdef CONFIG_SMP
342static int root_task_group_empty(void)
343{
344 return list_empty(&root_task_group.children);
345}
346#endif
347
052f1dc7 348#ifdef CONFIG_FAIR_GROUP_SCHED
052f1dc7
PZ
349#ifdef CONFIG_USER_SCHED
350# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
6d6bc0ad 351#else /* !CONFIG_USER_SCHED */
052f1dc7 352# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
6d6bc0ad 353#endif /* CONFIG_USER_SCHED */
052f1dc7 354
cb4ad1ff 355/*
2e084786
LJ
356 * A weight of 0 or 1 can cause arithmetics problems.
357 * A weight of a cfs_rq is the sum of weights of which entities
358 * are queued on this cfs_rq, so a weight of a entity should not be
359 * too large, so as the shares value of a task group.
cb4ad1ff
MX
360 * (The default weight is 1024 - so there's no practical
361 * limitation from this.)
362 */
18d95a28 363#define MIN_SHARES 2
2e084786 364#define MAX_SHARES (1UL << 18)
18d95a28 365
052f1dc7
PZ
366static int init_task_group_load = INIT_TASK_GROUP_LOAD;
367#endif
368
29f59db3 369/* Default task group.
3a252015 370 * Every task in system belong to this group at bootup.
29f59db3 371 */
434d53b0 372struct task_group init_task_group;
29f59db3
SV
373
374/* return group to which a task belongs */
4cf86d77 375static inline struct task_group *task_group(struct task_struct *p)
29f59db3 376{
4cf86d77 377 struct task_group *tg;
9b5b7751 378
052f1dc7 379#ifdef CONFIG_USER_SCHED
c69e8d9c
DH
380 rcu_read_lock();
381 tg = __task_cred(p)->user->tg;
382 rcu_read_unlock();
052f1dc7 383#elif defined(CONFIG_CGROUP_SCHED)
68318b8e
SV
384 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
385 struct task_group, css);
24e377a8 386#else
41a2d6cf 387 tg = &init_task_group;
24e377a8 388#endif
9b5b7751 389 return tg;
29f59db3
SV
390}
391
392/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
6f505b16 393static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
29f59db3 394{
052f1dc7 395#ifdef CONFIG_FAIR_GROUP_SCHED
ce96b5ac
DA
396 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
397 p->se.parent = task_group(p)->se[cpu];
052f1dc7 398#endif
6f505b16 399
052f1dc7 400#ifdef CONFIG_RT_GROUP_SCHED
6f505b16
PZ
401 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
402 p->rt.parent = task_group(p)->rt_se[cpu];
052f1dc7 403#endif
29f59db3
SV
404}
405
406#else
407
57310a98
PZ
408#ifdef CONFIG_SMP
409static int root_task_group_empty(void)
410{
411 return 1;
412}
413#endif
414
6f505b16 415static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
83378269
PZ
416static inline struct task_group *task_group(struct task_struct *p)
417{
418 return NULL;
419}
29f59db3 420
052f1dc7 421#endif /* CONFIG_GROUP_SCHED */
29f59db3 422
6aa645ea
IM
423/* CFS-related fields in a runqueue */
424struct cfs_rq {
425 struct load_weight load;
426 unsigned long nr_running;
427
6aa645ea 428 u64 exec_clock;
e9acbff6 429 u64 min_vruntime;
6aa645ea
IM
430
431 struct rb_root tasks_timeline;
432 struct rb_node *rb_leftmost;
4a55bd5e
PZ
433
434 struct list_head tasks;
435 struct list_head *balance_iterator;
436
437 /*
438 * 'curr' points to currently running entity on this cfs_rq.
6aa645ea
IM
439 * It is set to NULL otherwise (i.e when none are currently running).
440 */
4793241b 441 struct sched_entity *curr, *next, *last;
ddc97297 442
5ac5c4d6 443 unsigned int nr_spread_over;
ddc97297 444
62160e3f 445#ifdef CONFIG_FAIR_GROUP_SCHED
6aa645ea
IM
446 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
447
41a2d6cf
IM
448 /*
449 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
6aa645ea
IM
450 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
451 * (like users, containers etc.)
452 *
453 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
454 * list is used during load balance.
455 */
41a2d6cf
IM
456 struct list_head leaf_cfs_rq_list;
457 struct task_group *tg; /* group that "owns" this runqueue */
c09595f6
PZ
458
459#ifdef CONFIG_SMP
c09595f6 460 /*
c8cba857 461 * the part of load.weight contributed by tasks
c09595f6 462 */
c8cba857 463 unsigned long task_weight;
c09595f6 464
c8cba857
PZ
465 /*
466 * h_load = weight * f(tg)
467 *
468 * Where f(tg) is the recursive weight fraction assigned to
469 * this group.
470 */
471 unsigned long h_load;
c09595f6 472
c8cba857
PZ
473 /*
474 * this cpu's part of tg->shares
475 */
476 unsigned long shares;
f1d239f7
PZ
477
478 /*
479 * load.weight at the time we set shares
480 */
481 unsigned long rq_weight;
c09595f6 482#endif
6aa645ea
IM
483#endif
484};
1da177e4 485
6aa645ea
IM
486/* Real-Time classes' related field in a runqueue: */
487struct rt_rq {
488 struct rt_prio_array active;
63489e45 489 unsigned long rt_nr_running;
052f1dc7 490#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
e864c499
GH
491 struct {
492 int curr; /* highest queued rt task prio */
398a153b 493#ifdef CONFIG_SMP
e864c499 494 int next; /* next highest */
398a153b 495#endif
e864c499 496 } highest_prio;
6f505b16 497#endif
fa85ae24 498#ifdef CONFIG_SMP
73fe6aae 499 unsigned long rt_nr_migratory;
a22d7fc1 500 int overloaded;
917b627d 501 struct plist_head pushable_tasks;
fa85ae24 502#endif
6f505b16 503 int rt_throttled;
fa85ae24 504 u64 rt_time;
ac086bc2 505 u64 rt_runtime;
ea736ed5 506 /* Nests inside the rq lock: */
ac086bc2 507 spinlock_t rt_runtime_lock;
6f505b16 508
052f1dc7 509#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc
PZ
510 unsigned long rt_nr_boosted;
511
6f505b16
PZ
512 struct rq *rq;
513 struct list_head leaf_rt_rq_list;
514 struct task_group *tg;
515 struct sched_rt_entity *rt_se;
516#endif
6aa645ea
IM
517};
518
57d885fe
GH
519#ifdef CONFIG_SMP
520
521/*
522 * We add the notion of a root-domain which will be used to define per-domain
0eab9146
IM
523 * variables. Each exclusive cpuset essentially defines an island domain by
524 * fully partitioning the member cpus from any other cpuset. Whenever a new
57d885fe
GH
525 * exclusive cpuset is created, we also create and attach a new root-domain
526 * object.
527 *
57d885fe
GH
528 */
529struct root_domain {
530 atomic_t refcount;
c6c4927b
RR
531 cpumask_var_t span;
532 cpumask_var_t online;
637f5085 533
0eab9146 534 /*
637f5085
GH
535 * The "RT overload" flag: it gets set if a CPU has more than
536 * one runnable RT task.
537 */
c6c4927b 538 cpumask_var_t rto_mask;
0eab9146 539 atomic_t rto_count;
6e0534f2
GH
540#ifdef CONFIG_SMP
541 struct cpupri cpupri;
542#endif
7a09b1a2
VS
543#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
544 /*
545 * Preferred wake up cpu nominated by sched_mc balance that will be
546 * used when most cpus are idle in the system indicating overall very
547 * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
548 */
549 unsigned int sched_mc_preferred_wakeup_cpu;
550#endif
57d885fe
GH
551};
552
dc938520
GH
553/*
554 * By default the system creates a single root-domain with all cpus as
555 * members (mimicking the global state we have today).
556 */
57d885fe
GH
557static struct root_domain def_root_domain;
558
559#endif
560
1da177e4
LT
561/*
562 * This is the main, per-CPU runqueue data structure.
563 *
564 * Locking rule: those places that want to lock multiple runqueues
565 * (such as the load balancing or the thread migration code), lock
566 * acquire operations must be ordered by ascending &runqueue.
567 */
70b97a7f 568struct rq {
d8016491
IM
569 /* runqueue lock: */
570 spinlock_t lock;
1da177e4
LT
571
572 /*
573 * nr_running and cpu_load should be in the same cacheline because
574 * remote CPUs use both these fields when doing load calculation.
575 */
576 unsigned long nr_running;
6aa645ea
IM
577 #define CPU_LOAD_IDX_MAX 5
578 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
46cb4b7c 579#ifdef CONFIG_NO_HZ
15934a37 580 unsigned long last_tick_seen;
46cb4b7c
SS
581 unsigned char in_nohz_recently;
582#endif
d8016491
IM
583 /* capture load from *all* tasks on this cpu: */
584 struct load_weight load;
6aa645ea
IM
585 unsigned long nr_load_updates;
586 u64 nr_switches;
587
588 struct cfs_rq cfs;
6f505b16 589 struct rt_rq rt;
6f505b16 590
6aa645ea 591#ifdef CONFIG_FAIR_GROUP_SCHED
d8016491
IM
592 /* list of leaf cfs_rq on this cpu: */
593 struct list_head leaf_cfs_rq_list;
052f1dc7
PZ
594#endif
595#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 596 struct list_head leaf_rt_rq_list;
1da177e4 597#endif
1da177e4
LT
598
599 /*
600 * This is part of a global counter where only the total sum
601 * over all CPUs matters. A task can increase this counter on
602 * one CPU and if it got migrated afterwards it may decrease
603 * it on another CPU. Always updated under the runqueue lock:
604 */
605 unsigned long nr_uninterruptible;
606
36c8b586 607 struct task_struct *curr, *idle;
c9819f45 608 unsigned long next_balance;
1da177e4 609 struct mm_struct *prev_mm;
6aa645ea 610
3e51f33f 611 u64 clock;
6aa645ea 612
1da177e4
LT
613 atomic_t nr_iowait;
614
615#ifdef CONFIG_SMP
0eab9146 616 struct root_domain *rd;
1da177e4
LT
617 struct sched_domain *sd;
618
a0a522ce 619 unsigned char idle_at_tick;
1da177e4
LT
620 /* For active balancing */
621 int active_balance;
622 int push_cpu;
d8016491
IM
623 /* cpu of this runqueue: */
624 int cpu;
1f11eb6a 625 int online;
1da177e4 626
a8a51d5e 627 unsigned long avg_load_per_task;
1da177e4 628
36c8b586 629 struct task_struct *migration_thread;
1da177e4
LT
630 struct list_head migration_queue;
631#endif
632
8f4d37ec 633#ifdef CONFIG_SCHED_HRTICK
31656519
PZ
634#ifdef CONFIG_SMP
635 int hrtick_csd_pending;
636 struct call_single_data hrtick_csd;
637#endif
8f4d37ec
PZ
638 struct hrtimer hrtick_timer;
639#endif
640
1da177e4
LT
641#ifdef CONFIG_SCHEDSTATS
642 /* latency stats */
643 struct sched_info rq_sched_info;
9c2c4802
KC
644 unsigned long long rq_cpu_time;
645 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1da177e4
LT
646
647 /* sys_sched_yield() stats */
480b9434 648 unsigned int yld_count;
1da177e4
LT
649
650 /* schedule() stats */
480b9434
KC
651 unsigned int sched_switch;
652 unsigned int sched_count;
653 unsigned int sched_goidle;
1da177e4
LT
654
655 /* try_to_wake_up() stats */
480b9434
KC
656 unsigned int ttwu_count;
657 unsigned int ttwu_local;
b8efb561
IM
658
659 /* BKL stats */
480b9434 660 unsigned int bkl_count;
1da177e4
LT
661#endif
662};
663
f34e3b61 664static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1da177e4 665
15afe09b 666static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
dd41f596 667{
15afe09b 668 rq->curr->sched_class->check_preempt_curr(rq, p, sync);
dd41f596
IM
669}
670
0a2966b4
CL
671static inline int cpu_of(struct rq *rq)
672{
673#ifdef CONFIG_SMP
674 return rq->cpu;
675#else
676 return 0;
677#endif
678}
679
674311d5
NP
680/*
681 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1a20ff27 682 * See detach_destroy_domains: synchronize_sched for details.
674311d5
NP
683 *
684 * The domain tree of any CPU may only be accessed from within
685 * preempt-disabled sections.
686 */
48f24c4d
IM
687#define for_each_domain(cpu, __sd) \
688 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
1da177e4
LT
689
690#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
691#define this_rq() (&__get_cpu_var(runqueues))
692#define task_rq(p) cpu_rq(task_cpu(p))
693#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
694
3e51f33f
PZ
695static inline void update_rq_clock(struct rq *rq)
696{
697 rq->clock = sched_clock_cpu(cpu_of(rq));
698}
699
bf5c91ba
IM
700/*
701 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
702 */
703#ifdef CONFIG_SCHED_DEBUG
704# define const_debug __read_mostly
705#else
706# define const_debug static const
707#endif
708
017730c1
IM
709/**
710 * runqueue_is_locked
711 *
712 * Returns true if the current cpu runqueue is locked.
713 * This interface allows printk to be called with the runqueue lock
714 * held and know whether or not it is OK to wake up the klogd.
715 */
716int runqueue_is_locked(void)
717{
718 int cpu = get_cpu();
719 struct rq *rq = cpu_rq(cpu);
720 int ret;
721
722 ret = spin_is_locked(&rq->lock);
723 put_cpu();
724 return ret;
725}
726
bf5c91ba
IM
727/*
728 * Debugging: various feature bits
729 */
f00b45c1
PZ
730
731#define SCHED_FEAT(name, enabled) \
732 __SCHED_FEAT_##name ,
733
bf5c91ba 734enum {
f00b45c1 735#include "sched_features.h"
bf5c91ba
IM
736};
737
f00b45c1
PZ
738#undef SCHED_FEAT
739
740#define SCHED_FEAT(name, enabled) \
741 (1UL << __SCHED_FEAT_##name) * enabled |
742
bf5c91ba 743const_debug unsigned int sysctl_sched_features =
f00b45c1
PZ
744#include "sched_features.h"
745 0;
746
747#undef SCHED_FEAT
748
749#ifdef CONFIG_SCHED_DEBUG
750#define SCHED_FEAT(name, enabled) \
751 #name ,
752
983ed7a6 753static __read_mostly char *sched_feat_names[] = {
f00b45c1
PZ
754#include "sched_features.h"
755 NULL
756};
757
758#undef SCHED_FEAT
759
34f3a814 760static int sched_feat_show(struct seq_file *m, void *v)
f00b45c1 761{
f00b45c1
PZ
762 int i;
763
764 for (i = 0; sched_feat_names[i]; i++) {
34f3a814
LZ
765 if (!(sysctl_sched_features & (1UL << i)))
766 seq_puts(m, "NO_");
767 seq_printf(m, "%s ", sched_feat_names[i]);
f00b45c1 768 }
34f3a814 769 seq_puts(m, "\n");
f00b45c1 770
34f3a814 771 return 0;
f00b45c1
PZ
772}
773
774static ssize_t
775sched_feat_write(struct file *filp, const char __user *ubuf,
776 size_t cnt, loff_t *ppos)
777{
778 char buf[64];
779 char *cmp = buf;
780 int neg = 0;
781 int i;
782
783 if (cnt > 63)
784 cnt = 63;
785
786 if (copy_from_user(&buf, ubuf, cnt))
787 return -EFAULT;
788
789 buf[cnt] = 0;
790
c24b7c52 791 if (strncmp(buf, "NO_", 3) == 0) {
f00b45c1
PZ
792 neg = 1;
793 cmp += 3;
794 }
795
796 for (i = 0; sched_feat_names[i]; i++) {
797 int len = strlen(sched_feat_names[i]);
798
799 if (strncmp(cmp, sched_feat_names[i], len) == 0) {
800 if (neg)
801 sysctl_sched_features &= ~(1UL << i);
802 else
803 sysctl_sched_features |= (1UL << i);
804 break;
805 }
806 }
807
808 if (!sched_feat_names[i])
809 return -EINVAL;
810
811 filp->f_pos += cnt;
812
813 return cnt;
814}
815
34f3a814
LZ
816static int sched_feat_open(struct inode *inode, struct file *filp)
817{
818 return single_open(filp, sched_feat_show, NULL);
819}
820
f00b45c1 821static struct file_operations sched_feat_fops = {
34f3a814
LZ
822 .open = sched_feat_open,
823 .write = sched_feat_write,
824 .read = seq_read,
825 .llseek = seq_lseek,
826 .release = single_release,
f00b45c1
PZ
827};
828
829static __init int sched_init_debug(void)
830{
f00b45c1
PZ
831 debugfs_create_file("sched_features", 0644, NULL, NULL,
832 &sched_feat_fops);
833
834 return 0;
835}
836late_initcall(sched_init_debug);
837
838#endif
839
840#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
bf5c91ba 841
b82d9fdd
PZ
842/*
843 * Number of tasks to iterate in a single balance run.
844 * Limited because this is done with IRQs disabled.
845 */
846const_debug unsigned int sysctl_sched_nr_migrate = 32;
847
2398f2c6
PZ
848/*
849 * ratelimit for updating the group shares.
55cd5340 850 * default: 0.25ms
2398f2c6 851 */
55cd5340 852unsigned int sysctl_sched_shares_ratelimit = 250000;
2398f2c6 853
ffda12a1
PZ
854/*
855 * Inject some fuzzyness into changing the per-cpu group shares
856 * this avoids remote rq-locks at the expense of fairness.
857 * default: 4
858 */
859unsigned int sysctl_sched_shares_thresh = 4;
860
fa85ae24 861/*
9f0c1e56 862 * period over which we measure -rt task cpu usage in us.
fa85ae24
PZ
863 * default: 1s
864 */
9f0c1e56 865unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 866
6892b75e
IM
867static __read_mostly int scheduler_running;
868
9f0c1e56
PZ
869/*
870 * part of the period that we allow rt tasks to run in us.
871 * default: 0.95s
872 */
873int sysctl_sched_rt_runtime = 950000;
fa85ae24 874
d0b27fa7
PZ
875static inline u64 global_rt_period(void)
876{
877 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
878}
879
880static inline u64 global_rt_runtime(void)
881{
e26873bb 882 if (sysctl_sched_rt_runtime < 0)
d0b27fa7
PZ
883 return RUNTIME_INF;
884
885 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
886}
fa85ae24 887
1da177e4 888#ifndef prepare_arch_switch
4866cde0
NP
889# define prepare_arch_switch(next) do { } while (0)
890#endif
891#ifndef finish_arch_switch
892# define finish_arch_switch(prev) do { } while (0)
893#endif
894
051a1d1a
DA
895static inline int task_current(struct rq *rq, struct task_struct *p)
896{
897 return rq->curr == p;
898}
899
4866cde0 900#ifndef __ARCH_WANT_UNLOCKED_CTXSW
70b97a7f 901static inline int task_running(struct rq *rq, struct task_struct *p)
4866cde0 902{
051a1d1a 903 return task_current(rq, p);
4866cde0
NP
904}
905
70b97a7f 906static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
4866cde0
NP
907{
908}
909
70b97a7f 910static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
4866cde0 911{
da04c035
IM
912#ifdef CONFIG_DEBUG_SPINLOCK
913 /* this is a valid case when another task releases the spinlock */
914 rq->lock.owner = current;
915#endif
8a25d5de
IM
916 /*
917 * If we are tracking spinlock dependencies then we have to
918 * fix up the runqueue lock - which gets 'carried over' from
919 * prev into current:
920 */
921 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
922
4866cde0
NP
923 spin_unlock_irq(&rq->lock);
924}
925
926#else /* __ARCH_WANT_UNLOCKED_CTXSW */
70b97a7f 927static inline int task_running(struct rq *rq, struct task_struct *p)
4866cde0
NP
928{
929#ifdef CONFIG_SMP
930 return p->oncpu;
931#else
051a1d1a 932 return task_current(rq, p);
4866cde0
NP
933#endif
934}
935
70b97a7f 936static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
4866cde0
NP
937{
938#ifdef CONFIG_SMP
939 /*
940 * We can optimise this out completely for !SMP, because the
941 * SMP rebalancing from interrupt is the only thing that cares
942 * here.
943 */
944 next->oncpu = 1;
945#endif
946#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
947 spin_unlock_irq(&rq->lock);
948#else
949 spin_unlock(&rq->lock);
950#endif
951}
952
70b97a7f 953static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
4866cde0
NP
954{
955#ifdef CONFIG_SMP
956 /*
957 * After ->oncpu is cleared, the task can be moved to a different CPU.
958 * We must ensure this doesn't happen until the switch is completely
959 * finished.
960 */
961 smp_wmb();
962 prev->oncpu = 0;
963#endif
964#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
965 local_irq_enable();
1da177e4 966#endif
4866cde0
NP
967}
968#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
1da177e4 969
b29739f9
IM
970/*
971 * __task_rq_lock - lock the runqueue a given task resides on.
972 * Must be called interrupts disabled.
973 */
70b97a7f 974static inline struct rq *__task_rq_lock(struct task_struct *p)
b29739f9
IM
975 __acquires(rq->lock)
976{
3a5c359a
AK
977 for (;;) {
978 struct rq *rq = task_rq(p);
979 spin_lock(&rq->lock);
980 if (likely(rq == task_rq(p)))
981 return rq;
b29739f9 982 spin_unlock(&rq->lock);
b29739f9 983 }
b29739f9
IM
984}
985
1da177e4
LT
986/*
987 * task_rq_lock - lock the runqueue a given task resides on and disable
41a2d6cf 988 * interrupts. Note the ordering: we can safely lookup the task_rq without
1da177e4
LT
989 * explicitly disabling preemption.
990 */
70b97a7f 991static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1da177e4
LT
992 __acquires(rq->lock)
993{
70b97a7f 994 struct rq *rq;
1da177e4 995
3a5c359a
AK
996 for (;;) {
997 local_irq_save(*flags);
998 rq = task_rq(p);
999 spin_lock(&rq->lock);
1000 if (likely(rq == task_rq(p)))
1001 return rq;
1da177e4 1002 spin_unlock_irqrestore(&rq->lock, *flags);
1da177e4 1003 }
1da177e4
LT
1004}
1005
ad474cac
ON
1006void task_rq_unlock_wait(struct task_struct *p)
1007{
1008 struct rq *rq = task_rq(p);
1009
1010 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
1011 spin_unlock_wait(&rq->lock);
1012}
1013
a9957449 1014static void __task_rq_unlock(struct rq *rq)
b29739f9
IM
1015 __releases(rq->lock)
1016{
1017 spin_unlock(&rq->lock);
1018}
1019
70b97a7f 1020static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
1da177e4
LT
1021 __releases(rq->lock)
1022{
1023 spin_unlock_irqrestore(&rq->lock, *flags);
1024}
1025
1da177e4 1026/*
cc2a73b5 1027 * this_rq_lock - lock this runqueue and disable interrupts.
1da177e4 1028 */
a9957449 1029static struct rq *this_rq_lock(void)
1da177e4
LT
1030 __acquires(rq->lock)
1031{
70b97a7f 1032 struct rq *rq;
1da177e4
LT
1033
1034 local_irq_disable();
1035 rq = this_rq();
1036 spin_lock(&rq->lock);
1037
1038 return rq;
1039}
1040
8f4d37ec
PZ
1041#ifdef CONFIG_SCHED_HRTICK
1042/*
1043 * Use HR-timers to deliver accurate preemption points.
1044 *
1045 * Its all a bit involved since we cannot program an hrt while holding the
1046 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
1047 * reschedule event.
1048 *
1049 * When we get rescheduled we reprogram the hrtick_timer outside of the
1050 * rq->lock.
1051 */
8f4d37ec
PZ
1052
1053/*
1054 * Use hrtick when:
1055 * - enabled by features
1056 * - hrtimer is actually high res
1057 */
1058static inline int hrtick_enabled(struct rq *rq)
1059{
1060 if (!sched_feat(HRTICK))
1061 return 0;
ba42059f 1062 if (!cpu_active(cpu_of(rq)))
b328ca18 1063 return 0;
8f4d37ec
PZ
1064 return hrtimer_is_hres_active(&rq->hrtick_timer);
1065}
1066
8f4d37ec
PZ
1067static void hrtick_clear(struct rq *rq)
1068{
1069 if (hrtimer_active(&rq->hrtick_timer))
1070 hrtimer_cancel(&rq->hrtick_timer);
1071}
1072
8f4d37ec
PZ
1073/*
1074 * High-resolution timer tick.
1075 * Runs from hardirq context with interrupts disabled.
1076 */
1077static enum hrtimer_restart hrtick(struct hrtimer *timer)
1078{
1079 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1080
1081 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1082
1083 spin_lock(&rq->lock);
3e51f33f 1084 update_rq_clock(rq);
8f4d37ec
PZ
1085 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1086 spin_unlock(&rq->lock);
1087
1088 return HRTIMER_NORESTART;
1089}
1090
95e904c7 1091#ifdef CONFIG_SMP
31656519
PZ
1092/*
1093 * called from hardirq (IPI) context
1094 */
1095static void __hrtick_start(void *arg)
b328ca18 1096{
31656519 1097 struct rq *rq = arg;
b328ca18 1098
31656519
PZ
1099 spin_lock(&rq->lock);
1100 hrtimer_restart(&rq->hrtick_timer);
1101 rq->hrtick_csd_pending = 0;
1102 spin_unlock(&rq->lock);
b328ca18
PZ
1103}
1104
31656519
PZ
1105/*
1106 * Called to set the hrtick timer state.
1107 *
1108 * called with rq->lock held and irqs disabled
1109 */
1110static void hrtick_start(struct rq *rq, u64 delay)
b328ca18 1111{
31656519
PZ
1112 struct hrtimer *timer = &rq->hrtick_timer;
1113 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
b328ca18 1114
cc584b21 1115 hrtimer_set_expires(timer, time);
31656519
PZ
1116
1117 if (rq == this_rq()) {
1118 hrtimer_restart(timer);
1119 } else if (!rq->hrtick_csd_pending) {
6e275637 1120 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
31656519
PZ
1121 rq->hrtick_csd_pending = 1;
1122 }
b328ca18
PZ
1123}
1124
1125static int
1126hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1127{
1128 int cpu = (int)(long)hcpu;
1129
1130 switch (action) {
1131 case CPU_UP_CANCELED:
1132 case CPU_UP_CANCELED_FROZEN:
1133 case CPU_DOWN_PREPARE:
1134 case CPU_DOWN_PREPARE_FROZEN:
1135 case CPU_DEAD:
1136 case CPU_DEAD_FROZEN:
31656519 1137 hrtick_clear(cpu_rq(cpu));
b328ca18
PZ
1138 return NOTIFY_OK;
1139 }
1140
1141 return NOTIFY_DONE;
1142}
1143
fa748203 1144static __init void init_hrtick(void)
b328ca18
PZ
1145{
1146 hotcpu_notifier(hotplug_hrtick, 0);
1147}
31656519
PZ
1148#else
1149/*
1150 * Called to set the hrtick timer state.
1151 *
1152 * called with rq->lock held and irqs disabled
1153 */
1154static void hrtick_start(struct rq *rq, u64 delay)
1155{
7f1e2ca9
PZ
1156 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
1157 HRTIMER_MODE_REL, 0);
31656519 1158}
b328ca18 1159
006c75f1 1160static inline void init_hrtick(void)
8f4d37ec 1161{
8f4d37ec 1162}
31656519 1163#endif /* CONFIG_SMP */
8f4d37ec 1164
31656519 1165static void init_rq_hrtick(struct rq *rq)
8f4d37ec 1166{
31656519
PZ
1167#ifdef CONFIG_SMP
1168 rq->hrtick_csd_pending = 0;
8f4d37ec 1169
31656519
PZ
1170 rq->hrtick_csd.flags = 0;
1171 rq->hrtick_csd.func = __hrtick_start;
1172 rq->hrtick_csd.info = rq;
1173#endif
8f4d37ec 1174
31656519
PZ
1175 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1176 rq->hrtick_timer.function = hrtick;
8f4d37ec 1177}
006c75f1 1178#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
1179static inline void hrtick_clear(struct rq *rq)
1180{
1181}
1182
8f4d37ec
PZ
1183static inline void init_rq_hrtick(struct rq *rq)
1184{
1185}
1186
b328ca18
PZ
1187static inline void init_hrtick(void)
1188{
1189}
006c75f1 1190#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 1191
c24d20db
IM
1192/*
1193 * resched_task - mark a task 'to be rescheduled now'.
1194 *
1195 * On UP this means the setting of the need_resched flag, on SMP it
1196 * might also involve a cross-CPU call to trigger the scheduler on
1197 * the target CPU.
1198 */
1199#ifdef CONFIG_SMP
1200
1201#ifndef tsk_is_polling
1202#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1203#endif
1204
31656519 1205static void resched_task(struct task_struct *p)
c24d20db
IM
1206{
1207 int cpu;
1208
1209 assert_spin_locked(&task_rq(p)->lock);
1210
5ed0cec0 1211 if (test_tsk_need_resched(p))
c24d20db
IM
1212 return;
1213
5ed0cec0 1214 set_tsk_need_resched(p);
c24d20db
IM
1215
1216 cpu = task_cpu(p);
1217 if (cpu == smp_processor_id())
1218 return;
1219
1220 /* NEED_RESCHED must be visible before we test polling */
1221 smp_mb();
1222 if (!tsk_is_polling(p))
1223 smp_send_reschedule(cpu);
1224}
1225
1226static void resched_cpu(int cpu)
1227{
1228 struct rq *rq = cpu_rq(cpu);
1229 unsigned long flags;
1230
1231 if (!spin_trylock_irqsave(&rq->lock, flags))
1232 return;
1233 resched_task(cpu_curr(cpu));
1234 spin_unlock_irqrestore(&rq->lock, flags);
1235}
06d8308c
TG
1236
1237#ifdef CONFIG_NO_HZ
1238/*
1239 * When add_timer_on() enqueues a timer into the timer wheel of an
1240 * idle CPU then this timer might expire before the next timer event
1241 * which is scheduled to wake up that CPU. In case of a completely
1242 * idle system the next event might even be infinite time into the
1243 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1244 * leaves the inner idle loop so the newly added timer is taken into
1245 * account when the CPU goes back to idle and evaluates the timer
1246 * wheel for the next timer event.
1247 */
1248void wake_up_idle_cpu(int cpu)
1249{
1250 struct rq *rq = cpu_rq(cpu);
1251
1252 if (cpu == smp_processor_id())
1253 return;
1254
1255 /*
1256 * This is safe, as this function is called with the timer
1257 * wheel base lock of (cpu) held. When the CPU is on the way
1258 * to idle and has not yet set rq->curr to idle then it will
1259 * be serialized on the timer wheel base lock and take the new
1260 * timer into account automatically.
1261 */
1262 if (rq->curr != rq->idle)
1263 return;
1264
1265 /*
1266 * We can set TIF_RESCHED on the idle task of the other CPU
1267 * lockless. The worst case is that the other CPU runs the
1268 * idle task through an additional NOOP schedule()
1269 */
5ed0cec0 1270 set_tsk_need_resched(rq->idle);
06d8308c
TG
1271
1272 /* NEED_RESCHED must be visible before we test polling */
1273 smp_mb();
1274 if (!tsk_is_polling(rq->idle))
1275 smp_send_reschedule(cpu);
1276}
6d6bc0ad 1277#endif /* CONFIG_NO_HZ */
06d8308c 1278
6d6bc0ad 1279#else /* !CONFIG_SMP */
31656519 1280static void resched_task(struct task_struct *p)
c24d20db
IM
1281{
1282 assert_spin_locked(&task_rq(p)->lock);
31656519 1283 set_tsk_need_resched(p);
c24d20db 1284}
6d6bc0ad 1285#endif /* CONFIG_SMP */
c24d20db 1286
45bf76df
IM
1287#if BITS_PER_LONG == 32
1288# define WMULT_CONST (~0UL)
1289#else
1290# define WMULT_CONST (1UL << 32)
1291#endif
1292
1293#define WMULT_SHIFT 32
1294
194081eb
IM
1295/*
1296 * Shift right and round:
1297 */
cf2ab469 1298#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
194081eb 1299
a7be37ac
PZ
1300/*
1301 * delta *= weight / lw
1302 */
cb1c4fc9 1303static unsigned long
45bf76df
IM
1304calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1305 struct load_weight *lw)
1306{
1307 u64 tmp;
1308
7a232e03
LJ
1309 if (!lw->inv_weight) {
1310 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1311 lw->inv_weight = 1;
1312 else
1313 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1314 / (lw->weight+1);
1315 }
45bf76df
IM
1316
1317 tmp = (u64)delta_exec * weight;
1318 /*
1319 * Check whether we'd overflow the 64-bit multiplication:
1320 */
194081eb 1321 if (unlikely(tmp > WMULT_CONST))
cf2ab469 1322 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
194081eb
IM
1323 WMULT_SHIFT/2);
1324 else
cf2ab469 1325 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
45bf76df 1326
ecf691da 1327 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
45bf76df
IM
1328}
1329
1091985b 1330static inline void update_load_add(struct load_weight *lw, unsigned long inc)
45bf76df
IM
1331{
1332 lw->weight += inc;
e89996ae 1333 lw->inv_weight = 0;
45bf76df
IM
1334}
1335
1091985b 1336static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
45bf76df
IM
1337{
1338 lw->weight -= dec;
e89996ae 1339 lw->inv_weight = 0;
45bf76df
IM
1340}
1341
2dd73a4f
PW
1342/*
1343 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1344 * of tasks with abnormal "nice" values across CPUs the contribution that
1345 * each task makes to its run queue's load is weighted according to its
41a2d6cf 1346 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
2dd73a4f
PW
1347 * scaled version of the new time slice allocation that they receive on time
1348 * slice expiry etc.
1349 */
1350
cce7ade8
PZ
1351#define WEIGHT_IDLEPRIO 3
1352#define WMULT_IDLEPRIO 1431655765
dd41f596
IM
1353
1354/*
1355 * Nice levels are multiplicative, with a gentle 10% change for every
1356 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1357 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1358 * that remained on nice 0.
1359 *
1360 * The "10% effect" is relative and cumulative: from _any_ nice level,
1361 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
f9153ee6
IM
1362 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1363 * If a task goes up by ~10% and another task goes down by ~10% then
1364 * the relative distance between them is ~25%.)
dd41f596
IM
1365 */
1366static const int prio_to_weight[40] = {
254753dc
IM
1367 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1368 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1369 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1370 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1371 /* 0 */ 1024, 820, 655, 526, 423,
1372 /* 5 */ 335, 272, 215, 172, 137,
1373 /* 10 */ 110, 87, 70, 56, 45,
1374 /* 15 */ 36, 29, 23, 18, 15,
dd41f596
IM
1375};
1376
5714d2de
IM
1377/*
1378 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1379 *
1380 * In cases where the weight does not change often, we can use the
1381 * precalculated inverse to speed up arithmetics by turning divisions
1382 * into multiplications:
1383 */
dd41f596 1384static const u32 prio_to_wmult[40] = {
254753dc
IM
1385 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1386 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1387 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1388 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1389 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1390 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1391 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1392 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
dd41f596 1393};
2dd73a4f 1394
dd41f596
IM
1395static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1396
1397/*
1398 * runqueue iterator, to support SMP load-balancing between different
1399 * scheduling classes, without having to expose their internal data
1400 * structures to the load-balancing proper:
1401 */
1402struct rq_iterator {
1403 void *arg;
1404 struct task_struct *(*start)(void *);
1405 struct task_struct *(*next)(void *);
1406};
1407
e1d1484f
PW
1408#ifdef CONFIG_SMP
1409static unsigned long
1410balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1411 unsigned long max_load_move, struct sched_domain *sd,
1412 enum cpu_idle_type idle, int *all_pinned,
1413 int *this_best_prio, struct rq_iterator *iterator);
1414
1415static int
1416iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1417 struct sched_domain *sd, enum cpu_idle_type idle,
1418 struct rq_iterator *iterator);
e1d1484f 1419#endif
dd41f596 1420
ef12fefa
BR
1421/* Time spent by the tasks of the cpu accounting group executing in ... */
1422enum cpuacct_stat_index {
1423 CPUACCT_STAT_USER, /* ... user mode */
1424 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1425
1426 CPUACCT_STAT_NSTATS,
1427};
1428
d842de87
SV
1429#ifdef CONFIG_CGROUP_CPUACCT
1430static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
ef12fefa
BR
1431static void cpuacct_update_stats(struct task_struct *tsk,
1432 enum cpuacct_stat_index idx, cputime_t val);
d842de87
SV
1433#else
1434static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
ef12fefa
BR
1435static inline void cpuacct_update_stats(struct task_struct *tsk,
1436 enum cpuacct_stat_index idx, cputime_t val) {}
d842de87
SV
1437#endif
1438
18d95a28
PZ
1439static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1440{
1441 update_load_add(&rq->load, load);
1442}
1443
1444static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1445{
1446 update_load_sub(&rq->load, load);
1447}
1448
7940ca36 1449#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
eb755805 1450typedef int (*tg_visitor)(struct task_group *, void *);
c09595f6
PZ
1451
1452/*
1453 * Iterate the full tree, calling @down when first entering a node and @up when
1454 * leaving it for the final time.
1455 */
eb755805 1456static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
1457{
1458 struct task_group *parent, *child;
eb755805 1459 int ret;
c09595f6
PZ
1460
1461 rcu_read_lock();
1462 parent = &root_task_group;
1463down:
eb755805
PZ
1464 ret = (*down)(parent, data);
1465 if (ret)
1466 goto out_unlock;
c09595f6
PZ
1467 list_for_each_entry_rcu(child, &parent->children, siblings) {
1468 parent = child;
1469 goto down;
1470
1471up:
1472 continue;
1473 }
eb755805
PZ
1474 ret = (*up)(parent, data);
1475 if (ret)
1476 goto out_unlock;
c09595f6
PZ
1477
1478 child = parent;
1479 parent = parent->parent;
1480 if (parent)
1481 goto up;
eb755805 1482out_unlock:
c09595f6 1483 rcu_read_unlock();
eb755805
PZ
1484
1485 return ret;
c09595f6
PZ
1486}
1487
eb755805
PZ
1488static int tg_nop(struct task_group *tg, void *data)
1489{
1490 return 0;
c09595f6 1491}
eb755805
PZ
1492#endif
1493
1494#ifdef CONFIG_SMP
1495static unsigned long source_load(int cpu, int type);
1496static unsigned long target_load(int cpu, int type);
1497static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1498
1499static unsigned long cpu_avg_load_per_task(int cpu)
1500{
1501 struct rq *rq = cpu_rq(cpu);
af6d596f 1502 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
eb755805 1503
4cd42620
SR
1504 if (nr_running)
1505 rq->avg_load_per_task = rq->load.weight / nr_running;
a2d47777
BS
1506 else
1507 rq->avg_load_per_task = 0;
eb755805
PZ
1508
1509 return rq->avg_load_per_task;
1510}
1511
1512#ifdef CONFIG_FAIR_GROUP_SCHED
c09595f6 1513
c09595f6
PZ
1514static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1515
1516/*
1517 * Calculate and set the cpu's group shares.
1518 */
1519static void
ffda12a1
PZ
1520update_group_shares_cpu(struct task_group *tg, int cpu,
1521 unsigned long sd_shares, unsigned long sd_rq_weight)
18d95a28 1522{
c09595f6
PZ
1523 unsigned long shares;
1524 unsigned long rq_weight;
1525
c8cba857 1526 if (!tg->se[cpu])
c09595f6
PZ
1527 return;
1528
ec4e0e2f 1529 rq_weight = tg->cfs_rq[cpu]->rq_weight;
c8cba857 1530
c09595f6
PZ
1531 /*
1532 * \Sum shares * rq_weight
1533 * shares = -----------------------
1534 * \Sum rq_weight
1535 *
1536 */
ec4e0e2f 1537 shares = (sd_shares * rq_weight) / sd_rq_weight;
ffda12a1 1538 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
c09595f6 1539
ffda12a1
PZ
1540 if (abs(shares - tg->se[cpu]->load.weight) >
1541 sysctl_sched_shares_thresh) {
1542 struct rq *rq = cpu_rq(cpu);
1543 unsigned long flags;
c09595f6 1544
ffda12a1 1545 spin_lock_irqsave(&rq->lock, flags);
ec4e0e2f 1546 tg->cfs_rq[cpu]->shares = shares;
c09595f6 1547
ffda12a1
PZ
1548 __set_se_shares(tg->se[cpu], shares);
1549 spin_unlock_irqrestore(&rq->lock, flags);
1550 }
18d95a28 1551}
c09595f6
PZ
1552
1553/*
c8cba857
PZ
1554 * Re-compute the task group their per cpu shares over the given domain.
1555 * This needs to be done in a bottom-up fashion because the rq weight of a
1556 * parent group depends on the shares of its child groups.
c09595f6 1557 */
eb755805 1558static int tg_shares_up(struct task_group *tg, void *data)
c09595f6 1559{
ec4e0e2f 1560 unsigned long weight, rq_weight = 0;
c8cba857 1561 unsigned long shares = 0;
eb755805 1562 struct sched_domain *sd = data;
c8cba857 1563 int i;
c09595f6 1564
758b2cdc 1565 for_each_cpu(i, sched_domain_span(sd)) {
ec4e0e2f
KC
1566 /*
1567 * If there are currently no tasks on the cpu pretend there
1568 * is one of average load so that when a new task gets to
1569 * run here it will not get delayed by group starvation.
1570 */
1571 weight = tg->cfs_rq[i]->load.weight;
1572 if (!weight)
1573 weight = NICE_0_LOAD;
1574
1575 tg->cfs_rq[i]->rq_weight = weight;
1576 rq_weight += weight;
c8cba857 1577 shares += tg->cfs_rq[i]->shares;
c09595f6 1578 }
c09595f6 1579
c8cba857
PZ
1580 if ((!shares && rq_weight) || shares > tg->shares)
1581 shares = tg->shares;
1582
1583 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1584 shares = tg->shares;
c09595f6 1585
758b2cdc 1586 for_each_cpu(i, sched_domain_span(sd))
ffda12a1 1587 update_group_shares_cpu(tg, i, shares, rq_weight);
eb755805
PZ
1588
1589 return 0;
c09595f6
PZ
1590}
1591
1592/*
c8cba857
PZ
1593 * Compute the cpu's hierarchical load factor for each task group.
1594 * This needs to be done in a top-down fashion because the load of a child
1595 * group is a fraction of its parents load.
c09595f6 1596 */
eb755805 1597static int tg_load_down(struct task_group *tg, void *data)
c09595f6 1598{
c8cba857 1599 unsigned long load;
eb755805 1600 long cpu = (long)data;
c09595f6 1601
c8cba857
PZ
1602 if (!tg->parent) {
1603 load = cpu_rq(cpu)->load.weight;
1604 } else {
1605 load = tg->parent->cfs_rq[cpu]->h_load;
1606 load *= tg->cfs_rq[cpu]->shares;
1607 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1608 }
c09595f6 1609
c8cba857 1610 tg->cfs_rq[cpu]->h_load = load;
c09595f6 1611
eb755805 1612 return 0;
c09595f6
PZ
1613}
1614
c8cba857 1615static void update_shares(struct sched_domain *sd)
4d8d595d 1616{
2398f2c6
PZ
1617 u64 now = cpu_clock(raw_smp_processor_id());
1618 s64 elapsed = now - sd->last_update;
1619
1620 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1621 sd->last_update = now;
eb755805 1622 walk_tg_tree(tg_nop, tg_shares_up, sd);
2398f2c6 1623 }
4d8d595d
PZ
1624}
1625
3e5459b4
PZ
1626static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1627{
1628 spin_unlock(&rq->lock);
1629 update_shares(sd);
1630 spin_lock(&rq->lock);
1631}
1632
eb755805 1633static void update_h_load(long cpu)
c09595f6 1634{
eb755805 1635 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
c09595f6
PZ
1636}
1637
c09595f6
PZ
1638#else
1639
c8cba857 1640static inline void update_shares(struct sched_domain *sd)
4d8d595d
PZ
1641{
1642}
1643
3e5459b4
PZ
1644static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1645{
1646}
1647
18d95a28
PZ
1648#endif
1649
8f45e2b5
GH
1650#ifdef CONFIG_PREEMPT
1651
70574a99 1652/*
8f45e2b5
GH
1653 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1654 * way at the expense of forcing extra atomic operations in all
1655 * invocations. This assures that the double_lock is acquired using the
1656 * same underlying policy as the spinlock_t on this architecture, which
1657 * reduces latency compared to the unfair variant below. However, it
1658 * also adds more overhead and therefore may reduce throughput.
70574a99 1659 */
8f45e2b5
GH
1660static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1661 __releases(this_rq->lock)
1662 __acquires(busiest->lock)
1663 __acquires(this_rq->lock)
1664{
1665 spin_unlock(&this_rq->lock);
1666 double_rq_lock(this_rq, busiest);
1667
1668 return 1;
1669}
1670
1671#else
1672/*
1673 * Unfair double_lock_balance: Optimizes throughput at the expense of
1674 * latency by eliminating extra atomic operations when the locks are
1675 * already in proper order on entry. This favors lower cpu-ids and will
1676 * grant the double lock to lower cpus over higher ids under contention,
1677 * regardless of entry order into the function.
1678 */
1679static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
70574a99
AD
1680 __releases(this_rq->lock)
1681 __acquires(busiest->lock)
1682 __acquires(this_rq->lock)
1683{
1684 int ret = 0;
1685
70574a99
AD
1686 if (unlikely(!spin_trylock(&busiest->lock))) {
1687 if (busiest < this_rq) {
1688 spin_unlock(&this_rq->lock);
1689 spin_lock(&busiest->lock);
1690 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
1691 ret = 1;
1692 } else
1693 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
1694 }
1695 return ret;
1696}
1697
8f45e2b5
GH
1698#endif /* CONFIG_PREEMPT */
1699
1700/*
1701 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1702 */
1703static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1704{
1705 if (unlikely(!irqs_disabled())) {
1706 /* printk() doesn't work good under rq->lock */
1707 spin_unlock(&this_rq->lock);
1708 BUG_ON(1);
1709 }
1710
1711 return _double_lock_balance(this_rq, busiest);
1712}
1713
70574a99
AD
1714static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1715 __releases(busiest->lock)
1716{
1717 spin_unlock(&busiest->lock);
1718 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1719}
18d95a28
PZ
1720#endif
1721
30432094 1722#ifdef CONFIG_FAIR_GROUP_SCHED
34e83e85
IM
1723static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1724{
30432094 1725#ifdef CONFIG_SMP
34e83e85
IM
1726 cfs_rq->shares = shares;
1727#endif
1728}
30432094 1729#endif
e7693a36 1730
dd41f596 1731#include "sched_stats.h"
dd41f596 1732#include "sched_idletask.c"
5522d5d5
IM
1733#include "sched_fair.c"
1734#include "sched_rt.c"
dd41f596
IM
1735#ifdef CONFIG_SCHED_DEBUG
1736# include "sched_debug.c"
1737#endif
1738
1739#define sched_class_highest (&rt_sched_class)
1f11eb6a
GH
1740#define for_each_class(class) \
1741 for (class = sched_class_highest; class; class = class->next)
dd41f596 1742
c09595f6 1743static void inc_nr_running(struct rq *rq)
9c217245
IM
1744{
1745 rq->nr_running++;
9c217245
IM
1746}
1747
c09595f6 1748static void dec_nr_running(struct rq *rq)
9c217245
IM
1749{
1750 rq->nr_running--;
9c217245
IM
1751}
1752
45bf76df
IM
1753static void set_load_weight(struct task_struct *p)
1754{
1755 if (task_has_rt_policy(p)) {
dd41f596
IM
1756 p->se.load.weight = prio_to_weight[0] * 2;
1757 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1758 return;
1759 }
45bf76df 1760
dd41f596
IM
1761 /*
1762 * SCHED_IDLE tasks get minimal weight:
1763 */
1764 if (p->policy == SCHED_IDLE) {
1765 p->se.load.weight = WEIGHT_IDLEPRIO;
1766 p->se.load.inv_weight = WMULT_IDLEPRIO;
1767 return;
1768 }
71f8bd46 1769
dd41f596
IM
1770 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1771 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
71f8bd46
IM
1772}
1773
2087a1ad
GH
1774static void update_avg(u64 *avg, u64 sample)
1775{
1776 s64 diff = sample - *avg;
1777 *avg += diff >> 3;
1778}
1779
8159f87e 1780static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
71f8bd46 1781{
831451ac
PZ
1782 if (wakeup)
1783 p->se.start_runtime = p->se.sum_exec_runtime;
1784
dd41f596 1785 sched_info_queued(p);
fd390f6a 1786 p->sched_class->enqueue_task(rq, p, wakeup);
dd41f596 1787 p->se.on_rq = 1;
71f8bd46
IM
1788}
1789
69be72c1 1790static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
71f8bd46 1791{
831451ac
PZ
1792 if (sleep) {
1793 if (p->se.last_wakeup) {
1794 update_avg(&p->se.avg_overlap,
1795 p->se.sum_exec_runtime - p->se.last_wakeup);
1796 p->se.last_wakeup = 0;
1797 } else {
1798 update_avg(&p->se.avg_wakeup,
1799 sysctl_sched_wakeup_granularity);
1800 }
2087a1ad
GH
1801 }
1802
46ac22ba 1803 sched_info_dequeued(p);
f02231e5 1804 p->sched_class->dequeue_task(rq, p, sleep);
dd41f596 1805 p->se.on_rq = 0;
71f8bd46
IM
1806}
1807
14531189 1808/*
dd41f596 1809 * __normal_prio - return the priority that is based on the static prio
14531189 1810 */
14531189
IM
1811static inline int __normal_prio(struct task_struct *p)
1812{
dd41f596 1813 return p->static_prio;
14531189
IM
1814}
1815
b29739f9
IM
1816/*
1817 * Calculate the expected normal priority: i.e. priority
1818 * without taking RT-inheritance into account. Might be
1819 * boosted by interactivity modifiers. Changes upon fork,
1820 * setprio syscalls, and whenever the interactivity
1821 * estimator recalculates.
1822 */
36c8b586 1823static inline int normal_prio(struct task_struct *p)
b29739f9
IM
1824{
1825 int prio;
1826
e05606d3 1827 if (task_has_rt_policy(p))
b29739f9
IM
1828 prio = MAX_RT_PRIO-1 - p->rt_priority;
1829 else
1830 prio = __normal_prio(p);
1831 return prio;
1832}
1833
1834/*
1835 * Calculate the current priority, i.e. the priority
1836 * taken into account by the scheduler. This value might
1837 * be boosted by RT tasks, or might be boosted by
1838 * interactivity modifiers. Will be RT if the task got
1839 * RT-boosted. If not then it returns p->normal_prio.
1840 */
36c8b586 1841static int effective_prio(struct task_struct *p)
b29739f9
IM
1842{
1843 p->normal_prio = normal_prio(p);
1844 /*
1845 * If we are RT tasks or we were boosted to RT priority,
1846 * keep the priority unchanged. Otherwise, update priority
1847 * to the normal priority:
1848 */
1849 if (!rt_prio(p->prio))
1850 return p->normal_prio;
1851 return p->prio;
1852}
1853
1da177e4 1854/*
dd41f596 1855 * activate_task - move a task to the runqueue.
1da177e4 1856 */
dd41f596 1857static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1da177e4 1858{
d9514f6c 1859 if (task_contributes_to_load(p))
dd41f596 1860 rq->nr_uninterruptible--;
1da177e4 1861
8159f87e 1862 enqueue_task(rq, p, wakeup);
c09595f6 1863 inc_nr_running(rq);
1da177e4
LT
1864}
1865
1da177e4
LT
1866/*
1867 * deactivate_task - remove a task from the runqueue.
1868 */
2e1cb74a 1869static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1da177e4 1870{
d9514f6c 1871 if (task_contributes_to_load(p))
dd41f596
IM
1872 rq->nr_uninterruptible++;
1873
69be72c1 1874 dequeue_task(rq, p, sleep);
c09595f6 1875 dec_nr_running(rq);
1da177e4
LT
1876}
1877
1da177e4
LT
1878/**
1879 * task_curr - is this task currently executing on a CPU?
1880 * @p: the task in question.
1881 */
36c8b586 1882inline int task_curr(const struct task_struct *p)
1da177e4
LT
1883{
1884 return cpu_curr(task_cpu(p)) == p;
1885}
1886
dd41f596
IM
1887static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1888{
6f505b16 1889 set_task_rq(p, cpu);
dd41f596 1890#ifdef CONFIG_SMP
ce96b5ac
DA
1891 /*
1892 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1893 * successfuly executed on another CPU. We must ensure that updates of
1894 * per-task data have been completed by this moment.
1895 */
1896 smp_wmb();
dd41f596 1897 task_thread_info(p)->cpu = cpu;
dd41f596 1898#endif
2dd73a4f
PW
1899}
1900
cb469845
SR
1901static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1902 const struct sched_class *prev_class,
1903 int oldprio, int running)
1904{
1905 if (prev_class != p->sched_class) {
1906 if (prev_class->switched_from)
1907 prev_class->switched_from(rq, p, running);
1908 p->sched_class->switched_to(rq, p, running);
1909 } else
1910 p->sched_class->prio_changed(rq, p, oldprio, running);
1911}
1912
1da177e4 1913#ifdef CONFIG_SMP
c65cc870 1914
e958b360
TG
1915/* Used instead of source_load when we know the type == 0 */
1916static unsigned long weighted_cpuload(const int cpu)
1917{
1918 return cpu_rq(cpu)->load.weight;
1919}
1920
cc367732
IM
1921/*
1922 * Is this task likely cache-hot:
1923 */
e7693a36 1924static int
cc367732
IM
1925task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
1926{
1927 s64 delta;
1928
f540a608
IM
1929 /*
1930 * Buddy candidates are cache hot:
1931 */
4793241b
PZ
1932 if (sched_feat(CACHE_HOT_BUDDY) &&
1933 (&p->se == cfs_rq_of(&p->se)->next ||
1934 &p->se == cfs_rq_of(&p->se)->last))
f540a608
IM
1935 return 1;
1936
cc367732
IM
1937 if (p->sched_class != &fair_sched_class)
1938 return 0;
1939
6bc1665b
IM
1940 if (sysctl_sched_migration_cost == -1)
1941 return 1;
1942 if (sysctl_sched_migration_cost == 0)
1943 return 0;
1944
cc367732
IM
1945 delta = now - p->se.exec_start;
1946
1947 return delta < (s64)sysctl_sched_migration_cost;
1948}
1949
1950
dd41f596 1951void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 1952{
dd41f596
IM
1953 int old_cpu = task_cpu(p);
1954 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
2830cf8c
SV
1955 struct cfs_rq *old_cfsrq = task_cfs_rq(p),
1956 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
bbdba7c0 1957 u64 clock_offset;
dd41f596
IM
1958
1959 clock_offset = old_rq->clock - new_rq->clock;
6cfb0d5d 1960
cbc34ed1
PZ
1961 trace_sched_migrate_task(p, task_cpu(p), new_cpu);
1962
6cfb0d5d
IM
1963#ifdef CONFIG_SCHEDSTATS
1964 if (p->se.wait_start)
1965 p->se.wait_start -= clock_offset;
dd41f596
IM
1966 if (p->se.sleep_start)
1967 p->se.sleep_start -= clock_offset;
1968 if (p->se.block_start)
1969 p->se.block_start -= clock_offset;
cc367732
IM
1970 if (old_cpu != new_cpu) {
1971 schedstat_inc(p, se.nr_migrations);
1972 if (task_hot(p, old_rq->clock, NULL))
1973 schedstat_inc(p, se.nr_forced2_migrations);
1974 }
6cfb0d5d 1975#endif
2830cf8c
SV
1976 p->se.vruntime -= old_cfsrq->min_vruntime -
1977 new_cfsrq->min_vruntime;
dd41f596
IM
1978
1979 __set_task_cpu(p, new_cpu);
c65cc870
IM
1980}
1981
70b97a7f 1982struct migration_req {
1da177e4 1983 struct list_head list;
1da177e4 1984
36c8b586 1985 struct task_struct *task;
1da177e4
LT
1986 int dest_cpu;
1987
1da177e4 1988 struct completion done;
70b97a7f 1989};
1da177e4
LT
1990
1991/*
1992 * The task's runqueue lock must be held.
1993 * Returns true if you have to wait for migration thread.
1994 */
36c8b586 1995static int
70b97a7f 1996migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1da177e4 1997{
70b97a7f 1998 struct rq *rq = task_rq(p);
1da177e4
LT
1999
2000 /*
2001 * If the task is not on a runqueue (and not running), then
2002 * it is sufficient to simply update the task's cpu field.
2003 */
dd41f596 2004 if (!p->se.on_rq && !task_running(rq, p)) {
1da177e4
LT
2005 set_task_cpu(p, dest_cpu);
2006 return 0;
2007 }
2008
2009 init_completion(&req->done);
1da177e4
LT
2010 req->task = p;
2011 req->dest_cpu = dest_cpu;
2012 list_add(&req->list, &rq->migration_queue);
48f24c4d 2013
1da177e4
LT
2014 return 1;
2015}
2016
2017/*
2018 * wait_task_inactive - wait for a thread to unschedule.
2019 *
85ba2d86
RM
2020 * If @match_state is nonzero, it's the @p->state value just checked and
2021 * not expected to change. If it changes, i.e. @p might have woken up,
2022 * then return zero. When we succeed in waiting for @p to be off its CPU,
2023 * we return a positive number (its total switch count). If a second call
2024 * a short while later returns the same number, the caller can be sure that
2025 * @p has remained unscheduled the whole time.
2026 *
1da177e4
LT
2027 * The caller must ensure that the task *will* unschedule sometime soon,
2028 * else this function might spin for a *long* time. This function can't
2029 * be called with interrupts off, or it may introduce deadlock with
2030 * smp_call_function() if an IPI is sent by the same process we are
2031 * waiting to become inactive.
2032 */
85ba2d86 2033unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
2034{
2035 unsigned long flags;
dd41f596 2036 int running, on_rq;
85ba2d86 2037 unsigned long ncsw;
70b97a7f 2038 struct rq *rq;
1da177e4 2039
3a5c359a
AK
2040 for (;;) {
2041 /*
2042 * We do the initial early heuristics without holding
2043 * any task-queue locks at all. We'll only try to get
2044 * the runqueue lock when things look like they will
2045 * work out!
2046 */
2047 rq = task_rq(p);
fa490cfd 2048
3a5c359a
AK
2049 /*
2050 * If the task is actively running on another CPU
2051 * still, just relax and busy-wait without holding
2052 * any locks.
2053 *
2054 * NOTE! Since we don't hold any locks, it's not
2055 * even sure that "rq" stays as the right runqueue!
2056 * But we don't care, since "task_running()" will
2057 * return false if the runqueue has changed and p
2058 * is actually now running somewhere else!
2059 */
85ba2d86
RM
2060 while (task_running(rq, p)) {
2061 if (match_state && unlikely(p->state != match_state))
2062 return 0;
3a5c359a 2063 cpu_relax();
85ba2d86 2064 }
fa490cfd 2065
3a5c359a
AK
2066 /*
2067 * Ok, time to look more closely! We need the rq
2068 * lock now, to be *sure*. If we're wrong, we'll
2069 * just go back and repeat.
2070 */
2071 rq = task_rq_lock(p, &flags);
0a16b607 2072 trace_sched_wait_task(rq, p);
3a5c359a
AK
2073 running = task_running(rq, p);
2074 on_rq = p->se.on_rq;
85ba2d86 2075 ncsw = 0;
f31e11d8 2076 if (!match_state || p->state == match_state)
93dcf55f 2077 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
3a5c359a 2078 task_rq_unlock(rq, &flags);
fa490cfd 2079
85ba2d86
RM
2080 /*
2081 * If it changed from the expected state, bail out now.
2082 */
2083 if (unlikely(!ncsw))
2084 break;
2085
3a5c359a
AK
2086 /*
2087 * Was it really running after all now that we
2088 * checked with the proper locks actually held?
2089 *
2090 * Oops. Go back and try again..
2091 */
2092 if (unlikely(running)) {
2093 cpu_relax();
2094 continue;
2095 }
fa490cfd 2096
3a5c359a
AK
2097 /*
2098 * It's not enough that it's not actively running,
2099 * it must be off the runqueue _entirely_, and not
2100 * preempted!
2101 *
80dd99b3 2102 * So if it was still runnable (but just not actively
3a5c359a
AK
2103 * running right now), it's preempted, and we should
2104 * yield - it could be a while.
2105 */
2106 if (unlikely(on_rq)) {
2107 schedule_timeout_uninterruptible(1);
2108 continue;
2109 }
fa490cfd 2110
3a5c359a
AK
2111 /*
2112 * Ahh, all good. It wasn't running, and it wasn't
2113 * runnable, which means that it will never become
2114 * running in the future either. We're all done!
2115 */
2116 break;
2117 }
85ba2d86
RM
2118
2119 return ncsw;
1da177e4
LT
2120}
2121
2122/***
2123 * kick_process - kick a running thread to enter/exit the kernel
2124 * @p: the to-be-kicked thread
2125 *
2126 * Cause a process which is running on another CPU to enter
2127 * kernel-mode, without any delay. (to get signals handled.)
2128 *
2129 * NOTE: this function doesnt have to take the runqueue lock,
2130 * because all it wants to ensure is that the remote task enters
2131 * the kernel. If the IPI races and the task has been migrated
2132 * to another CPU then no harm is done and the purpose has been
2133 * achieved as well.
2134 */
36c8b586 2135void kick_process(struct task_struct *p)
1da177e4
LT
2136{
2137 int cpu;
2138
2139 preempt_disable();
2140 cpu = task_cpu(p);
2141 if ((cpu != smp_processor_id()) && task_curr(p))
2142 smp_send_reschedule(cpu);
2143 preempt_enable();
2144}
2145
2146/*
2dd73a4f
PW
2147 * Return a low guess at the load of a migration-source cpu weighted
2148 * according to the scheduling class and "nice" value.
1da177e4
LT
2149 *
2150 * We want to under-estimate the load of migration sources, to
2151 * balance conservatively.
2152 */
a9957449 2153static unsigned long source_load(int cpu, int type)
1da177e4 2154{
70b97a7f 2155 struct rq *rq = cpu_rq(cpu);
dd41f596 2156 unsigned long total = weighted_cpuload(cpu);
2dd73a4f 2157
93b75217 2158 if (type == 0 || !sched_feat(LB_BIAS))
dd41f596 2159 return total;
b910472d 2160
dd41f596 2161 return min(rq->cpu_load[type-1], total);
1da177e4
LT
2162}
2163
2164/*
2dd73a4f
PW
2165 * Return a high guess at the load of a migration-target cpu weighted
2166 * according to the scheduling class and "nice" value.
1da177e4 2167 */
a9957449 2168static unsigned long target_load(int cpu, int type)
1da177e4 2169{
70b97a7f 2170 struct rq *rq = cpu_rq(cpu);
dd41f596 2171 unsigned long total = weighted_cpuload(cpu);
2dd73a4f 2172
93b75217 2173 if (type == 0 || !sched_feat(LB_BIAS))
dd41f596 2174 return total;
3b0bd9bc 2175
dd41f596 2176 return max(rq->cpu_load[type-1], total);
2dd73a4f
PW
2177}
2178
147cbb4b
NP
2179/*
2180 * find_idlest_group finds and returns the least busy CPU group within the
2181 * domain.
2182 */
2183static struct sched_group *
2184find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2185{
2186 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
2187 unsigned long min_load = ULONG_MAX, this_load = 0;
2188 int load_idx = sd->forkexec_idx;
2189 int imbalance = 100 + (sd->imbalance_pct-100)/2;
2190
2191 do {
2192 unsigned long load, avg_load;
2193 int local_group;
2194 int i;
2195
da5a5522 2196 /* Skip over this group if it has no CPUs allowed */
758b2cdc
RR
2197 if (!cpumask_intersects(sched_group_cpus(group),
2198 &p->cpus_allowed))
3a5c359a 2199 continue;
da5a5522 2200
758b2cdc
RR
2201 local_group = cpumask_test_cpu(this_cpu,
2202 sched_group_cpus(group));
147cbb4b
NP
2203
2204 /* Tally up the load of all CPUs in the group */
2205 avg_load = 0;
2206
758b2cdc 2207 for_each_cpu(i, sched_group_cpus(group)) {
147cbb4b
NP
2208 /* Bias balancing toward cpus of our domain */
2209 if (local_group)
2210 load = source_load(i, load_idx);
2211 else
2212 load = target_load(i, load_idx);
2213
2214 avg_load += load;
2215 }
2216
2217 /* Adjust by relative CPU power of the group */
5517d86b
ED
2218 avg_load = sg_div_cpu_power(group,
2219 avg_load * SCHED_LOAD_SCALE);
147cbb4b
NP
2220
2221 if (local_group) {
2222 this_load = avg_load;
2223 this = group;
2224 } else if (avg_load < min_load) {
2225 min_load = avg_load;
2226 idlest = group;
2227 }
3a5c359a 2228 } while (group = group->next, group != sd->groups);
147cbb4b
NP
2229
2230 if (!idlest || 100*this_load < imbalance*min_load)
2231 return NULL;
2232 return idlest;
2233}
2234
2235/*
0feaece9 2236 * find_idlest_cpu - find the idlest cpu among the cpus in group.
147cbb4b 2237 */
95cdf3b7 2238static int
758b2cdc 2239find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
147cbb4b
NP
2240{
2241 unsigned long load, min_load = ULONG_MAX;
2242 int idlest = -1;
2243 int i;
2244
da5a5522 2245 /* Traverse only the allowed CPUs */
758b2cdc 2246 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
2dd73a4f 2247 load = weighted_cpuload(i);
147cbb4b
NP
2248
2249 if (load < min_load || (load == min_load && i == this_cpu)) {
2250 min_load = load;
2251 idlest = i;
2252 }
2253 }
2254
2255 return idlest;
2256}
2257
476d139c
NP
2258/*
2259 * sched_balance_self: balance the current task (running on cpu) in domains
2260 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
2261 * SD_BALANCE_EXEC.
2262 *
2263 * Balance, ie. select the least loaded group.
2264 *
2265 * Returns the target CPU number, or the same CPU if no balancing is needed.
2266 *
2267 * preempt must be disabled.
2268 */
2269static int sched_balance_self(int cpu, int flag)
2270{
2271 struct task_struct *t = current;
2272 struct sched_domain *tmp, *sd = NULL;
147cbb4b 2273
c96d145e 2274 for_each_domain(cpu, tmp) {
9761eea8
IM
2275 /*
2276 * If power savings logic is enabled for a domain, stop there.
2277 */
5c45bf27
SS
2278 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
2279 break;
476d139c
NP
2280 if (tmp->flags & flag)
2281 sd = tmp;
c96d145e 2282 }
476d139c 2283
039a1c41
PZ
2284 if (sd)
2285 update_shares(sd);
2286
476d139c 2287 while (sd) {
476d139c 2288 struct sched_group *group;
1a848870
SS
2289 int new_cpu, weight;
2290
2291 if (!(sd->flags & flag)) {
2292 sd = sd->child;
2293 continue;
2294 }
476d139c 2295
476d139c 2296 group = find_idlest_group(sd, t, cpu);
1a848870
SS
2297 if (!group) {
2298 sd = sd->child;
2299 continue;
2300 }
476d139c 2301
758b2cdc 2302 new_cpu = find_idlest_cpu(group, t, cpu);
1a848870
SS
2303 if (new_cpu == -1 || new_cpu == cpu) {
2304 /* Now try balancing at a lower domain level of cpu */
2305 sd = sd->child;
2306 continue;
2307 }
476d139c 2308
1a848870 2309 /* Now try balancing at a lower domain level of new_cpu */
476d139c 2310 cpu = new_cpu;
758b2cdc 2311 weight = cpumask_weight(sched_domain_span(sd));
476d139c 2312 sd = NULL;
476d139c 2313 for_each_domain(cpu, tmp) {
758b2cdc 2314 if (weight <= cpumask_weight(sched_domain_span(tmp)))
476d139c
NP
2315 break;
2316 if (tmp->flags & flag)
2317 sd = tmp;
2318 }
2319 /* while loop will break here if sd == NULL */
2320 }
2321
2322 return cpu;
2323}
2324
2325#endif /* CONFIG_SMP */
1da177e4 2326
1da177e4
LT
2327/***
2328 * try_to_wake_up - wake up a thread
2329 * @p: the to-be-woken-up thread
2330 * @state: the mask of task states that can be woken
2331 * @sync: do a synchronous wakeup?
2332 *
2333 * Put it on the run-queue if it's not already there. The "current"
2334 * thread is always on the run-queue (except when the actual
2335 * re-schedule is in progress), and as such you're allowed to do
2336 * the simpler "current->state = TASK_RUNNING" to mark yourself
2337 * runnable without the overhead of this.
2338 *
2339 * returns failure only if the task is already active.
2340 */
36c8b586 2341static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1da177e4 2342{
cc367732 2343 int cpu, orig_cpu, this_cpu, success = 0;
1da177e4
LT
2344 unsigned long flags;
2345 long old_state;
70b97a7f 2346 struct rq *rq;
1da177e4 2347
b85d0667
IM
2348 if (!sched_feat(SYNC_WAKEUPS))
2349 sync = 0;
2350
2398f2c6 2351#ifdef CONFIG_SMP
57310a98 2352 if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
2398f2c6
PZ
2353 struct sched_domain *sd;
2354
2355 this_cpu = raw_smp_processor_id();
2356 cpu = task_cpu(p);
2357
2358 for_each_domain(this_cpu, sd) {
758b2cdc 2359 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2398f2c6
PZ
2360 update_shares(sd);
2361 break;
2362 }
2363 }
2364 }
2365#endif
2366
04e2f174 2367 smp_wmb();
1da177e4 2368 rq = task_rq_lock(p, &flags);
03e89e45 2369 update_rq_clock(rq);
1da177e4
LT
2370 old_state = p->state;
2371 if (!(old_state & state))
2372 goto out;
2373
dd41f596 2374 if (p->se.on_rq)
1da177e4
LT
2375 goto out_running;
2376
2377 cpu = task_cpu(p);
cc367732 2378 orig_cpu = cpu;
1da177e4
LT
2379 this_cpu = smp_processor_id();
2380
2381#ifdef CONFIG_SMP
2382 if (unlikely(task_running(rq, p)))
2383 goto out_activate;
2384
5d2f5a61
DA
2385 cpu = p->sched_class->select_task_rq(p, sync);
2386 if (cpu != orig_cpu) {
2387 set_task_cpu(p, cpu);
1da177e4
LT
2388 task_rq_unlock(rq, &flags);
2389 /* might preempt at this point */
2390 rq = task_rq_lock(p, &flags);
2391 old_state = p->state;
2392 if (!(old_state & state))
2393 goto out;
dd41f596 2394 if (p->se.on_rq)
1da177e4
LT
2395 goto out_running;
2396
2397 this_cpu = smp_processor_id();
2398 cpu = task_cpu(p);
2399 }
2400
e7693a36
GH
2401#ifdef CONFIG_SCHEDSTATS
2402 schedstat_inc(rq, ttwu_count);
2403 if (cpu == this_cpu)
2404 schedstat_inc(rq, ttwu_local);
2405 else {
2406 struct sched_domain *sd;
2407 for_each_domain(this_cpu, sd) {
758b2cdc 2408 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
e7693a36
GH
2409 schedstat_inc(sd, ttwu_wake_remote);
2410 break;
2411 }
2412 }
2413 }
6d6bc0ad 2414#endif /* CONFIG_SCHEDSTATS */
e7693a36 2415
1da177e4
LT
2416out_activate:
2417#endif /* CONFIG_SMP */
cc367732
IM
2418 schedstat_inc(p, se.nr_wakeups);
2419 if (sync)
2420 schedstat_inc(p, se.nr_wakeups_sync);
2421 if (orig_cpu != cpu)
2422 schedstat_inc(p, se.nr_wakeups_migrate);
2423 if (cpu == this_cpu)
2424 schedstat_inc(p, se.nr_wakeups_local);
2425 else
2426 schedstat_inc(p, se.nr_wakeups_remote);
dd41f596 2427 activate_task(rq, p, 1);
1da177e4
LT
2428 success = 1;
2429
831451ac
PZ
2430 /*
2431 * Only attribute actual wakeups done by this task.
2432 */
2433 if (!in_interrupt()) {
2434 struct sched_entity *se = &current->se;
2435 u64 sample = se->sum_exec_runtime;
2436
2437 if (se->last_wakeup)
2438 sample -= se->last_wakeup;
2439 else
2440 sample -= se->start_runtime;
2441 update_avg(&se->avg_wakeup, sample);
2442
2443 se->last_wakeup = se->sum_exec_runtime;
2444 }
2445
1da177e4 2446out_running:
468a15bb 2447 trace_sched_wakeup(rq, p, success);
15afe09b 2448 check_preempt_curr(rq, p, sync);
4ae7d5ce 2449
1da177e4 2450 p->state = TASK_RUNNING;
9a897c5a
SR
2451#ifdef CONFIG_SMP
2452 if (p->sched_class->task_wake_up)
2453 p->sched_class->task_wake_up(rq, p);
2454#endif
1da177e4
LT
2455out:
2456 task_rq_unlock(rq, &flags);
2457
2458 return success;
2459}
2460
7ad5b3a5 2461int wake_up_process(struct task_struct *p)
1da177e4 2462{
d9514f6c 2463 return try_to_wake_up(p, TASK_ALL, 0);
1da177e4 2464}
1da177e4
LT
2465EXPORT_SYMBOL(wake_up_process);
2466
7ad5b3a5 2467int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2468{
2469 return try_to_wake_up(p, state, 0);
2470}
2471
1da177e4
LT
2472/*
2473 * Perform scheduler related setup for a newly forked process p.
2474 * p is forked by current.
dd41f596
IM
2475 *
2476 * __sched_fork() is basic setup used by init_idle() too:
2477 */
2478static void __sched_fork(struct task_struct *p)
2479{
dd41f596
IM
2480 p->se.exec_start = 0;
2481 p->se.sum_exec_runtime = 0;
f6cf891c 2482 p->se.prev_sum_exec_runtime = 0;
4ae7d5ce
IM
2483 p->se.last_wakeup = 0;
2484 p->se.avg_overlap = 0;
831451ac
PZ
2485 p->se.start_runtime = 0;
2486 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
6cfb0d5d
IM
2487
2488#ifdef CONFIG_SCHEDSTATS
2489 p->se.wait_start = 0;
dd41f596
IM
2490 p->se.sum_sleep_runtime = 0;
2491 p->se.sleep_start = 0;
dd41f596
IM
2492 p->se.block_start = 0;
2493 p->se.sleep_max = 0;
2494 p->se.block_max = 0;
2495 p->se.exec_max = 0;
eba1ed4b 2496 p->se.slice_max = 0;
dd41f596 2497 p->se.wait_max = 0;
6cfb0d5d 2498#endif
476d139c 2499
fa717060 2500 INIT_LIST_HEAD(&p->rt.run_list);
dd41f596 2501 p->se.on_rq = 0;
4a55bd5e 2502 INIT_LIST_HEAD(&p->se.group_node);
476d139c 2503
e107be36
AK
2504#ifdef CONFIG_PREEMPT_NOTIFIERS
2505 INIT_HLIST_HEAD(&p->preempt_notifiers);
2506#endif
2507
1da177e4
LT
2508 /*
2509 * We mark the process as running here, but have not actually
2510 * inserted it onto the runqueue yet. This guarantees that
2511 * nobody will actually run it, and a signal or other external
2512 * event cannot wake it up and insert it on the runqueue either.
2513 */
2514 p->state = TASK_RUNNING;
dd41f596
IM
2515}
2516
2517/*
2518 * fork()/clone()-time setup:
2519 */
2520void sched_fork(struct task_struct *p, int clone_flags)
2521{
2522 int cpu = get_cpu();
2523
2524 __sched_fork(p);
2525
2526#ifdef CONFIG_SMP
2527 cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
2528#endif
02e4bac2 2529 set_task_cpu(p, cpu);
b29739f9
IM
2530
2531 /*
2532 * Make sure we do not leak PI boosting priority to the child:
2533 */
2534 p->prio = current->normal_prio;
2ddbf952
HS
2535 if (!rt_prio(p->prio))
2536 p->sched_class = &fair_sched_class;
b29739f9 2537
52f17b6c 2538#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 2539 if (likely(sched_info_on()))
52f17b6c 2540 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2541#endif
d6077cb8 2542#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
4866cde0
NP
2543 p->oncpu = 0;
2544#endif
1da177e4 2545#ifdef CONFIG_PREEMPT
4866cde0 2546 /* Want to start with kernel preemption disabled. */
a1261f54 2547 task_thread_info(p)->preempt_count = 1;
1da177e4 2548#endif
917b627d
GH
2549 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2550
476d139c 2551 put_cpu();
1da177e4
LT
2552}
2553
2554/*
2555 * wake_up_new_task - wake up a newly created task for the first time.
2556 *
2557 * This function will do some initial scheduler statistics housekeeping
2558 * that must be done for every newly created context, then puts the task
2559 * on the runqueue and wakes it.
2560 */
7ad5b3a5 2561void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1da177e4
LT
2562{
2563 unsigned long flags;
dd41f596 2564 struct rq *rq;
1da177e4
LT
2565
2566 rq = task_rq_lock(p, &flags);
147cbb4b 2567 BUG_ON(p->state != TASK_RUNNING);
a8e504d2 2568 update_rq_clock(rq);
1da177e4
LT
2569
2570 p->prio = effective_prio(p);
2571
b9dca1e0 2572 if (!p->sched_class->task_new || !current->se.on_rq) {
dd41f596 2573 activate_task(rq, p, 0);
1da177e4 2574 } else {
1da177e4 2575 /*
dd41f596
IM
2576 * Let the scheduling class do new task startup
2577 * management (if any):
1da177e4 2578 */
ee0827d8 2579 p->sched_class->task_new(rq, p);
c09595f6 2580 inc_nr_running(rq);
1da177e4 2581 }
c71dd42d 2582 trace_sched_wakeup_new(rq, p, 1);
15afe09b 2583 check_preempt_curr(rq, p, 0);
9a897c5a
SR
2584#ifdef CONFIG_SMP
2585 if (p->sched_class->task_wake_up)
2586 p->sched_class->task_wake_up(rq, p);
2587#endif
dd41f596 2588 task_rq_unlock(rq, &flags);
1da177e4
LT
2589}
2590
e107be36
AK
2591#ifdef CONFIG_PREEMPT_NOTIFIERS
2592
2593/**
80dd99b3 2594 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2595 * @notifier: notifier struct to register
e107be36
AK
2596 */
2597void preempt_notifier_register(struct preempt_notifier *notifier)
2598{
2599 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2600}
2601EXPORT_SYMBOL_GPL(preempt_notifier_register);
2602
2603/**
2604 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2605 * @notifier: notifier struct to unregister
e107be36
AK
2606 *
2607 * This is safe to call from within a preemption notifier.
2608 */
2609void preempt_notifier_unregister(struct preempt_notifier *notifier)
2610{
2611 hlist_del(&notifier->link);
2612}
2613EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2614
2615static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2616{
2617 struct preempt_notifier *notifier;
2618 struct hlist_node *node;
2619
2620 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2621 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2622}
2623
2624static void
2625fire_sched_out_preempt_notifiers(struct task_struct *curr,
2626 struct task_struct *next)
2627{
2628 struct preempt_notifier *notifier;
2629 struct hlist_node *node;
2630
2631 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2632 notifier->ops->sched_out(notifier, next);
2633}
2634
6d6bc0ad 2635#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
2636
2637static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2638{
2639}
2640
2641static void
2642fire_sched_out_preempt_notifiers(struct task_struct *curr,
2643 struct task_struct *next)
2644{
2645}
2646
6d6bc0ad 2647#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2648
4866cde0
NP
2649/**
2650 * prepare_task_switch - prepare to switch tasks
2651 * @rq: the runqueue preparing to switch
421cee29 2652 * @prev: the current task that is being switched out
4866cde0
NP
2653 * @next: the task we are going to switch to.
2654 *
2655 * This is called with the rq lock held and interrupts off. It must
2656 * be paired with a subsequent finish_task_switch after the context
2657 * switch.
2658 *
2659 * prepare_task_switch sets up locking and calls architecture specific
2660 * hooks.
2661 */
e107be36
AK
2662static inline void
2663prepare_task_switch(struct rq *rq, struct task_struct *prev,
2664 struct task_struct *next)
4866cde0 2665{
e107be36 2666 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2667 prepare_lock_switch(rq, next);
2668 prepare_arch_switch(next);
2669}
2670
1da177e4
LT
2671/**
2672 * finish_task_switch - clean up after a task-switch
344babaa 2673 * @rq: runqueue associated with task-switch
1da177e4
LT
2674 * @prev: the thread we just switched away from.
2675 *
4866cde0
NP
2676 * finish_task_switch must be called after the context switch, paired
2677 * with a prepare_task_switch call before the context switch.
2678 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2679 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2680 *
2681 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2682 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2683 * with the lock held can cause deadlocks; see schedule() for
2684 * details.)
2685 */
a9957449 2686static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
2687 __releases(rq->lock)
2688{
1da177e4 2689 struct mm_struct *mm = rq->prev_mm;
55a101f8 2690 long prev_state;
967fc046
GH
2691#ifdef CONFIG_SMP
2692 int post_schedule = 0;
2693
2694 if (current->sched_class->needs_post_schedule)
2695 post_schedule = current->sched_class->needs_post_schedule(rq);
2696#endif
1da177e4
LT
2697
2698 rq->prev_mm = NULL;
2699
2700 /*
2701 * A task struct has one reference for the use as "current".
c394cc9f 2702 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2703 * schedule one last time. The schedule call will never return, and
2704 * the scheduled task must drop that reference.
c394cc9f 2705 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
2706 * still held, otherwise prev could be scheduled on another cpu, die
2707 * there before we look at prev->state, and then the reference would
2708 * be dropped twice.
2709 * Manfred Spraul <manfred@colorfullife.com>
2710 */
55a101f8 2711 prev_state = prev->state;
4866cde0
NP
2712 finish_arch_switch(prev);
2713 finish_lock_switch(rq, prev);
9a897c5a 2714#ifdef CONFIG_SMP
967fc046 2715 if (post_schedule)
9a897c5a
SR
2716 current->sched_class->post_schedule(rq);
2717#endif
e8fa1362 2718
e107be36 2719 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2720 if (mm)
2721 mmdrop(mm);
c394cc9f 2722 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 2723 /*
2724 * Remove function-return probe instances associated with this
2725 * task and put them back on the free list.
9761eea8 2726 */
c6fd91f0 2727 kprobe_flush_task(prev);
1da177e4 2728 put_task_struct(prev);
c6fd91f0 2729 }
1da177e4
LT
2730}
2731
2732/**
2733 * schedule_tail - first thing a freshly forked thread must call.
2734 * @prev: the thread we just switched away from.
2735 */
36c8b586 2736asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
2737 __releases(rq->lock)
2738{
70b97a7f
IM
2739 struct rq *rq = this_rq();
2740
4866cde0
NP
2741 finish_task_switch(rq, prev);
2742#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2743 /* In this case, finish_task_switch does not reenable preemption */
2744 preempt_enable();
2745#endif
1da177e4 2746 if (current->set_child_tid)
b488893a 2747 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2748}
2749
2750/*
2751 * context_switch - switch to the new MM and the new
2752 * thread's register state.
2753 */
dd41f596 2754static inline void
70b97a7f 2755context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2756 struct task_struct *next)
1da177e4 2757{
dd41f596 2758 struct mm_struct *mm, *oldmm;
1da177e4 2759
e107be36 2760 prepare_task_switch(rq, prev, next);
0a16b607 2761 trace_sched_switch(rq, prev, next);
dd41f596
IM
2762 mm = next->mm;
2763 oldmm = prev->active_mm;
9226d125
ZA
2764 /*
2765 * For paravirt, this is coupled with an exit in switch_to to
2766 * combine the page table reload and the switch backend into
2767 * one hypercall.
2768 */
2769 arch_enter_lazy_cpu_mode();
2770
dd41f596 2771 if (unlikely(!mm)) {
1da177e4
LT
2772 next->active_mm = oldmm;
2773 atomic_inc(&oldmm->mm_count);
2774 enter_lazy_tlb(oldmm, next);
2775 } else
2776 switch_mm(oldmm, mm, next);
2777
dd41f596 2778 if (unlikely(!prev->mm)) {
1da177e4 2779 prev->active_mm = NULL;
1da177e4
LT
2780 rq->prev_mm = oldmm;
2781 }
3a5f5e48
IM
2782 /*
2783 * Since the runqueue lock will be released by the next
2784 * task (which is an invalid locking op but in the case
2785 * of the scheduler it's an obvious special-case), so we
2786 * do an early lockdep release here:
2787 */
2788#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 2789 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 2790#endif
1da177e4
LT
2791
2792 /* Here we just switch the register state and the stack. */
2793 switch_to(prev, next, prev);
2794
dd41f596
IM
2795 barrier();
2796 /*
2797 * this_rq must be evaluated again because prev may have moved
2798 * CPUs since it called schedule(), thus the 'rq' on its stack
2799 * frame will be invalid.
2800 */
2801 finish_task_switch(this_rq(), prev);
1da177e4
LT
2802}
2803
2804/*
2805 * nr_running, nr_uninterruptible and nr_context_switches:
2806 *
2807 * externally visible scheduler statistics: current number of runnable
2808 * threads, current number of uninterruptible-sleeping threads, total
2809 * number of context switches performed since bootup.
2810 */
2811unsigned long nr_running(void)
2812{
2813 unsigned long i, sum = 0;
2814
2815 for_each_online_cpu(i)
2816 sum += cpu_rq(i)->nr_running;
2817
2818 return sum;
2819}
2820
2821unsigned long nr_uninterruptible(void)
2822{
2823 unsigned long i, sum = 0;
2824
0a945022 2825 for_each_possible_cpu(i)
1da177e4
LT
2826 sum += cpu_rq(i)->nr_uninterruptible;
2827
2828 /*
2829 * Since we read the counters lockless, it might be slightly
2830 * inaccurate. Do not allow it to go below zero though:
2831 */
2832 if (unlikely((long)sum < 0))
2833 sum = 0;
2834
2835 return sum;
2836}
2837
2838unsigned long long nr_context_switches(void)
2839{
cc94abfc
SR
2840 int i;
2841 unsigned long long sum = 0;
1da177e4 2842
0a945022 2843 for_each_possible_cpu(i)
1da177e4
LT
2844 sum += cpu_rq(i)->nr_switches;
2845
2846 return sum;
2847}
2848
2849unsigned long nr_iowait(void)
2850{
2851 unsigned long i, sum = 0;
2852
0a945022 2853 for_each_possible_cpu(i)
1da177e4
LT
2854 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2855
2856 return sum;
2857}
2858
db1b1fef
JS
2859unsigned long nr_active(void)
2860{
2861 unsigned long i, running = 0, uninterruptible = 0;
2862
2863 for_each_online_cpu(i) {
2864 running += cpu_rq(i)->nr_running;
2865 uninterruptible += cpu_rq(i)->nr_uninterruptible;
2866 }
2867
2868 if (unlikely((long)uninterruptible < 0))
2869 uninterruptible = 0;
2870
2871 return running + uninterruptible;
2872}
2873
48f24c4d 2874/*
dd41f596
IM
2875 * Update rq->cpu_load[] statistics. This function is usually called every
2876 * scheduler tick (TICK_NSEC).
48f24c4d 2877 */
dd41f596 2878static void update_cpu_load(struct rq *this_rq)
48f24c4d 2879{
495eca49 2880 unsigned long this_load = this_rq->load.weight;
dd41f596
IM
2881 int i, scale;
2882
2883 this_rq->nr_load_updates++;
dd41f596
IM
2884
2885 /* Update our load: */
2886 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2887 unsigned long old_load, new_load;
2888
2889 /* scale is effectively 1 << i now, and >> i divides by scale */
2890
2891 old_load = this_rq->cpu_load[i];
2892 new_load = this_load;
a25707f3
IM
2893 /*
2894 * Round up the averaging division if load is increasing. This
2895 * prevents us from getting stuck on 9 if the load is 10, for
2896 * example.
2897 */
2898 if (new_load > old_load)
2899 new_load += scale-1;
dd41f596
IM
2900 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
2901 }
48f24c4d
IM
2902}
2903
dd41f596
IM
2904#ifdef CONFIG_SMP
2905
1da177e4
LT
2906/*
2907 * double_rq_lock - safely lock two runqueues
2908 *
2909 * Note this does not disable interrupts like task_rq_lock,
2910 * you need to do so manually before calling.
2911 */
70b97a7f 2912static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1da177e4
LT
2913 __acquires(rq1->lock)
2914 __acquires(rq2->lock)
2915{
054b9108 2916 BUG_ON(!irqs_disabled());
1da177e4
LT
2917 if (rq1 == rq2) {
2918 spin_lock(&rq1->lock);
2919 __acquire(rq2->lock); /* Fake it out ;) */
2920 } else {
c96d145e 2921 if (rq1 < rq2) {
1da177e4 2922 spin_lock(&rq1->lock);
5e710e37 2923 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1da177e4
LT
2924 } else {
2925 spin_lock(&rq2->lock);
5e710e37 2926 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1da177e4
LT
2927 }
2928 }
6e82a3be
IM
2929 update_rq_clock(rq1);
2930 update_rq_clock(rq2);
1da177e4
LT
2931}
2932
2933/*
2934 * double_rq_unlock - safely unlock two runqueues
2935 *
2936 * Note this does not restore interrupts like task_rq_unlock,
2937 * you need to do so manually after calling.
2938 */
70b97a7f 2939static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1da177e4
LT
2940 __releases(rq1->lock)
2941 __releases(rq2->lock)
2942{
2943 spin_unlock(&rq1->lock);
2944 if (rq1 != rq2)
2945 spin_unlock(&rq2->lock);
2946 else
2947 __release(rq2->lock);
2948}
2949
1da177e4
LT
2950/*
2951 * If dest_cpu is allowed for this process, migrate the task to it.
2952 * This is accomplished by forcing the cpu_allowed mask to only
41a2d6cf 2953 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
1da177e4
LT
2954 * the cpu_allowed mask is restored.
2955 */
36c8b586 2956static void sched_migrate_task(struct task_struct *p, int dest_cpu)
1da177e4 2957{
70b97a7f 2958 struct migration_req req;
1da177e4 2959 unsigned long flags;
70b97a7f 2960 struct rq *rq;
1da177e4
LT
2961
2962 rq = task_rq_lock(p, &flags);
96f874e2 2963 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
e761b772 2964 || unlikely(!cpu_active(dest_cpu)))
1da177e4
LT
2965 goto out;
2966
2967 /* force the process onto the specified CPU */
2968 if (migrate_task(p, dest_cpu, &req)) {
2969 /* Need to wait for migration thread (might exit: take ref). */
2970 struct task_struct *mt = rq->migration_thread;
36c8b586 2971
1da177e4
LT
2972 get_task_struct(mt);
2973 task_rq_unlock(rq, &flags);
2974 wake_up_process(mt);
2975 put_task_struct(mt);
2976 wait_for_completion(&req.done);
36c8b586 2977
1da177e4
LT
2978 return;
2979 }
2980out:
2981 task_rq_unlock(rq, &flags);
2982}
2983
2984/*
476d139c
NP
2985 * sched_exec - execve() is a valuable balancing opportunity, because at
2986 * this point the task has the smallest effective memory and cache footprint.
1da177e4
LT
2987 */
2988void sched_exec(void)
2989{
1da177e4 2990 int new_cpu, this_cpu = get_cpu();
476d139c 2991 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
1da177e4 2992 put_cpu();
476d139c
NP
2993 if (new_cpu != this_cpu)
2994 sched_migrate_task(current, new_cpu);
1da177e4
LT
2995}
2996
2997/*
2998 * pull_task - move a task from a remote runqueue to the local runqueue.
2999 * Both runqueues must be locked.
3000 */
dd41f596
IM
3001static void pull_task(struct rq *src_rq, struct task_struct *p,
3002 struct rq *this_rq, int this_cpu)
1da177e4 3003{
2e1cb74a 3004 deactivate_task(src_rq, p, 0);
1da177e4 3005 set_task_cpu(p, this_cpu);
dd41f596 3006 activate_task(this_rq, p, 0);
1da177e4
LT
3007 /*
3008 * Note that idle threads have a prio of MAX_PRIO, for this test
3009 * to be always true for them.
3010 */
15afe09b 3011 check_preempt_curr(this_rq, p, 0);
1da177e4
LT
3012}
3013
3014/*
3015 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3016 */
858119e1 3017static
70b97a7f 3018int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
d15bcfdb 3019 struct sched_domain *sd, enum cpu_idle_type idle,
95cdf3b7 3020 int *all_pinned)
1da177e4 3021{
708dc512 3022 int tsk_cache_hot = 0;
1da177e4
LT
3023 /*
3024 * We do not migrate tasks that are:
3025 * 1) running (obviously), or
3026 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3027 * 3) are cache-hot on their current CPU.
3028 */
96f874e2 3029 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
cc367732 3030 schedstat_inc(p, se.nr_failed_migrations_affine);
1da177e4 3031 return 0;
cc367732 3032 }
81026794
NP
3033 *all_pinned = 0;
3034
cc367732
IM
3035 if (task_running(rq, p)) {
3036 schedstat_inc(p, se.nr_failed_migrations_running);
81026794 3037 return 0;
cc367732 3038 }
1da177e4 3039
da84d961
IM
3040 /*
3041 * Aggressive migration if:
3042 * 1) task is cache cold, or
3043 * 2) too many balance attempts have failed.
3044 */
3045
708dc512
LH
3046 tsk_cache_hot = task_hot(p, rq->clock, sd);
3047 if (!tsk_cache_hot ||
3048 sd->nr_balance_failed > sd->cache_nice_tries) {
da84d961 3049#ifdef CONFIG_SCHEDSTATS
708dc512 3050 if (tsk_cache_hot) {
da84d961 3051 schedstat_inc(sd, lb_hot_gained[idle]);
cc367732
IM
3052 schedstat_inc(p, se.nr_forced_migrations);
3053 }
da84d961
IM
3054#endif
3055 return 1;
3056 }
3057
708dc512 3058 if (tsk_cache_hot) {
cc367732 3059 schedstat_inc(p, se.nr_failed_migrations_hot);
da84d961 3060 return 0;
cc367732 3061 }
1da177e4
LT
3062 return 1;
3063}
3064
e1d1484f
PW
3065static unsigned long
3066balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3067 unsigned long max_load_move, struct sched_domain *sd,
3068 enum cpu_idle_type idle, int *all_pinned,
3069 int *this_best_prio, struct rq_iterator *iterator)
1da177e4 3070{
051c6764 3071 int loops = 0, pulled = 0, pinned = 0;
dd41f596
IM
3072 struct task_struct *p;
3073 long rem_load_move = max_load_move;
1da177e4 3074
e1d1484f 3075 if (max_load_move == 0)
1da177e4
LT
3076 goto out;
3077
81026794
NP
3078 pinned = 1;
3079
1da177e4 3080 /*
dd41f596 3081 * Start the load-balancing iterator:
1da177e4 3082 */
dd41f596
IM
3083 p = iterator->start(iterator->arg);
3084next:
b82d9fdd 3085 if (!p || loops++ > sysctl_sched_nr_migrate)
1da177e4 3086 goto out;
051c6764
PZ
3087
3088 if ((p->se.load.weight >> 1) > rem_load_move ||
dd41f596 3089 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
dd41f596
IM
3090 p = iterator->next(iterator->arg);
3091 goto next;
1da177e4
LT
3092 }
3093
dd41f596 3094 pull_task(busiest, p, this_rq, this_cpu);
1da177e4 3095 pulled++;
dd41f596 3096 rem_load_move -= p->se.load.weight;
1da177e4 3097
7e96fa58
GH
3098#ifdef CONFIG_PREEMPT
3099 /*
3100 * NEWIDLE balancing is a source of latency, so preemptible kernels
3101 * will stop after the first task is pulled to minimize the critical
3102 * section.
3103 */
3104 if (idle == CPU_NEWLY_IDLE)
3105 goto out;
3106#endif
3107
2dd73a4f 3108 /*
b82d9fdd 3109 * We only want to steal up to the prescribed amount of weighted load.
2dd73a4f 3110 */
e1d1484f 3111 if (rem_load_move > 0) {
a4ac01c3
PW
3112 if (p->prio < *this_best_prio)
3113 *this_best_prio = p->prio;
dd41f596
IM
3114 p = iterator->next(iterator->arg);
3115 goto next;
1da177e4
LT
3116 }
3117out:
3118 /*
e1d1484f 3119 * Right now, this is one of only two places pull_task() is called,
1da177e4
LT
3120 * so we can safely collect pull_task() stats here rather than
3121 * inside pull_task().
3122 */
3123 schedstat_add(sd, lb_gained[idle], pulled);
81026794
NP
3124
3125 if (all_pinned)
3126 *all_pinned = pinned;
e1d1484f
PW
3127
3128 return max_load_move - rem_load_move;
1da177e4
LT
3129}
3130
dd41f596 3131/*
43010659
PW
3132 * move_tasks tries to move up to max_load_move weighted load from busiest to
3133 * this_rq, as part of a balancing operation within domain "sd".
3134 * Returns 1 if successful and 0 otherwise.
dd41f596
IM
3135 *
3136 * Called with both runqueues locked.
3137 */
3138static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
43010659 3139 unsigned long max_load_move,
dd41f596
IM
3140 struct sched_domain *sd, enum cpu_idle_type idle,
3141 int *all_pinned)
3142{
5522d5d5 3143 const struct sched_class *class = sched_class_highest;
43010659 3144 unsigned long total_load_moved = 0;
a4ac01c3 3145 int this_best_prio = this_rq->curr->prio;
dd41f596
IM
3146
3147 do {
43010659
PW
3148 total_load_moved +=
3149 class->load_balance(this_rq, this_cpu, busiest,
e1d1484f 3150 max_load_move - total_load_moved,
a4ac01c3 3151 sd, idle, all_pinned, &this_best_prio);
dd41f596 3152 class = class->next;
c4acb2c0 3153
7e96fa58
GH
3154#ifdef CONFIG_PREEMPT
3155 /*
3156 * NEWIDLE balancing is a source of latency, so preemptible
3157 * kernels will stop after the first task is pulled to minimize
3158 * the critical section.
3159 */
c4acb2c0
GH
3160 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3161 break;
7e96fa58 3162#endif
43010659 3163 } while (class && max_load_move > total_load_moved);
dd41f596 3164
43010659
PW
3165 return total_load_moved > 0;
3166}
3167
e1d1484f
PW
3168static int
3169iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3170 struct sched_domain *sd, enum cpu_idle_type idle,
3171 struct rq_iterator *iterator)
3172{
3173 struct task_struct *p = iterator->start(iterator->arg);
3174 int pinned = 0;
3175
3176 while (p) {
3177 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
3178 pull_task(busiest, p, this_rq, this_cpu);
3179 /*
3180 * Right now, this is only the second place pull_task()
3181 * is called, so we can safely collect pull_task()
3182 * stats here rather than inside pull_task().
3183 */
3184 schedstat_inc(sd, lb_gained[idle]);
3185
3186 return 1;
3187 }
3188 p = iterator->next(iterator->arg);
3189 }
3190
3191 return 0;
3192}
3193
43010659
PW
3194/*
3195 * move_one_task tries to move exactly one task from busiest to this_rq, as
3196 * part of active balancing operations within "domain".
3197 * Returns 1 if successful and 0 otherwise.
3198 *
3199 * Called with both runqueues locked.
3200 */
3201static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3202 struct sched_domain *sd, enum cpu_idle_type idle)
3203{
5522d5d5 3204 const struct sched_class *class;
43010659
PW
3205
3206 for (class = sched_class_highest; class; class = class->next)
e1d1484f 3207 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
43010659
PW
3208 return 1;
3209
3210 return 0;
dd41f596 3211}
67bb6c03 3212/********** Helpers for find_busiest_group ************************/
1da177e4 3213/*
222d656d
GS
3214 * sd_lb_stats - Structure to store the statistics of a sched_domain
3215 * during load balancing.
1da177e4 3216 */
222d656d
GS
3217struct sd_lb_stats {
3218 struct sched_group *busiest; /* Busiest group in this sd */
3219 struct sched_group *this; /* Local group in this sd */
3220 unsigned long total_load; /* Total load of all groups in sd */
3221 unsigned long total_pwr; /* Total power of all groups in sd */
3222 unsigned long avg_load; /* Average load across all groups in sd */
3223
3224 /** Statistics of this group */
3225 unsigned long this_load;
3226 unsigned long this_load_per_task;
3227 unsigned long this_nr_running;
3228
3229 /* Statistics of the busiest group */
3230 unsigned long max_load;
3231 unsigned long busiest_load_per_task;
3232 unsigned long busiest_nr_running;
3233
3234 int group_imb; /* Is there imbalance in this sd */
5c45bf27 3235#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
222d656d
GS
3236 int power_savings_balance; /* Is powersave balance needed for this sd */
3237 struct sched_group *group_min; /* Least loaded group in sd */
3238 struct sched_group *group_leader; /* Group which relieves group_min */
3239 unsigned long min_load_per_task; /* load_per_task in group_min */
3240 unsigned long leader_nr_running; /* Nr running of group_leader */
3241 unsigned long min_nr_running; /* Nr running of group_min */
5c45bf27 3242#endif
222d656d 3243};
1da177e4 3244
d5ac537e 3245/*
381be78f
GS
3246 * sg_lb_stats - stats of a sched_group required for load_balancing
3247 */
3248struct sg_lb_stats {
3249 unsigned long avg_load; /*Avg load across the CPUs of the group */
3250 unsigned long group_load; /* Total load over the CPUs of the group */
3251 unsigned long sum_nr_running; /* Nr tasks running in the group */
3252 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3253 unsigned long group_capacity;
3254 int group_imb; /* Is there an imbalance in the group ? */
3255};
408ed066 3256
67bb6c03
GS
3257/**
3258 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3259 * @group: The group whose first cpu is to be returned.
3260 */
3261static inline unsigned int group_first_cpu(struct sched_group *group)
3262{
3263 return cpumask_first(sched_group_cpus(group));
3264}
3265
3266/**
3267 * get_sd_load_idx - Obtain the load index for a given sched domain.
3268 * @sd: The sched_domain whose load_idx is to be obtained.
3269 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3270 */
3271static inline int get_sd_load_idx(struct sched_domain *sd,
3272 enum cpu_idle_type idle)
3273{
3274 int load_idx;
3275
3276 switch (idle) {
3277 case CPU_NOT_IDLE:
7897986b 3278 load_idx = sd->busy_idx;
67bb6c03
GS
3279 break;
3280
3281 case CPU_NEWLY_IDLE:
7897986b 3282 load_idx = sd->newidle_idx;
67bb6c03
GS
3283 break;
3284 default:
7897986b 3285 load_idx = sd->idle_idx;
67bb6c03
GS
3286 break;
3287 }
1da177e4 3288
67bb6c03
GS
3289 return load_idx;
3290}
1da177e4 3291
1da177e4 3292
c071df18
GS
3293#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3294/**
3295 * init_sd_power_savings_stats - Initialize power savings statistics for
3296 * the given sched_domain, during load balancing.
3297 *
3298 * @sd: Sched domain whose power-savings statistics are to be initialized.
3299 * @sds: Variable containing the statistics for sd.
3300 * @idle: Idle status of the CPU at which we're performing load-balancing.
3301 */
3302static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3303 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3304{
3305 /*
3306 * Busy processors will not participate in power savings
3307 * balance.
3308 */
3309 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3310 sds->power_savings_balance = 0;
3311 else {
3312 sds->power_savings_balance = 1;
3313 sds->min_nr_running = ULONG_MAX;
3314 sds->leader_nr_running = 0;
3315 }
3316}
783609c6 3317
c071df18
GS
3318/**
3319 * update_sd_power_savings_stats - Update the power saving stats for a
3320 * sched_domain while performing load balancing.
3321 *
3322 * @group: sched_group belonging to the sched_domain under consideration.
3323 * @sds: Variable containing the statistics of the sched_domain
3324 * @local_group: Does group contain the CPU for which we're performing
3325 * load balancing ?
3326 * @sgs: Variable containing the statistics of the group.
3327 */
3328static inline void update_sd_power_savings_stats(struct sched_group *group,
3329 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3330{
408ed066 3331
c071df18
GS
3332 if (!sds->power_savings_balance)
3333 return;
1da177e4 3334
c071df18
GS
3335 /*
3336 * If the local group is idle or completely loaded
3337 * no need to do power savings balance at this domain
3338 */
3339 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3340 !sds->this_nr_running))
3341 sds->power_savings_balance = 0;
2dd73a4f 3342
c071df18
GS
3343 /*
3344 * If a group is already running at full capacity or idle,
3345 * don't include that group in power savings calculations
3346 */
3347 if (!sds->power_savings_balance ||
3348 sgs->sum_nr_running >= sgs->group_capacity ||
3349 !sgs->sum_nr_running)
3350 return;
5969fe06 3351
c071df18
GS
3352 /*
3353 * Calculate the group which has the least non-idle load.
3354 * This is the group from where we need to pick up the load
3355 * for saving power
3356 */
3357 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3358 (sgs->sum_nr_running == sds->min_nr_running &&
3359 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3360 sds->group_min = group;
3361 sds->min_nr_running = sgs->sum_nr_running;
3362 sds->min_load_per_task = sgs->sum_weighted_load /
3363 sgs->sum_nr_running;
3364 }
783609c6 3365
c071df18
GS
3366 /*
3367 * Calculate the group which is almost near its
3368 * capacity but still has some space to pick up some load
3369 * from other group and save more power
3370 */
3371 if (sgs->sum_nr_running > sgs->group_capacity - 1)
3372 return;
1da177e4 3373
c071df18
GS
3374 if (sgs->sum_nr_running > sds->leader_nr_running ||
3375 (sgs->sum_nr_running == sds->leader_nr_running &&
3376 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3377 sds->group_leader = group;
3378 sds->leader_nr_running = sgs->sum_nr_running;
3379 }
3380}
408ed066 3381
c071df18 3382/**
d5ac537e 3383 * check_power_save_busiest_group - see if there is potential for some power-savings balance
c071df18
GS
3384 * @sds: Variable containing the statistics of the sched_domain
3385 * under consideration.
3386 * @this_cpu: Cpu at which we're currently performing load-balancing.
3387 * @imbalance: Variable to store the imbalance.
3388 *
d5ac537e
RD
3389 * Description:
3390 * Check if we have potential to perform some power-savings balance.
3391 * If yes, set the busiest group to be the least loaded group in the
3392 * sched_domain, so that it's CPUs can be put to idle.
3393 *
c071df18
GS
3394 * Returns 1 if there is potential to perform power-savings balance.
3395 * Else returns 0.
3396 */
3397static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3398 int this_cpu, unsigned long *imbalance)
3399{
3400 if (!sds->power_savings_balance)
3401 return 0;
1da177e4 3402
c071df18
GS
3403 if (sds->this != sds->group_leader ||
3404 sds->group_leader == sds->group_min)
3405 return 0;
783609c6 3406
c071df18
GS
3407 *imbalance = sds->min_load_per_task;
3408 sds->busiest = sds->group_min;
1da177e4 3409
c071df18
GS
3410 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3411 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3412 group_first_cpu(sds->group_leader);
3413 }
3414
3415 return 1;
1da177e4 3416
c071df18
GS
3417}
3418#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3419static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3420 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3421{
3422 return;
3423}
408ed066 3424
c071df18
GS
3425static inline void update_sd_power_savings_stats(struct sched_group *group,
3426 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3427{
3428 return;
3429}
3430
3431static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3432 int this_cpu, unsigned long *imbalance)
3433{
3434 return 0;
3435}
3436#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3437
3438
1f8c553d
GS
3439/**
3440 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3441 * @group: sched_group whose statistics are to be updated.
3442 * @this_cpu: Cpu for which load balance is currently performed.
3443 * @idle: Idle status of this_cpu
3444 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3445 * @sd_idle: Idle status of the sched_domain containing group.
3446 * @local_group: Does group contain this_cpu.
3447 * @cpus: Set of cpus considered for load balancing.
3448 * @balance: Should we balance.
3449 * @sgs: variable to hold the statistics for this group.
3450 */
3451static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3452 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3453 int local_group, const struct cpumask *cpus,
3454 int *balance, struct sg_lb_stats *sgs)
3455{
3456 unsigned long load, max_cpu_load, min_cpu_load;
3457 int i;
3458 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3459 unsigned long sum_avg_load_per_task;
3460 unsigned long avg_load_per_task;
3461
3462 if (local_group)
3463 balance_cpu = group_first_cpu(group);
3464
3465 /* Tally up the load of all CPUs in the group */
3466 sum_avg_load_per_task = avg_load_per_task = 0;
3467 max_cpu_load = 0;
3468 min_cpu_load = ~0UL;
408ed066 3469
1f8c553d
GS
3470 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3471 struct rq *rq = cpu_rq(i);
908a7c1b 3472
1f8c553d
GS
3473 if (*sd_idle && rq->nr_running)
3474 *sd_idle = 0;
5c45bf27 3475
1f8c553d 3476 /* Bias balancing toward cpus of our domain */
1da177e4 3477 if (local_group) {
1f8c553d
GS
3478 if (idle_cpu(i) && !first_idle_cpu) {
3479 first_idle_cpu = 1;
3480 balance_cpu = i;
3481 }
3482
3483 load = target_load(i, load_idx);
3484 } else {
3485 load = source_load(i, load_idx);
3486 if (load > max_cpu_load)
3487 max_cpu_load = load;
3488 if (min_cpu_load > load)
3489 min_cpu_load = load;
1da177e4 3490 }
5c45bf27 3491
1f8c553d
GS
3492 sgs->group_load += load;
3493 sgs->sum_nr_running += rq->nr_running;
3494 sgs->sum_weighted_load += weighted_cpuload(i);
5c45bf27 3495
1f8c553d
GS
3496 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3497 }
5c45bf27 3498
1f8c553d
GS
3499 /*
3500 * First idle cpu or the first cpu(busiest) in this sched group
3501 * is eligible for doing load balancing at this and above
3502 * domains. In the newly idle case, we will allow all the cpu's
3503 * to do the newly idle load balance.
3504 */
3505 if (idle != CPU_NEWLY_IDLE && local_group &&
3506 balance_cpu != this_cpu && balance) {
3507 *balance = 0;
3508 return;
3509 }
5c45bf27 3510
1f8c553d
GS
3511 /* Adjust by relative CPU power of the group */
3512 sgs->avg_load = sg_div_cpu_power(group,
3513 sgs->group_load * SCHED_LOAD_SCALE);
5c45bf27 3514
1f8c553d
GS
3515
3516 /*
3517 * Consider the group unbalanced when the imbalance is larger
3518 * than the average weight of two tasks.
3519 *
3520 * APZ: with cgroup the avg task weight can vary wildly and
3521 * might not be a suitable number - should we keep a
3522 * normalized nr_running number somewhere that negates
3523 * the hierarchy?
3524 */
3525 avg_load_per_task = sg_div_cpu_power(group,
3526 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3527
3528 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3529 sgs->group_imb = 1;
3530
3531 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3532
3533}
dd41f596 3534
37abe198
GS
3535/**
3536 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
3537 * @sd: sched_domain whose statistics are to be updated.
3538 * @this_cpu: Cpu for which load balance is currently performed.
3539 * @idle: Idle status of this_cpu
3540 * @sd_idle: Idle status of the sched_domain containing group.
3541 * @cpus: Set of cpus considered for load balancing.
3542 * @balance: Should we balance.
3543 * @sds: variable to hold the statistics for this sched_domain.
1da177e4 3544 */
37abe198
GS
3545static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3546 enum cpu_idle_type idle, int *sd_idle,
3547 const struct cpumask *cpus, int *balance,
3548 struct sd_lb_stats *sds)
1da177e4 3549{
222d656d 3550 struct sched_group *group = sd->groups;
37abe198 3551 struct sg_lb_stats sgs;
222d656d
GS
3552 int load_idx;
3553
c071df18 3554 init_sd_power_savings_stats(sd, sds, idle);
67bb6c03 3555 load_idx = get_sd_load_idx(sd, idle);
1da177e4
LT
3556
3557 do {
1da177e4 3558 int local_group;
1da177e4 3559
758b2cdc
RR
3560 local_group = cpumask_test_cpu(this_cpu,
3561 sched_group_cpus(group));
381be78f 3562 memset(&sgs, 0, sizeof(sgs));
1f8c553d
GS
3563 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
3564 local_group, cpus, balance, &sgs);
1da177e4 3565
37abe198
GS
3566 if (local_group && balance && !(*balance))
3567 return;
783609c6 3568
37abe198
GS
3569 sds->total_load += sgs.group_load;
3570 sds->total_pwr += group->__cpu_power;
1da177e4 3571
1da177e4 3572 if (local_group) {
37abe198
GS
3573 sds->this_load = sgs.avg_load;
3574 sds->this = group;
3575 sds->this_nr_running = sgs.sum_nr_running;
3576 sds->this_load_per_task = sgs.sum_weighted_load;
3577 } else if (sgs.avg_load > sds->max_load &&
381be78f
GS
3578 (sgs.sum_nr_running > sgs.group_capacity ||
3579 sgs.group_imb)) {
37abe198
GS
3580 sds->max_load = sgs.avg_load;
3581 sds->busiest = group;
3582 sds->busiest_nr_running = sgs.sum_nr_running;
3583 sds->busiest_load_per_task = sgs.sum_weighted_load;
3584 sds->group_imb = sgs.group_imb;
48f24c4d 3585 }
5c45bf27 3586
c071df18 3587 update_sd_power_savings_stats(group, sds, local_group, &sgs);
1da177e4
LT
3588 group = group->next;
3589 } while (group != sd->groups);
3590
37abe198 3591}
1da177e4 3592
2e6f44ae
GS
3593/**
3594 * fix_small_imbalance - Calculate the minor imbalance that exists
dbc523a3
GS
3595 * amongst the groups of a sched_domain, during
3596 * load balancing.
2e6f44ae
GS
3597 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3598 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3599 * @imbalance: Variable to store the imbalance.
3600 */
3601static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3602 int this_cpu, unsigned long *imbalance)
3603{
3604 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3605 unsigned int imbn = 2;
3606
3607 if (sds->this_nr_running) {
3608 sds->this_load_per_task /= sds->this_nr_running;
3609 if (sds->busiest_load_per_task >
3610 sds->this_load_per_task)
3611 imbn = 1;
3612 } else
3613 sds->this_load_per_task =
3614 cpu_avg_load_per_task(this_cpu);
1da177e4 3615
2e6f44ae
GS
3616 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3617 sds->busiest_load_per_task * imbn) {
3618 *imbalance = sds->busiest_load_per_task;
3619 return;
3620 }
908a7c1b 3621
1da177e4 3622 /*
2e6f44ae
GS
3623 * OK, we don't have enough imbalance to justify moving tasks,
3624 * however we may be able to increase total CPU power used by
3625 * moving them.
1da177e4 3626 */
2dd73a4f 3627
2e6f44ae
GS
3628 pwr_now += sds->busiest->__cpu_power *
3629 min(sds->busiest_load_per_task, sds->max_load);
3630 pwr_now += sds->this->__cpu_power *
3631 min(sds->this_load_per_task, sds->this_load);
3632 pwr_now /= SCHED_LOAD_SCALE;
3633
3634 /* Amount of load we'd subtract */
3635 tmp = sg_div_cpu_power(sds->busiest,
3636 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3637 if (sds->max_load > tmp)
3638 pwr_move += sds->busiest->__cpu_power *
3639 min(sds->busiest_load_per_task, sds->max_load - tmp);
3640
3641 /* Amount of load we'd add */
3642 if (sds->max_load * sds->busiest->__cpu_power <
3643 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3644 tmp = sg_div_cpu_power(sds->this,
3645 sds->max_load * sds->busiest->__cpu_power);
3646 else
3647 tmp = sg_div_cpu_power(sds->this,
3648 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3649 pwr_move += sds->this->__cpu_power *
3650 min(sds->this_load_per_task, sds->this_load + tmp);
3651 pwr_move /= SCHED_LOAD_SCALE;
3652
3653 /* Move if we gain throughput */
3654 if (pwr_move > pwr_now)
3655 *imbalance = sds->busiest_load_per_task;
3656}
dbc523a3
GS
3657
3658/**
3659 * calculate_imbalance - Calculate the amount of imbalance present within the
3660 * groups of a given sched_domain during load balance.
3661 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3662 * @this_cpu: Cpu for which currently load balance is being performed.
3663 * @imbalance: The variable to store the imbalance.
3664 */
3665static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3666 unsigned long *imbalance)
3667{
3668 unsigned long max_pull;
2dd73a4f
PW
3669 /*
3670 * In the presence of smp nice balancing, certain scenarios can have
3671 * max load less than avg load(as we skip the groups at or below
3672 * its cpu_power, while calculating max_load..)
3673 */
dbc523a3 3674 if (sds->max_load < sds->avg_load) {
2dd73a4f 3675 *imbalance = 0;
dbc523a3 3676 return fix_small_imbalance(sds, this_cpu, imbalance);
2dd73a4f 3677 }
0c117f1b
SS
3678
3679 /* Don't want to pull so many tasks that a group would go idle */
dbc523a3
GS
3680 max_pull = min(sds->max_load - sds->avg_load,
3681 sds->max_load - sds->busiest_load_per_task);
0c117f1b 3682
1da177e4 3683 /* How much load to actually move to equalise the imbalance */
dbc523a3
GS
3684 *imbalance = min(max_pull * sds->busiest->__cpu_power,
3685 (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
1da177e4
LT
3686 / SCHED_LOAD_SCALE;
3687
2dd73a4f
PW
3688 /*
3689 * if *imbalance is less than the average load per runnable task
3690 * there is no gaurantee that any tasks will be moved so we'll have
3691 * a think about bumping its value to force at least one task to be
3692 * moved
3693 */
dbc523a3
GS
3694 if (*imbalance < sds->busiest_load_per_task)
3695 return fix_small_imbalance(sds, this_cpu, imbalance);
1da177e4 3696
dbc523a3 3697}
37abe198 3698/******* find_busiest_group() helpers end here *********************/
1da177e4 3699
b7bb4c9b
GS
3700/**
3701 * find_busiest_group - Returns the busiest group within the sched_domain
3702 * if there is an imbalance. If there isn't an imbalance, and
3703 * the user has opted for power-savings, it returns a group whose
3704 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3705 * such a group exists.
3706 *
3707 * Also calculates the amount of weighted load which should be moved
3708 * to restore balance.
3709 *
3710 * @sd: The sched_domain whose busiest group is to be returned.
3711 * @this_cpu: The cpu for which load balancing is currently being performed.
3712 * @imbalance: Variable which stores amount of weighted load which should
3713 * be moved to restore balance/put a group to idle.
3714 * @idle: The idle status of this_cpu.
3715 * @sd_idle: The idleness of sd
3716 * @cpus: The set of CPUs under consideration for load-balancing.
3717 * @balance: Pointer to a variable indicating if this_cpu
3718 * is the appropriate cpu to perform load balancing at this_level.
3719 *
3720 * Returns: - the busiest group if imbalance exists.
3721 * - If no imbalance and user has opted for power-savings balance,
3722 * return the least loaded group whose CPUs can be
3723 * put to idle by rebalancing its tasks onto our group.
37abe198
GS
3724 */
3725static struct sched_group *
3726find_busiest_group(struct sched_domain *sd, int this_cpu,
3727 unsigned long *imbalance, enum cpu_idle_type idle,
3728 int *sd_idle, const struct cpumask *cpus, int *balance)
3729{
3730 struct sd_lb_stats sds;
1da177e4 3731
37abe198 3732 memset(&sds, 0, sizeof(sds));
1da177e4 3733
37abe198
GS
3734 /*
3735 * Compute the various statistics relavent for load balancing at
3736 * this level.
3737 */
3738 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3739 balance, &sds);
3740
b7bb4c9b
GS
3741 /* Cases where imbalance does not exist from POV of this_cpu */
3742 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3743 * at this level.
3744 * 2) There is no busy sibling group to pull from.
3745 * 3) This group is the busiest group.
3746 * 4) This group is more busy than the avg busieness at this
3747 * sched_domain.
3748 * 5) The imbalance is within the specified limit.
3749 * 6) Any rebalance would lead to ping-pong
3750 */
37abe198
GS
3751 if (balance && !(*balance))
3752 goto ret;
1da177e4 3753
b7bb4c9b
GS
3754 if (!sds.busiest || sds.busiest_nr_running == 0)
3755 goto out_balanced;
1da177e4 3756
b7bb4c9b 3757 if (sds.this_load >= sds.max_load)
1da177e4 3758 goto out_balanced;
1da177e4 3759
222d656d 3760 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
1da177e4 3761
b7bb4c9b
GS
3762 if (sds.this_load >= sds.avg_load)
3763 goto out_balanced;
3764
3765 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
1da177e4
LT
3766 goto out_balanced;
3767
222d656d
GS
3768 sds.busiest_load_per_task /= sds.busiest_nr_running;
3769 if (sds.group_imb)
3770 sds.busiest_load_per_task =
3771 min(sds.busiest_load_per_task, sds.avg_load);
908a7c1b 3772
1da177e4
LT
3773 /*
3774 * We're trying to get all the cpus to the average_load, so we don't
3775 * want to push ourselves above the average load, nor do we wish to
3776 * reduce the max loaded cpu below the average load, as either of these
3777 * actions would just result in more rebalancing later, and ping-pong
3778 * tasks around. Thus we look for the minimum possible imbalance.
3779 * Negative imbalances (*we* are more loaded than anyone else) will
3780 * be counted as no imbalance for these purposes -- we can't fix that
41a2d6cf 3781 * by pulling tasks to us. Be careful of negative numbers as they'll
1da177e4
LT
3782 * appear as very large values with unsigned longs.
3783 */
222d656d 3784 if (sds.max_load <= sds.busiest_load_per_task)
2dd73a4f
PW
3785 goto out_balanced;
3786
dbc523a3
GS
3787 /* Looks like there is an imbalance. Compute it */
3788 calculate_imbalance(&sds, this_cpu, imbalance);
222d656d 3789 return sds.busiest;
1da177e4
LT
3790
3791out_balanced:
c071df18
GS
3792 /*
3793 * There is no obvious imbalance. But check if we can do some balancing
3794 * to save power.
3795 */
3796 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3797 return sds.busiest;
783609c6 3798ret:
1da177e4
LT
3799 *imbalance = 0;
3800 return NULL;
3801}
3802
3803/*
3804 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3805 */
70b97a7f 3806static struct rq *
d15bcfdb 3807find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
96f874e2 3808 unsigned long imbalance, const struct cpumask *cpus)
1da177e4 3809{
70b97a7f 3810 struct rq *busiest = NULL, *rq;
2dd73a4f 3811 unsigned long max_load = 0;
1da177e4
LT
3812 int i;
3813
758b2cdc 3814 for_each_cpu(i, sched_group_cpus(group)) {
dd41f596 3815 unsigned long wl;
0a2966b4 3816
96f874e2 3817 if (!cpumask_test_cpu(i, cpus))
0a2966b4
CL
3818 continue;
3819
48f24c4d 3820 rq = cpu_rq(i);
dd41f596 3821 wl = weighted_cpuload(i);
2dd73a4f 3822
dd41f596 3823 if (rq->nr_running == 1 && wl > imbalance)
2dd73a4f 3824 continue;
1da177e4 3825
dd41f596
IM
3826 if (wl > max_load) {
3827 max_load = wl;
48f24c4d 3828 busiest = rq;
1da177e4
LT
3829 }
3830 }
3831
3832 return busiest;
3833}
3834
77391d71
NP
3835/*
3836 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
3837 * so long as it is large enough.
3838 */
3839#define MAX_PINNED_INTERVAL 512
3840
df7c8e84
RR
3841/* Working cpumask for load_balance and load_balance_newidle. */
3842static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
3843
1da177e4
LT
3844/*
3845 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3846 * tasks if there is an imbalance.
1da177e4 3847 */
70b97a7f 3848static int load_balance(int this_cpu, struct rq *this_rq,
d15bcfdb 3849 struct sched_domain *sd, enum cpu_idle_type idle,
df7c8e84 3850 int *balance)
1da177e4 3851{
43010659 3852 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
1da177e4 3853 struct sched_group *group;
1da177e4 3854 unsigned long imbalance;
70b97a7f 3855 struct rq *busiest;
fe2eea3f 3856 unsigned long flags;
df7c8e84 3857 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
5969fe06 3858
96f874e2 3859 cpumask_setall(cpus);
7c16ec58 3860
89c4710e
SS
3861 /*
3862 * When power savings policy is enabled for the parent domain, idle
3863 * sibling can pick up load irrespective of busy siblings. In this case,
dd41f596 3864 * let the state of idle sibling percolate up as CPU_IDLE, instead of
d15bcfdb 3865 * portraying it as CPU_NOT_IDLE.
89c4710e 3866 */
d15bcfdb 3867 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
89c4710e 3868 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
5969fe06 3869 sd_idle = 1;
1da177e4 3870
2d72376b 3871 schedstat_inc(sd, lb_count[idle]);
1da177e4 3872
0a2966b4 3873redo:
c8cba857 3874 update_shares(sd);
0a2966b4 3875 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
7c16ec58 3876 cpus, balance);
783609c6 3877
06066714 3878 if (*balance == 0)
783609c6 3879 goto out_balanced;
783609c6 3880
1da177e4
LT
3881 if (!group) {
3882 schedstat_inc(sd, lb_nobusyg[idle]);
3883 goto out_balanced;
3884 }
3885
7c16ec58 3886 busiest = find_busiest_queue(group, idle, imbalance, cpus);
1da177e4
LT
3887 if (!busiest) {
3888 schedstat_inc(sd, lb_nobusyq[idle]);
3889 goto out_balanced;
3890 }
3891
db935dbd 3892 BUG_ON(busiest == this_rq);
1da177e4
LT
3893
3894 schedstat_add(sd, lb_imbalance[idle], imbalance);
3895
43010659 3896 ld_moved = 0;
1da177e4
LT
3897 if (busiest->nr_running > 1) {
3898 /*
3899 * Attempt to move tasks. If find_busiest_group has found
3900 * an imbalance but busiest->nr_running <= 1, the group is
43010659 3901 * still unbalanced. ld_moved simply stays zero, so it is
1da177e4
LT
3902 * correctly treated as an imbalance.
3903 */
fe2eea3f 3904 local_irq_save(flags);
e17224bf 3905 double_rq_lock(this_rq, busiest);
43010659 3906 ld_moved = move_tasks(this_rq, this_cpu, busiest,
48f24c4d 3907 imbalance, sd, idle, &all_pinned);
e17224bf 3908 double_rq_unlock(this_rq, busiest);
fe2eea3f 3909 local_irq_restore(flags);
81026794 3910
46cb4b7c
SS
3911 /*
3912 * some other cpu did the load balance for us.
3913 */
43010659 3914 if (ld_moved && this_cpu != smp_processor_id())
46cb4b7c
SS
3915 resched_cpu(this_cpu);
3916
81026794 3917 /* All tasks on this runqueue were pinned by CPU affinity */
0a2966b4 3918 if (unlikely(all_pinned)) {
96f874e2
RR
3919 cpumask_clear_cpu(cpu_of(busiest), cpus);
3920 if (!cpumask_empty(cpus))
0a2966b4 3921 goto redo;
81026794 3922 goto out_balanced;
0a2966b4 3923 }
1da177e4 3924 }
81026794 3925
43010659 3926 if (!ld_moved) {
1da177e4
LT
3927 schedstat_inc(sd, lb_failed[idle]);
3928 sd->nr_balance_failed++;
3929
3930 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
1da177e4 3931
fe2eea3f 3932 spin_lock_irqsave(&busiest->lock, flags);
fa3b6ddc
SS
3933
3934 /* don't kick the migration_thread, if the curr
3935 * task on busiest cpu can't be moved to this_cpu
3936 */
96f874e2
RR
3937 if (!cpumask_test_cpu(this_cpu,
3938 &busiest->curr->cpus_allowed)) {
fe2eea3f 3939 spin_unlock_irqrestore(&busiest->lock, flags);
fa3b6ddc
SS
3940 all_pinned = 1;
3941 goto out_one_pinned;
3942 }
3943
1da177e4
LT
3944 if (!busiest->active_balance) {
3945 busiest->active_balance = 1;
3946 busiest->push_cpu = this_cpu;
81026794 3947 active_balance = 1;
1da177e4 3948 }
fe2eea3f 3949 spin_unlock_irqrestore(&busiest->lock, flags);
81026794 3950 if (active_balance)
1da177e4
LT
3951 wake_up_process(busiest->migration_thread);
3952
3953 /*
3954 * We've kicked active balancing, reset the failure
3955 * counter.
3956 */
39507451 3957 sd->nr_balance_failed = sd->cache_nice_tries+1;
1da177e4 3958 }
81026794 3959 } else
1da177e4
LT
3960 sd->nr_balance_failed = 0;
3961
81026794 3962 if (likely(!active_balance)) {
1da177e4
LT
3963 /* We were unbalanced, so reset the balancing interval */
3964 sd->balance_interval = sd->min_interval;
81026794
NP
3965 } else {
3966 /*
3967 * If we've begun active balancing, start to back off. This
3968 * case may not be covered by the all_pinned logic if there
3969 * is only 1 task on the busy runqueue (because we don't call
3970 * move_tasks).
3971 */
3972 if (sd->balance_interval < sd->max_interval)
3973 sd->balance_interval *= 2;
1da177e4
LT
3974 }
3975
43010659 3976 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
89c4710e 3977 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
c09595f6
PZ
3978 ld_moved = -1;
3979
3980 goto out;
1da177e4
LT
3981
3982out_balanced:
1da177e4
LT
3983 schedstat_inc(sd, lb_balanced[idle]);
3984
16cfb1c0 3985 sd->nr_balance_failed = 0;
fa3b6ddc
SS
3986
3987out_one_pinned:
1da177e4 3988 /* tune up the balancing interval */
77391d71
NP
3989 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3990 (sd->balance_interval < sd->max_interval))
1da177e4
LT
3991 sd->balance_interval *= 2;
3992
48f24c4d 3993 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
89c4710e 3994 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
c09595f6
PZ
3995 ld_moved = -1;
3996 else
3997 ld_moved = 0;
3998out:
c8cba857
PZ
3999 if (ld_moved)
4000 update_shares(sd);
c09595f6 4001 return ld_moved;
1da177e4
LT
4002}
4003
4004/*
4005 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4006 * tasks if there is an imbalance.
4007 *
d15bcfdb 4008 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
1da177e4
LT
4009 * this_rq is locked.
4010 */
48f24c4d 4011static int
df7c8e84 4012load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
1da177e4
LT
4013{
4014 struct sched_group *group;
70b97a7f 4015 struct rq *busiest = NULL;
1da177e4 4016 unsigned long imbalance;
43010659 4017 int ld_moved = 0;
5969fe06 4018 int sd_idle = 0;
969bb4e4 4019 int all_pinned = 0;
df7c8e84 4020 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
7c16ec58 4021
96f874e2 4022 cpumask_setall(cpus);
5969fe06 4023
89c4710e
SS
4024 /*
4025 * When power savings policy is enabled for the parent domain, idle
4026 * sibling can pick up load irrespective of busy siblings. In this case,
4027 * let the state of idle sibling percolate up as IDLE, instead of
d15bcfdb 4028 * portraying it as CPU_NOT_IDLE.
89c4710e
SS
4029 */
4030 if (sd->flags & SD_SHARE_CPUPOWER &&
4031 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
5969fe06 4032 sd_idle = 1;
1da177e4 4033
2d72376b 4034 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
0a2966b4 4035redo:
3e5459b4 4036 update_shares_locked(this_rq, sd);
d15bcfdb 4037 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
7c16ec58 4038 &sd_idle, cpus, NULL);
1da177e4 4039 if (!group) {
d15bcfdb 4040 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
16cfb1c0 4041 goto out_balanced;
1da177e4
LT
4042 }
4043
7c16ec58 4044 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
db935dbd 4045 if (!busiest) {
d15bcfdb 4046 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
16cfb1c0 4047 goto out_balanced;
1da177e4
LT
4048 }
4049
db935dbd
NP
4050 BUG_ON(busiest == this_rq);
4051
d15bcfdb 4052 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
d6d5cfaf 4053
43010659 4054 ld_moved = 0;
d6d5cfaf
NP
4055 if (busiest->nr_running > 1) {
4056 /* Attempt to move tasks */
4057 double_lock_balance(this_rq, busiest);
6e82a3be
IM
4058 /* this_rq->clock is already updated */
4059 update_rq_clock(busiest);
43010659 4060 ld_moved = move_tasks(this_rq, this_cpu, busiest,
969bb4e4
SS
4061 imbalance, sd, CPU_NEWLY_IDLE,
4062 &all_pinned);
1b12bbc7 4063 double_unlock_balance(this_rq, busiest);
0a2966b4 4064
969bb4e4 4065 if (unlikely(all_pinned)) {
96f874e2
RR
4066 cpumask_clear_cpu(cpu_of(busiest), cpus);
4067 if (!cpumask_empty(cpus))
0a2966b4
CL
4068 goto redo;
4069 }
d6d5cfaf
NP
4070 }
4071
43010659 4072 if (!ld_moved) {
36dffab6 4073 int active_balance = 0;
ad273b32 4074
d15bcfdb 4075 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
89c4710e
SS
4076 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4077 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
5969fe06 4078 return -1;
ad273b32
VS
4079
4080 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
4081 return -1;
4082
4083 if (sd->nr_balance_failed++ < 2)
4084 return -1;
4085
4086 /*
4087 * The only task running in a non-idle cpu can be moved to this
4088 * cpu in an attempt to completely freeup the other CPU
4089 * package. The same method used to move task in load_balance()
4090 * have been extended for load_balance_newidle() to speedup
4091 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
4092 *
4093 * The package power saving logic comes from
4094 * find_busiest_group(). If there are no imbalance, then
4095 * f_b_g() will return NULL. However when sched_mc={1,2} then
4096 * f_b_g() will select a group from which a running task may be
4097 * pulled to this cpu in order to make the other package idle.
4098 * If there is no opportunity to make a package idle and if
4099 * there are no imbalance, then f_b_g() will return NULL and no
4100 * action will be taken in load_balance_newidle().
4101 *
4102 * Under normal task pull operation due to imbalance, there
4103 * will be more than one task in the source run queue and
4104 * move_tasks() will succeed. ld_moved will be true and this
4105 * active balance code will not be triggered.
4106 */
4107
4108 /* Lock busiest in correct order while this_rq is held */
4109 double_lock_balance(this_rq, busiest);
4110
4111 /*
4112 * don't kick the migration_thread, if the curr
4113 * task on busiest cpu can't be moved to this_cpu
4114 */
6ca09dfc 4115 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
ad273b32
VS
4116 double_unlock_balance(this_rq, busiest);
4117 all_pinned = 1;
4118 return ld_moved;
4119 }
4120
4121 if (!busiest->active_balance) {
4122 busiest->active_balance = 1;
4123 busiest->push_cpu = this_cpu;
4124 active_balance = 1;
4125 }
4126
4127 double_unlock_balance(this_rq, busiest);
da8d5089
PZ
4128 /*
4129 * Should not call ttwu while holding a rq->lock
4130 */
4131 spin_unlock(&this_rq->lock);
ad273b32
VS
4132 if (active_balance)
4133 wake_up_process(busiest->migration_thread);
da8d5089 4134 spin_lock(&this_rq->lock);
ad273b32 4135
5969fe06 4136 } else
16cfb1c0 4137 sd->nr_balance_failed = 0;
1da177e4 4138
3e5459b4 4139 update_shares_locked(this_rq, sd);
43010659 4140 return ld_moved;
16cfb1c0
NP
4141
4142out_balanced:
d15bcfdb 4143 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
48f24c4d 4144 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
89c4710e 4145 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
5969fe06 4146 return -1;
16cfb1c0 4147 sd->nr_balance_failed = 0;
48f24c4d 4148
16cfb1c0 4149 return 0;
1da177e4
LT
4150}
4151
4152/*
4153 * idle_balance is called by schedule() if this_cpu is about to become
4154 * idle. Attempts to pull tasks from other CPUs.
4155 */
70b97a7f 4156static void idle_balance(int this_cpu, struct rq *this_rq)
1da177e4
LT
4157{
4158 struct sched_domain *sd;
efbe027e 4159 int pulled_task = 0;
dd41f596 4160 unsigned long next_balance = jiffies + HZ;
1da177e4
LT
4161
4162 for_each_domain(this_cpu, sd) {
92c4ca5c
CL
4163 unsigned long interval;
4164
4165 if (!(sd->flags & SD_LOAD_BALANCE))
4166 continue;
4167
4168 if (sd->flags & SD_BALANCE_NEWIDLE)
48f24c4d 4169 /* If we've pulled tasks over stop searching: */
7c16ec58 4170 pulled_task = load_balance_newidle(this_cpu, this_rq,
df7c8e84 4171 sd);
92c4ca5c
CL
4172
4173 interval = msecs_to_jiffies(sd->balance_interval);
4174 if (time_after(next_balance, sd->last_balance + interval))
4175 next_balance = sd->last_balance + interval;
4176 if (pulled_task)
4177 break;
1da177e4 4178 }
dd41f596 4179 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
1bd77f2d
CL
4180 /*
4181 * We are going idle. next_balance may be set based on
4182 * a busy processor. So reset next_balance.
4183 */
4184 this_rq->next_balance = next_balance;
dd41f596 4185 }
1da177e4
LT
4186}
4187
4188/*
4189 * active_load_balance is run by migration threads. It pushes running tasks
4190 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
4191 * running on each physical CPU where possible, and avoids physical /
4192 * logical imbalances.
4193 *
4194 * Called with busiest_rq locked.
4195 */
70b97a7f 4196static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
1da177e4 4197{
39507451 4198 int target_cpu = busiest_rq->push_cpu;
70b97a7f
IM
4199 struct sched_domain *sd;
4200 struct rq *target_rq;
39507451 4201
48f24c4d 4202 /* Is there any task to move? */
39507451 4203 if (busiest_rq->nr_running <= 1)
39507451
NP
4204 return;
4205
4206 target_rq = cpu_rq(target_cpu);
1da177e4
LT
4207
4208 /*
39507451 4209 * This condition is "impossible", if it occurs
41a2d6cf 4210 * we need to fix it. Originally reported by
39507451 4211 * Bjorn Helgaas on a 128-cpu setup.
1da177e4 4212 */
39507451 4213 BUG_ON(busiest_rq == target_rq);
1da177e4 4214
39507451
NP
4215 /* move a task from busiest_rq to target_rq */
4216 double_lock_balance(busiest_rq, target_rq);
6e82a3be
IM
4217 update_rq_clock(busiest_rq);
4218 update_rq_clock(target_rq);
39507451
NP
4219
4220 /* Search for an sd spanning us and the target CPU. */
c96d145e 4221 for_each_domain(target_cpu, sd) {
39507451 4222 if ((sd->flags & SD_LOAD_BALANCE) &&
758b2cdc 4223 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
39507451 4224 break;
c96d145e 4225 }
39507451 4226
48f24c4d 4227 if (likely(sd)) {
2d72376b 4228 schedstat_inc(sd, alb_count);
39507451 4229
43010659
PW
4230 if (move_one_task(target_rq, target_cpu, busiest_rq,
4231 sd, CPU_IDLE))
48f24c4d
IM
4232 schedstat_inc(sd, alb_pushed);
4233 else
4234 schedstat_inc(sd, alb_failed);
4235 }
1b12bbc7 4236 double_unlock_balance(busiest_rq, target_rq);
1da177e4
LT
4237}
4238
46cb4b7c
SS
4239#ifdef CONFIG_NO_HZ
4240static struct {
4241 atomic_t load_balancer;
7d1e6a9b 4242 cpumask_var_t cpu_mask;
f711f609 4243 cpumask_var_t ilb_grp_nohz_mask;
46cb4b7c
SS
4244} nohz ____cacheline_aligned = {
4245 .load_balancer = ATOMIC_INIT(-1),
46cb4b7c
SS
4246};
4247
f711f609
GS
4248#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4249/**
4250 * lowest_flag_domain - Return lowest sched_domain containing flag.
4251 * @cpu: The cpu whose lowest level of sched domain is to
4252 * be returned.
4253 * @flag: The flag to check for the lowest sched_domain
4254 * for the given cpu.
4255 *
4256 * Returns the lowest sched_domain of a cpu which contains the given flag.
4257 */
4258static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
4259{
4260 struct sched_domain *sd;
4261
4262 for_each_domain(cpu, sd)
4263 if (sd && (sd->flags & flag))
4264 break;
4265
4266 return sd;
4267}
4268
4269/**
4270 * for_each_flag_domain - Iterates over sched_domains containing the flag.
4271 * @cpu: The cpu whose domains we're iterating over.
4272 * @sd: variable holding the value of the power_savings_sd
4273 * for cpu.
4274 * @flag: The flag to filter the sched_domains to be iterated.
4275 *
4276 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
4277 * set, starting from the lowest sched_domain to the highest.
4278 */
4279#define for_each_flag_domain(cpu, sd, flag) \
4280 for (sd = lowest_flag_domain(cpu, flag); \
4281 (sd && (sd->flags & flag)); sd = sd->parent)
4282
4283/**
4284 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
4285 * @ilb_group: group to be checked for semi-idleness
4286 *
4287 * Returns: 1 if the group is semi-idle. 0 otherwise.
4288 *
4289 * We define a sched_group to be semi idle if it has atleast one idle-CPU
4290 * and atleast one non-idle CPU. This helper function checks if the given
4291 * sched_group is semi-idle or not.
4292 */
4293static inline int is_semi_idle_group(struct sched_group *ilb_group)
4294{
4295 cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
4296 sched_group_cpus(ilb_group));
4297
4298 /*
4299 * A sched_group is semi-idle when it has atleast one busy cpu
4300 * and atleast one idle cpu.
4301 */
4302 if (cpumask_empty(nohz.ilb_grp_nohz_mask))
4303 return 0;
4304
4305 if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
4306 return 0;
4307
4308 return 1;
4309}
4310/**
4311 * find_new_ilb - Finds the optimum idle load balancer for nomination.
4312 * @cpu: The cpu which is nominating a new idle_load_balancer.
4313 *
4314 * Returns: Returns the id of the idle load balancer if it exists,
4315 * Else, returns >= nr_cpu_ids.
4316 *
4317 * This algorithm picks the idle load balancer such that it belongs to a
4318 * semi-idle powersavings sched_domain. The idea is to try and avoid
4319 * completely idle packages/cores just for the purpose of idle load balancing
4320 * when there are other idle cpu's which are better suited for that job.
4321 */
4322static int find_new_ilb(int cpu)
4323{
4324 struct sched_domain *sd;
4325 struct sched_group *ilb_group;
4326
4327 /*
4328 * Have idle load balancer selection from semi-idle packages only
4329 * when power-aware load balancing is enabled
4330 */
4331 if (!(sched_smt_power_savings || sched_mc_power_savings))
4332 goto out_done;
4333
4334 /*
4335 * Optimize for the case when we have no idle CPUs or only one
4336 * idle CPU. Don't walk the sched_domain hierarchy in such cases
4337 */
4338 if (cpumask_weight(nohz.cpu_mask) < 2)
4339 goto out_done;
4340
4341 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
4342 ilb_group = sd->groups;
4343
4344 do {
4345 if (is_semi_idle_group(ilb_group))
4346 return cpumask_first(nohz.ilb_grp_nohz_mask);
4347
4348 ilb_group = ilb_group->next;
4349
4350 } while (ilb_group != sd->groups);
4351 }
4352
4353out_done:
4354 return cpumask_first(nohz.cpu_mask);
4355}
4356#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
4357static inline int find_new_ilb(int call_cpu)
4358{
4359 return first_cpu(nohz.cpu_mask);
4360}
4361#endif
4362
7835b98b 4363/*
46cb4b7c
SS
4364 * This routine will try to nominate the ilb (idle load balancing)
4365 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
4366 * load balancing on behalf of all those cpus. If all the cpus in the system
4367 * go into this tickless mode, then there will be no ilb owner (as there is
4368 * no need for one) and all the cpus will sleep till the next wakeup event
4369 * arrives...
4370 *
4371 * For the ilb owner, tick is not stopped. And this tick will be used
4372 * for idle load balancing. ilb owner will still be part of
4373 * nohz.cpu_mask..
7835b98b 4374 *
46cb4b7c
SS
4375 * While stopping the tick, this cpu will become the ilb owner if there
4376 * is no other owner. And will be the owner till that cpu becomes busy
4377 * or if all cpus in the system stop their ticks at which point
4378 * there is no need for ilb owner.
4379 *
4380 * When the ilb owner becomes busy, it nominates another owner, during the
4381 * next busy scheduler_tick()
4382 */
4383int select_nohz_load_balancer(int stop_tick)
4384{
4385 int cpu = smp_processor_id();
4386
4387 if (stop_tick) {
46cb4b7c
SS
4388 cpu_rq(cpu)->in_nohz_recently = 1;
4389
483b4ee6
SS
4390 if (!cpu_active(cpu)) {
4391 if (atomic_read(&nohz.load_balancer) != cpu)
4392 return 0;
4393
4394 /*
4395 * If we are going offline and still the leader,
4396 * give up!
4397 */
46cb4b7c
SS
4398 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4399 BUG();
483b4ee6 4400
46cb4b7c
SS
4401 return 0;
4402 }
4403
483b4ee6
SS
4404 cpumask_set_cpu(cpu, nohz.cpu_mask);
4405
46cb4b7c 4406 /* time for ilb owner also to sleep */
7d1e6a9b 4407 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
46cb4b7c
SS
4408 if (atomic_read(&nohz.load_balancer) == cpu)
4409 atomic_set(&nohz.load_balancer, -1);
4410 return 0;
4411 }
4412
4413 if (atomic_read(&nohz.load_balancer) == -1) {
4414 /* make me the ilb owner */
4415 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
4416 return 1;
4417 } else if (atomic_read(&nohz.load_balancer) == cpu)
4418 return 1;
4419 } else {
7d1e6a9b 4420 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
46cb4b7c
SS
4421 return 0;
4422
7d1e6a9b 4423 cpumask_clear_cpu(cpu, nohz.cpu_mask);
46cb4b7c
SS
4424
4425 if (atomic_read(&nohz.load_balancer) == cpu)
4426 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4427 BUG();
4428 }
4429 return 0;
4430}
4431#endif
4432
4433static DEFINE_SPINLOCK(balancing);
4434
4435/*
7835b98b
CL
4436 * It checks each scheduling domain to see if it is due to be balanced,
4437 * and initiates a balancing operation if so.
4438 *
4439 * Balancing parameters are set up in arch_init_sched_domains.
4440 */
a9957449 4441static void rebalance_domains(int cpu, enum cpu_idle_type idle)
7835b98b 4442{
46cb4b7c
SS
4443 int balance = 1;
4444 struct rq *rq = cpu_rq(cpu);
7835b98b
CL
4445 unsigned long interval;
4446 struct sched_domain *sd;
46cb4b7c 4447 /* Earliest time when we have to do rebalance again */
c9819f45 4448 unsigned long next_balance = jiffies + 60*HZ;
f549da84 4449 int update_next_balance = 0;
d07355f5 4450 int need_serialize;
1da177e4 4451
46cb4b7c 4452 for_each_domain(cpu, sd) {
1da177e4
LT
4453 if (!(sd->flags & SD_LOAD_BALANCE))
4454 continue;
4455
4456 interval = sd->balance_interval;
d15bcfdb 4457 if (idle != CPU_IDLE)
1da177e4
LT
4458 interval *= sd->busy_factor;
4459
4460 /* scale ms to jiffies */
4461 interval = msecs_to_jiffies(interval);
4462 if (unlikely(!interval))
4463 interval = 1;
dd41f596
IM
4464 if (interval > HZ*NR_CPUS/10)
4465 interval = HZ*NR_CPUS/10;
4466
d07355f5 4467 need_serialize = sd->flags & SD_SERIALIZE;
1da177e4 4468
d07355f5 4469 if (need_serialize) {
08c183f3
CL
4470 if (!spin_trylock(&balancing))
4471 goto out;
4472 }
4473
c9819f45 4474 if (time_after_eq(jiffies, sd->last_balance + interval)) {
df7c8e84 4475 if (load_balance(cpu, rq, sd, idle, &balance)) {
fa3b6ddc
SS
4476 /*
4477 * We've pulled tasks over so either we're no
5969fe06
NP
4478 * longer idle, or one of our SMT siblings is
4479 * not idle.
4480 */
d15bcfdb 4481 idle = CPU_NOT_IDLE;
1da177e4 4482 }
1bd77f2d 4483 sd->last_balance = jiffies;
1da177e4 4484 }
d07355f5 4485 if (need_serialize)
08c183f3
CL
4486 spin_unlock(&balancing);
4487out:
f549da84 4488 if (time_after(next_balance, sd->last_balance + interval)) {
c9819f45 4489 next_balance = sd->last_balance + interval;
f549da84
SS
4490 update_next_balance = 1;
4491 }
783609c6
SS
4492
4493 /*
4494 * Stop the load balance at this level. There is another
4495 * CPU in our sched group which is doing load balancing more
4496 * actively.
4497 */
4498 if (!balance)
4499 break;
1da177e4 4500 }
f549da84
SS
4501
4502 /*
4503 * next_balance will be updated only when there is a need.
4504 * When the cpu is attached to null domain for ex, it will not be
4505 * updated.
4506 */
4507 if (likely(update_next_balance))
4508 rq->next_balance = next_balance;
46cb4b7c
SS
4509}
4510
4511/*
4512 * run_rebalance_domains is triggered when needed from the scheduler tick.
4513 * In CONFIG_NO_HZ case, the idle load balance owner will do the
4514 * rebalancing for all the cpus for whom scheduler ticks are stopped.
4515 */
4516static void run_rebalance_domains(struct softirq_action *h)
4517{
dd41f596
IM
4518 int this_cpu = smp_processor_id();
4519 struct rq *this_rq = cpu_rq(this_cpu);
4520 enum cpu_idle_type idle = this_rq->idle_at_tick ?
4521 CPU_IDLE : CPU_NOT_IDLE;
46cb4b7c 4522
dd41f596 4523 rebalance_domains(this_cpu, idle);
46cb4b7c
SS
4524
4525#ifdef CONFIG_NO_HZ
4526 /*
4527 * If this cpu is the owner for idle load balancing, then do the
4528 * balancing on behalf of the other idle cpus whose ticks are
4529 * stopped.
4530 */
dd41f596
IM
4531 if (this_rq->idle_at_tick &&
4532 atomic_read(&nohz.load_balancer) == this_cpu) {
46cb4b7c
SS
4533 struct rq *rq;
4534 int balance_cpu;
4535
7d1e6a9b
RR
4536 for_each_cpu(balance_cpu, nohz.cpu_mask) {
4537 if (balance_cpu == this_cpu)
4538 continue;
4539
46cb4b7c
SS
4540 /*
4541 * If this cpu gets work to do, stop the load balancing
4542 * work being done for other cpus. Next load
4543 * balancing owner will pick it up.
4544 */
4545 if (need_resched())
4546 break;
4547
de0cf899 4548 rebalance_domains(balance_cpu, CPU_IDLE);
46cb4b7c
SS
4549
4550 rq = cpu_rq(balance_cpu);
dd41f596
IM
4551 if (time_after(this_rq->next_balance, rq->next_balance))
4552 this_rq->next_balance = rq->next_balance;
46cb4b7c
SS
4553 }
4554 }
4555#endif
4556}
4557
8a0be9ef
FW
4558static inline int on_null_domain(int cpu)
4559{
4560 return !rcu_dereference(cpu_rq(cpu)->sd);
4561}
4562
46cb4b7c
SS
4563/*
4564 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4565 *
4566 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
4567 * idle load balancing owner or decide to stop the periodic load balancing,
4568 * if the whole system is idle.
4569 */
dd41f596 4570static inline void trigger_load_balance(struct rq *rq, int cpu)
46cb4b7c 4571{
46cb4b7c
SS
4572#ifdef CONFIG_NO_HZ
4573 /*
4574 * If we were in the nohz mode recently and busy at the current
4575 * scheduler tick, then check if we need to nominate new idle
4576 * load balancer.
4577 */
4578 if (rq->in_nohz_recently && !rq->idle_at_tick) {
4579 rq->in_nohz_recently = 0;
4580
4581 if (atomic_read(&nohz.load_balancer) == cpu) {
7d1e6a9b 4582 cpumask_clear_cpu(cpu, nohz.cpu_mask);
46cb4b7c
SS
4583 atomic_set(&nohz.load_balancer, -1);
4584 }
4585
4586 if (atomic_read(&nohz.load_balancer) == -1) {
f711f609 4587 int ilb = find_new_ilb(cpu);
46cb4b7c 4588
434d53b0 4589 if (ilb < nr_cpu_ids)
46cb4b7c
SS
4590 resched_cpu(ilb);
4591 }
4592 }
4593
4594 /*
4595 * If this cpu is idle and doing idle load balancing for all the
4596 * cpus with ticks stopped, is it time for that to stop?
4597 */
4598 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
7d1e6a9b 4599 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
46cb4b7c
SS
4600 resched_cpu(cpu);
4601 return;
4602 }
4603
4604 /*
4605 * If this cpu is idle and the idle load balancing is done by
4606 * someone else, then no need raise the SCHED_SOFTIRQ
4607 */
4608 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
7d1e6a9b 4609 cpumask_test_cpu(cpu, nohz.cpu_mask))
46cb4b7c
SS
4610 return;
4611#endif
8a0be9ef
FW
4612 /* Don't need to rebalance while attached to NULL domain */
4613 if (time_after_eq(jiffies, rq->next_balance) &&
4614 likely(!on_null_domain(cpu)))
46cb4b7c 4615 raise_softirq(SCHED_SOFTIRQ);
1da177e4 4616}
dd41f596
IM
4617
4618#else /* CONFIG_SMP */
4619
1da177e4
LT
4620/*
4621 * on UP we do not need to balance between CPUs:
4622 */
70b97a7f 4623static inline void idle_balance(int cpu, struct rq *rq)
1da177e4
LT
4624{
4625}
dd41f596 4626
1da177e4
LT
4627#endif
4628
1da177e4
LT
4629DEFINE_PER_CPU(struct kernel_stat, kstat);
4630
4631EXPORT_PER_CPU_SYMBOL(kstat);
4632
4633/*
c5f8d995 4634 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 4635 * @p in case that task is currently running.
c5f8d995
HS
4636 *
4637 * Called with task_rq_lock() held on @rq.
1da177e4 4638 */
c5f8d995
HS
4639static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
4640{
4641 u64 ns = 0;
4642
4643 if (task_current(rq, p)) {
4644 update_rq_clock(rq);
4645 ns = rq->clock - p->se.exec_start;
4646 if ((s64)ns < 0)
4647 ns = 0;
4648 }
4649
4650 return ns;
4651}
4652
bb34d92f 4653unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 4654{
1da177e4 4655 unsigned long flags;
41b86e9c 4656 struct rq *rq;
bb34d92f 4657 u64 ns = 0;
48f24c4d 4658
41b86e9c 4659 rq = task_rq_lock(p, &flags);
c5f8d995
HS
4660 ns = do_task_delta_exec(p, rq);
4661 task_rq_unlock(rq, &flags);
1508487e 4662
c5f8d995
HS
4663 return ns;
4664}
f06febc9 4665
c5f8d995
HS
4666/*
4667 * Return accounted runtime for the task.
4668 * In case the task is currently running, return the runtime plus current's
4669 * pending runtime that have not been accounted yet.
4670 */
4671unsigned long long task_sched_runtime(struct task_struct *p)
4672{
4673 unsigned long flags;
4674 struct rq *rq;
4675 u64 ns = 0;
4676
4677 rq = task_rq_lock(p, &flags);
4678 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
4679 task_rq_unlock(rq, &flags);
4680
4681 return ns;
4682}
48f24c4d 4683
c5f8d995
HS
4684/*
4685 * Return sum_exec_runtime for the thread group.
4686 * In case the task is currently running, return the sum plus current's
4687 * pending runtime that have not been accounted yet.
4688 *
4689 * Note that the thread group might have other running tasks as well,
4690 * so the return value not includes other pending runtime that other
4691 * running tasks might have.
4692 */
4693unsigned long long thread_group_sched_runtime(struct task_struct *p)
4694{
4695 struct task_cputime totals;
4696 unsigned long flags;
4697 struct rq *rq;
4698 u64 ns;
4699
4700 rq = task_rq_lock(p, &flags);
4701 thread_group_cputime(p, &totals);
4702 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
41b86e9c 4703 task_rq_unlock(rq, &flags);
48f24c4d 4704
1da177e4
LT
4705 return ns;
4706}
4707
1da177e4
LT
4708/*
4709 * Account user cpu time to a process.
4710 * @p: the process that the cpu time gets accounted to
1da177e4 4711 * @cputime: the cpu time spent in user space since the last update
457533a7 4712 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4 4713 */
457533a7
MS
4714void account_user_time(struct task_struct *p, cputime_t cputime,
4715 cputime_t cputime_scaled)
1da177e4
LT
4716{
4717 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4718 cputime64_t tmp;
4719
457533a7 4720 /* Add user time to process. */
1da177e4 4721 p->utime = cputime_add(p->utime, cputime);
457533a7 4722 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 4723 account_group_user_time(p, cputime);
1da177e4
LT
4724
4725 /* Add user time to cpustat. */
4726 tmp = cputime_to_cputime64(cputime);
4727 if (TASK_NICE(p) > 0)
4728 cpustat->nice = cputime64_add(cpustat->nice, tmp);
4729 else
4730 cpustat->user = cputime64_add(cpustat->user, tmp);
ef12fefa
BR
4731
4732 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
49b5cf34
JL
4733 /* Account for user time used */
4734 acct_update_integrals(p);
1da177e4
LT
4735}
4736
94886b84
LV
4737/*
4738 * Account guest cpu time to a process.
4739 * @p: the process that the cpu time gets accounted to
4740 * @cputime: the cpu time spent in virtual machine since the last update
457533a7 4741 * @cputime_scaled: cputime scaled by cpu frequency
94886b84 4742 */
457533a7
MS
4743static void account_guest_time(struct task_struct *p, cputime_t cputime,
4744 cputime_t cputime_scaled)
94886b84
LV
4745{
4746 cputime64_t tmp;
4747 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4748
4749 tmp = cputime_to_cputime64(cputime);
4750
457533a7 4751 /* Add guest time to process. */
94886b84 4752 p->utime = cputime_add(p->utime, cputime);
457533a7 4753 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 4754 account_group_user_time(p, cputime);
94886b84
LV
4755 p->gtime = cputime_add(p->gtime, cputime);
4756
457533a7 4757 /* Add guest time to cpustat. */
94886b84
LV
4758 cpustat->user = cputime64_add(cpustat->user, tmp);
4759 cpustat->guest = cputime64_add(cpustat->guest, tmp);
4760}
4761
1da177e4
LT
4762/*
4763 * Account system cpu time to a process.
4764 * @p: the process that the cpu time gets accounted to
4765 * @hardirq_offset: the offset to subtract from hardirq_count()
4766 * @cputime: the cpu time spent in kernel space since the last update
457533a7 4767 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4
LT
4768 */
4769void account_system_time(struct task_struct *p, int hardirq_offset,
457533a7 4770 cputime_t cputime, cputime_t cputime_scaled)
1da177e4
LT
4771{
4772 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1da177e4
LT
4773 cputime64_t tmp;
4774
983ed7a6 4775 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
457533a7 4776 account_guest_time(p, cputime, cputime_scaled);
983ed7a6
HH
4777 return;
4778 }
94886b84 4779
457533a7 4780 /* Add system time to process. */
1da177e4 4781 p->stime = cputime_add(p->stime, cputime);
457533a7 4782 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
f06febc9 4783 account_group_system_time(p, cputime);
1da177e4
LT
4784
4785 /* Add system time to cpustat. */
4786 tmp = cputime_to_cputime64(cputime);
4787 if (hardirq_count() - hardirq_offset)
4788 cpustat->irq = cputime64_add(cpustat->irq, tmp);
4789 else if (softirq_count())
4790 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
1da177e4 4791 else
79741dd3
MS
4792 cpustat->system = cputime64_add(cpustat->system, tmp);
4793
ef12fefa
BR
4794 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
4795
1da177e4
LT
4796 /* Account for system time used */
4797 acct_update_integrals(p);
1da177e4
LT
4798}
4799
c66f08be 4800/*
1da177e4 4801 * Account for involuntary wait time.
1da177e4 4802 * @steal: the cpu time spent in involuntary wait
c66f08be 4803 */
79741dd3 4804void account_steal_time(cputime_t cputime)
c66f08be 4805{
79741dd3
MS
4806 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4807 cputime64_t cputime64 = cputime_to_cputime64(cputime);
4808
4809 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
c66f08be
MN
4810}
4811
1da177e4 4812/*
79741dd3
MS
4813 * Account for idle time.
4814 * @cputime: the cpu time spent in idle wait
1da177e4 4815 */
79741dd3 4816void account_idle_time(cputime_t cputime)
1da177e4
LT
4817{
4818 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
79741dd3 4819 cputime64_t cputime64 = cputime_to_cputime64(cputime);
70b97a7f 4820 struct rq *rq = this_rq();
1da177e4 4821
79741dd3
MS
4822 if (atomic_read(&rq->nr_iowait) > 0)
4823 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
4824 else
4825 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
1da177e4
LT
4826}
4827
79741dd3
MS
4828#ifndef CONFIG_VIRT_CPU_ACCOUNTING
4829
4830/*
4831 * Account a single tick of cpu time.
4832 * @p: the process that the cpu time gets accounted to
4833 * @user_tick: indicates if the tick is a user or a system tick
4834 */
4835void account_process_tick(struct task_struct *p, int user_tick)
4836{
4837 cputime_t one_jiffy = jiffies_to_cputime(1);
4838 cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
4839 struct rq *rq = this_rq();
4840
4841 if (user_tick)
4842 account_user_time(p, one_jiffy, one_jiffy_scaled);
4843 else if (p != rq->idle)
4844 account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
4845 one_jiffy_scaled);
4846 else
4847 account_idle_time(one_jiffy);
4848}
4849
4850/*
4851 * Account multiple ticks of steal time.
4852 * @p: the process from which the cpu time has been stolen
4853 * @ticks: number of stolen ticks
4854 */
4855void account_steal_ticks(unsigned long ticks)
4856{
4857 account_steal_time(jiffies_to_cputime(ticks));
4858}
4859
4860/*
4861 * Account multiple ticks of idle time.
4862 * @ticks: number of stolen ticks
4863 */
4864void account_idle_ticks(unsigned long ticks)
4865{
4866 account_idle_time(jiffies_to_cputime(ticks));
1da177e4
LT
4867}
4868
79741dd3
MS
4869#endif
4870
49048622
BS
4871/*
4872 * Use precise platform statistics if available:
4873 */
4874#ifdef CONFIG_VIRT_CPU_ACCOUNTING
4875cputime_t task_utime(struct task_struct *p)
4876{
4877 return p->utime;
4878}
4879
4880cputime_t task_stime(struct task_struct *p)
4881{
4882 return p->stime;
4883}
4884#else
4885cputime_t task_utime(struct task_struct *p)
4886{
4887 clock_t utime = cputime_to_clock_t(p->utime),
4888 total = utime + cputime_to_clock_t(p->stime);
4889 u64 temp;
4890
4891 /*
4892 * Use CFS's precise accounting:
4893 */
4894 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
4895
4896 if (total) {
4897 temp *= utime;
4898 do_div(temp, total);
4899 }
4900 utime = (clock_t)temp;
4901
4902 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
4903 return p->prev_utime;
4904}
4905
4906cputime_t task_stime(struct task_struct *p)
4907{
4908 clock_t stime;
4909
4910 /*
4911 * Use CFS's precise accounting. (we subtract utime from
4912 * the total, to make sure the total observed by userspace
4913 * grows monotonically - apps rely on that):
4914 */
4915 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
4916 cputime_to_clock_t(task_utime(p));
4917
4918 if (stime >= 0)
4919 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
4920
4921 return p->prev_stime;
4922}
4923#endif
4924
4925inline cputime_t task_gtime(struct task_struct *p)
4926{
4927 return p->gtime;
4928}
4929
7835b98b
CL
4930/*
4931 * This function gets called by the timer code, with HZ frequency.
4932 * We call it with interrupts disabled.
4933 *
4934 * It also gets called by the fork code, when changing the parent's
4935 * timeslices.
4936 */
4937void scheduler_tick(void)
4938{
7835b98b
CL
4939 int cpu = smp_processor_id();
4940 struct rq *rq = cpu_rq(cpu);
dd41f596 4941 struct task_struct *curr = rq->curr;
3e51f33f
PZ
4942
4943 sched_clock_tick();
dd41f596
IM
4944
4945 spin_lock(&rq->lock);
3e51f33f 4946 update_rq_clock(rq);
f1a438d8 4947 update_cpu_load(rq);
fa85ae24 4948 curr->sched_class->task_tick(rq, curr, 0);
dd41f596 4949 spin_unlock(&rq->lock);
7835b98b 4950
e418e1c2 4951#ifdef CONFIG_SMP
dd41f596
IM
4952 rq->idle_at_tick = idle_cpu(cpu);
4953 trigger_load_balance(rq, cpu);
e418e1c2 4954#endif
1da177e4
LT
4955}
4956
7e49fcce 4957unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
4958{
4959 if (in_lock_functions(addr)) {
4960 addr = CALLER_ADDR2;
4961 if (in_lock_functions(addr))
4962 addr = CALLER_ADDR3;
4963 }
4964 return addr;
4965}
1da177e4 4966
7e49fcce
SR
4967#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4968 defined(CONFIG_PREEMPT_TRACER))
4969
43627582 4970void __kprobes add_preempt_count(int val)
1da177e4 4971{
6cd8a4bb 4972#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4973 /*
4974 * Underflow?
4975 */
9a11b49a
IM
4976 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4977 return;
6cd8a4bb 4978#endif
1da177e4 4979 preempt_count() += val;
6cd8a4bb 4980#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4981 /*
4982 * Spinlock count overflowing soon?
4983 */
33859f7f
MOS
4984 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4985 PREEMPT_MASK - 10);
6cd8a4bb
SR
4986#endif
4987 if (preempt_count() == val)
4988 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
4989}
4990EXPORT_SYMBOL(add_preempt_count);
4991
43627582 4992void __kprobes sub_preempt_count(int val)
1da177e4 4993{
6cd8a4bb 4994#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4995 /*
4996 * Underflow?
4997 */
01e3eb82 4998 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 4999 return;
1da177e4
LT
5000 /*
5001 * Is the spinlock portion underflowing?
5002 */
9a11b49a
IM
5003 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5004 !(preempt_count() & PREEMPT_MASK)))
5005 return;
6cd8a4bb 5006#endif
9a11b49a 5007
6cd8a4bb
SR
5008 if (preempt_count() == val)
5009 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
5010 preempt_count() -= val;
5011}
5012EXPORT_SYMBOL(sub_preempt_count);
5013
5014#endif
5015
5016/*
dd41f596 5017 * Print scheduling while atomic bug:
1da177e4 5018 */
dd41f596 5019static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 5020{
838225b4
SS
5021 struct pt_regs *regs = get_irq_regs();
5022
5023 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5024 prev->comm, prev->pid, preempt_count());
5025
dd41f596 5026 debug_show_held_locks(prev);
e21f5b15 5027 print_modules();
dd41f596
IM
5028 if (irqs_disabled())
5029 print_irqtrace_events(prev);
838225b4
SS
5030
5031 if (regs)
5032 show_regs(regs);
5033 else
5034 dump_stack();
dd41f596 5035}
1da177e4 5036
dd41f596
IM
5037/*
5038 * Various schedule()-time debugging checks and statistics:
5039 */
5040static inline void schedule_debug(struct task_struct *prev)
5041{
1da177e4 5042 /*
41a2d6cf 5043 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
5044 * schedule() atomically, we ignore that path for now.
5045 * Otherwise, whine if we are scheduling when we should not be.
5046 */
3f33a7ce 5047 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596
IM
5048 __schedule_bug(prev);
5049
1da177e4
LT
5050 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5051
2d72376b 5052 schedstat_inc(this_rq(), sched_count);
b8efb561
IM
5053#ifdef CONFIG_SCHEDSTATS
5054 if (unlikely(prev->lock_depth >= 0)) {
2d72376b
IM
5055 schedstat_inc(this_rq(), bkl_count);
5056 schedstat_inc(prev, sched_info.bkl_count);
b8efb561
IM
5057 }
5058#endif
dd41f596
IM
5059}
5060
df1c99d4
MG
5061static void put_prev_task(struct rq *rq, struct task_struct *prev)
5062{
5063 if (prev->state == TASK_RUNNING) {
5064 u64 runtime = prev->se.sum_exec_runtime;
5065
5066 runtime -= prev->se.prev_sum_exec_runtime;
5067 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
5068
5069 /*
5070 * In order to avoid avg_overlap growing stale when we are
5071 * indeed overlapping and hence not getting put to sleep, grow
5072 * the avg_overlap on preemption.
5073 *
5074 * We use the average preemption runtime because that
5075 * correlates to the amount of cache footprint a task can
5076 * build up.
5077 */
5078 update_avg(&prev->se.avg_overlap, runtime);
5079 }
5080 prev->sched_class->put_prev_task(rq, prev);
5081}
5082
dd41f596
IM
5083/*
5084 * Pick up the highest-prio task:
5085 */
5086static inline struct task_struct *
b67802ea 5087pick_next_task(struct rq *rq)
dd41f596 5088{
5522d5d5 5089 const struct sched_class *class;
dd41f596 5090 struct task_struct *p;
1da177e4
LT
5091
5092 /*
dd41f596
IM
5093 * Optimization: we know that if all tasks are in
5094 * the fair class we can call that function directly:
1da177e4 5095 */
dd41f596 5096 if (likely(rq->nr_running == rq->cfs.nr_running)) {
fb8d4724 5097 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
5098 if (likely(p))
5099 return p;
1da177e4
LT
5100 }
5101
dd41f596
IM
5102 class = sched_class_highest;
5103 for ( ; ; ) {
fb8d4724 5104 p = class->pick_next_task(rq);
dd41f596
IM
5105 if (p)
5106 return p;
5107 /*
5108 * Will never be NULL as the idle class always
5109 * returns a non-NULL p:
5110 */
5111 class = class->next;
5112 }
5113}
1da177e4 5114
dd41f596
IM
5115/*
5116 * schedule() is the main scheduler function.
5117 */
41719b03 5118asmlinkage void __sched __schedule(void)
dd41f596
IM
5119{
5120 struct task_struct *prev, *next;
67ca7bde 5121 unsigned long *switch_count;
dd41f596 5122 struct rq *rq;
31656519 5123 int cpu;
dd41f596 5124
dd41f596
IM
5125 cpu = smp_processor_id();
5126 rq = cpu_rq(cpu);
5127 rcu_qsctr_inc(cpu);
5128 prev = rq->curr;
5129 switch_count = &prev->nivcsw;
5130
5131 release_kernel_lock(prev);
5132need_resched_nonpreemptible:
5133
5134 schedule_debug(prev);
1da177e4 5135
31656519 5136 if (sched_feat(HRTICK))
f333fdc9 5137 hrtick_clear(rq);
8f4d37ec 5138
8cd162ce 5139 spin_lock_irq(&rq->lock);
3e51f33f 5140 update_rq_clock(rq);
1e819950 5141 clear_tsk_need_resched(prev);
1da177e4 5142
1da177e4 5143 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
16882c1e 5144 if (unlikely(signal_pending_state(prev->state, prev)))
1da177e4 5145 prev->state = TASK_RUNNING;
16882c1e 5146 else
2e1cb74a 5147 deactivate_task(rq, prev, 1);
dd41f596 5148 switch_count = &prev->nvcsw;
1da177e4
LT
5149 }
5150
9a897c5a
SR
5151#ifdef CONFIG_SMP
5152 if (prev->sched_class->pre_schedule)
5153 prev->sched_class->pre_schedule(rq, prev);
5154#endif
f65eda4f 5155
dd41f596 5156 if (unlikely(!rq->nr_running))
1da177e4 5157 idle_balance(cpu, rq);
1da177e4 5158
df1c99d4 5159 put_prev_task(rq, prev);
b67802ea 5160 next = pick_next_task(rq);
1da177e4 5161
1da177e4 5162 if (likely(prev != next)) {
673a90a1
DS
5163 sched_info_switch(prev, next);
5164
1da177e4
LT
5165 rq->nr_switches++;
5166 rq->curr = next;
5167 ++*switch_count;
5168
dd41f596 5169 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec
PZ
5170 /*
5171 * the context switch might have flipped the stack from under
5172 * us, hence refresh the local variables.
5173 */
5174 cpu = smp_processor_id();
5175 rq = cpu_rq(cpu);
1da177e4
LT
5176 } else
5177 spin_unlock_irq(&rq->lock);
5178
8f4d37ec 5179 if (unlikely(reacquire_kernel_lock(current) < 0))
1da177e4 5180 goto need_resched_nonpreemptible;
41719b03 5181}
8f4d37ec 5182
41719b03
PZ
5183asmlinkage void __sched schedule(void)
5184{
5185need_resched:
5186 preempt_disable();
5187 __schedule();
1da177e4
LT
5188 preempt_enable_no_resched();
5189 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
5190 goto need_resched;
5191}
1da177e4
LT
5192EXPORT_SYMBOL(schedule);
5193
0d66bf6d
PZ
5194#ifdef CONFIG_SMP
5195/*
5196 * Look out! "owner" is an entirely speculative pointer
5197 * access and not reliable.
5198 */
5199int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
5200{
5201 unsigned int cpu;
5202 struct rq *rq;
5203
5204 if (!sched_feat(OWNER_SPIN))
5205 return 0;
5206
5207#ifdef CONFIG_DEBUG_PAGEALLOC
5208 /*
5209 * Need to access the cpu field knowing that
5210 * DEBUG_PAGEALLOC could have unmapped it if
5211 * the mutex owner just released it and exited.
5212 */
5213 if (probe_kernel_address(&owner->cpu, cpu))
5214 goto out;
5215#else
5216 cpu = owner->cpu;
5217#endif
5218
5219 /*
5220 * Even if the access succeeded (likely case),
5221 * the cpu field may no longer be valid.
5222 */
5223 if (cpu >= nr_cpumask_bits)
5224 goto out;
5225
5226 /*
5227 * We need to validate that we can do a
5228 * get_cpu() and that we have the percpu area.
5229 */
5230 if (!cpu_online(cpu))
5231 goto out;
5232
5233 rq = cpu_rq(cpu);
5234
5235 for (;;) {
5236 /*
5237 * Owner changed, break to re-assess state.
5238 */
5239 if (lock->owner != owner)
5240 break;
5241
5242 /*
5243 * Is that owner really running on that cpu?
5244 */
5245 if (task_thread_info(rq->curr) != owner || need_resched())
5246 return 0;
5247
5248 cpu_relax();
5249 }
5250out:
5251 return 1;
5252}
5253#endif
5254
1da177e4
LT
5255#ifdef CONFIG_PREEMPT
5256/*
2ed6e34f 5257 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 5258 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
5259 * occur there and call schedule directly.
5260 */
5261asmlinkage void __sched preempt_schedule(void)
5262{
5263 struct thread_info *ti = current_thread_info();
6478d880 5264
1da177e4
LT
5265 /*
5266 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 5267 * we do not want to preempt the current task. Just return..
1da177e4 5268 */
beed33a8 5269 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
5270 return;
5271
3a5c359a
AK
5272 do {
5273 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a 5274 schedule();
3a5c359a 5275 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 5276
3a5c359a
AK
5277 /*
5278 * Check again in case we missed a preemption opportunity
5279 * between schedule and now.
5280 */
5281 barrier();
5ed0cec0 5282 } while (need_resched());
1da177e4 5283}
1da177e4
LT
5284EXPORT_SYMBOL(preempt_schedule);
5285
5286/*
2ed6e34f 5287 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
5288 * off of irq context.
5289 * Note, that this is called and return with irqs disabled. This will
5290 * protect us against recursive calling from irq.
5291 */
5292asmlinkage void __sched preempt_schedule_irq(void)
5293{
5294 struct thread_info *ti = current_thread_info();
6478d880 5295
2ed6e34f 5296 /* Catch callers which need to be fixed */
1da177e4
LT
5297 BUG_ON(ti->preempt_count || !irqs_disabled());
5298
3a5c359a
AK
5299 do {
5300 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a
AK
5301 local_irq_enable();
5302 schedule();
5303 local_irq_disable();
3a5c359a 5304 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 5305
3a5c359a
AK
5306 /*
5307 * Check again in case we missed a preemption opportunity
5308 * between schedule and now.
5309 */
5310 barrier();
5ed0cec0 5311 } while (need_resched());
1da177e4
LT
5312}
5313
5314#endif /* CONFIG_PREEMPT */
5315
95cdf3b7
IM
5316int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
5317 void *key)
1da177e4 5318{
48f24c4d 5319 return try_to_wake_up(curr->private, mode, sync);
1da177e4 5320}
1da177e4
LT
5321EXPORT_SYMBOL(default_wake_function);
5322
5323/*
41a2d6cf
IM
5324 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
5325 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
5326 * number) then we wake all the non-exclusive tasks and one exclusive task.
5327 *
5328 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 5329 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
5330 * zero in this (rare) case, and we handle it by continuing to scan the queue.
5331 */
777c6c5f
JW
5332void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
5333 int nr_exclusive, int sync, void *key)
1da177e4 5334{
2e45874c 5335 wait_queue_t *curr, *next;
1da177e4 5336
2e45874c 5337 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
5338 unsigned flags = curr->flags;
5339
1da177e4 5340 if (curr->func(curr, mode, sync, key) &&
48f24c4d 5341 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
5342 break;
5343 }
5344}
5345
5346/**
5347 * __wake_up - wake up threads blocked on a waitqueue.
5348 * @q: the waitqueue
5349 * @mode: which threads
5350 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 5351 * @key: is directly passed to the wakeup function
1da177e4 5352 */
7ad5b3a5 5353void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 5354 int nr_exclusive, void *key)
1da177e4
LT
5355{
5356 unsigned long flags;
5357
5358 spin_lock_irqsave(&q->lock, flags);
5359 __wake_up_common(q, mode, nr_exclusive, 0, key);
5360 spin_unlock_irqrestore(&q->lock, flags);
5361}
1da177e4
LT
5362EXPORT_SYMBOL(__wake_up);
5363
5364/*
5365 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
5366 */
7ad5b3a5 5367void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1da177e4
LT
5368{
5369 __wake_up_common(q, mode, 1, 0, NULL);
5370}
5371
4ede816a
DL
5372void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
5373{
5374 __wake_up_common(q, mode, 1, 0, key);
5375}
5376
1da177e4 5377/**
4ede816a 5378 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
5379 * @q: the waitqueue
5380 * @mode: which threads
5381 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 5382 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
5383 *
5384 * The sync wakeup differs that the waker knows that it will schedule
5385 * away soon, so while the target thread will be woken up, it will not
5386 * be migrated to another CPU - ie. the two threads are 'synchronized'
5387 * with each other. This can prevent needless bouncing between CPUs.
5388 *
5389 * On UP it can prevent extra preemption.
5390 */
4ede816a
DL
5391void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
5392 int nr_exclusive, void *key)
1da177e4
LT
5393{
5394 unsigned long flags;
5395 int sync = 1;
5396
5397 if (unlikely(!q))
5398 return;
5399
5400 if (unlikely(!nr_exclusive))
5401 sync = 0;
5402
5403 spin_lock_irqsave(&q->lock, flags);
4ede816a 5404 __wake_up_common(q, mode, nr_exclusive, sync, key);
1da177e4
LT
5405 spin_unlock_irqrestore(&q->lock, flags);
5406}
4ede816a
DL
5407EXPORT_SYMBOL_GPL(__wake_up_sync_key);
5408
5409/*
5410 * __wake_up_sync - see __wake_up_sync_key()
5411 */
5412void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
5413{
5414 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
5415}
1da177e4
LT
5416EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
5417
65eb3dc6
KD
5418/**
5419 * complete: - signals a single thread waiting on this completion
5420 * @x: holds the state of this particular completion
5421 *
5422 * This will wake up a single thread waiting on this completion. Threads will be
5423 * awakened in the same order in which they were queued.
5424 *
5425 * See also complete_all(), wait_for_completion() and related routines.
5426 */
b15136e9 5427void complete(struct completion *x)
1da177e4
LT
5428{
5429 unsigned long flags;
5430
5431 spin_lock_irqsave(&x->wait.lock, flags);
5432 x->done++;
d9514f6c 5433 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
5434 spin_unlock_irqrestore(&x->wait.lock, flags);
5435}
5436EXPORT_SYMBOL(complete);
5437
65eb3dc6
KD
5438/**
5439 * complete_all: - signals all threads waiting on this completion
5440 * @x: holds the state of this particular completion
5441 *
5442 * This will wake up all threads waiting on this particular completion event.
5443 */
b15136e9 5444void complete_all(struct completion *x)
1da177e4
LT
5445{
5446 unsigned long flags;
5447
5448 spin_lock_irqsave(&x->wait.lock, flags);
5449 x->done += UINT_MAX/2;
d9514f6c 5450 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
5451 spin_unlock_irqrestore(&x->wait.lock, flags);
5452}
5453EXPORT_SYMBOL(complete_all);
5454
8cbbe86d
AK
5455static inline long __sched
5456do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 5457{
1da177e4
LT
5458 if (!x->done) {
5459 DECLARE_WAITQUEUE(wait, current);
5460
5461 wait.flags |= WQ_FLAG_EXCLUSIVE;
5462 __add_wait_queue_tail(&x->wait, &wait);
5463 do {
94d3d824 5464 if (signal_pending_state(state, current)) {
ea71a546
ON
5465 timeout = -ERESTARTSYS;
5466 break;
8cbbe86d
AK
5467 }
5468 __set_current_state(state);
1da177e4
LT
5469 spin_unlock_irq(&x->wait.lock);
5470 timeout = schedule_timeout(timeout);
5471 spin_lock_irq(&x->wait.lock);
ea71a546 5472 } while (!x->done && timeout);
1da177e4 5473 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
5474 if (!x->done)
5475 return timeout;
1da177e4
LT
5476 }
5477 x->done--;
ea71a546 5478 return timeout ?: 1;
1da177e4 5479}
1da177e4 5480
8cbbe86d
AK
5481static long __sched
5482wait_for_common(struct completion *x, long timeout, int state)
1da177e4 5483{
1da177e4
LT
5484 might_sleep();
5485
5486 spin_lock_irq(&x->wait.lock);
8cbbe86d 5487 timeout = do_wait_for_common(x, timeout, state);
1da177e4 5488 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
5489 return timeout;
5490}
1da177e4 5491
65eb3dc6
KD
5492/**
5493 * wait_for_completion: - waits for completion of a task
5494 * @x: holds the state of this particular completion
5495 *
5496 * This waits to be signaled for completion of a specific task. It is NOT
5497 * interruptible and there is no timeout.
5498 *
5499 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
5500 * and interrupt capability. Also see complete().
5501 */
b15136e9 5502void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
5503{
5504 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 5505}
8cbbe86d 5506EXPORT_SYMBOL(wait_for_completion);
1da177e4 5507
65eb3dc6
KD
5508/**
5509 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
5510 * @x: holds the state of this particular completion
5511 * @timeout: timeout value in jiffies
5512 *
5513 * This waits for either a completion of a specific task to be signaled or for a
5514 * specified timeout to expire. The timeout is in jiffies. It is not
5515 * interruptible.
5516 */
b15136e9 5517unsigned long __sched
8cbbe86d 5518wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 5519{
8cbbe86d 5520 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 5521}
8cbbe86d 5522EXPORT_SYMBOL(wait_for_completion_timeout);
1da177e4 5523
65eb3dc6
KD
5524/**
5525 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
5526 * @x: holds the state of this particular completion
5527 *
5528 * This waits for completion of a specific task to be signaled. It is
5529 * interruptible.
5530 */
8cbbe86d 5531int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 5532{
51e97990
AK
5533 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
5534 if (t == -ERESTARTSYS)
5535 return t;
5536 return 0;
0fec171c 5537}
8cbbe86d 5538EXPORT_SYMBOL(wait_for_completion_interruptible);
1da177e4 5539
65eb3dc6
KD
5540/**
5541 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
5542 * @x: holds the state of this particular completion
5543 * @timeout: timeout value in jiffies
5544 *
5545 * This waits for either a completion of a specific task to be signaled or for a
5546 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
5547 */
b15136e9 5548unsigned long __sched
8cbbe86d
AK
5549wait_for_completion_interruptible_timeout(struct completion *x,
5550 unsigned long timeout)
0fec171c 5551{
8cbbe86d 5552 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 5553}
8cbbe86d 5554EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 5555
65eb3dc6
KD
5556/**
5557 * wait_for_completion_killable: - waits for completion of a task (killable)
5558 * @x: holds the state of this particular completion
5559 *
5560 * This waits to be signaled for completion of a specific task. It can be
5561 * interrupted by a kill signal.
5562 */
009e577e
MW
5563int __sched wait_for_completion_killable(struct completion *x)
5564{
5565 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
5566 if (t == -ERESTARTSYS)
5567 return t;
5568 return 0;
5569}
5570EXPORT_SYMBOL(wait_for_completion_killable);
5571
be4de352
DC
5572/**
5573 * try_wait_for_completion - try to decrement a completion without blocking
5574 * @x: completion structure
5575 *
5576 * Returns: 0 if a decrement cannot be done without blocking
5577 * 1 if a decrement succeeded.
5578 *
5579 * If a completion is being used as a counting completion,
5580 * attempt to decrement the counter without blocking. This
5581 * enables us to avoid waiting if the resource the completion
5582 * is protecting is not available.
5583 */
5584bool try_wait_for_completion(struct completion *x)
5585{
5586 int ret = 1;
5587
5588 spin_lock_irq(&x->wait.lock);
5589 if (!x->done)
5590 ret = 0;
5591 else
5592 x->done--;
5593 spin_unlock_irq(&x->wait.lock);
5594 return ret;
5595}
5596EXPORT_SYMBOL(try_wait_for_completion);
5597
5598/**
5599 * completion_done - Test to see if a completion has any waiters
5600 * @x: completion structure
5601 *
5602 * Returns: 0 if there are waiters (wait_for_completion() in progress)
5603 * 1 if there are no waiters.
5604 *
5605 */
5606bool completion_done(struct completion *x)
5607{
5608 int ret = 1;
5609
5610 spin_lock_irq(&x->wait.lock);
5611 if (!x->done)
5612 ret = 0;
5613 spin_unlock_irq(&x->wait.lock);
5614 return ret;
5615}
5616EXPORT_SYMBOL(completion_done);
5617
8cbbe86d
AK
5618static long __sched
5619sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 5620{
0fec171c
IM
5621 unsigned long flags;
5622 wait_queue_t wait;
5623
5624 init_waitqueue_entry(&wait, current);
1da177e4 5625
8cbbe86d 5626 __set_current_state(state);
1da177e4 5627
8cbbe86d
AK
5628 spin_lock_irqsave(&q->lock, flags);
5629 __add_wait_queue(q, &wait);
5630 spin_unlock(&q->lock);
5631 timeout = schedule_timeout(timeout);
5632 spin_lock_irq(&q->lock);
5633 __remove_wait_queue(q, &wait);
5634 spin_unlock_irqrestore(&q->lock, flags);
5635
5636 return timeout;
5637}
5638
5639void __sched interruptible_sleep_on(wait_queue_head_t *q)
5640{
5641 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 5642}
1da177e4
LT
5643EXPORT_SYMBOL(interruptible_sleep_on);
5644
0fec171c 5645long __sched
95cdf3b7 5646interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 5647{
8cbbe86d 5648 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 5649}
1da177e4
LT
5650EXPORT_SYMBOL(interruptible_sleep_on_timeout);
5651
0fec171c 5652void __sched sleep_on(wait_queue_head_t *q)
1da177e4 5653{
8cbbe86d 5654 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 5655}
1da177e4
LT
5656EXPORT_SYMBOL(sleep_on);
5657
0fec171c 5658long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 5659{
8cbbe86d 5660 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 5661}
1da177e4
LT
5662EXPORT_SYMBOL(sleep_on_timeout);
5663
b29739f9
IM
5664#ifdef CONFIG_RT_MUTEXES
5665
5666/*
5667 * rt_mutex_setprio - set the current priority of a task
5668 * @p: task
5669 * @prio: prio value (kernel-internal form)
5670 *
5671 * This function changes the 'effective' priority of a task. It does
5672 * not touch ->normal_prio like __setscheduler().
5673 *
5674 * Used by the rt_mutex code to implement priority inheritance logic.
5675 */
36c8b586 5676void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9
IM
5677{
5678 unsigned long flags;
83b699ed 5679 int oldprio, on_rq, running;
70b97a7f 5680 struct rq *rq;
cb469845 5681 const struct sched_class *prev_class = p->sched_class;
b29739f9
IM
5682
5683 BUG_ON(prio < 0 || prio > MAX_PRIO);
5684
5685 rq = task_rq_lock(p, &flags);
a8e504d2 5686 update_rq_clock(rq);
b29739f9 5687
d5f9f942 5688 oldprio = p->prio;
dd41f596 5689 on_rq = p->se.on_rq;
051a1d1a 5690 running = task_current(rq, p);
0e1f3483 5691 if (on_rq)
69be72c1 5692 dequeue_task(rq, p, 0);
0e1f3483
HS
5693 if (running)
5694 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
5695
5696 if (rt_prio(prio))
5697 p->sched_class = &rt_sched_class;
5698 else
5699 p->sched_class = &fair_sched_class;
5700
b29739f9
IM
5701 p->prio = prio;
5702
0e1f3483
HS
5703 if (running)
5704 p->sched_class->set_curr_task(rq);
dd41f596 5705 if (on_rq) {
8159f87e 5706 enqueue_task(rq, p, 0);
cb469845
SR
5707
5708 check_class_changed(rq, p, prev_class, oldprio, running);
b29739f9
IM
5709 }
5710 task_rq_unlock(rq, &flags);
5711}
5712
5713#endif
5714
36c8b586 5715void set_user_nice(struct task_struct *p, long nice)
1da177e4 5716{
dd41f596 5717 int old_prio, delta, on_rq;
1da177e4 5718 unsigned long flags;
70b97a7f 5719 struct rq *rq;
1da177e4
LT
5720
5721 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
5722 return;
5723 /*
5724 * We have to be careful, if called from sys_setpriority(),
5725 * the task might be in the middle of scheduling on another CPU.
5726 */
5727 rq = task_rq_lock(p, &flags);
a8e504d2 5728 update_rq_clock(rq);
1da177e4
LT
5729 /*
5730 * The RT priorities are set via sched_setscheduler(), but we still
5731 * allow the 'normal' nice value to be set - but as expected
5732 * it wont have any effect on scheduling until the task is
dd41f596 5733 * SCHED_FIFO/SCHED_RR:
1da177e4 5734 */
e05606d3 5735 if (task_has_rt_policy(p)) {
1da177e4
LT
5736 p->static_prio = NICE_TO_PRIO(nice);
5737 goto out_unlock;
5738 }
dd41f596 5739 on_rq = p->se.on_rq;
c09595f6 5740 if (on_rq)
69be72c1 5741 dequeue_task(rq, p, 0);
1da177e4 5742
1da177e4 5743 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 5744 set_load_weight(p);
b29739f9
IM
5745 old_prio = p->prio;
5746 p->prio = effective_prio(p);
5747 delta = p->prio - old_prio;
1da177e4 5748
dd41f596 5749 if (on_rq) {
8159f87e 5750 enqueue_task(rq, p, 0);
1da177e4 5751 /*
d5f9f942
AM
5752 * If the task increased its priority or is running and
5753 * lowered its priority, then reschedule its CPU:
1da177e4 5754 */
d5f9f942 5755 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
5756 resched_task(rq->curr);
5757 }
5758out_unlock:
5759 task_rq_unlock(rq, &flags);
5760}
1da177e4
LT
5761EXPORT_SYMBOL(set_user_nice);
5762
e43379f1
MM
5763/*
5764 * can_nice - check if a task can reduce its nice value
5765 * @p: task
5766 * @nice: nice value
5767 */
36c8b586 5768int can_nice(const struct task_struct *p, const int nice)
e43379f1 5769{
024f4747
MM
5770 /* convert nice value [19,-20] to rlimit style value [1,40] */
5771 int nice_rlim = 20 - nice;
48f24c4d 5772
e43379f1
MM
5773 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
5774 capable(CAP_SYS_NICE));
5775}
5776
1da177e4
LT
5777#ifdef __ARCH_WANT_SYS_NICE
5778
5779/*
5780 * sys_nice - change the priority of the current process.
5781 * @increment: priority increment
5782 *
5783 * sys_setpriority is a more generic, but much slower function that
5784 * does similar things.
5785 */
5add95d4 5786SYSCALL_DEFINE1(nice, int, increment)
1da177e4 5787{
48f24c4d 5788 long nice, retval;
1da177e4
LT
5789
5790 /*
5791 * Setpriority might change our priority at the same moment.
5792 * We don't have to worry. Conceptually one call occurs first
5793 * and we have a single winner.
5794 */
e43379f1
MM
5795 if (increment < -40)
5796 increment = -40;
1da177e4
LT
5797 if (increment > 40)
5798 increment = 40;
5799
2b8f836f 5800 nice = TASK_NICE(current) + increment;
1da177e4
LT
5801 if (nice < -20)
5802 nice = -20;
5803 if (nice > 19)
5804 nice = 19;
5805
e43379f1
MM
5806 if (increment < 0 && !can_nice(current, nice))
5807 return -EPERM;
5808
1da177e4
LT
5809 retval = security_task_setnice(current, nice);
5810 if (retval)
5811 return retval;
5812
5813 set_user_nice(current, nice);
5814 return 0;
5815}
5816
5817#endif
5818
5819/**
5820 * task_prio - return the priority value of a given task.
5821 * @p: the task in question.
5822 *
5823 * This is the priority value as seen by users in /proc.
5824 * RT tasks are offset by -200. Normal tasks are centered
5825 * around 0, value goes from -16 to +15.
5826 */
36c8b586 5827int task_prio(const struct task_struct *p)
1da177e4
LT
5828{
5829 return p->prio - MAX_RT_PRIO;
5830}
5831
5832/**
5833 * task_nice - return the nice value of a given task.
5834 * @p: the task in question.
5835 */
36c8b586 5836int task_nice(const struct task_struct *p)
1da177e4
LT
5837{
5838 return TASK_NICE(p);
5839}
150d8bed 5840EXPORT_SYMBOL(task_nice);
1da177e4
LT
5841
5842/**
5843 * idle_cpu - is a given cpu idle currently?
5844 * @cpu: the processor in question.
5845 */
5846int idle_cpu(int cpu)
5847{
5848 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
5849}
5850
1da177e4
LT
5851/**
5852 * idle_task - return the idle task for a given cpu.
5853 * @cpu: the processor in question.
5854 */
36c8b586 5855struct task_struct *idle_task(int cpu)
1da177e4
LT
5856{
5857 return cpu_rq(cpu)->idle;
5858}
5859
5860/**
5861 * find_process_by_pid - find a process with a matching PID value.
5862 * @pid: the pid in question.
5863 */
a9957449 5864static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 5865{
228ebcbe 5866 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
5867}
5868
5869/* Actually do priority change: must hold rq lock. */
dd41f596
IM
5870static void
5871__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 5872{
dd41f596 5873 BUG_ON(p->se.on_rq);
48f24c4d 5874
1da177e4 5875 p->policy = policy;
dd41f596
IM
5876 switch (p->policy) {
5877 case SCHED_NORMAL:
5878 case SCHED_BATCH:
5879 case SCHED_IDLE:
5880 p->sched_class = &fair_sched_class;
5881 break;
5882 case SCHED_FIFO:
5883 case SCHED_RR:
5884 p->sched_class = &rt_sched_class;
5885 break;
5886 }
5887
1da177e4 5888 p->rt_priority = prio;
b29739f9
IM
5889 p->normal_prio = normal_prio(p);
5890 /* we are holding p->pi_lock already */
5891 p->prio = rt_mutex_getprio(p);
2dd73a4f 5892 set_load_weight(p);
1da177e4
LT
5893}
5894
c69e8d9c
DH
5895/*
5896 * check the target process has a UID that matches the current process's
5897 */
5898static bool check_same_owner(struct task_struct *p)
5899{
5900 const struct cred *cred = current_cred(), *pcred;
5901 bool match;
5902
5903 rcu_read_lock();
5904 pcred = __task_cred(p);
5905 match = (cred->euid == pcred->euid ||
5906 cred->euid == pcred->uid);
5907 rcu_read_unlock();
5908 return match;
5909}
5910
961ccddd
RR
5911static int __sched_setscheduler(struct task_struct *p, int policy,
5912 struct sched_param *param, bool user)
1da177e4 5913{
83b699ed 5914 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 5915 unsigned long flags;
cb469845 5916 const struct sched_class *prev_class = p->sched_class;
70b97a7f 5917 struct rq *rq;
1da177e4 5918
66e5393a
SR
5919 /* may grab non-irq protected spin_locks */
5920 BUG_ON(in_interrupt());
1da177e4
LT
5921recheck:
5922 /* double check policy once rq lock held */
5923 if (policy < 0)
5924 policy = oldpolicy = p->policy;
5925 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
dd41f596
IM
5926 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5927 policy != SCHED_IDLE)
b0a9499c 5928 return -EINVAL;
1da177e4
LT
5929 /*
5930 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
5931 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5932 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
5933 */
5934 if (param->sched_priority < 0 ||
95cdf3b7 5935 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 5936 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 5937 return -EINVAL;
e05606d3 5938 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
5939 return -EINVAL;
5940
37e4ab3f
OC
5941 /*
5942 * Allow unprivileged RT tasks to decrease priority:
5943 */
961ccddd 5944 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 5945 if (rt_policy(policy)) {
8dc3e909 5946 unsigned long rlim_rtprio;
8dc3e909
ON
5947
5948 if (!lock_task_sighand(p, &flags))
5949 return -ESRCH;
5950 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
5951 unlock_task_sighand(p, &flags);
5952
5953 /* can't set/change the rt policy */
5954 if (policy != p->policy && !rlim_rtprio)
5955 return -EPERM;
5956
5957 /* can't increase priority */
5958 if (param->sched_priority > p->rt_priority &&
5959 param->sched_priority > rlim_rtprio)
5960 return -EPERM;
5961 }
dd41f596
IM
5962 /*
5963 * Like positive nice levels, dont allow tasks to
5964 * move out of SCHED_IDLE either:
5965 */
5966 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
5967 return -EPERM;
5fe1d75f 5968
37e4ab3f 5969 /* can't change other user's priorities */
c69e8d9c 5970 if (!check_same_owner(p))
37e4ab3f
OC
5971 return -EPERM;
5972 }
1da177e4 5973
725aad24 5974 if (user) {
b68aa230 5975#ifdef CONFIG_RT_GROUP_SCHED
725aad24
JF
5976 /*
5977 * Do not allow realtime tasks into groups that have no runtime
5978 * assigned.
5979 */
9a7e0b18
PZ
5980 if (rt_bandwidth_enabled() && rt_policy(policy) &&
5981 task_group(p)->rt_bandwidth.rt_runtime == 0)
725aad24 5982 return -EPERM;
b68aa230
PZ
5983#endif
5984
725aad24
JF
5985 retval = security_task_setscheduler(p, policy, param);
5986 if (retval)
5987 return retval;
5988 }
5989
b29739f9
IM
5990 /*
5991 * make sure no PI-waiters arrive (or leave) while we are
5992 * changing the priority of the task:
5993 */
5994 spin_lock_irqsave(&p->pi_lock, flags);
1da177e4
LT
5995 /*
5996 * To be able to change p->policy safely, the apropriate
5997 * runqueue lock must be held.
5998 */
b29739f9 5999 rq = __task_rq_lock(p);
1da177e4
LT
6000 /* recheck policy now with rq lock held */
6001 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6002 policy = oldpolicy = -1;
b29739f9
IM
6003 __task_rq_unlock(rq);
6004 spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
6005 goto recheck;
6006 }
2daa3577 6007 update_rq_clock(rq);
dd41f596 6008 on_rq = p->se.on_rq;
051a1d1a 6009 running = task_current(rq, p);
0e1f3483 6010 if (on_rq)
2e1cb74a 6011 deactivate_task(rq, p, 0);
0e1f3483
HS
6012 if (running)
6013 p->sched_class->put_prev_task(rq, p);
f6b53205 6014
1da177e4 6015 oldprio = p->prio;
dd41f596 6016 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 6017
0e1f3483
HS
6018 if (running)
6019 p->sched_class->set_curr_task(rq);
dd41f596
IM
6020 if (on_rq) {
6021 activate_task(rq, p, 0);
cb469845
SR
6022
6023 check_class_changed(rq, p, prev_class, oldprio, running);
1da177e4 6024 }
b29739f9
IM
6025 __task_rq_unlock(rq);
6026 spin_unlock_irqrestore(&p->pi_lock, flags);
6027
95e02ca9
TG
6028 rt_mutex_adjust_pi(p);
6029
1da177e4
LT
6030 return 0;
6031}
961ccddd
RR
6032
6033/**
6034 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
6035 * @p: the task in question.
6036 * @policy: new policy.
6037 * @param: structure containing the new RT priority.
6038 *
6039 * NOTE that the task may be already dead.
6040 */
6041int sched_setscheduler(struct task_struct *p, int policy,
6042 struct sched_param *param)
6043{
6044 return __sched_setscheduler(p, policy, param, true);
6045}
1da177e4
LT
6046EXPORT_SYMBOL_GPL(sched_setscheduler);
6047
961ccddd
RR
6048/**
6049 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
6050 * @p: the task in question.
6051 * @policy: new policy.
6052 * @param: structure containing the new RT priority.
6053 *
6054 * Just like sched_setscheduler, only don't bother checking if the
6055 * current context has permission. For example, this is needed in
6056 * stop_machine(): we create temporary high priority worker threads,
6057 * but our caller might not have that capability.
6058 */
6059int sched_setscheduler_nocheck(struct task_struct *p, int policy,
6060 struct sched_param *param)
6061{
6062 return __sched_setscheduler(p, policy, param, false);
6063}
6064
95cdf3b7
IM
6065static int
6066do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 6067{
1da177e4
LT
6068 struct sched_param lparam;
6069 struct task_struct *p;
36c8b586 6070 int retval;
1da177e4
LT
6071
6072 if (!param || pid < 0)
6073 return -EINVAL;
6074 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
6075 return -EFAULT;
5fe1d75f
ON
6076
6077 rcu_read_lock();
6078 retval = -ESRCH;
1da177e4 6079 p = find_process_by_pid(pid);
5fe1d75f
ON
6080 if (p != NULL)
6081 retval = sched_setscheduler(p, policy, &lparam);
6082 rcu_read_unlock();
36c8b586 6083
1da177e4
LT
6084 return retval;
6085}
6086
6087/**
6088 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
6089 * @pid: the pid in question.
6090 * @policy: new policy.
6091 * @param: structure containing the new RT priority.
6092 */
5add95d4
HC
6093SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
6094 struct sched_param __user *, param)
1da177e4 6095{
c21761f1
JB
6096 /* negative values for policy are not valid */
6097 if (policy < 0)
6098 return -EINVAL;
6099
1da177e4
LT
6100 return do_sched_setscheduler(pid, policy, param);
6101}
6102
6103/**
6104 * sys_sched_setparam - set/change the RT priority of a thread
6105 * @pid: the pid in question.
6106 * @param: structure containing the new RT priority.
6107 */
5add95d4 6108SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
6109{
6110 return do_sched_setscheduler(pid, -1, param);
6111}
6112
6113/**
6114 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
6115 * @pid: the pid in question.
6116 */
5add95d4 6117SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 6118{
36c8b586 6119 struct task_struct *p;
3a5c359a 6120 int retval;
1da177e4
LT
6121
6122 if (pid < 0)
3a5c359a 6123 return -EINVAL;
1da177e4
LT
6124
6125 retval = -ESRCH;
6126 read_lock(&tasklist_lock);
6127 p = find_process_by_pid(pid);
6128 if (p) {
6129 retval = security_task_getscheduler(p);
6130 if (!retval)
6131 retval = p->policy;
6132 }
6133 read_unlock(&tasklist_lock);
1da177e4
LT
6134 return retval;
6135}
6136
6137/**
6138 * sys_sched_getscheduler - get the RT priority of a thread
6139 * @pid: the pid in question.
6140 * @param: structure containing the RT priority.
6141 */
5add95d4 6142SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
6143{
6144 struct sched_param lp;
36c8b586 6145 struct task_struct *p;
3a5c359a 6146 int retval;
1da177e4
LT
6147
6148 if (!param || pid < 0)
3a5c359a 6149 return -EINVAL;
1da177e4
LT
6150
6151 read_lock(&tasklist_lock);
6152 p = find_process_by_pid(pid);
6153 retval = -ESRCH;
6154 if (!p)
6155 goto out_unlock;
6156
6157 retval = security_task_getscheduler(p);
6158 if (retval)
6159 goto out_unlock;
6160
6161 lp.sched_priority = p->rt_priority;
6162 read_unlock(&tasklist_lock);
6163
6164 /*
6165 * This one might sleep, we cannot do it with a spinlock held ...
6166 */
6167 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
6168
1da177e4
LT
6169 return retval;
6170
6171out_unlock:
6172 read_unlock(&tasklist_lock);
6173 return retval;
6174}
6175
96f874e2 6176long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 6177{
5a16f3d3 6178 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
6179 struct task_struct *p;
6180 int retval;
1da177e4 6181
95402b38 6182 get_online_cpus();
1da177e4
LT
6183 read_lock(&tasklist_lock);
6184
6185 p = find_process_by_pid(pid);
6186 if (!p) {
6187 read_unlock(&tasklist_lock);
95402b38 6188 put_online_cpus();
1da177e4
LT
6189 return -ESRCH;
6190 }
6191
6192 /*
6193 * It is not safe to call set_cpus_allowed with the
41a2d6cf 6194 * tasklist_lock held. We will bump the task_struct's
1da177e4
LT
6195 * usage count and then drop tasklist_lock.
6196 */
6197 get_task_struct(p);
6198 read_unlock(&tasklist_lock);
6199
5a16f3d3
RR
6200 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6201 retval = -ENOMEM;
6202 goto out_put_task;
6203 }
6204 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
6205 retval = -ENOMEM;
6206 goto out_free_cpus_allowed;
6207 }
1da177e4 6208 retval = -EPERM;
c69e8d9c 6209 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
1da177e4
LT
6210 goto out_unlock;
6211
e7834f8f
DQ
6212 retval = security_task_setscheduler(p, 0, NULL);
6213 if (retval)
6214 goto out_unlock;
6215
5a16f3d3
RR
6216 cpuset_cpus_allowed(p, cpus_allowed);
6217 cpumask_and(new_mask, in_mask, cpus_allowed);
8707d8b8 6218 again:
5a16f3d3 6219 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 6220
8707d8b8 6221 if (!retval) {
5a16f3d3
RR
6222 cpuset_cpus_allowed(p, cpus_allowed);
6223 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
6224 /*
6225 * We must have raced with a concurrent cpuset
6226 * update. Just reset the cpus_allowed to the
6227 * cpuset's cpus_allowed
6228 */
5a16f3d3 6229 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
6230 goto again;
6231 }
6232 }
1da177e4 6233out_unlock:
5a16f3d3
RR
6234 free_cpumask_var(new_mask);
6235out_free_cpus_allowed:
6236 free_cpumask_var(cpus_allowed);
6237out_put_task:
1da177e4 6238 put_task_struct(p);
95402b38 6239 put_online_cpus();
1da177e4
LT
6240 return retval;
6241}
6242
6243static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 6244 struct cpumask *new_mask)
1da177e4 6245{
96f874e2
RR
6246 if (len < cpumask_size())
6247 cpumask_clear(new_mask);
6248 else if (len > cpumask_size())
6249 len = cpumask_size();
6250
1da177e4
LT
6251 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6252}
6253
6254/**
6255 * sys_sched_setaffinity - set the cpu affinity of a process
6256 * @pid: pid of the process
6257 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6258 * @user_mask_ptr: user-space pointer to the new cpu mask
6259 */
5add95d4
HC
6260SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6261 unsigned long __user *, user_mask_ptr)
1da177e4 6262{
5a16f3d3 6263 cpumask_var_t new_mask;
1da177e4
LT
6264 int retval;
6265
5a16f3d3
RR
6266 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6267 return -ENOMEM;
1da177e4 6268
5a16f3d3
RR
6269 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6270 if (retval == 0)
6271 retval = sched_setaffinity(pid, new_mask);
6272 free_cpumask_var(new_mask);
6273 return retval;
1da177e4
LT
6274}
6275
96f874e2 6276long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 6277{
36c8b586 6278 struct task_struct *p;
1da177e4 6279 int retval;
1da177e4 6280
95402b38 6281 get_online_cpus();
1da177e4
LT
6282 read_lock(&tasklist_lock);
6283
6284 retval = -ESRCH;
6285 p = find_process_by_pid(pid);
6286 if (!p)
6287 goto out_unlock;
6288
e7834f8f
DQ
6289 retval = security_task_getscheduler(p);
6290 if (retval)
6291 goto out_unlock;
6292
96f874e2 6293 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
1da177e4
LT
6294
6295out_unlock:
6296 read_unlock(&tasklist_lock);
95402b38 6297 put_online_cpus();
1da177e4 6298
9531b62f 6299 return retval;
1da177e4
LT
6300}
6301
6302/**
6303 * sys_sched_getaffinity - get the cpu affinity of a process
6304 * @pid: pid of the process
6305 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6306 * @user_mask_ptr: user-space pointer to hold the current cpu mask
6307 */
5add95d4
HC
6308SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6309 unsigned long __user *, user_mask_ptr)
1da177e4
LT
6310{
6311 int ret;
f17c8607 6312 cpumask_var_t mask;
1da177e4 6313
f17c8607 6314 if (len < cpumask_size())
1da177e4
LT
6315 return -EINVAL;
6316
f17c8607
RR
6317 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6318 return -ENOMEM;
1da177e4 6319
f17c8607
RR
6320 ret = sched_getaffinity(pid, mask);
6321 if (ret == 0) {
6322 if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
6323 ret = -EFAULT;
6324 else
6325 ret = cpumask_size();
6326 }
6327 free_cpumask_var(mask);
1da177e4 6328
f17c8607 6329 return ret;
1da177e4
LT
6330}
6331
6332/**
6333 * sys_sched_yield - yield the current processor to other threads.
6334 *
dd41f596
IM
6335 * This function yields the current CPU to other tasks. If there are no
6336 * other threads running on this CPU then this function will return.
1da177e4 6337 */
5add95d4 6338SYSCALL_DEFINE0(sched_yield)
1da177e4 6339{
70b97a7f 6340 struct rq *rq = this_rq_lock();
1da177e4 6341
2d72376b 6342 schedstat_inc(rq, yld_count);
4530d7ab 6343 current->sched_class->yield_task(rq);
1da177e4
LT
6344
6345 /*
6346 * Since we are going to call schedule() anyway, there's
6347 * no need to preempt or enable interrupts:
6348 */
6349 __release(rq->lock);
8a25d5de 6350 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1da177e4
LT
6351 _raw_spin_unlock(&rq->lock);
6352 preempt_enable_no_resched();
6353
6354 schedule();
6355
6356 return 0;
6357}
6358
e7b38404 6359static void __cond_resched(void)
1da177e4 6360{
8e0a43d8
IM
6361#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
6362 __might_sleep(__FILE__, __LINE__);
6363#endif
5bbcfd90
IM
6364 /*
6365 * The BKS might be reacquired before we have dropped
6366 * PREEMPT_ACTIVE, which could trigger a second
6367 * cond_resched() call.
6368 */
1da177e4
LT
6369 do {
6370 add_preempt_count(PREEMPT_ACTIVE);
6371 schedule();
6372 sub_preempt_count(PREEMPT_ACTIVE);
6373 } while (need_resched());
6374}
6375
02b67cc3 6376int __sched _cond_resched(void)
1da177e4 6377{
9414232f
IM
6378 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
6379 system_state == SYSTEM_RUNNING) {
1da177e4
LT
6380 __cond_resched();
6381 return 1;
6382 }
6383 return 0;
6384}
02b67cc3 6385EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
6386
6387/*
6388 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
6389 * call schedule, and on return reacquire the lock.
6390 *
41a2d6cf 6391 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
6392 * operations here to prevent schedule() from being called twice (once via
6393 * spin_unlock(), once by hand).
6394 */
95cdf3b7 6395int cond_resched_lock(spinlock_t *lock)
1da177e4 6396{
95c354fe 6397 int resched = need_resched() && system_state == SYSTEM_RUNNING;
6df3cecb
JK
6398 int ret = 0;
6399
95c354fe 6400 if (spin_needbreak(lock) || resched) {
1da177e4 6401 spin_unlock(lock);
95c354fe
NP
6402 if (resched && need_resched())
6403 __cond_resched();
6404 else
6405 cpu_relax();
6df3cecb 6406 ret = 1;
1da177e4 6407 spin_lock(lock);
1da177e4 6408 }
6df3cecb 6409 return ret;
1da177e4 6410}
1da177e4
LT
6411EXPORT_SYMBOL(cond_resched_lock);
6412
6413int __sched cond_resched_softirq(void)
6414{
6415 BUG_ON(!in_softirq());
6416
9414232f 6417 if (need_resched() && system_state == SYSTEM_RUNNING) {
98d82567 6418 local_bh_enable();
1da177e4
LT
6419 __cond_resched();
6420 local_bh_disable();
6421 return 1;
6422 }
6423 return 0;
6424}
1da177e4
LT
6425EXPORT_SYMBOL(cond_resched_softirq);
6426
1da177e4
LT
6427/**
6428 * yield - yield the current processor to other threads.
6429 *
72fd4a35 6430 * This is a shortcut for kernel-space yielding - it marks the
1da177e4
LT
6431 * thread runnable and calls sys_sched_yield().
6432 */
6433void __sched yield(void)
6434{
6435 set_current_state(TASK_RUNNING);
6436 sys_sched_yield();
6437}
1da177e4
LT
6438EXPORT_SYMBOL(yield);
6439
6440/*
41a2d6cf 6441 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4
LT
6442 * that process accounting knows that this is a task in IO wait state.
6443 *
6444 * But don't do that if it is a deliberate, throttling IO wait (this task
6445 * has set its backing_dev_info: the queue against which it should throttle)
6446 */
6447void __sched io_schedule(void)
6448{
70b97a7f 6449 struct rq *rq = &__raw_get_cpu_var(runqueues);
1da177e4 6450
0ff92245 6451 delayacct_blkio_start();
1da177e4
LT
6452 atomic_inc(&rq->nr_iowait);
6453 schedule();
6454 atomic_dec(&rq->nr_iowait);
0ff92245 6455 delayacct_blkio_end();
1da177e4 6456}
1da177e4
LT
6457EXPORT_SYMBOL(io_schedule);
6458
6459long __sched io_schedule_timeout(long timeout)
6460{
70b97a7f 6461 struct rq *rq = &__raw_get_cpu_var(runqueues);
1da177e4
LT
6462 long ret;
6463
0ff92245 6464 delayacct_blkio_start();
1da177e4
LT
6465 atomic_inc(&rq->nr_iowait);
6466 ret = schedule_timeout(timeout);
6467 atomic_dec(&rq->nr_iowait);
0ff92245 6468 delayacct_blkio_end();
1da177e4
LT
6469 return ret;
6470}
6471
6472/**
6473 * sys_sched_get_priority_max - return maximum RT priority.
6474 * @policy: scheduling class.
6475 *
6476 * this syscall returns the maximum rt_priority that can be used
6477 * by a given scheduling class.
6478 */
5add95d4 6479SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
6480{
6481 int ret = -EINVAL;
6482
6483 switch (policy) {
6484 case SCHED_FIFO:
6485 case SCHED_RR:
6486 ret = MAX_USER_RT_PRIO-1;
6487 break;
6488 case SCHED_NORMAL:
b0a9499c 6489 case SCHED_BATCH:
dd41f596 6490 case SCHED_IDLE:
1da177e4
LT
6491 ret = 0;
6492 break;
6493 }
6494 return ret;
6495}
6496
6497/**
6498 * sys_sched_get_priority_min - return minimum RT priority.
6499 * @policy: scheduling class.
6500 *
6501 * this syscall returns the minimum rt_priority that can be used
6502 * by a given scheduling class.
6503 */
5add95d4 6504SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
6505{
6506 int ret = -EINVAL;
6507
6508 switch (policy) {
6509 case SCHED_FIFO:
6510 case SCHED_RR:
6511 ret = 1;
6512 break;
6513 case SCHED_NORMAL:
b0a9499c 6514 case SCHED_BATCH:
dd41f596 6515 case SCHED_IDLE:
1da177e4
LT
6516 ret = 0;
6517 }
6518 return ret;
6519}
6520
6521/**
6522 * sys_sched_rr_get_interval - return the default timeslice of a process.
6523 * @pid: pid of the process.
6524 * @interval: userspace pointer to the timeslice value.
6525 *
6526 * this syscall writes the default timeslice value of a given process
6527 * into the user-space timespec buffer. A value of '0' means infinity.
6528 */
17da2bd9 6529SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 6530 struct timespec __user *, interval)
1da177e4 6531{
36c8b586 6532 struct task_struct *p;
a4ec24b4 6533 unsigned int time_slice;
3a5c359a 6534 int retval;
1da177e4 6535 struct timespec t;
1da177e4
LT
6536
6537 if (pid < 0)
3a5c359a 6538 return -EINVAL;
1da177e4
LT
6539
6540 retval = -ESRCH;
6541 read_lock(&tasklist_lock);
6542 p = find_process_by_pid(pid);
6543 if (!p)
6544 goto out_unlock;
6545
6546 retval = security_task_getscheduler(p);
6547 if (retval)
6548 goto out_unlock;
6549
77034937
IM
6550 /*
6551 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
6552 * tasks that are on an otherwise idle runqueue:
6553 */
6554 time_slice = 0;
6555 if (p->policy == SCHED_RR) {
a4ec24b4 6556 time_slice = DEF_TIMESLICE;
1868f958 6557 } else if (p->policy != SCHED_FIFO) {
a4ec24b4
DA
6558 struct sched_entity *se = &p->se;
6559 unsigned long flags;
6560 struct rq *rq;
6561
6562 rq = task_rq_lock(p, &flags);
77034937
IM
6563 if (rq->cfs.load.weight)
6564 time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
a4ec24b4
DA
6565 task_rq_unlock(rq, &flags);
6566 }
1da177e4 6567 read_unlock(&tasklist_lock);
a4ec24b4 6568 jiffies_to_timespec(time_slice, &t);
1da177e4 6569 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 6570 return retval;
3a5c359a 6571
1da177e4
LT
6572out_unlock:
6573 read_unlock(&tasklist_lock);
6574 return retval;
6575}
6576
7c731e0a 6577static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 6578
82a1fcb9 6579void sched_show_task(struct task_struct *p)
1da177e4 6580{
1da177e4 6581 unsigned long free = 0;
36c8b586 6582 unsigned state;
1da177e4 6583
1da177e4 6584 state = p->state ? __ffs(p->state) + 1 : 0;
cc4ea795 6585 printk(KERN_INFO "%-13.13s %c", p->comm,
2ed6e34f 6586 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 6587#if BITS_PER_LONG == 32
1da177e4 6588 if (state == TASK_RUNNING)
cc4ea795 6589 printk(KERN_CONT " running ");
1da177e4 6590 else
cc4ea795 6591 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
6592#else
6593 if (state == TASK_RUNNING)
cc4ea795 6594 printk(KERN_CONT " running task ");
1da177e4 6595 else
cc4ea795 6596 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
6597#endif
6598#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 6599 free = stack_not_used(p);
1da177e4 6600#endif
ba25f9dc 6601 printk(KERN_CONT "%5lu %5d %6d\n", free,
fcfd50af 6602 task_pid_nr(p), task_pid_nr(p->real_parent));
1da177e4 6603
5fb5e6de 6604 show_stack(p, NULL);
1da177e4
LT
6605}
6606
e59e2ae2 6607void show_state_filter(unsigned long state_filter)
1da177e4 6608{
36c8b586 6609 struct task_struct *g, *p;
1da177e4 6610
4bd77321
IM
6611#if BITS_PER_LONG == 32
6612 printk(KERN_INFO
6613 " task PC stack pid father\n");
1da177e4 6614#else
4bd77321
IM
6615 printk(KERN_INFO
6616 " task PC stack pid father\n");
1da177e4
LT
6617#endif
6618 read_lock(&tasklist_lock);
6619 do_each_thread(g, p) {
6620 /*
6621 * reset the NMI-timeout, listing all files on a slow
6622 * console might take alot of time:
6623 */
6624 touch_nmi_watchdog();
39bc89fd 6625 if (!state_filter || (p->state & state_filter))
82a1fcb9 6626 sched_show_task(p);
1da177e4
LT
6627 } while_each_thread(g, p);
6628
04c9167f
JF
6629 touch_all_softlockup_watchdogs();
6630
dd41f596
IM
6631#ifdef CONFIG_SCHED_DEBUG
6632 sysrq_sched_debug_show();
6633#endif
1da177e4 6634 read_unlock(&tasklist_lock);
e59e2ae2
IM
6635 /*
6636 * Only show locks if all tasks are dumped:
6637 */
6638 if (state_filter == -1)
6639 debug_show_all_locks();
1da177e4
LT
6640}
6641
1df21055
IM
6642void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6643{
dd41f596 6644 idle->sched_class = &idle_sched_class;
1df21055
IM
6645}
6646
f340c0d1
IM
6647/**
6648 * init_idle - set up an idle thread for a given CPU
6649 * @idle: task in question
6650 * @cpu: cpu the idle task belongs to
6651 *
6652 * NOTE: this function does not set the idle thread's NEED_RESCHED
6653 * flag, to make booting more robust.
6654 */
5c1e1767 6655void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 6656{
70b97a7f 6657 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
6658 unsigned long flags;
6659
5cbd54ef
IM
6660 spin_lock_irqsave(&rq->lock, flags);
6661
dd41f596
IM
6662 __sched_fork(idle);
6663 idle->se.exec_start = sched_clock();
6664
b29739f9 6665 idle->prio = idle->normal_prio = MAX_PRIO;
96f874e2 6666 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
dd41f596 6667 __set_task_cpu(idle, cpu);
1da177e4 6668
1da177e4 6669 rq->curr = rq->idle = idle;
4866cde0
NP
6670#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6671 idle->oncpu = 1;
6672#endif
1da177e4
LT
6673 spin_unlock_irqrestore(&rq->lock, flags);
6674
6675 /* Set the preempt count _outside_ the spinlocks! */
8e3e076c
LT
6676#if defined(CONFIG_PREEMPT)
6677 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
6678#else
a1261f54 6679 task_thread_info(idle)->preempt_count = 0;
8e3e076c 6680#endif
dd41f596
IM
6681 /*
6682 * The idle tasks have their own, simple scheduling class:
6683 */
6684 idle->sched_class = &idle_sched_class;
fb52607a 6685 ftrace_graph_init_task(idle);
1da177e4
LT
6686}
6687
6688/*
6689 * In a system that switches off the HZ timer nohz_cpu_mask
6690 * indicates which cpus entered this state. This is used
6691 * in the rcu update to wait only for active cpus. For system
6692 * which do not switch off the HZ timer nohz_cpu_mask should
6a7b3dc3 6693 * always be CPU_BITS_NONE.
1da177e4 6694 */
6a7b3dc3 6695cpumask_var_t nohz_cpu_mask;
1da177e4 6696
19978ca6
IM
6697/*
6698 * Increase the granularity value when there are more CPUs,
6699 * because with more CPUs the 'effective latency' as visible
6700 * to users decreases. But the relationship is not linear,
6701 * so pick a second-best guess by going with the log2 of the
6702 * number of CPUs.
6703 *
6704 * This idea comes from the SD scheduler of Con Kolivas:
6705 */
6706static inline void sched_init_granularity(void)
6707{
6708 unsigned int factor = 1 + ilog2(num_online_cpus());
6709 const unsigned long limit = 200000000;
6710
6711 sysctl_sched_min_granularity *= factor;
6712 if (sysctl_sched_min_granularity > limit)
6713 sysctl_sched_min_granularity = limit;
6714
6715 sysctl_sched_latency *= factor;
6716 if (sysctl_sched_latency > limit)
6717 sysctl_sched_latency = limit;
6718
6719 sysctl_sched_wakeup_granularity *= factor;
55cd5340
PZ
6720
6721 sysctl_sched_shares_ratelimit *= factor;
19978ca6
IM
6722}
6723
1da177e4
LT
6724#ifdef CONFIG_SMP
6725/*
6726 * This is how migration works:
6727 *
70b97a7f 6728 * 1) we queue a struct migration_req structure in the source CPU's
1da177e4
LT
6729 * runqueue and wake up that CPU's migration thread.
6730 * 2) we down() the locked semaphore => thread blocks.
6731 * 3) migration thread wakes up (implicitly it forces the migrated
6732 * thread off the CPU)
6733 * 4) it gets the migration request and checks whether the migrated
6734 * task is still in the wrong runqueue.
6735 * 5) if it's in the wrong runqueue then the migration thread removes
6736 * it and puts it into the right queue.
6737 * 6) migration thread up()s the semaphore.
6738 * 7) we wake up and the migration is done.
6739 */
6740
6741/*
6742 * Change a given task's CPU affinity. Migrate the thread to a
6743 * proper CPU and schedule it away if the CPU it's executing on
6744 * is removed from the allowed bitmask.
6745 *
6746 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 6747 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
6748 * call is not atomic; no spinlocks may be held.
6749 */
96f874e2 6750int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4 6751{
70b97a7f 6752 struct migration_req req;
1da177e4 6753 unsigned long flags;
70b97a7f 6754 struct rq *rq;
48f24c4d 6755 int ret = 0;
1da177e4
LT
6756
6757 rq = task_rq_lock(p, &flags);
96f874e2 6758 if (!cpumask_intersects(new_mask, cpu_online_mask)) {
1da177e4
LT
6759 ret = -EINVAL;
6760 goto out;
6761 }
6762
9985b0ba 6763 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
96f874e2 6764 !cpumask_equal(&p->cpus_allowed, new_mask))) {
9985b0ba
DR
6765 ret = -EINVAL;
6766 goto out;
6767 }
6768
73fe6aae 6769 if (p->sched_class->set_cpus_allowed)
cd8ba7cd 6770 p->sched_class->set_cpus_allowed(p, new_mask);
73fe6aae 6771 else {
96f874e2
RR
6772 cpumask_copy(&p->cpus_allowed, new_mask);
6773 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
73fe6aae
GH
6774 }
6775
1da177e4 6776 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 6777 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
6778 goto out;
6779
1e5ce4f4 6780 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
1da177e4
LT
6781 /* Need help from migration thread: drop lock and wait. */
6782 task_rq_unlock(rq, &flags);
6783 wake_up_process(rq->migration_thread);
6784 wait_for_completion(&req.done);
6785 tlb_migrate_finish(p->mm);
6786 return 0;
6787 }
6788out:
6789 task_rq_unlock(rq, &flags);
48f24c4d 6790
1da177e4
LT
6791 return ret;
6792}
cd8ba7cd 6793EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1da177e4
LT
6794
6795/*
41a2d6cf 6796 * Move (not current) task off this cpu, onto dest cpu. We're doing
1da177e4
LT
6797 * this because either it can't run here any more (set_cpus_allowed()
6798 * away from this CPU, or CPU going down), or because we're
6799 * attempting to rebalance this task on exec (sched_exec).
6800 *
6801 * So we race with normal scheduler movements, but that's OK, as long
6802 * as the task is no longer on this CPU.
efc30814
KK
6803 *
6804 * Returns non-zero if task was successfully migrated.
1da177e4 6805 */
efc30814 6806static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 6807{
70b97a7f 6808 struct rq *rq_dest, *rq_src;
dd41f596 6809 int ret = 0, on_rq;
1da177e4 6810
e761b772 6811 if (unlikely(!cpu_active(dest_cpu)))
efc30814 6812 return ret;
1da177e4
LT
6813
6814 rq_src = cpu_rq(src_cpu);
6815 rq_dest = cpu_rq(dest_cpu);
6816
6817 double_rq_lock(rq_src, rq_dest);
6818 /* Already moved. */
6819 if (task_cpu(p) != src_cpu)
b1e38734 6820 goto done;
1da177e4 6821 /* Affinity changed (again). */
96f874e2 6822 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
b1e38734 6823 goto fail;
1da177e4 6824
dd41f596 6825 on_rq = p->se.on_rq;
6e82a3be 6826 if (on_rq)
2e1cb74a 6827 deactivate_task(rq_src, p, 0);
6e82a3be 6828
1da177e4 6829 set_task_cpu(p, dest_cpu);
dd41f596
IM
6830 if (on_rq) {
6831 activate_task(rq_dest, p, 0);
15afe09b 6832 check_preempt_curr(rq_dest, p, 0);
1da177e4 6833 }
b1e38734 6834done:
efc30814 6835 ret = 1;
b1e38734 6836fail:
1da177e4 6837 double_rq_unlock(rq_src, rq_dest);
efc30814 6838 return ret;
1da177e4
LT
6839}
6840
6841/*
6842 * migration_thread - this is a highprio system thread that performs
6843 * thread migration by bumping thread off CPU then 'pushing' onto
6844 * another runqueue.
6845 */
95cdf3b7 6846static int migration_thread(void *data)
1da177e4 6847{
1da177e4 6848 int cpu = (long)data;
70b97a7f 6849 struct rq *rq;
1da177e4
LT
6850
6851 rq = cpu_rq(cpu);
6852 BUG_ON(rq->migration_thread != current);
6853
6854 set_current_state(TASK_INTERRUPTIBLE);
6855 while (!kthread_should_stop()) {
70b97a7f 6856 struct migration_req *req;
1da177e4 6857 struct list_head *head;
1da177e4 6858
1da177e4
LT
6859 spin_lock_irq(&rq->lock);
6860
6861 if (cpu_is_offline(cpu)) {
6862 spin_unlock_irq(&rq->lock);
6863 goto wait_to_die;
6864 }
6865
6866 if (rq->active_balance) {
6867 active_load_balance(rq, cpu);
6868 rq->active_balance = 0;
6869 }
6870
6871 head = &rq->migration_queue;
6872
6873 if (list_empty(head)) {
6874 spin_unlock_irq(&rq->lock);
6875 schedule();
6876 set_current_state(TASK_INTERRUPTIBLE);
6877 continue;
6878 }
70b97a7f 6879 req = list_entry(head->next, struct migration_req, list);
1da177e4
LT
6880 list_del_init(head->next);
6881
674311d5
NP
6882 spin_unlock(&rq->lock);
6883 __migrate_task(req->task, cpu, req->dest_cpu);
6884 local_irq_enable();
1da177e4
LT
6885
6886 complete(&req->done);
6887 }
6888 __set_current_state(TASK_RUNNING);
6889 return 0;
6890
6891wait_to_die:
6892 /* Wait for kthread_stop */
6893 set_current_state(TASK_INTERRUPTIBLE);
6894 while (!kthread_should_stop()) {
6895 schedule();
6896 set_current_state(TASK_INTERRUPTIBLE);
6897 }
6898 __set_current_state(TASK_RUNNING);
6899 return 0;
6900}
6901
6902#ifdef CONFIG_HOTPLUG_CPU
f7b4cddc
ON
6903
6904static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
6905{
6906 int ret;
6907
6908 local_irq_disable();
6909 ret = __migrate_task(p, src_cpu, dest_cpu);
6910 local_irq_enable();
6911 return ret;
6912}
6913
054b9108 6914/*
3a4fa0a2 6915 * Figure out where task on dead CPU should go, use force if necessary.
054b9108 6916 */
48f24c4d 6917static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
1da177e4 6918{
70b97a7f 6919 int dest_cpu;
6ca09dfc 6920 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
e76bd8d9
RR
6921
6922again:
6923 /* Look for allowed, online CPU in same node. */
6924 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
6925 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6926 goto move;
6927
6928 /* Any allowed, online CPU? */
6929 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
6930 if (dest_cpu < nr_cpu_ids)
6931 goto move;
6932
6933 /* No more Mr. Nice Guy. */
6934 if (dest_cpu >= nr_cpu_ids) {
e76bd8d9
RR
6935 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
6936 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
1da177e4 6937
e76bd8d9
RR
6938 /*
6939 * Don't tell them about moving exiting tasks or
6940 * kernel threads (both mm NULL), since they never
6941 * leave kernel.
6942 */
6943 if (p->mm && printk_ratelimit()) {
6944 printk(KERN_INFO "process %d (%s) no "
6945 "longer affine to cpu%d\n",
6946 task_pid_nr(p), p->comm, dead_cpu);
3a5c359a 6947 }
e76bd8d9
RR
6948 }
6949
6950move:
6951 /* It can have affinity changed while we were choosing. */
6952 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
6953 goto again;
1da177e4
LT
6954}
6955
6956/*
6957 * While a dead CPU has no uninterruptible tasks queued at this point,
6958 * it might still have a nonzero ->nr_uninterruptible counter, because
6959 * for performance reasons the counter is not stricly tracking tasks to
6960 * their home CPUs. So we just add the counter to another CPU's counter,
6961 * to keep the global sum constant after CPU-down:
6962 */
70b97a7f 6963static void migrate_nr_uninterruptible(struct rq *rq_src)
1da177e4 6964{
1e5ce4f4 6965 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
1da177e4
LT
6966 unsigned long flags;
6967
6968 local_irq_save(flags);
6969 double_rq_lock(rq_src, rq_dest);
6970 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6971 rq_src->nr_uninterruptible = 0;
6972 double_rq_unlock(rq_src, rq_dest);
6973 local_irq_restore(flags);
6974}
6975
6976/* Run through task list and migrate tasks from the dead cpu. */
6977static void migrate_live_tasks(int src_cpu)
6978{
48f24c4d 6979 struct task_struct *p, *t;
1da177e4 6980
f7b4cddc 6981 read_lock(&tasklist_lock);
1da177e4 6982
48f24c4d
IM
6983 do_each_thread(t, p) {
6984 if (p == current)
1da177e4
LT
6985 continue;
6986
48f24c4d
IM
6987 if (task_cpu(p) == src_cpu)
6988 move_task_off_dead_cpu(src_cpu, p);
6989 } while_each_thread(t, p);
1da177e4 6990
f7b4cddc 6991 read_unlock(&tasklist_lock);
1da177e4
LT
6992}
6993
dd41f596
IM
6994/*
6995 * Schedules idle task to be the next runnable task on current CPU.
94bc9a7b
DA
6996 * It does so by boosting its priority to highest possible.
6997 * Used by CPU offline code.
1da177e4
LT
6998 */
6999void sched_idle_next(void)
7000{
48f24c4d 7001 int this_cpu = smp_processor_id();
70b97a7f 7002 struct rq *rq = cpu_rq(this_cpu);
1da177e4
LT
7003 struct task_struct *p = rq->idle;
7004 unsigned long flags;
7005
7006 /* cpu has to be offline */
48f24c4d 7007 BUG_ON(cpu_online(this_cpu));
1da177e4 7008
48f24c4d
IM
7009 /*
7010 * Strictly not necessary since rest of the CPUs are stopped by now
7011 * and interrupts disabled on the current cpu.
1da177e4
LT
7012 */
7013 spin_lock_irqsave(&rq->lock, flags);
7014
dd41f596 7015 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
48f24c4d 7016
94bc9a7b
DA
7017 update_rq_clock(rq);
7018 activate_task(rq, p, 0);
1da177e4
LT
7019
7020 spin_unlock_irqrestore(&rq->lock, flags);
7021}
7022
48f24c4d
IM
7023/*
7024 * Ensures that the idle task is using init_mm right before its cpu goes
1da177e4
LT
7025 * offline.
7026 */
7027void idle_task_exit(void)
7028{
7029 struct mm_struct *mm = current->active_mm;
7030
7031 BUG_ON(cpu_online(smp_processor_id()));
7032
7033 if (mm != &init_mm)
7034 switch_mm(mm, &init_mm, current);
7035 mmdrop(mm);
7036}
7037
054b9108 7038/* called under rq->lock with disabled interrupts */
36c8b586 7039static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
1da177e4 7040{
70b97a7f 7041 struct rq *rq = cpu_rq(dead_cpu);
1da177e4
LT
7042
7043 /* Must be exiting, otherwise would be on tasklist. */
270f722d 7044 BUG_ON(!p->exit_state);
1da177e4
LT
7045
7046 /* Cannot have done final schedule yet: would have vanished. */
c394cc9f 7047 BUG_ON(p->state == TASK_DEAD);
1da177e4 7048
48f24c4d 7049 get_task_struct(p);
1da177e4
LT
7050
7051 /*
7052 * Drop lock around migration; if someone else moves it,
41a2d6cf 7053 * that's OK. No task can be added to this CPU, so iteration is
1da177e4
LT
7054 * fine.
7055 */
f7b4cddc 7056 spin_unlock_irq(&rq->lock);
48f24c4d 7057 move_task_off_dead_cpu(dead_cpu, p);
f7b4cddc 7058 spin_lock_irq(&rq->lock);
1da177e4 7059
48f24c4d 7060 put_task_struct(p);
1da177e4
LT
7061}
7062
7063/* release_task() removes task from tasklist, so we won't find dead tasks. */
7064static void migrate_dead_tasks(unsigned int dead_cpu)
7065{
70b97a7f 7066 struct rq *rq = cpu_rq(dead_cpu);
dd41f596 7067 struct task_struct *next;
48f24c4d 7068
dd41f596
IM
7069 for ( ; ; ) {
7070 if (!rq->nr_running)
7071 break;
a8e504d2 7072 update_rq_clock(rq);
b67802ea 7073 next = pick_next_task(rq);
dd41f596
IM
7074 if (!next)
7075 break;
79c53799 7076 next->sched_class->put_prev_task(rq, next);
dd41f596 7077 migrate_dead(dead_cpu, next);
e692ab53 7078
1da177e4
LT
7079 }
7080}
7081#endif /* CONFIG_HOTPLUG_CPU */
7082
e692ab53
NP
7083#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
7084
7085static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
7086 {
7087 .procname = "sched_domain",
c57baf1e 7088 .mode = 0555,
e0361851 7089 },
38605cae 7090 {0, },
e692ab53
NP
7091};
7092
7093static struct ctl_table sd_ctl_root[] = {
e0361851 7094 {
c57baf1e 7095 .ctl_name = CTL_KERN,
e0361851 7096 .procname = "kernel",
c57baf1e 7097 .mode = 0555,
e0361851
AD
7098 .child = sd_ctl_dir,
7099 },
38605cae 7100 {0, },
e692ab53
NP
7101};
7102
7103static struct ctl_table *sd_alloc_ctl_entry(int n)
7104{
7105 struct ctl_table *entry =
5cf9f062 7106 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 7107
e692ab53
NP
7108 return entry;
7109}
7110
6382bc90
MM
7111static void sd_free_ctl_entry(struct ctl_table **tablep)
7112{
cd790076 7113 struct ctl_table *entry;
6382bc90 7114
cd790076
MM
7115 /*
7116 * In the intermediate directories, both the child directory and
7117 * procname are dynamically allocated and could fail but the mode
41a2d6cf 7118 * will always be set. In the lowest directory the names are
cd790076
MM
7119 * static strings and all have proc handlers.
7120 */
7121 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
7122 if (entry->child)
7123 sd_free_ctl_entry(&entry->child);
cd790076
MM
7124 if (entry->proc_handler == NULL)
7125 kfree(entry->procname);
7126 }
6382bc90
MM
7127
7128 kfree(*tablep);
7129 *tablep = NULL;
7130}
7131
e692ab53 7132static void
e0361851 7133set_table_entry(struct ctl_table *entry,
e692ab53
NP
7134 const char *procname, void *data, int maxlen,
7135 mode_t mode, proc_handler *proc_handler)
7136{
e692ab53
NP
7137 entry->procname = procname;
7138 entry->data = data;
7139 entry->maxlen = maxlen;
7140 entry->mode = mode;
7141 entry->proc_handler = proc_handler;
7142}
7143
7144static struct ctl_table *
7145sd_alloc_ctl_domain_table(struct sched_domain *sd)
7146{
a5d8c348 7147 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 7148
ad1cdc1d
MM
7149 if (table == NULL)
7150 return NULL;
7151
e0361851 7152 set_table_entry(&table[0], "min_interval", &sd->min_interval,
e692ab53 7153 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 7154 set_table_entry(&table[1], "max_interval", &sd->max_interval,
e692ab53 7155 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 7156 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
e692ab53 7157 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7158 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
e692ab53 7159 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7160 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
e692ab53 7161 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7162 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
e692ab53 7163 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7164 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
e692ab53 7165 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7166 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
e692ab53 7167 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 7168 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
e692ab53 7169 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 7170 set_table_entry(&table[9], "cache_nice_tries",
e692ab53
NP
7171 &sd->cache_nice_tries,
7172 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 7173 set_table_entry(&table[10], "flags", &sd->flags,
e692ab53 7174 sizeof(int), 0644, proc_dointvec_minmax);
a5d8c348
IM
7175 set_table_entry(&table[11], "name", sd->name,
7176 CORENAME_MAX_SIZE, 0444, proc_dostring);
7177 /* &table[12] is terminator */
e692ab53
NP
7178
7179 return table;
7180}
7181
9a4e7159 7182static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
7183{
7184 struct ctl_table *entry, *table;
7185 struct sched_domain *sd;
7186 int domain_num = 0, i;
7187 char buf[32];
7188
7189 for_each_domain(cpu, sd)
7190 domain_num++;
7191 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
7192 if (table == NULL)
7193 return NULL;
e692ab53
NP
7194
7195 i = 0;
7196 for_each_domain(cpu, sd) {
7197 snprintf(buf, 32, "domain%d", i);
e692ab53 7198 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 7199 entry->mode = 0555;
e692ab53
NP
7200 entry->child = sd_alloc_ctl_domain_table(sd);
7201 entry++;
7202 i++;
7203 }
7204 return table;
7205}
7206
7207static struct ctl_table_header *sd_sysctl_header;
6382bc90 7208static void register_sched_domain_sysctl(void)
e692ab53
NP
7209{
7210 int i, cpu_num = num_online_cpus();
7211 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
7212 char buf[32];
7213
7378547f
MM
7214 WARN_ON(sd_ctl_dir[0].child);
7215 sd_ctl_dir[0].child = entry;
7216
ad1cdc1d
MM
7217 if (entry == NULL)
7218 return;
7219
97b6ea7b 7220 for_each_online_cpu(i) {
e692ab53 7221 snprintf(buf, 32, "cpu%d", i);
e692ab53 7222 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 7223 entry->mode = 0555;
e692ab53 7224 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 7225 entry++;
e692ab53 7226 }
7378547f
MM
7227
7228 WARN_ON(sd_sysctl_header);
e692ab53
NP
7229 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
7230}
6382bc90 7231
7378547f 7232/* may be called multiple times per register */
6382bc90
MM
7233static void unregister_sched_domain_sysctl(void)
7234{
7378547f
MM
7235 if (sd_sysctl_header)
7236 unregister_sysctl_table(sd_sysctl_header);
6382bc90 7237 sd_sysctl_header = NULL;
7378547f
MM
7238 if (sd_ctl_dir[0].child)
7239 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 7240}
e692ab53 7241#else
6382bc90
MM
7242static void register_sched_domain_sysctl(void)
7243{
7244}
7245static void unregister_sched_domain_sysctl(void)
e692ab53
NP
7246{
7247}
7248#endif
7249
1f11eb6a
GH
7250static void set_rq_online(struct rq *rq)
7251{
7252 if (!rq->online) {
7253 const struct sched_class *class;
7254
c6c4927b 7255 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
7256 rq->online = 1;
7257
7258 for_each_class(class) {
7259 if (class->rq_online)
7260 class->rq_online(rq);
7261 }
7262 }
7263}
7264
7265static void set_rq_offline(struct rq *rq)
7266{
7267 if (rq->online) {
7268 const struct sched_class *class;
7269
7270 for_each_class(class) {
7271 if (class->rq_offline)
7272 class->rq_offline(rq);
7273 }
7274
c6c4927b 7275 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
7276 rq->online = 0;
7277 }
7278}
7279
1da177e4
LT
7280/*
7281 * migration_call - callback that gets triggered when a CPU is added.
7282 * Here we can start up the necessary migration thread for the new CPU.
7283 */
48f24c4d
IM
7284static int __cpuinit
7285migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 7286{
1da177e4 7287 struct task_struct *p;
48f24c4d 7288 int cpu = (long)hcpu;
1da177e4 7289 unsigned long flags;
70b97a7f 7290 struct rq *rq;
1da177e4
LT
7291
7292 switch (action) {
5be9361c 7293
1da177e4 7294 case CPU_UP_PREPARE:
8bb78442 7295 case CPU_UP_PREPARE_FROZEN:
dd41f596 7296 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
1da177e4
LT
7297 if (IS_ERR(p))
7298 return NOTIFY_BAD;
1da177e4
LT
7299 kthread_bind(p, cpu);
7300 /* Must be high prio: stop_machine expects to yield to it. */
7301 rq = task_rq_lock(p, &flags);
dd41f596 7302 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
1da177e4
LT
7303 task_rq_unlock(rq, &flags);
7304 cpu_rq(cpu)->migration_thread = p;
7305 break;
48f24c4d 7306
1da177e4 7307 case CPU_ONLINE:
8bb78442 7308 case CPU_ONLINE_FROZEN:
3a4fa0a2 7309 /* Strictly unnecessary, as first user will wake it. */
1da177e4 7310 wake_up_process(cpu_rq(cpu)->migration_thread);
1f94ef59
GH
7311
7312 /* Update our root-domain */
7313 rq = cpu_rq(cpu);
7314 spin_lock_irqsave(&rq->lock, flags);
7315 if (rq->rd) {
c6c4927b 7316 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
7317
7318 set_rq_online(rq);
1f94ef59
GH
7319 }
7320 spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 7321 break;
48f24c4d 7322
1da177e4
LT
7323#ifdef CONFIG_HOTPLUG_CPU
7324 case CPU_UP_CANCELED:
8bb78442 7325 case CPU_UP_CANCELED_FROZEN:
fc75cdfa
HC
7326 if (!cpu_rq(cpu)->migration_thread)
7327 break;
41a2d6cf 7328 /* Unbind it from offline cpu so it can run. Fall thru. */
a4c4af7c 7329 kthread_bind(cpu_rq(cpu)->migration_thread,
1e5ce4f4 7330 cpumask_any(cpu_online_mask));
1da177e4
LT
7331 kthread_stop(cpu_rq(cpu)->migration_thread);
7332 cpu_rq(cpu)->migration_thread = NULL;
7333 break;
48f24c4d 7334
1da177e4 7335 case CPU_DEAD:
8bb78442 7336 case CPU_DEAD_FROZEN:
470fd646 7337 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
1da177e4
LT
7338 migrate_live_tasks(cpu);
7339 rq = cpu_rq(cpu);
7340 kthread_stop(rq->migration_thread);
7341 rq->migration_thread = NULL;
7342 /* Idle task back to normal (off runqueue, low prio) */
d2da272a 7343 spin_lock_irq(&rq->lock);
a8e504d2 7344 update_rq_clock(rq);
2e1cb74a 7345 deactivate_task(rq, rq->idle, 0);
1da177e4 7346 rq->idle->static_prio = MAX_PRIO;
dd41f596
IM
7347 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7348 rq->idle->sched_class = &idle_sched_class;
1da177e4 7349 migrate_dead_tasks(cpu);
d2da272a 7350 spin_unlock_irq(&rq->lock);
470fd646 7351 cpuset_unlock();
1da177e4
LT
7352 migrate_nr_uninterruptible(rq);
7353 BUG_ON(rq->nr_running != 0);
7354
41a2d6cf
IM
7355 /*
7356 * No need to migrate the tasks: it was best-effort if
7357 * they didn't take sched_hotcpu_mutex. Just wake up
7358 * the requestors.
7359 */
1da177e4
LT
7360 spin_lock_irq(&rq->lock);
7361 while (!list_empty(&rq->migration_queue)) {
70b97a7f
IM
7362 struct migration_req *req;
7363
1da177e4 7364 req = list_entry(rq->migration_queue.next,
70b97a7f 7365 struct migration_req, list);
1da177e4 7366 list_del_init(&req->list);
9a2bd244 7367 spin_unlock_irq(&rq->lock);
1da177e4 7368 complete(&req->done);
9a2bd244 7369 spin_lock_irq(&rq->lock);
1da177e4
LT
7370 }
7371 spin_unlock_irq(&rq->lock);
7372 break;
57d885fe 7373
08f503b0
GH
7374 case CPU_DYING:
7375 case CPU_DYING_FROZEN:
57d885fe
GH
7376 /* Update our root-domain */
7377 rq = cpu_rq(cpu);
7378 spin_lock_irqsave(&rq->lock, flags);
7379 if (rq->rd) {
c6c4927b 7380 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 7381 set_rq_offline(rq);
57d885fe
GH
7382 }
7383 spin_unlock_irqrestore(&rq->lock, flags);
7384 break;
1da177e4
LT
7385#endif
7386 }
7387 return NOTIFY_OK;
7388}
7389
7390/* Register at highest priority so that task migration (migrate_all_tasks)
7391 * happens before everything else.
7392 */
26c2143b 7393static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4
LT
7394 .notifier_call = migration_call,
7395 .priority = 10
7396};
7397
7babe8db 7398static int __init migration_init(void)
1da177e4
LT
7399{
7400 void *cpu = (void *)(long)smp_processor_id();
07dccf33 7401 int err;
48f24c4d
IM
7402
7403 /* Start one for the boot CPU: */
07dccf33
AM
7404 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
7405 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
7406 migration_call(&migration_notifier, CPU_ONLINE, cpu);
7407 register_cpu_notifier(&migration_notifier);
7babe8db
EGM
7408
7409 return err;
1da177e4 7410}
7babe8db 7411early_initcall(migration_init);
1da177e4
LT
7412#endif
7413
7414#ifdef CONFIG_SMP
476f3534 7415
3e9830dc 7416#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 7417
7c16ec58 7418static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 7419 struct cpumask *groupmask)
1da177e4 7420{
4dcf6aff 7421 struct sched_group *group = sd->groups;
434d53b0 7422 char str[256];
1da177e4 7423
968ea6d8 7424 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 7425 cpumask_clear(groupmask);
4dcf6aff
IM
7426
7427 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
7428
7429 if (!(sd->flags & SD_LOAD_BALANCE)) {
7430 printk("does not load-balance\n");
7431 if (sd->parent)
7432 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
7433 " has parent");
7434 return -1;
41c7ce9a
NP
7435 }
7436
eefd796a 7437 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 7438
758b2cdc 7439 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4dcf6aff
IM
7440 printk(KERN_ERR "ERROR: domain->span does not contain "
7441 "CPU%d\n", cpu);
7442 }
758b2cdc 7443 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
4dcf6aff
IM
7444 printk(KERN_ERR "ERROR: domain->groups does not contain"
7445 " CPU%d\n", cpu);
7446 }
1da177e4 7447
4dcf6aff 7448 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 7449 do {
4dcf6aff
IM
7450 if (!group) {
7451 printk("\n");
7452 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
7453 break;
7454 }
7455
4dcf6aff
IM
7456 if (!group->__cpu_power) {
7457 printk(KERN_CONT "\n");
7458 printk(KERN_ERR "ERROR: domain->cpu_power not "
7459 "set\n");
7460 break;
7461 }
1da177e4 7462
758b2cdc 7463 if (!cpumask_weight(sched_group_cpus(group))) {
4dcf6aff
IM
7464 printk(KERN_CONT "\n");
7465 printk(KERN_ERR "ERROR: empty group\n");
7466 break;
7467 }
1da177e4 7468
758b2cdc 7469 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
4dcf6aff
IM
7470 printk(KERN_CONT "\n");
7471 printk(KERN_ERR "ERROR: repeated CPUs\n");
7472 break;
7473 }
1da177e4 7474
758b2cdc 7475 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 7476
968ea6d8 7477 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
46e0bb9c
GS
7478 printk(KERN_CONT " %s (__cpu_power = %d)", str,
7479 group->__cpu_power);
1da177e4 7480
4dcf6aff
IM
7481 group = group->next;
7482 } while (group != sd->groups);
7483 printk(KERN_CONT "\n");
1da177e4 7484
758b2cdc 7485 if (!cpumask_equal(sched_domain_span(sd), groupmask))
4dcf6aff 7486 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 7487
758b2cdc
RR
7488 if (sd->parent &&
7489 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
4dcf6aff
IM
7490 printk(KERN_ERR "ERROR: parent span is not a superset "
7491 "of domain->span\n");
7492 return 0;
7493}
1da177e4 7494
4dcf6aff
IM
7495static void sched_domain_debug(struct sched_domain *sd, int cpu)
7496{
d5dd3db1 7497 cpumask_var_t groupmask;
4dcf6aff 7498 int level = 0;
1da177e4 7499
4dcf6aff
IM
7500 if (!sd) {
7501 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
7502 return;
7503 }
1da177e4 7504
4dcf6aff
IM
7505 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
7506
d5dd3db1 7507 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
7c16ec58
MT
7508 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
7509 return;
7510 }
7511
4dcf6aff 7512 for (;;) {
7c16ec58 7513 if (sched_domain_debug_one(sd, cpu, level, groupmask))
4dcf6aff 7514 break;
1da177e4
LT
7515 level++;
7516 sd = sd->parent;
33859f7f 7517 if (!sd)
4dcf6aff
IM
7518 break;
7519 }
d5dd3db1 7520 free_cpumask_var(groupmask);
1da177e4 7521}
6d6bc0ad 7522#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 7523# define sched_domain_debug(sd, cpu) do { } while (0)
6d6bc0ad 7524#endif /* CONFIG_SCHED_DEBUG */
1da177e4 7525
1a20ff27 7526static int sd_degenerate(struct sched_domain *sd)
245af2c7 7527{
758b2cdc 7528 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
7529 return 1;
7530
7531 /* Following flags need at least 2 groups */
7532 if (sd->flags & (SD_LOAD_BALANCE |
7533 SD_BALANCE_NEWIDLE |
7534 SD_BALANCE_FORK |
89c4710e
SS
7535 SD_BALANCE_EXEC |
7536 SD_SHARE_CPUPOWER |
7537 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
7538 if (sd->groups != sd->groups->next)
7539 return 0;
7540 }
7541
7542 /* Following flags don't use groups */
7543 if (sd->flags & (SD_WAKE_IDLE |
7544 SD_WAKE_AFFINE |
7545 SD_WAKE_BALANCE))
7546 return 0;
7547
7548 return 1;
7549}
7550
48f24c4d
IM
7551static int
7552sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
7553{
7554 unsigned long cflags = sd->flags, pflags = parent->flags;
7555
7556 if (sd_degenerate(parent))
7557 return 1;
7558
758b2cdc 7559 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
7560 return 0;
7561
7562 /* Does parent contain flags not in child? */
7563 /* WAKE_BALANCE is a subset of WAKE_AFFINE */
7564 if (cflags & SD_WAKE_AFFINE)
7565 pflags &= ~SD_WAKE_BALANCE;
7566 /* Flags needing groups don't count if only 1 group in parent */
7567 if (parent->groups == parent->groups->next) {
7568 pflags &= ~(SD_LOAD_BALANCE |
7569 SD_BALANCE_NEWIDLE |
7570 SD_BALANCE_FORK |
89c4710e
SS
7571 SD_BALANCE_EXEC |
7572 SD_SHARE_CPUPOWER |
7573 SD_SHARE_PKG_RESOURCES);
5436499e
KC
7574 if (nr_node_ids == 1)
7575 pflags &= ~SD_SERIALIZE;
245af2c7
SS
7576 }
7577 if (~cflags & pflags)
7578 return 0;
7579
7580 return 1;
7581}
7582
c6c4927b
RR
7583static void free_rootdomain(struct root_domain *rd)
7584{
68e74568
RR
7585 cpupri_cleanup(&rd->cpupri);
7586
c6c4927b
RR
7587 free_cpumask_var(rd->rto_mask);
7588 free_cpumask_var(rd->online);
7589 free_cpumask_var(rd->span);
7590 kfree(rd);
7591}
7592
57d885fe
GH
7593static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7594{
a0490fa3 7595 struct root_domain *old_rd = NULL;
57d885fe 7596 unsigned long flags;
57d885fe
GH
7597
7598 spin_lock_irqsave(&rq->lock, flags);
7599
7600 if (rq->rd) {
a0490fa3 7601 old_rd = rq->rd;
57d885fe 7602
c6c4927b 7603 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 7604 set_rq_offline(rq);
57d885fe 7605
c6c4927b 7606 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 7607
a0490fa3
IM
7608 /*
7609 * If we dont want to free the old_rt yet then
7610 * set old_rd to NULL to skip the freeing later
7611 * in this function:
7612 */
7613 if (!atomic_dec_and_test(&old_rd->refcount))
7614 old_rd = NULL;
57d885fe
GH
7615 }
7616
7617 atomic_inc(&rd->refcount);
7618 rq->rd = rd;
7619
c6c4927b
RR
7620 cpumask_set_cpu(rq->cpu, rd->span);
7621 if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
1f11eb6a 7622 set_rq_online(rq);
57d885fe
GH
7623
7624 spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
7625
7626 if (old_rd)
7627 free_rootdomain(old_rd);
57d885fe
GH
7628}
7629
db2f59c8 7630static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
57d885fe
GH
7631{
7632 memset(rd, 0, sizeof(*rd));
7633
c6c4927b
RR
7634 if (bootmem) {
7635 alloc_bootmem_cpumask_var(&def_root_domain.span);
7636 alloc_bootmem_cpumask_var(&def_root_domain.online);
7637 alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
68e74568 7638 cpupri_init(&rd->cpupri, true);
c6c4927b
RR
7639 return 0;
7640 }
7641
7642 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 7643 goto out;
c6c4927b
RR
7644 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
7645 goto free_span;
7646 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
7647 goto free_online;
6e0534f2 7648
68e74568
RR
7649 if (cpupri_init(&rd->cpupri, false) != 0)
7650 goto free_rto_mask;
c6c4927b 7651 return 0;
6e0534f2 7652
68e74568
RR
7653free_rto_mask:
7654 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
7655free_online:
7656 free_cpumask_var(rd->online);
7657free_span:
7658 free_cpumask_var(rd->span);
0c910d28 7659out:
c6c4927b 7660 return -ENOMEM;
57d885fe
GH
7661}
7662
7663static void init_defrootdomain(void)
7664{
c6c4927b
RR
7665 init_rootdomain(&def_root_domain, true);
7666
57d885fe
GH
7667 atomic_set(&def_root_domain.refcount, 1);
7668}
7669
dc938520 7670static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
7671{
7672 struct root_domain *rd;
7673
7674 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
7675 if (!rd)
7676 return NULL;
7677
c6c4927b
RR
7678 if (init_rootdomain(rd, false) != 0) {
7679 kfree(rd);
7680 return NULL;
7681 }
57d885fe
GH
7682
7683 return rd;
7684}
7685
1da177e4 7686/*
0eab9146 7687 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
7688 * hold the hotplug lock.
7689 */
0eab9146
IM
7690static void
7691cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 7692{
70b97a7f 7693 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
7694 struct sched_domain *tmp;
7695
7696 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 7697 for (tmp = sd; tmp; ) {
245af2c7
SS
7698 struct sched_domain *parent = tmp->parent;
7699 if (!parent)
7700 break;
f29c9b1c 7701
1a848870 7702 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 7703 tmp->parent = parent->parent;
1a848870
SS
7704 if (parent->parent)
7705 parent->parent->child = tmp;
f29c9b1c
LZ
7706 } else
7707 tmp = tmp->parent;
245af2c7
SS
7708 }
7709
1a848870 7710 if (sd && sd_degenerate(sd)) {
245af2c7 7711 sd = sd->parent;
1a848870
SS
7712 if (sd)
7713 sd->child = NULL;
7714 }
1da177e4
LT
7715
7716 sched_domain_debug(sd, cpu);
7717
57d885fe 7718 rq_attach_root(rq, rd);
674311d5 7719 rcu_assign_pointer(rq->sd, sd);
1da177e4
LT
7720}
7721
7722/* cpus with isolated domains */
dcc30a35 7723static cpumask_var_t cpu_isolated_map;
1da177e4
LT
7724
7725/* Setup the mask of cpus configured for isolated domains */
7726static int __init isolated_cpu_setup(char *str)
7727{
968ea6d8 7728 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
7729 return 1;
7730}
7731
8927f494 7732__setup("isolcpus=", isolated_cpu_setup);
1da177e4
LT
7733
7734/*
6711cab4
SS
7735 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
7736 * to a function which identifies what group(along with sched group) a CPU
96f874e2
RR
7737 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
7738 * (due to the fact that we keep track of groups covered with a struct cpumask).
1da177e4
LT
7739 *
7740 * init_sched_build_groups will build a circular linked list of the groups
7741 * covered by the given span, and will set each group's ->cpumask correctly,
7742 * and ->cpu_power to 0.
7743 */
a616058b 7744static void
96f874e2
RR
7745init_sched_build_groups(const struct cpumask *span,
7746 const struct cpumask *cpu_map,
7747 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
7c16ec58 7748 struct sched_group **sg,
96f874e2
RR
7749 struct cpumask *tmpmask),
7750 struct cpumask *covered, struct cpumask *tmpmask)
1da177e4
LT
7751{
7752 struct sched_group *first = NULL, *last = NULL;
1da177e4
LT
7753 int i;
7754
96f874e2 7755 cpumask_clear(covered);
7c16ec58 7756
abcd083a 7757 for_each_cpu(i, span) {
6711cab4 7758 struct sched_group *sg;
7c16ec58 7759 int group = group_fn(i, cpu_map, &sg, tmpmask);
1da177e4
LT
7760 int j;
7761
758b2cdc 7762 if (cpumask_test_cpu(i, covered))
1da177e4
LT
7763 continue;
7764
758b2cdc 7765 cpumask_clear(sched_group_cpus(sg));
5517d86b 7766 sg->__cpu_power = 0;
1da177e4 7767
abcd083a 7768 for_each_cpu(j, span) {
7c16ec58 7769 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
1da177e4
LT
7770 continue;
7771
96f874e2 7772 cpumask_set_cpu(j, covered);
758b2cdc 7773 cpumask_set_cpu(j, sched_group_cpus(sg));
1da177e4
LT
7774 }
7775 if (!first)
7776 first = sg;
7777 if (last)
7778 last->next = sg;
7779 last = sg;
7780 }
7781 last->next = first;
7782}
7783
9c1cfda2 7784#define SD_NODES_PER_DOMAIN 16
1da177e4 7785
9c1cfda2 7786#ifdef CONFIG_NUMA
198e2f18 7787
9c1cfda2
JH
7788/**
7789 * find_next_best_node - find the next node to include in a sched_domain
7790 * @node: node whose sched_domain we're building
7791 * @used_nodes: nodes already in the sched_domain
7792 *
41a2d6cf 7793 * Find the next node to include in a given scheduling domain. Simply
9c1cfda2
JH
7794 * finds the closest node not already in the @used_nodes map.
7795 *
7796 * Should use nodemask_t.
7797 */
c5f59f08 7798static int find_next_best_node(int node, nodemask_t *used_nodes)
9c1cfda2
JH
7799{
7800 int i, n, val, min_val, best_node = 0;
7801
7802 min_val = INT_MAX;
7803
076ac2af 7804 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2 7805 /* Start at @node */
076ac2af 7806 n = (node + i) % nr_node_ids;
9c1cfda2
JH
7807
7808 if (!nr_cpus_node(n))
7809 continue;
7810
7811 /* Skip already used nodes */
c5f59f08 7812 if (node_isset(n, *used_nodes))
9c1cfda2
JH
7813 continue;
7814
7815 /* Simple min distance search */
7816 val = node_distance(node, n);
7817
7818 if (val < min_val) {
7819 min_val = val;
7820 best_node = n;
7821 }
7822 }
7823
c5f59f08 7824 node_set(best_node, *used_nodes);
9c1cfda2
JH
7825 return best_node;
7826}
7827
7828/**
7829 * sched_domain_node_span - get a cpumask for a node's sched_domain
7830 * @node: node whose cpumask we're constructing
73486722 7831 * @span: resulting cpumask
9c1cfda2 7832 *
41a2d6cf 7833 * Given a node, construct a good cpumask for its sched_domain to span. It
9c1cfda2
JH
7834 * should be one that prevents unnecessary balancing, but also spreads tasks
7835 * out optimally.
7836 */
96f874e2 7837static void sched_domain_node_span(int node, struct cpumask *span)
9c1cfda2 7838{
c5f59f08 7839 nodemask_t used_nodes;
48f24c4d 7840 int i;
9c1cfda2 7841
6ca09dfc 7842 cpumask_clear(span);
c5f59f08 7843 nodes_clear(used_nodes);
9c1cfda2 7844
6ca09dfc 7845 cpumask_or(span, span, cpumask_of_node(node));
c5f59f08 7846 node_set(node, used_nodes);
9c1cfda2
JH
7847
7848 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
c5f59f08 7849 int next_node = find_next_best_node(node, &used_nodes);
48f24c4d 7850
6ca09dfc 7851 cpumask_or(span, span, cpumask_of_node(next_node));
9c1cfda2 7852 }
9c1cfda2 7853}
6d6bc0ad 7854#endif /* CONFIG_NUMA */
9c1cfda2 7855
5c45bf27 7856int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
48f24c4d 7857
6c99e9ad
RR
7858/*
7859 * The cpus mask in sched_group and sched_domain hangs off the end.
7860 * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
7861 * for nr_cpu_ids < CONFIG_NR_CPUS.
7862 */
7863struct static_sched_group {
7864 struct sched_group sg;
7865 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
7866};
7867
7868struct static_sched_domain {
7869 struct sched_domain sd;
7870 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
7871};
7872
9c1cfda2 7873/*
48f24c4d 7874 * SMT sched-domains:
9c1cfda2 7875 */
1da177e4 7876#ifdef CONFIG_SCHED_SMT
6c99e9ad
RR
7877static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
7878static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
48f24c4d 7879
41a2d6cf 7880static int
96f874e2
RR
7881cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
7882 struct sched_group **sg, struct cpumask *unused)
1da177e4 7883{
6711cab4 7884 if (sg)
6c99e9ad 7885 *sg = &per_cpu(sched_group_cpus, cpu).sg;
1da177e4
LT
7886 return cpu;
7887}
6d6bc0ad 7888#endif /* CONFIG_SCHED_SMT */
1da177e4 7889
48f24c4d
IM
7890/*
7891 * multi-core sched-domains:
7892 */
1e9f28fa 7893#ifdef CONFIG_SCHED_MC
6c99e9ad
RR
7894static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
7895static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
6d6bc0ad 7896#endif /* CONFIG_SCHED_MC */
1e9f28fa
SS
7897
7898#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
41a2d6cf 7899static int
96f874e2
RR
7900cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7901 struct sched_group **sg, struct cpumask *mask)
1e9f28fa 7902{
6711cab4 7903 int group;
7c16ec58 7904
c69fc56d 7905 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
96f874e2 7906 group = cpumask_first(mask);
6711cab4 7907 if (sg)
6c99e9ad 7908 *sg = &per_cpu(sched_group_core, group).sg;
6711cab4 7909 return group;
1e9f28fa
SS
7910}
7911#elif defined(CONFIG_SCHED_MC)
41a2d6cf 7912static int
96f874e2
RR
7913cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7914 struct sched_group **sg, struct cpumask *unused)
1e9f28fa 7915{
6711cab4 7916 if (sg)
6c99e9ad 7917 *sg = &per_cpu(sched_group_core, cpu).sg;
1e9f28fa
SS
7918 return cpu;
7919}
7920#endif
7921
6c99e9ad
RR
7922static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
7923static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
48f24c4d 7924
41a2d6cf 7925static int
96f874e2
RR
7926cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
7927 struct sched_group **sg, struct cpumask *mask)
1da177e4 7928{
6711cab4 7929 int group;
48f24c4d 7930#ifdef CONFIG_SCHED_MC
6ca09dfc 7931 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
96f874e2 7932 group = cpumask_first(mask);
1e9f28fa 7933#elif defined(CONFIG_SCHED_SMT)
c69fc56d 7934 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
96f874e2 7935 group = cpumask_first(mask);
1da177e4 7936#else
6711cab4 7937 group = cpu;
1da177e4 7938#endif
6711cab4 7939 if (sg)
6c99e9ad 7940 *sg = &per_cpu(sched_group_phys, group).sg;
6711cab4 7941 return group;
1da177e4
LT
7942}
7943
7944#ifdef CONFIG_NUMA
1da177e4 7945/*
9c1cfda2
JH
7946 * The init_sched_build_groups can't handle what we want to do with node
7947 * groups, so roll our own. Now each node has its own list of groups which
7948 * gets dynamically allocated.
1da177e4 7949 */
62ea9ceb 7950static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
434d53b0 7951static struct sched_group ***sched_group_nodes_bycpu;
1da177e4 7952
62ea9ceb 7953static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
6c99e9ad 7954static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
9c1cfda2 7955
96f874e2
RR
7956static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
7957 struct sched_group **sg,
7958 struct cpumask *nodemask)
9c1cfda2 7959{
6711cab4
SS
7960 int group;
7961
6ca09dfc 7962 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
96f874e2 7963 group = cpumask_first(nodemask);
6711cab4
SS
7964
7965 if (sg)
6c99e9ad 7966 *sg = &per_cpu(sched_group_allnodes, group).sg;
6711cab4 7967 return group;
1da177e4 7968}
6711cab4 7969
08069033
SS
7970static void init_numa_sched_groups_power(struct sched_group *group_head)
7971{
7972 struct sched_group *sg = group_head;
7973 int j;
7974
7975 if (!sg)
7976 return;
3a5c359a 7977 do {
758b2cdc 7978 for_each_cpu(j, sched_group_cpus(sg)) {
3a5c359a 7979 struct sched_domain *sd;
08069033 7980
6c99e9ad 7981 sd = &per_cpu(phys_domains, j).sd;
758b2cdc 7982 if (j != cpumask_first(sched_group_cpus(sd->groups))) {
3a5c359a
AK
7983 /*
7984 * Only add "power" once for each
7985 * physical package.
7986 */
7987 continue;
7988 }
08069033 7989
3a5c359a
AK
7990 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
7991 }
7992 sg = sg->next;
7993 } while (sg != group_head);
08069033 7994}
6d6bc0ad 7995#endif /* CONFIG_NUMA */
1da177e4 7996
a616058b 7997#ifdef CONFIG_NUMA
51888ca2 7998/* Free memory allocated for various sched_group structures */
96f874e2
RR
7999static void free_sched_groups(const struct cpumask *cpu_map,
8000 struct cpumask *nodemask)
51888ca2 8001{
a616058b 8002 int cpu, i;
51888ca2 8003
abcd083a 8004 for_each_cpu(cpu, cpu_map) {
51888ca2
SV
8005 struct sched_group **sched_group_nodes
8006 = sched_group_nodes_bycpu[cpu];
8007
51888ca2
SV
8008 if (!sched_group_nodes)
8009 continue;
8010
076ac2af 8011 for (i = 0; i < nr_node_ids; i++) {
51888ca2
SV
8012 struct sched_group *oldsg, *sg = sched_group_nodes[i];
8013
6ca09dfc 8014 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
96f874e2 8015 if (cpumask_empty(nodemask))
51888ca2
SV
8016 continue;
8017
8018 if (sg == NULL)
8019 continue;
8020 sg = sg->next;
8021next_sg:
8022 oldsg = sg;
8023 sg = sg->next;
8024 kfree(oldsg);
8025 if (oldsg != sched_group_nodes[i])
8026 goto next_sg;
8027 }
8028 kfree(sched_group_nodes);
8029 sched_group_nodes_bycpu[cpu] = NULL;
8030 }
51888ca2 8031}
6d6bc0ad 8032#else /* !CONFIG_NUMA */
96f874e2
RR
8033static void free_sched_groups(const struct cpumask *cpu_map,
8034 struct cpumask *nodemask)
a616058b
SS
8035{
8036}
6d6bc0ad 8037#endif /* CONFIG_NUMA */
51888ca2 8038
89c4710e
SS
8039/*
8040 * Initialize sched groups cpu_power.
8041 *
8042 * cpu_power indicates the capacity of sched group, which is used while
8043 * distributing the load between different sched groups in a sched domain.
8044 * Typically cpu_power for all the groups in a sched domain will be same unless
8045 * there are asymmetries in the topology. If there are asymmetries, group
8046 * having more cpu_power will pickup more load compared to the group having
8047 * less cpu_power.
8048 *
8049 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
8050 * the maximum number of tasks a group can handle in the presence of other idle
8051 * or lightly loaded groups in the same sched domain.
8052 */
8053static void init_sched_groups_power(int cpu, struct sched_domain *sd)
8054{
8055 struct sched_domain *child;
8056 struct sched_group *group;
8057
8058 WARN_ON(!sd || !sd->groups);
8059
758b2cdc 8060 if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
89c4710e
SS
8061 return;
8062
8063 child = sd->child;
8064
5517d86b
ED
8065 sd->groups->__cpu_power = 0;
8066
89c4710e
SS
8067 /*
8068 * For perf policy, if the groups in child domain share resources
8069 * (for example cores sharing some portions of the cache hierarchy
8070 * or SMT), then set this domain groups cpu_power such that each group
8071 * can handle only one task, when there are other idle groups in the
8072 * same sched domain.
8073 */
8074 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
8075 (child->flags &
8076 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
5517d86b 8077 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
89c4710e
SS
8078 return;
8079 }
8080
89c4710e
SS
8081 /*
8082 * add cpu_power of each child group to this groups cpu_power
8083 */
8084 group = child->groups;
8085 do {
5517d86b 8086 sg_inc_cpu_power(sd->groups, group->__cpu_power);
89c4710e
SS
8087 group = group->next;
8088 } while (group != child->groups);
8089}
8090
7c16ec58
MT
8091/*
8092 * Initializers for schedule domains
8093 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
8094 */
8095
a5d8c348
IM
8096#ifdef CONFIG_SCHED_DEBUG
8097# define SD_INIT_NAME(sd, type) sd->name = #type
8098#else
8099# define SD_INIT_NAME(sd, type) do { } while (0)
8100#endif
8101
7c16ec58 8102#define SD_INIT(sd, type) sd_init_##type(sd)
a5d8c348 8103
7c16ec58
MT
8104#define SD_INIT_FUNC(type) \
8105static noinline void sd_init_##type(struct sched_domain *sd) \
8106{ \
8107 memset(sd, 0, sizeof(*sd)); \
8108 *sd = SD_##type##_INIT; \
1d3504fc 8109 sd->level = SD_LV_##type; \
a5d8c348 8110 SD_INIT_NAME(sd, type); \
7c16ec58
MT
8111}
8112
8113SD_INIT_FUNC(CPU)
8114#ifdef CONFIG_NUMA
8115 SD_INIT_FUNC(ALLNODES)
8116 SD_INIT_FUNC(NODE)
8117#endif
8118#ifdef CONFIG_SCHED_SMT
8119 SD_INIT_FUNC(SIBLING)
8120#endif
8121#ifdef CONFIG_SCHED_MC
8122 SD_INIT_FUNC(MC)
8123#endif
8124
1d3504fc
HS
8125static int default_relax_domain_level = -1;
8126
8127static int __init setup_relax_domain_level(char *str)
8128{
30e0e178
LZ
8129 unsigned long val;
8130
8131 val = simple_strtoul(str, NULL, 0);
8132 if (val < SD_LV_MAX)
8133 default_relax_domain_level = val;
8134
1d3504fc
HS
8135 return 1;
8136}
8137__setup("relax_domain_level=", setup_relax_domain_level);
8138
8139static void set_domain_attribute(struct sched_domain *sd,
8140 struct sched_domain_attr *attr)
8141{
8142 int request;
8143
8144 if (!attr || attr->relax_domain_level < 0) {
8145 if (default_relax_domain_level < 0)
8146 return;
8147 else
8148 request = default_relax_domain_level;
8149 } else
8150 request = attr->relax_domain_level;
8151 if (request < sd->level) {
8152 /* turn off idle balance on this domain */
8153 sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
8154 } else {
8155 /* turn on idle balance on this domain */
8156 sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
8157 }
8158}
8159
1da177e4 8160/*
1a20ff27
DG
8161 * Build sched domains for a given set of cpus and attach the sched domains
8162 * to the individual cpus
1da177e4 8163 */
96f874e2 8164static int __build_sched_domains(const struct cpumask *cpu_map,
1d3504fc 8165 struct sched_domain_attr *attr)
1da177e4 8166{
3404c8d9 8167 int i, err = -ENOMEM;
57d885fe 8168 struct root_domain *rd;
3404c8d9
RR
8169 cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
8170 tmpmask;
d1b55138 8171#ifdef CONFIG_NUMA
3404c8d9 8172 cpumask_var_t domainspan, covered, notcovered;
d1b55138 8173 struct sched_group **sched_group_nodes = NULL;
6711cab4 8174 int sd_allnodes = 0;
d1b55138 8175
3404c8d9
RR
8176 if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
8177 goto out;
8178 if (!alloc_cpumask_var(&covered, GFP_KERNEL))
8179 goto free_domainspan;
8180 if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
8181 goto free_covered;
8182#endif
8183
8184 if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
8185 goto free_notcovered;
8186 if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
8187 goto free_nodemask;
8188 if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
8189 goto free_this_sibling_map;
8190 if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
8191 goto free_this_core_map;
8192 if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
8193 goto free_send_covered;
8194
8195#ifdef CONFIG_NUMA
d1b55138
JH
8196 /*
8197 * Allocate the per-node list of sched groups
8198 */
076ac2af 8199 sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
41a2d6cf 8200 GFP_KERNEL);
d1b55138
JH
8201 if (!sched_group_nodes) {
8202 printk(KERN_WARNING "Can not alloc sched group node list\n");
3404c8d9 8203 goto free_tmpmask;
d1b55138 8204 }
d1b55138 8205#endif
1da177e4 8206
dc938520 8207 rd = alloc_rootdomain();
57d885fe
GH
8208 if (!rd) {
8209 printk(KERN_WARNING "Cannot alloc root domain\n");
3404c8d9 8210 goto free_sched_groups;
57d885fe
GH
8211 }
8212
7c16ec58 8213#ifdef CONFIG_NUMA
96f874e2 8214 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
7c16ec58
MT
8215#endif
8216
1da177e4 8217 /*
1a20ff27 8218 * Set up domains for cpus specified by the cpu_map.
1da177e4 8219 */
abcd083a 8220 for_each_cpu(i, cpu_map) {
1da177e4 8221 struct sched_domain *sd = NULL, *p;
1da177e4 8222
6ca09dfc 8223 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
1da177e4
LT
8224
8225#ifdef CONFIG_NUMA
96f874e2
RR
8226 if (cpumask_weight(cpu_map) >
8227 SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
62ea9ceb 8228 sd = &per_cpu(allnodes_domains, i).sd;
7c16ec58 8229 SD_INIT(sd, ALLNODES);
1d3504fc 8230 set_domain_attribute(sd, attr);
758b2cdc 8231 cpumask_copy(sched_domain_span(sd), cpu_map);
7c16ec58 8232 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
9c1cfda2 8233 p = sd;
6711cab4 8234 sd_allnodes = 1;
9c1cfda2
JH
8235 } else
8236 p = NULL;
8237
62ea9ceb 8238 sd = &per_cpu(node_domains, i).sd;
7c16ec58 8239 SD_INIT(sd, NODE);
1d3504fc 8240 set_domain_attribute(sd, attr);
758b2cdc 8241 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
9c1cfda2 8242 sd->parent = p;
1a848870
SS
8243 if (p)
8244 p->child = sd;
758b2cdc
RR
8245 cpumask_and(sched_domain_span(sd),
8246 sched_domain_span(sd), cpu_map);
1da177e4
LT
8247#endif
8248
8249 p = sd;
6c99e9ad 8250 sd = &per_cpu(phys_domains, i).sd;
7c16ec58 8251 SD_INIT(sd, CPU);
1d3504fc 8252 set_domain_attribute(sd, attr);
758b2cdc 8253 cpumask_copy(sched_domain_span(sd), nodemask);
1da177e4 8254 sd->parent = p;
1a848870
SS
8255 if (p)
8256 p->child = sd;
7c16ec58 8257 cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
1da177e4 8258
1e9f28fa
SS
8259#ifdef CONFIG_SCHED_MC
8260 p = sd;
6c99e9ad 8261 sd = &per_cpu(core_domains, i).sd;
7c16ec58 8262 SD_INIT(sd, MC);
1d3504fc 8263 set_domain_attribute(sd, attr);
6ca09dfc
MT
8264 cpumask_and(sched_domain_span(sd), cpu_map,
8265 cpu_coregroup_mask(i));
1e9f28fa 8266 sd->parent = p;
1a848870 8267 p->child = sd;
7c16ec58 8268 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
1e9f28fa
SS
8269#endif
8270
1da177e4
LT
8271#ifdef CONFIG_SCHED_SMT
8272 p = sd;
6c99e9ad 8273 sd = &per_cpu(cpu_domains, i).sd;
7c16ec58 8274 SD_INIT(sd, SIBLING);
1d3504fc 8275 set_domain_attribute(sd, attr);
758b2cdc 8276 cpumask_and(sched_domain_span(sd),
c69fc56d 8277 topology_thread_cpumask(i), cpu_map);
1da177e4 8278 sd->parent = p;
1a848870 8279 p->child = sd;
7c16ec58 8280 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
1da177e4
LT
8281#endif
8282 }
8283
8284#ifdef CONFIG_SCHED_SMT
8285 /* Set up CPU (sibling) groups */
abcd083a 8286 for_each_cpu(i, cpu_map) {
96f874e2 8287 cpumask_and(this_sibling_map,
c69fc56d 8288 topology_thread_cpumask(i), cpu_map);
96f874e2 8289 if (i != cpumask_first(this_sibling_map))
1da177e4
LT
8290 continue;
8291
dd41f596 8292 init_sched_build_groups(this_sibling_map, cpu_map,
7c16ec58
MT
8293 &cpu_to_cpu_group,
8294 send_covered, tmpmask);
1da177e4
LT
8295 }
8296#endif
8297
1e9f28fa
SS
8298#ifdef CONFIG_SCHED_MC
8299 /* Set up multi-core groups */
abcd083a 8300 for_each_cpu(i, cpu_map) {
6ca09dfc 8301 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
96f874e2 8302 if (i != cpumask_first(this_core_map))
1e9f28fa 8303 continue;
7c16ec58 8304
dd41f596 8305 init_sched_build_groups(this_core_map, cpu_map,
7c16ec58
MT
8306 &cpu_to_core_group,
8307 send_covered, tmpmask);
1e9f28fa
SS
8308 }
8309#endif
8310
1da177e4 8311 /* Set up physical groups */
076ac2af 8312 for (i = 0; i < nr_node_ids; i++) {
6ca09dfc 8313 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
96f874e2 8314 if (cpumask_empty(nodemask))
1da177e4
LT
8315 continue;
8316
7c16ec58
MT
8317 init_sched_build_groups(nodemask, cpu_map,
8318 &cpu_to_phys_group,
8319 send_covered, tmpmask);
1da177e4
LT
8320 }
8321
8322#ifdef CONFIG_NUMA
8323 /* Set up node groups */
7c16ec58 8324 if (sd_allnodes) {
7c16ec58
MT
8325 init_sched_build_groups(cpu_map, cpu_map,
8326 &cpu_to_allnodes_group,
8327 send_covered, tmpmask);
8328 }
9c1cfda2 8329
076ac2af 8330 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2
JH
8331 /* Set up node groups */
8332 struct sched_group *sg, *prev;
9c1cfda2
JH
8333 int j;
8334
96f874e2 8335 cpumask_clear(covered);
6ca09dfc 8336 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
96f874e2 8337 if (cpumask_empty(nodemask)) {
d1b55138 8338 sched_group_nodes[i] = NULL;
9c1cfda2 8339 continue;
d1b55138 8340 }
9c1cfda2 8341
4bdbaad3 8342 sched_domain_node_span(i, domainspan);
96f874e2 8343 cpumask_and(domainspan, domainspan, cpu_map);
9c1cfda2 8344
6c99e9ad
RR
8345 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8346 GFP_KERNEL, i);
51888ca2
SV
8347 if (!sg) {
8348 printk(KERN_WARNING "Can not alloc domain group for "
8349 "node %d\n", i);
8350 goto error;
8351 }
9c1cfda2 8352 sched_group_nodes[i] = sg;
abcd083a 8353 for_each_cpu(j, nodemask) {
9c1cfda2 8354 struct sched_domain *sd;
9761eea8 8355
62ea9ceb 8356 sd = &per_cpu(node_domains, j).sd;
9c1cfda2 8357 sd->groups = sg;
9c1cfda2 8358 }
5517d86b 8359 sg->__cpu_power = 0;
758b2cdc 8360 cpumask_copy(sched_group_cpus(sg), nodemask);
51888ca2 8361 sg->next = sg;
96f874e2 8362 cpumask_or(covered, covered, nodemask);
9c1cfda2
JH
8363 prev = sg;
8364
076ac2af 8365 for (j = 0; j < nr_node_ids; j++) {
076ac2af 8366 int n = (i + j) % nr_node_ids;
9c1cfda2 8367
96f874e2
RR
8368 cpumask_complement(notcovered, covered);
8369 cpumask_and(tmpmask, notcovered, cpu_map);
8370 cpumask_and(tmpmask, tmpmask, domainspan);
8371 if (cpumask_empty(tmpmask))
9c1cfda2
JH
8372 break;
8373
6ca09dfc 8374 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
96f874e2 8375 if (cpumask_empty(tmpmask))
9c1cfda2
JH
8376 continue;
8377
6c99e9ad
RR
8378 sg = kmalloc_node(sizeof(struct sched_group) +
8379 cpumask_size(),
15f0b676 8380 GFP_KERNEL, i);
9c1cfda2
JH
8381 if (!sg) {
8382 printk(KERN_WARNING
8383 "Can not alloc domain group for node %d\n", j);
51888ca2 8384 goto error;
9c1cfda2 8385 }
5517d86b 8386 sg->__cpu_power = 0;
758b2cdc 8387 cpumask_copy(sched_group_cpus(sg), tmpmask);
51888ca2 8388 sg->next = prev->next;
96f874e2 8389 cpumask_or(covered, covered, tmpmask);
9c1cfda2
JH
8390 prev->next = sg;
8391 prev = sg;
8392 }
9c1cfda2 8393 }
1da177e4
LT
8394#endif
8395
8396 /* Calculate CPU power for physical packages and nodes */
5c45bf27 8397#ifdef CONFIG_SCHED_SMT
abcd083a 8398 for_each_cpu(i, cpu_map) {
6c99e9ad 8399 struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
dd41f596 8400
89c4710e 8401 init_sched_groups_power(i, sd);
5c45bf27 8402 }
1da177e4 8403#endif
1e9f28fa 8404#ifdef CONFIG_SCHED_MC
abcd083a 8405 for_each_cpu(i, cpu_map) {
6c99e9ad 8406 struct sched_domain *sd = &per_cpu(core_domains, i).sd;
dd41f596 8407
89c4710e 8408 init_sched_groups_power(i, sd);
5c45bf27
SS
8409 }
8410#endif
1e9f28fa 8411
abcd083a 8412 for_each_cpu(i, cpu_map) {
6c99e9ad 8413 struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
dd41f596 8414
89c4710e 8415 init_sched_groups_power(i, sd);
1da177e4
LT
8416 }
8417
9c1cfda2 8418#ifdef CONFIG_NUMA
076ac2af 8419 for (i = 0; i < nr_node_ids; i++)
08069033 8420 init_numa_sched_groups_power(sched_group_nodes[i]);
9c1cfda2 8421
6711cab4
SS
8422 if (sd_allnodes) {
8423 struct sched_group *sg;
f712c0c7 8424
96f874e2 8425 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
7c16ec58 8426 tmpmask);
f712c0c7
SS
8427 init_numa_sched_groups_power(sg);
8428 }
9c1cfda2
JH
8429#endif
8430
1da177e4 8431 /* Attach the domains */
abcd083a 8432 for_each_cpu(i, cpu_map) {
1da177e4
LT
8433 struct sched_domain *sd;
8434#ifdef CONFIG_SCHED_SMT
6c99e9ad 8435 sd = &per_cpu(cpu_domains, i).sd;
1e9f28fa 8436#elif defined(CONFIG_SCHED_MC)
6c99e9ad 8437 sd = &per_cpu(core_domains, i).sd;
1da177e4 8438#else
6c99e9ad 8439 sd = &per_cpu(phys_domains, i).sd;
1da177e4 8440#endif
57d885fe 8441 cpu_attach_domain(sd, rd, i);
1da177e4 8442 }
51888ca2 8443
3404c8d9
RR
8444 err = 0;
8445
8446free_tmpmask:
8447 free_cpumask_var(tmpmask);
8448free_send_covered:
8449 free_cpumask_var(send_covered);
8450free_this_core_map:
8451 free_cpumask_var(this_core_map);
8452free_this_sibling_map:
8453 free_cpumask_var(this_sibling_map);
8454free_nodemask:
8455 free_cpumask_var(nodemask);
8456free_notcovered:
8457#ifdef CONFIG_NUMA
8458 free_cpumask_var(notcovered);
8459free_covered:
8460 free_cpumask_var(covered);
8461free_domainspan:
8462 free_cpumask_var(domainspan);
8463out:
8464#endif
8465 return err;
8466
8467free_sched_groups:
8468#ifdef CONFIG_NUMA
8469 kfree(sched_group_nodes);
8470#endif
8471 goto free_tmpmask;
51888ca2 8472
a616058b 8473#ifdef CONFIG_NUMA
51888ca2 8474error:
7c16ec58 8475 free_sched_groups(cpu_map, tmpmask);
c6c4927b 8476 free_rootdomain(rd);
3404c8d9 8477 goto free_tmpmask;
a616058b 8478#endif
1da177e4 8479}
029190c5 8480
96f874e2 8481static int build_sched_domains(const struct cpumask *cpu_map)
1d3504fc
HS
8482{
8483 return __build_sched_domains(cpu_map, NULL);
8484}
8485
96f874e2 8486static struct cpumask *doms_cur; /* current sched domains */
029190c5 8487static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
8488static struct sched_domain_attr *dattr_cur;
8489 /* attribues of custom domains in 'doms_cur' */
029190c5
PJ
8490
8491/*
8492 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
8493 * cpumask) fails, then fallback to a single sched domain,
8494 * as determined by the single cpumask fallback_doms.
029190c5 8495 */
4212823f 8496static cpumask_var_t fallback_doms;
029190c5 8497
ee79d1bd
HC
8498/*
8499 * arch_update_cpu_topology lets virtualized architectures update the
8500 * cpu core maps. It is supposed to return 1 if the topology changed
8501 * or 0 if it stayed the same.
8502 */
8503int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 8504{
ee79d1bd 8505 return 0;
22e52b07
HC
8506}
8507
1a20ff27 8508/*
41a2d6cf 8509 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
8510 * For now this just excludes isolated cpus, but could be used to
8511 * exclude other special cases in the future.
1a20ff27 8512 */
96f874e2 8513static int arch_init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 8514{
7378547f
MM
8515 int err;
8516
22e52b07 8517 arch_update_cpu_topology();
029190c5 8518 ndoms_cur = 1;
96f874e2 8519 doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
029190c5 8520 if (!doms_cur)
4212823f 8521 doms_cur = fallback_doms;
dcc30a35 8522 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
1d3504fc 8523 dattr_cur = NULL;
7378547f 8524 err = build_sched_domains(doms_cur);
6382bc90 8525 register_sched_domain_sysctl();
7378547f
MM
8526
8527 return err;
1a20ff27
DG
8528}
8529
96f874e2
RR
8530static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
8531 struct cpumask *tmpmask)
1da177e4 8532{
7c16ec58 8533 free_sched_groups(cpu_map, tmpmask);
9c1cfda2 8534}
1da177e4 8535
1a20ff27
DG
8536/*
8537 * Detach sched domains from a group of cpus specified in cpu_map
8538 * These cpus will now be attached to the NULL domain
8539 */
96f874e2 8540static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27 8541{
96f874e2
RR
8542 /* Save because hotplug lock held. */
8543 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
1a20ff27
DG
8544 int i;
8545
abcd083a 8546 for_each_cpu(i, cpu_map)
57d885fe 8547 cpu_attach_domain(NULL, &def_root_domain, i);
1a20ff27 8548 synchronize_sched();
96f874e2 8549 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
1a20ff27
DG
8550}
8551
1d3504fc
HS
8552/* handle null as "default" */
8553static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8554 struct sched_domain_attr *new, int idx_new)
8555{
8556 struct sched_domain_attr tmp;
8557
8558 /* fast path */
8559 if (!new && !cur)
8560 return 1;
8561
8562 tmp = SD_ATTR_INIT;
8563 return !memcmp(cur ? (cur + idx_cur) : &tmp,
8564 new ? (new + idx_new) : &tmp,
8565 sizeof(struct sched_domain_attr));
8566}
8567
029190c5
PJ
8568/*
8569 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 8570 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
8571 * doms_new[] to the current sched domain partitioning, doms_cur[].
8572 * It destroys each deleted domain and builds each new domain.
8573 *
96f874e2 8574 * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
41a2d6cf
IM
8575 * The masks don't intersect (don't overlap.) We should setup one
8576 * sched domain for each mask. CPUs not in any of the cpumasks will
8577 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
8578 * current 'doms_cur' domains and in the new 'doms_new', we can leave
8579 * it as it is.
8580 *
41a2d6cf
IM
8581 * The passed in 'doms_new' should be kmalloc'd. This routine takes
8582 * ownership of it and will kfree it when done with it. If the caller
700018e0
LZ
8583 * failed the kmalloc call, then it can pass in doms_new == NULL &&
8584 * ndoms_new == 1, and partition_sched_domains() will fallback to
8585 * the single partition 'fallback_doms', it also forces the domains
8586 * to be rebuilt.
029190c5 8587 *
96f874e2 8588 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
8589 * ndoms_new == 0 is a special case for destroying existing domains,
8590 * and it will not create the default domain.
dfb512ec 8591 *
029190c5
PJ
8592 * Call with hotplug lock held
8593 */
96f874e2
RR
8594/* FIXME: Change to struct cpumask *doms_new[] */
8595void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
1d3504fc 8596 struct sched_domain_attr *dattr_new)
029190c5 8597{
dfb512ec 8598 int i, j, n;
d65bd5ec 8599 int new_topology;
029190c5 8600
712555ee 8601 mutex_lock(&sched_domains_mutex);
a1835615 8602
7378547f
MM
8603 /* always unregister in case we don't destroy any domains */
8604 unregister_sched_domain_sysctl();
8605
d65bd5ec
HC
8606 /* Let architecture update cpu core mappings. */
8607 new_topology = arch_update_cpu_topology();
8608
dfb512ec 8609 n = doms_new ? ndoms_new : 0;
029190c5
PJ
8610
8611 /* Destroy deleted domains */
8612 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 8613 for (j = 0; j < n && !new_topology; j++) {
96f874e2 8614 if (cpumask_equal(&doms_cur[i], &doms_new[j])
1d3504fc 8615 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
8616 goto match1;
8617 }
8618 /* no match - a current sched domain not in new doms_new[] */
8619 detach_destroy_domains(doms_cur + i);
8620match1:
8621 ;
8622 }
8623
e761b772
MK
8624 if (doms_new == NULL) {
8625 ndoms_cur = 0;
4212823f 8626 doms_new = fallback_doms;
dcc30a35 8627 cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
faa2f98f 8628 WARN_ON_ONCE(dattr_new);
e761b772
MK
8629 }
8630
029190c5
PJ
8631 /* Build new domains */
8632 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 8633 for (j = 0; j < ndoms_cur && !new_topology; j++) {
96f874e2 8634 if (cpumask_equal(&doms_new[i], &doms_cur[j])
1d3504fc 8635 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
8636 goto match2;
8637 }
8638 /* no match - add a new doms_new */
1d3504fc
HS
8639 __build_sched_domains(doms_new + i,
8640 dattr_new ? dattr_new + i : NULL);
029190c5
PJ
8641match2:
8642 ;
8643 }
8644
8645 /* Remember the new sched domains */
4212823f 8646 if (doms_cur != fallback_doms)
029190c5 8647 kfree(doms_cur);
1d3504fc 8648 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 8649 doms_cur = doms_new;
1d3504fc 8650 dattr_cur = dattr_new;
029190c5 8651 ndoms_cur = ndoms_new;
7378547f
MM
8652
8653 register_sched_domain_sysctl();
a1835615 8654
712555ee 8655 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
8656}
8657
5c45bf27 8658#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
c70f22d2 8659static void arch_reinit_sched_domains(void)
5c45bf27 8660{
95402b38 8661 get_online_cpus();
dfb512ec
MK
8662
8663 /* Destroy domains first to force the rebuild */
8664 partition_sched_domains(0, NULL, NULL);
8665
e761b772 8666 rebuild_sched_domains();
95402b38 8667 put_online_cpus();
5c45bf27
SS
8668}
8669
8670static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
8671{
afb8a9b7 8672 unsigned int level = 0;
5c45bf27 8673
afb8a9b7
GS
8674 if (sscanf(buf, "%u", &level) != 1)
8675 return -EINVAL;
8676
8677 /*
8678 * level is always be positive so don't check for
8679 * level < POWERSAVINGS_BALANCE_NONE which is 0
8680 * What happens on 0 or 1 byte write,
8681 * need to check for count as well?
8682 */
8683
8684 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5c45bf27
SS
8685 return -EINVAL;
8686
8687 if (smt)
afb8a9b7 8688 sched_smt_power_savings = level;
5c45bf27 8689 else
afb8a9b7 8690 sched_mc_power_savings = level;
5c45bf27 8691
c70f22d2 8692 arch_reinit_sched_domains();
5c45bf27 8693
c70f22d2 8694 return count;
5c45bf27
SS
8695}
8696
5c45bf27 8697#ifdef CONFIG_SCHED_MC
f718cd4a
AK
8698static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
8699 char *page)
5c45bf27
SS
8700{
8701 return sprintf(page, "%u\n", sched_mc_power_savings);
8702}
f718cd4a 8703static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
48f24c4d 8704 const char *buf, size_t count)
5c45bf27
SS
8705{
8706 return sched_power_savings_store(buf, count, 0);
8707}
f718cd4a
AK
8708static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
8709 sched_mc_power_savings_show,
8710 sched_mc_power_savings_store);
5c45bf27
SS
8711#endif
8712
8713#ifdef CONFIG_SCHED_SMT
f718cd4a
AK
8714static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
8715 char *page)
5c45bf27
SS
8716{
8717 return sprintf(page, "%u\n", sched_smt_power_savings);
8718}
f718cd4a 8719static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
48f24c4d 8720 const char *buf, size_t count)
5c45bf27
SS
8721{
8722 return sched_power_savings_store(buf, count, 1);
8723}
f718cd4a
AK
8724static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
8725 sched_smt_power_savings_show,
6707de00
AB
8726 sched_smt_power_savings_store);
8727#endif
8728
39aac648 8729int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6707de00
AB
8730{
8731 int err = 0;
8732
8733#ifdef CONFIG_SCHED_SMT
8734 if (smt_capable())
8735 err = sysfs_create_file(&cls->kset.kobj,
8736 &attr_sched_smt_power_savings.attr);
8737#endif
8738#ifdef CONFIG_SCHED_MC
8739 if (!err && mc_capable())
8740 err = sysfs_create_file(&cls->kset.kobj,
8741 &attr_sched_mc_power_savings.attr);
8742#endif
8743 return err;
8744}
6d6bc0ad 8745#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5c45bf27 8746
e761b772 8747#ifndef CONFIG_CPUSETS
1da177e4 8748/*
e761b772
MK
8749 * Add online and remove offline CPUs from the scheduler domains.
8750 * When cpusets are enabled they take over this function.
1da177e4
LT
8751 */
8752static int update_sched_domains(struct notifier_block *nfb,
8753 unsigned long action, void *hcpu)
e761b772
MK
8754{
8755 switch (action) {
8756 case CPU_ONLINE:
8757 case CPU_ONLINE_FROZEN:
8758 case CPU_DEAD:
8759 case CPU_DEAD_FROZEN:
dfb512ec 8760 partition_sched_domains(1, NULL, NULL);
e761b772
MK
8761 return NOTIFY_OK;
8762
8763 default:
8764 return NOTIFY_DONE;
8765 }
8766}
8767#endif
8768
8769static int update_runtime(struct notifier_block *nfb,
8770 unsigned long action, void *hcpu)
1da177e4 8771{
7def2be1
PZ
8772 int cpu = (int)(long)hcpu;
8773
1da177e4 8774 switch (action) {
1da177e4 8775 case CPU_DOWN_PREPARE:
8bb78442 8776 case CPU_DOWN_PREPARE_FROZEN:
7def2be1 8777 disable_runtime(cpu_rq(cpu));
1da177e4
LT
8778 return NOTIFY_OK;
8779
1da177e4 8780 case CPU_DOWN_FAILED:
8bb78442 8781 case CPU_DOWN_FAILED_FROZEN:
1da177e4 8782 case CPU_ONLINE:
8bb78442 8783 case CPU_ONLINE_FROZEN:
7def2be1 8784 enable_runtime(cpu_rq(cpu));
e761b772
MK
8785 return NOTIFY_OK;
8786
1da177e4
LT
8787 default:
8788 return NOTIFY_DONE;
8789 }
1da177e4 8790}
1da177e4
LT
8791
8792void __init sched_init_smp(void)
8793{
dcc30a35
RR
8794 cpumask_var_t non_isolated_cpus;
8795
8796 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
5c1e1767 8797
434d53b0
MT
8798#if defined(CONFIG_NUMA)
8799 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
8800 GFP_KERNEL);
8801 BUG_ON(sched_group_nodes_bycpu == NULL);
8802#endif
95402b38 8803 get_online_cpus();
712555ee 8804 mutex_lock(&sched_domains_mutex);
dcc30a35
RR
8805 arch_init_sched_domains(cpu_online_mask);
8806 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
8807 if (cpumask_empty(non_isolated_cpus))
8808 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 8809 mutex_unlock(&sched_domains_mutex);
95402b38 8810 put_online_cpus();
e761b772
MK
8811
8812#ifndef CONFIG_CPUSETS
1da177e4
LT
8813 /* XXX: Theoretical race here - CPU may be hotplugged now */
8814 hotcpu_notifier(update_sched_domains, 0);
e761b772
MK
8815#endif
8816
8817 /* RT runtime code needs to handle some hotplug events */
8818 hotcpu_notifier(update_runtime, 0);
8819
b328ca18 8820 init_hrtick();
5c1e1767
NP
8821
8822 /* Move init over to a non-isolated CPU */
dcc30a35 8823 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 8824 BUG();
19978ca6 8825 sched_init_granularity();
dcc30a35 8826 free_cpumask_var(non_isolated_cpus);
4212823f
RR
8827
8828 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
0e3900e6 8829 init_sched_rt_class();
1da177e4
LT
8830}
8831#else
8832void __init sched_init_smp(void)
8833{
19978ca6 8834 sched_init_granularity();
1da177e4
LT
8835}
8836#endif /* CONFIG_SMP */
8837
8838int in_sched_functions(unsigned long addr)
8839{
1da177e4
LT
8840 return in_lock_functions(addr) ||
8841 (addr >= (unsigned long)__sched_text_start
8842 && addr < (unsigned long)__sched_text_end);
8843}
8844
a9957449 8845static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
dd41f596
IM
8846{
8847 cfs_rq->tasks_timeline = RB_ROOT;
4a55bd5e 8848 INIT_LIST_HEAD(&cfs_rq->tasks);
dd41f596
IM
8849#ifdef CONFIG_FAIR_GROUP_SCHED
8850 cfs_rq->rq = rq;
8851#endif
67e9fb2a 8852 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
dd41f596
IM
8853}
8854
fa85ae24
PZ
8855static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8856{
8857 struct rt_prio_array *array;
8858 int i;
8859
8860 array = &rt_rq->active;
8861 for (i = 0; i < MAX_RT_PRIO; i++) {
8862 INIT_LIST_HEAD(array->queue + i);
8863 __clear_bit(i, array->bitmap);
8864 }
8865 /* delimiter for bitsearch: */
8866 __set_bit(MAX_RT_PRIO, array->bitmap);
8867
052f1dc7 8868#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
e864c499 8869 rt_rq->highest_prio.curr = MAX_RT_PRIO;
398a153b 8870#ifdef CONFIG_SMP
e864c499 8871 rt_rq->highest_prio.next = MAX_RT_PRIO;
48d5e258 8872#endif
48d5e258 8873#endif
fa85ae24
PZ
8874#ifdef CONFIG_SMP
8875 rt_rq->rt_nr_migratory = 0;
fa85ae24 8876 rt_rq->overloaded = 0;
917b627d 8877 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
fa85ae24
PZ
8878#endif
8879
8880 rt_rq->rt_time = 0;
8881 rt_rq->rt_throttled = 0;
ac086bc2
PZ
8882 rt_rq->rt_runtime = 0;
8883 spin_lock_init(&rt_rq->rt_runtime_lock);
6f505b16 8884
052f1dc7 8885#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc 8886 rt_rq->rt_nr_boosted = 0;
6f505b16
PZ
8887 rt_rq->rq = rq;
8888#endif
fa85ae24
PZ
8889}
8890
6f505b16 8891#ifdef CONFIG_FAIR_GROUP_SCHED
ec7dc8ac
DG
8892static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8893 struct sched_entity *se, int cpu, int add,
8894 struct sched_entity *parent)
6f505b16 8895{
ec7dc8ac 8896 struct rq *rq = cpu_rq(cpu);
6f505b16
PZ
8897 tg->cfs_rq[cpu] = cfs_rq;
8898 init_cfs_rq(cfs_rq, rq);
8899 cfs_rq->tg = tg;
8900 if (add)
8901 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
8902
8903 tg->se[cpu] = se;
354d60c2
DG
8904 /* se could be NULL for init_task_group */
8905 if (!se)
8906 return;
8907
ec7dc8ac
DG
8908 if (!parent)
8909 se->cfs_rq = &rq->cfs;
8910 else
8911 se->cfs_rq = parent->my_q;
8912
6f505b16
PZ
8913 se->my_q = cfs_rq;
8914 se->load.weight = tg->shares;
e05510d0 8915 se->load.inv_weight = 0;
ec7dc8ac 8916 se->parent = parent;
6f505b16 8917}
052f1dc7 8918#endif
6f505b16 8919
052f1dc7 8920#ifdef CONFIG_RT_GROUP_SCHED
ec7dc8ac
DG
8921static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
8922 struct sched_rt_entity *rt_se, int cpu, int add,
8923 struct sched_rt_entity *parent)
6f505b16 8924{
ec7dc8ac
DG
8925 struct rq *rq = cpu_rq(cpu);
8926
6f505b16
PZ
8927 tg->rt_rq[cpu] = rt_rq;
8928 init_rt_rq(rt_rq, rq);
8929 rt_rq->tg = tg;
8930 rt_rq->rt_se = rt_se;
ac086bc2 8931 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
6f505b16
PZ
8932 if (add)
8933 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
8934
8935 tg->rt_se[cpu] = rt_se;
354d60c2
DG
8936 if (!rt_se)
8937 return;
8938
ec7dc8ac
DG
8939 if (!parent)
8940 rt_se->rt_rq = &rq->rt;
8941 else
8942 rt_se->rt_rq = parent->my_q;
8943
6f505b16 8944 rt_se->my_q = rt_rq;
ec7dc8ac 8945 rt_se->parent = parent;
6f505b16
PZ
8946 INIT_LIST_HEAD(&rt_se->run_list);
8947}
8948#endif
8949
1da177e4
LT
8950void __init sched_init(void)
8951{
dd41f596 8952 int i, j;
434d53b0
MT
8953 unsigned long alloc_size = 0, ptr;
8954
8955#ifdef CONFIG_FAIR_GROUP_SCHED
8956 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8957#endif
8958#ifdef CONFIG_RT_GROUP_SCHED
8959 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6
PZ
8960#endif
8961#ifdef CONFIG_USER_SCHED
8962 alloc_size *= 2;
df7c8e84
RR
8963#endif
8964#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 8965 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0
MT
8966#endif
8967 /*
8968 * As sched_init() is called before page_alloc is setup,
8969 * we use alloc_bootmem().
8970 */
8971 if (alloc_size) {
5a9d3225 8972 ptr = (unsigned long)alloc_bootmem(alloc_size);
434d53b0
MT
8973
8974#ifdef CONFIG_FAIR_GROUP_SCHED
8975 init_task_group.se = (struct sched_entity **)ptr;
8976 ptr += nr_cpu_ids * sizeof(void **);
8977
8978 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
8979 ptr += nr_cpu_ids * sizeof(void **);
eff766a6
PZ
8980
8981#ifdef CONFIG_USER_SCHED
8982 root_task_group.se = (struct sched_entity **)ptr;
8983 ptr += nr_cpu_ids * sizeof(void **);
8984
8985 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8986 ptr += nr_cpu_ids * sizeof(void **);
6d6bc0ad
DG
8987#endif /* CONFIG_USER_SCHED */
8988#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0
MT
8989#ifdef CONFIG_RT_GROUP_SCHED
8990 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
8991 ptr += nr_cpu_ids * sizeof(void **);
8992
8993 init_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
8994 ptr += nr_cpu_ids * sizeof(void **);
8995
8996#ifdef CONFIG_USER_SCHED
8997 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8998 ptr += nr_cpu_ids * sizeof(void **);
8999
9000 root_task_group.rt_rq = (struct rt_rq **)ptr;
9001 ptr += nr_cpu_ids * sizeof(void **);
6d6bc0ad
DG
9002#endif /* CONFIG_USER_SCHED */
9003#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
9004#ifdef CONFIG_CPUMASK_OFFSTACK
9005 for_each_possible_cpu(i) {
9006 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
9007 ptr += cpumask_size();
9008 }
9009#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 9010 }
dd41f596 9011
57d885fe
GH
9012#ifdef CONFIG_SMP
9013 init_defrootdomain();
9014#endif
9015
d0b27fa7
PZ
9016 init_rt_bandwidth(&def_rt_bandwidth,
9017 global_rt_period(), global_rt_runtime());
9018
9019#ifdef CONFIG_RT_GROUP_SCHED
9020 init_rt_bandwidth(&init_task_group.rt_bandwidth,
9021 global_rt_period(), global_rt_runtime());
eff766a6
PZ
9022#ifdef CONFIG_USER_SCHED
9023 init_rt_bandwidth(&root_task_group.rt_bandwidth,
9024 global_rt_period(), RUNTIME_INF);
6d6bc0ad
DG
9025#endif /* CONFIG_USER_SCHED */
9026#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 9027
052f1dc7 9028#ifdef CONFIG_GROUP_SCHED
6f505b16 9029 list_add(&init_task_group.list, &task_groups);
f473aa5e
PZ
9030 INIT_LIST_HEAD(&init_task_group.children);
9031
9032#ifdef CONFIG_USER_SCHED
9033 INIT_LIST_HEAD(&root_task_group.children);
9034 init_task_group.parent = &root_task_group;
9035 list_add(&init_task_group.siblings, &root_task_group.children);
6d6bc0ad
DG
9036#endif /* CONFIG_USER_SCHED */
9037#endif /* CONFIG_GROUP_SCHED */
6f505b16 9038
0a945022 9039 for_each_possible_cpu(i) {
70b97a7f 9040 struct rq *rq;
1da177e4
LT
9041
9042 rq = cpu_rq(i);
9043 spin_lock_init(&rq->lock);
7897986b 9044 rq->nr_running = 0;
dd41f596 9045 init_cfs_rq(&rq->cfs, rq);
6f505b16 9046 init_rt_rq(&rq->rt, rq);
dd41f596 9047#ifdef CONFIG_FAIR_GROUP_SCHED
4cf86d77 9048 init_task_group.shares = init_task_group_load;
6f505b16 9049 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2
DG
9050#ifdef CONFIG_CGROUP_SCHED
9051 /*
9052 * How much cpu bandwidth does init_task_group get?
9053 *
9054 * In case of task-groups formed thr' the cgroup filesystem, it
9055 * gets 100% of the cpu resources in the system. This overall
9056 * system cpu resource is divided among the tasks of
9057 * init_task_group and its child task-groups in a fair manner,
9058 * based on each entity's (task or task-group's) weight
9059 * (se->load.weight).
9060 *
9061 * In other words, if init_task_group has 10 tasks of weight
9062 * 1024) and two child groups A0 and A1 (of weight 1024 each),
9063 * then A0's share of the cpu resource is:
9064 *
9065 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
9066 *
9067 * We achieve this by letting init_task_group's tasks sit
9068 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
9069 */
ec7dc8ac 9070 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
354d60c2 9071#elif defined CONFIG_USER_SCHED
eff766a6
PZ
9072 root_task_group.shares = NICE_0_LOAD;
9073 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
354d60c2
DG
9074 /*
9075 * In case of task-groups formed thr' the user id of tasks,
9076 * init_task_group represents tasks belonging to root user.
9077 * Hence it forms a sibling of all subsequent groups formed.
9078 * In this case, init_task_group gets only a fraction of overall
9079 * system cpu resource, based on the weight assigned to root
9080 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
9081 * by letting tasks of init_task_group sit in a separate cfs_rq
9082 * (init_cfs_rq) and having one entity represent this group of
9083 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
9084 */
ec7dc8ac 9085 init_tg_cfs_entry(&init_task_group,
6f505b16 9086 &per_cpu(init_cfs_rq, i),
eff766a6
PZ
9087 &per_cpu(init_sched_entity, i), i, 1,
9088 root_task_group.se[i]);
6f505b16 9089
052f1dc7 9090#endif
354d60c2
DG
9091#endif /* CONFIG_FAIR_GROUP_SCHED */
9092
9093 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 9094#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 9095 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
354d60c2 9096#ifdef CONFIG_CGROUP_SCHED
ec7dc8ac 9097 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
354d60c2 9098#elif defined CONFIG_USER_SCHED
eff766a6 9099 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
ec7dc8ac 9100 init_tg_rt_entry(&init_task_group,
6f505b16 9101 &per_cpu(init_rt_rq, i),
eff766a6
PZ
9102 &per_cpu(init_sched_rt_entity, i), i, 1,
9103 root_task_group.rt_se[i]);
354d60c2 9104#endif
dd41f596 9105#endif
1da177e4 9106
dd41f596
IM
9107 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
9108 rq->cpu_load[j] = 0;
1da177e4 9109#ifdef CONFIG_SMP
41c7ce9a 9110 rq->sd = NULL;
57d885fe 9111 rq->rd = NULL;
1da177e4 9112 rq->active_balance = 0;
dd41f596 9113 rq->next_balance = jiffies;
1da177e4 9114 rq->push_cpu = 0;
0a2966b4 9115 rq->cpu = i;
1f11eb6a 9116 rq->online = 0;
1da177e4
LT
9117 rq->migration_thread = NULL;
9118 INIT_LIST_HEAD(&rq->migration_queue);
dc938520 9119 rq_attach_root(rq, &def_root_domain);
1da177e4 9120#endif
8f4d37ec 9121 init_rq_hrtick(rq);
1da177e4 9122 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
9123 }
9124
2dd73a4f 9125 set_load_weight(&init_task);
b50f60ce 9126
e107be36
AK
9127#ifdef CONFIG_PREEMPT_NOTIFIERS
9128 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
9129#endif
9130
c9819f45 9131#ifdef CONFIG_SMP
962cf36c 9132 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
c9819f45
CL
9133#endif
9134
b50f60ce
HC
9135#ifdef CONFIG_RT_MUTEXES
9136 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
9137#endif
9138
1da177e4
LT
9139 /*
9140 * The boot idle thread does lazy MMU switching as well:
9141 */
9142 atomic_inc(&init_mm.mm_count);
9143 enter_lazy_tlb(&init_mm, current);
9144
9145 /*
9146 * Make us the idle thread. Technically, schedule() should not be
9147 * called from this thread, however somewhere below it might be,
9148 * but because we are the idle thread, we just pick up running again
9149 * when this runqueue becomes "idle".
9150 */
9151 init_idle(current, smp_processor_id());
dd41f596
IM
9152 /*
9153 * During early bootup we pretend to be a normal task:
9154 */
9155 current->sched_class = &fair_sched_class;
6892b75e 9156
6a7b3dc3
RR
9157 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
9158 alloc_bootmem_cpumask_var(&nohz_cpu_mask);
bf4d83f6 9159#ifdef CONFIG_SMP
7d1e6a9b
RR
9160#ifdef CONFIG_NO_HZ
9161 alloc_bootmem_cpumask_var(&nohz.cpu_mask);
f711f609 9162 alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask);
7d1e6a9b 9163#endif
dcc30a35 9164 alloc_bootmem_cpumask_var(&cpu_isolated_map);
bf4d83f6 9165#endif /* SMP */
6a7b3dc3 9166
6892b75e 9167 scheduler_running = 1;
1da177e4
LT
9168}
9169
9170#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9171void __might_sleep(char *file, int line)
9172{
48f24c4d 9173#ifdef in_atomic
1da177e4
LT
9174 static unsigned long prev_jiffy; /* ratelimiting */
9175
aef745fc
IM
9176 if ((!in_atomic() && !irqs_disabled()) ||
9177 system_state != SYSTEM_RUNNING || oops_in_progress)
9178 return;
9179 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9180 return;
9181 prev_jiffy = jiffies;
9182
9183 printk(KERN_ERR
9184 "BUG: sleeping function called from invalid context at %s:%d\n",
9185 file, line);
9186 printk(KERN_ERR
9187 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
9188 in_atomic(), irqs_disabled(),
9189 current->pid, current->comm);
9190
9191 debug_show_held_locks(current);
9192 if (irqs_disabled())
9193 print_irqtrace_events(current);
9194 dump_stack();
1da177e4
LT
9195#endif
9196}
9197EXPORT_SYMBOL(__might_sleep);
9198#endif
9199
9200#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
9201static void normalize_task(struct rq *rq, struct task_struct *p)
9202{
9203 int on_rq;
3e51f33f 9204
3a5e4dc1
AK
9205 update_rq_clock(rq);
9206 on_rq = p->se.on_rq;
9207 if (on_rq)
9208 deactivate_task(rq, p, 0);
9209 __setscheduler(rq, p, SCHED_NORMAL, 0);
9210 if (on_rq) {
9211 activate_task(rq, p, 0);
9212 resched_task(rq->curr);
9213 }
9214}
9215
1da177e4
LT
9216void normalize_rt_tasks(void)
9217{
a0f98a1c 9218 struct task_struct *g, *p;
1da177e4 9219 unsigned long flags;
70b97a7f 9220 struct rq *rq;
1da177e4 9221
4cf5d77a 9222 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 9223 do_each_thread(g, p) {
178be793
IM
9224 /*
9225 * Only normalize user tasks:
9226 */
9227 if (!p->mm)
9228 continue;
9229
6cfb0d5d 9230 p->se.exec_start = 0;
6cfb0d5d 9231#ifdef CONFIG_SCHEDSTATS
dd41f596 9232 p->se.wait_start = 0;
dd41f596 9233 p->se.sleep_start = 0;
dd41f596 9234 p->se.block_start = 0;
6cfb0d5d 9235#endif
dd41f596
IM
9236
9237 if (!rt_task(p)) {
9238 /*
9239 * Renice negative nice level userspace
9240 * tasks back to 0:
9241 */
9242 if (TASK_NICE(p) < 0 && p->mm)
9243 set_user_nice(p, 0);
1da177e4 9244 continue;
dd41f596 9245 }
1da177e4 9246
4cf5d77a 9247 spin_lock(&p->pi_lock);
b29739f9 9248 rq = __task_rq_lock(p);
1da177e4 9249
178be793 9250 normalize_task(rq, p);
3a5e4dc1 9251
b29739f9 9252 __task_rq_unlock(rq);
4cf5d77a 9253 spin_unlock(&p->pi_lock);
a0f98a1c
IM
9254 } while_each_thread(g, p);
9255
4cf5d77a 9256 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
9257}
9258
9259#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a
LT
9260
9261#ifdef CONFIG_IA64
9262/*
9263 * These functions are only useful for the IA64 MCA handling.
9264 *
9265 * They can only be called when the whole system has been
9266 * stopped - every CPU needs to be quiescent, and no scheduling
9267 * activity can take place. Using them for anything else would
9268 * be a serious bug, and as a result, they aren't even visible
9269 * under any other configuration.
9270 */
9271
9272/**
9273 * curr_task - return the current task for a given cpu.
9274 * @cpu: the processor in question.
9275 *
9276 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9277 */
36c8b586 9278struct task_struct *curr_task(int cpu)
1df5c10a
LT
9279{
9280 return cpu_curr(cpu);
9281}
9282
9283/**
9284 * set_curr_task - set the current task for a given cpu.
9285 * @cpu: the processor in question.
9286 * @p: the task pointer to set.
9287 *
9288 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
9289 * are serviced on a separate stack. It allows the architecture to switch the
9290 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
9291 * must be called with all CPU's synchronized, and interrupts disabled, the
9292 * and caller must save the original value of the current task (see
9293 * curr_task() above) and restore that value before reenabling interrupts and
9294 * re-starting the system.
9295 *
9296 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9297 */
36c8b586 9298void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
9299{
9300 cpu_curr(cpu) = p;
9301}
9302
9303#endif
29f59db3 9304
bccbe08a
PZ
9305#ifdef CONFIG_FAIR_GROUP_SCHED
9306static void free_fair_sched_group(struct task_group *tg)
6f505b16
PZ
9307{
9308 int i;
9309
9310 for_each_possible_cpu(i) {
9311 if (tg->cfs_rq)
9312 kfree(tg->cfs_rq[i]);
9313 if (tg->se)
9314 kfree(tg->se[i]);
6f505b16
PZ
9315 }
9316
9317 kfree(tg->cfs_rq);
9318 kfree(tg->se);
6f505b16
PZ
9319}
9320
ec7dc8ac
DG
9321static
9322int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
29f59db3 9323{
29f59db3 9324 struct cfs_rq *cfs_rq;
eab17229 9325 struct sched_entity *se;
9b5b7751 9326 struct rq *rq;
29f59db3
SV
9327 int i;
9328
434d53b0 9329 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
9330 if (!tg->cfs_rq)
9331 goto err;
434d53b0 9332 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
9333 if (!tg->se)
9334 goto err;
052f1dc7
PZ
9335
9336 tg->shares = NICE_0_LOAD;
29f59db3
SV
9337
9338 for_each_possible_cpu(i) {
9b5b7751 9339 rq = cpu_rq(i);
29f59db3 9340
eab17229
LZ
9341 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9342 GFP_KERNEL, cpu_to_node(i));
29f59db3
SV
9343 if (!cfs_rq)
9344 goto err;
9345
eab17229
LZ
9346 se = kzalloc_node(sizeof(struct sched_entity),
9347 GFP_KERNEL, cpu_to_node(i));
29f59db3
SV
9348 if (!se)
9349 goto err;
9350
eab17229 9351 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
bccbe08a
PZ
9352 }
9353
9354 return 1;
9355
9356 err:
9357 return 0;
9358}
9359
9360static inline void register_fair_sched_group(struct task_group *tg, int cpu)
9361{
9362 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
9363 &cpu_rq(cpu)->leaf_cfs_rq_list);
9364}
9365
9366static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
9367{
9368 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
9369}
6d6bc0ad 9370#else /* !CONFG_FAIR_GROUP_SCHED */
bccbe08a
PZ
9371static inline void free_fair_sched_group(struct task_group *tg)
9372{
9373}
9374
ec7dc8ac
DG
9375static inline
9376int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
9377{
9378 return 1;
9379}
9380
9381static inline void register_fair_sched_group(struct task_group *tg, int cpu)
9382{
9383}
9384
9385static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
9386{
9387}
6d6bc0ad 9388#endif /* CONFIG_FAIR_GROUP_SCHED */
052f1dc7
PZ
9389
9390#ifdef CONFIG_RT_GROUP_SCHED
bccbe08a
PZ
9391static void free_rt_sched_group(struct task_group *tg)
9392{
9393 int i;
9394
d0b27fa7
PZ
9395 destroy_rt_bandwidth(&tg->rt_bandwidth);
9396
bccbe08a
PZ
9397 for_each_possible_cpu(i) {
9398 if (tg->rt_rq)
9399 kfree(tg->rt_rq[i]);
9400 if (tg->rt_se)
9401 kfree(tg->rt_se[i]);
9402 }
9403
9404 kfree(tg->rt_rq);
9405 kfree(tg->rt_se);
9406}
9407
ec7dc8ac
DG
9408static
9409int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
9410{
9411 struct rt_rq *rt_rq;
eab17229 9412 struct sched_rt_entity *rt_se;
bccbe08a
PZ
9413 struct rq *rq;
9414 int i;
9415
434d53b0 9416 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
9417 if (!tg->rt_rq)
9418 goto err;
434d53b0 9419 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
9420 if (!tg->rt_se)
9421 goto err;
9422
d0b27fa7
PZ
9423 init_rt_bandwidth(&tg->rt_bandwidth,
9424 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
bccbe08a
PZ
9425
9426 for_each_possible_cpu(i) {
9427 rq = cpu_rq(i);
9428
eab17229
LZ
9429 rt_rq = kzalloc_node(sizeof(struct rt_rq),
9430 GFP_KERNEL, cpu_to_node(i));
6f505b16
PZ
9431 if (!rt_rq)
9432 goto err;
29f59db3 9433
eab17229
LZ
9434 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
9435 GFP_KERNEL, cpu_to_node(i));
6f505b16
PZ
9436 if (!rt_se)
9437 goto err;
29f59db3 9438
eab17229 9439 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
29f59db3
SV
9440 }
9441
bccbe08a
PZ
9442 return 1;
9443
9444 err:
9445 return 0;
9446}
9447
9448static inline void register_rt_sched_group(struct task_group *tg, int cpu)
9449{
9450 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
9451 &cpu_rq(cpu)->leaf_rt_rq_list);
9452}
9453
9454static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
9455{
9456 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
9457}
6d6bc0ad 9458#else /* !CONFIG_RT_GROUP_SCHED */
bccbe08a
PZ
9459static inline void free_rt_sched_group(struct task_group *tg)
9460{
9461}
9462
ec7dc8ac
DG
9463static inline
9464int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
9465{
9466 return 1;
9467}
9468
9469static inline void register_rt_sched_group(struct task_group *tg, int cpu)
9470{
9471}
9472
9473static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
9474{
9475}
6d6bc0ad 9476#endif /* CONFIG_RT_GROUP_SCHED */
bccbe08a 9477
d0b27fa7 9478#ifdef CONFIG_GROUP_SCHED
bccbe08a
PZ
9479static void free_sched_group(struct task_group *tg)
9480{
9481 free_fair_sched_group(tg);
9482 free_rt_sched_group(tg);
9483 kfree(tg);
9484}
9485
9486/* allocate runqueue etc for a new task group */
ec7dc8ac 9487struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
9488{
9489 struct task_group *tg;
9490 unsigned long flags;
9491 int i;
9492
9493 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
9494 if (!tg)
9495 return ERR_PTR(-ENOMEM);
9496
ec7dc8ac 9497 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
9498 goto err;
9499
ec7dc8ac 9500 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
9501 goto err;
9502
8ed36996 9503 spin_lock_irqsave(&task_group_lock, flags);
9b5b7751 9504 for_each_possible_cpu(i) {
bccbe08a
PZ
9505 register_fair_sched_group(tg, i);
9506 register_rt_sched_group(tg, i);
9b5b7751 9507 }
6f505b16 9508 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
9509
9510 WARN_ON(!parent); /* root should already exist */
9511
9512 tg->parent = parent;
f473aa5e 9513 INIT_LIST_HEAD(&tg->children);
09f2724a 9514 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 9515 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 9516
9b5b7751 9517 return tg;
29f59db3
SV
9518
9519err:
6f505b16 9520 free_sched_group(tg);
29f59db3
SV
9521 return ERR_PTR(-ENOMEM);
9522}
9523
9b5b7751 9524/* rcu callback to free various structures associated with a task group */
6f505b16 9525static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 9526{
29f59db3 9527 /* now it should be safe to free those cfs_rqs */
6f505b16 9528 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
9529}
9530
9b5b7751 9531/* Destroy runqueue etc associated with a task group */
4cf86d77 9532void sched_destroy_group(struct task_group *tg)
29f59db3 9533{
8ed36996 9534 unsigned long flags;
9b5b7751 9535 int i;
29f59db3 9536
8ed36996 9537 spin_lock_irqsave(&task_group_lock, flags);
9b5b7751 9538 for_each_possible_cpu(i) {
bccbe08a
PZ
9539 unregister_fair_sched_group(tg, i);
9540 unregister_rt_sched_group(tg, i);
9b5b7751 9541 }
6f505b16 9542 list_del_rcu(&tg->list);
f473aa5e 9543 list_del_rcu(&tg->siblings);
8ed36996 9544 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 9545
9b5b7751 9546 /* wait for possible concurrent references to cfs_rqs complete */
6f505b16 9547 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
9548}
9549
9b5b7751 9550/* change task's runqueue when it moves between groups.
3a252015
IM
9551 * The caller of this function should have put the task in its new group
9552 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
9553 * reflect its new group.
9b5b7751
SV
9554 */
9555void sched_move_task(struct task_struct *tsk)
29f59db3
SV
9556{
9557 int on_rq, running;
9558 unsigned long flags;
9559 struct rq *rq;
9560
9561 rq = task_rq_lock(tsk, &flags);
9562
29f59db3
SV
9563 update_rq_clock(rq);
9564
051a1d1a 9565 running = task_current(rq, tsk);
29f59db3
SV
9566 on_rq = tsk->se.on_rq;
9567
0e1f3483 9568 if (on_rq)
29f59db3 9569 dequeue_task(rq, tsk, 0);
0e1f3483
HS
9570 if (unlikely(running))
9571 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 9572
6f505b16 9573 set_task_rq(tsk, task_cpu(tsk));
29f59db3 9574
810b3817
PZ
9575#ifdef CONFIG_FAIR_GROUP_SCHED
9576 if (tsk->sched_class->moved_group)
9577 tsk->sched_class->moved_group(tsk);
9578#endif
9579
0e1f3483
HS
9580 if (unlikely(running))
9581 tsk->sched_class->set_curr_task(rq);
9582 if (on_rq)
7074badb 9583 enqueue_task(rq, tsk, 0);
29f59db3 9584
29f59db3
SV
9585 task_rq_unlock(rq, &flags);
9586}
6d6bc0ad 9587#endif /* CONFIG_GROUP_SCHED */
29f59db3 9588
052f1dc7 9589#ifdef CONFIG_FAIR_GROUP_SCHED
c09595f6 9590static void __set_se_shares(struct sched_entity *se, unsigned long shares)
29f59db3
SV
9591{
9592 struct cfs_rq *cfs_rq = se->cfs_rq;
29f59db3
SV
9593 int on_rq;
9594
29f59db3 9595 on_rq = se->on_rq;
62fb1851 9596 if (on_rq)
29f59db3
SV
9597 dequeue_entity(cfs_rq, se, 0);
9598
9599 se->load.weight = shares;
e05510d0 9600 se->load.inv_weight = 0;
29f59db3 9601
62fb1851 9602 if (on_rq)
29f59db3 9603 enqueue_entity(cfs_rq, se, 0);
c09595f6 9604}
62fb1851 9605
c09595f6
PZ
9606static void set_se_shares(struct sched_entity *se, unsigned long shares)
9607{
9608 struct cfs_rq *cfs_rq = se->cfs_rq;
9609 struct rq *rq = cfs_rq->rq;
9610 unsigned long flags;
9611
9612 spin_lock_irqsave(&rq->lock, flags);
9613 __set_se_shares(se, shares);
9614 spin_unlock_irqrestore(&rq->lock, flags);
29f59db3
SV
9615}
9616
8ed36996
PZ
9617static DEFINE_MUTEX(shares_mutex);
9618
4cf86d77 9619int sched_group_set_shares(struct task_group *tg, unsigned long shares)
29f59db3
SV
9620{
9621 int i;
8ed36996 9622 unsigned long flags;
c61935fd 9623
ec7dc8ac
DG
9624 /*
9625 * We can't change the weight of the root cgroup.
9626 */
9627 if (!tg->se[0])
9628 return -EINVAL;
9629
18d95a28
PZ
9630 if (shares < MIN_SHARES)
9631 shares = MIN_SHARES;
cb4ad1ff
MX
9632 else if (shares > MAX_SHARES)
9633 shares = MAX_SHARES;
62fb1851 9634
8ed36996 9635 mutex_lock(&shares_mutex);
9b5b7751 9636 if (tg->shares == shares)
5cb350ba 9637 goto done;
29f59db3 9638
8ed36996 9639 spin_lock_irqsave(&task_group_lock, flags);
bccbe08a
PZ
9640 for_each_possible_cpu(i)
9641 unregister_fair_sched_group(tg, i);
f473aa5e 9642 list_del_rcu(&tg->siblings);
8ed36996 9643 spin_unlock_irqrestore(&task_group_lock, flags);
6b2d7700
SV
9644
9645 /* wait for any ongoing reference to this group to finish */
9646 synchronize_sched();
9647
9648 /*
9649 * Now we are free to modify the group's share on each cpu
9650 * w/o tripping rebalance_share or load_balance_fair.
9651 */
9b5b7751 9652 tg->shares = shares;
c09595f6
PZ
9653 for_each_possible_cpu(i) {
9654 /*
9655 * force a rebalance
9656 */
9657 cfs_rq_set_shares(tg->cfs_rq[i], 0);
cb4ad1ff 9658 set_se_shares(tg->se[i], shares);
c09595f6 9659 }
29f59db3 9660
6b2d7700
SV
9661 /*
9662 * Enable load balance activity on this group, by inserting it back on
9663 * each cpu's rq->leaf_cfs_rq_list.
9664 */
8ed36996 9665 spin_lock_irqsave(&task_group_lock, flags);
bccbe08a
PZ
9666 for_each_possible_cpu(i)
9667 register_fair_sched_group(tg, i);
f473aa5e 9668 list_add_rcu(&tg->siblings, &tg->parent->children);
8ed36996 9669 spin_unlock_irqrestore(&task_group_lock, flags);
5cb350ba 9670done:
8ed36996 9671 mutex_unlock(&shares_mutex);
9b5b7751 9672 return 0;
29f59db3
SV
9673}
9674
5cb350ba
DG
9675unsigned long sched_group_shares(struct task_group *tg)
9676{
9677 return tg->shares;
9678}
052f1dc7 9679#endif
5cb350ba 9680
052f1dc7 9681#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 9682/*
9f0c1e56 9683 * Ensure that the real time constraints are schedulable.
6f505b16 9684 */
9f0c1e56
PZ
9685static DEFINE_MUTEX(rt_constraints_mutex);
9686
9687static unsigned long to_ratio(u64 period, u64 runtime)
9688{
9689 if (runtime == RUNTIME_INF)
9a7e0b18 9690 return 1ULL << 20;
9f0c1e56 9691
9a7e0b18 9692 return div64_u64(runtime << 20, period);
9f0c1e56
PZ
9693}
9694
9a7e0b18
PZ
9695/* Must be called with tasklist_lock held */
9696static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 9697{
9a7e0b18 9698 struct task_struct *g, *p;
b40b2e8e 9699
9a7e0b18
PZ
9700 do_each_thread(g, p) {
9701 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
9702 return 1;
9703 } while_each_thread(g, p);
b40b2e8e 9704
9a7e0b18
PZ
9705 return 0;
9706}
b40b2e8e 9707
9a7e0b18
PZ
9708struct rt_schedulable_data {
9709 struct task_group *tg;
9710 u64 rt_period;
9711 u64 rt_runtime;
9712};
b40b2e8e 9713
9a7e0b18
PZ
9714static int tg_schedulable(struct task_group *tg, void *data)
9715{
9716 struct rt_schedulable_data *d = data;
9717 struct task_group *child;
9718 unsigned long total, sum = 0;
9719 u64 period, runtime;
b40b2e8e 9720
9a7e0b18
PZ
9721 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
9722 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 9723
9a7e0b18
PZ
9724 if (tg == d->tg) {
9725 period = d->rt_period;
9726 runtime = d->rt_runtime;
b40b2e8e 9727 }
b40b2e8e 9728
98a4826b
PZ
9729#ifdef CONFIG_USER_SCHED
9730 if (tg == &root_task_group) {
9731 period = global_rt_period();
9732 runtime = global_rt_runtime();
9733 }
9734#endif
9735
4653f803
PZ
9736 /*
9737 * Cannot have more runtime than the period.
9738 */
9739 if (runtime > period && runtime != RUNTIME_INF)
9740 return -EINVAL;
6f505b16 9741
4653f803
PZ
9742 /*
9743 * Ensure we don't starve existing RT tasks.
9744 */
9a7e0b18
PZ
9745 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
9746 return -EBUSY;
6f505b16 9747
9a7e0b18 9748 total = to_ratio(period, runtime);
6f505b16 9749
4653f803
PZ
9750 /*
9751 * Nobody can have more than the global setting allows.
9752 */
9753 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
9754 return -EINVAL;
6f505b16 9755
4653f803
PZ
9756 /*
9757 * The sum of our children's runtime should not exceed our own.
9758 */
9a7e0b18
PZ
9759 list_for_each_entry_rcu(child, &tg->children, siblings) {
9760 period = ktime_to_ns(child->rt_bandwidth.rt_period);
9761 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 9762
9a7e0b18
PZ
9763 if (child == d->tg) {
9764 period = d->rt_period;
9765 runtime = d->rt_runtime;
9766 }
6f505b16 9767
9a7e0b18 9768 sum += to_ratio(period, runtime);
9f0c1e56 9769 }
6f505b16 9770
9a7e0b18
PZ
9771 if (sum > total)
9772 return -EINVAL;
9773
9774 return 0;
6f505b16
PZ
9775}
9776
9a7e0b18 9777static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 9778{
9a7e0b18
PZ
9779 struct rt_schedulable_data data = {
9780 .tg = tg,
9781 .rt_period = period,
9782 .rt_runtime = runtime,
9783 };
9784
9785 return walk_tg_tree(tg_schedulable, tg_nop, &data);
521f1a24
DG
9786}
9787
d0b27fa7
PZ
9788static int tg_set_bandwidth(struct task_group *tg,
9789 u64 rt_period, u64 rt_runtime)
6f505b16 9790{
ac086bc2 9791 int i, err = 0;
9f0c1e56 9792
9f0c1e56 9793 mutex_lock(&rt_constraints_mutex);
521f1a24 9794 read_lock(&tasklist_lock);
9a7e0b18
PZ
9795 err = __rt_schedulable(tg, rt_period, rt_runtime);
9796 if (err)
9f0c1e56 9797 goto unlock;
ac086bc2
PZ
9798
9799 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
9800 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
9801 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
9802
9803 for_each_possible_cpu(i) {
9804 struct rt_rq *rt_rq = tg->rt_rq[i];
9805
9806 spin_lock(&rt_rq->rt_runtime_lock);
9807 rt_rq->rt_runtime = rt_runtime;
9808 spin_unlock(&rt_rq->rt_runtime_lock);
9809 }
9810 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
9f0c1e56 9811 unlock:
521f1a24 9812 read_unlock(&tasklist_lock);
9f0c1e56
PZ
9813 mutex_unlock(&rt_constraints_mutex);
9814
9815 return err;
6f505b16
PZ
9816}
9817
d0b27fa7
PZ
9818int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
9819{
9820 u64 rt_runtime, rt_period;
9821
9822 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
9823 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
9824 if (rt_runtime_us < 0)
9825 rt_runtime = RUNTIME_INF;
9826
9827 return tg_set_bandwidth(tg, rt_period, rt_runtime);
9828}
9829
9f0c1e56
PZ
9830long sched_group_rt_runtime(struct task_group *tg)
9831{
9832 u64 rt_runtime_us;
9833
d0b27fa7 9834 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
9835 return -1;
9836
d0b27fa7 9837 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
9838 do_div(rt_runtime_us, NSEC_PER_USEC);
9839 return rt_runtime_us;
9840}
d0b27fa7
PZ
9841
9842int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
9843{
9844 u64 rt_runtime, rt_period;
9845
9846 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
9847 rt_runtime = tg->rt_bandwidth.rt_runtime;
9848
619b0488
R
9849 if (rt_period == 0)
9850 return -EINVAL;
9851
d0b27fa7
PZ
9852 return tg_set_bandwidth(tg, rt_period, rt_runtime);
9853}
9854
9855long sched_group_rt_period(struct task_group *tg)
9856{
9857 u64 rt_period_us;
9858
9859 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
9860 do_div(rt_period_us, NSEC_PER_USEC);
9861 return rt_period_us;
9862}
9863
9864static int sched_rt_global_constraints(void)
9865{
4653f803 9866 u64 runtime, period;
d0b27fa7
PZ
9867 int ret = 0;
9868
ec5d4989
HS
9869 if (sysctl_sched_rt_period <= 0)
9870 return -EINVAL;
9871
4653f803
PZ
9872 runtime = global_rt_runtime();
9873 period = global_rt_period();
9874
9875 /*
9876 * Sanity check on the sysctl variables.
9877 */
9878 if (runtime > period && runtime != RUNTIME_INF)
9879 return -EINVAL;
10b612f4 9880
d0b27fa7 9881 mutex_lock(&rt_constraints_mutex);
9a7e0b18 9882 read_lock(&tasklist_lock);
4653f803 9883 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 9884 read_unlock(&tasklist_lock);
d0b27fa7
PZ
9885 mutex_unlock(&rt_constraints_mutex);
9886
9887 return ret;
9888}
54e99124
DG
9889
9890int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
9891{
9892 /* Don't accept realtime tasks when there is no way for them to run */
9893 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
9894 return 0;
9895
9896 return 1;
9897}
9898
6d6bc0ad 9899#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
9900static int sched_rt_global_constraints(void)
9901{
ac086bc2
PZ
9902 unsigned long flags;
9903 int i;
9904
ec5d4989
HS
9905 if (sysctl_sched_rt_period <= 0)
9906 return -EINVAL;
9907
ac086bc2
PZ
9908 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
9909 for_each_possible_cpu(i) {
9910 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
9911
9912 spin_lock(&rt_rq->rt_runtime_lock);
9913 rt_rq->rt_runtime = global_rt_runtime();
9914 spin_unlock(&rt_rq->rt_runtime_lock);
9915 }
9916 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
9917
d0b27fa7
PZ
9918 return 0;
9919}
6d6bc0ad 9920#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
9921
9922int sched_rt_handler(struct ctl_table *table, int write,
9923 struct file *filp, void __user *buffer, size_t *lenp,
9924 loff_t *ppos)
9925{
9926 int ret;
9927 int old_period, old_runtime;
9928 static DEFINE_MUTEX(mutex);
9929
9930 mutex_lock(&mutex);
9931 old_period = sysctl_sched_rt_period;
9932 old_runtime = sysctl_sched_rt_runtime;
9933
9934 ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
9935
9936 if (!ret && write) {
9937 ret = sched_rt_global_constraints();
9938 if (ret) {
9939 sysctl_sched_rt_period = old_period;
9940 sysctl_sched_rt_runtime = old_runtime;
9941 } else {
9942 def_rt_bandwidth.rt_runtime = global_rt_runtime();
9943 def_rt_bandwidth.rt_period =
9944 ns_to_ktime(global_rt_period());
9945 }
9946 }
9947 mutex_unlock(&mutex);
9948
9949 return ret;
9950}
68318b8e 9951
052f1dc7 9952#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
9953
9954/* return corresponding task_group object of a cgroup */
2b01dfe3 9955static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 9956{
2b01dfe3
PM
9957 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9958 struct task_group, css);
68318b8e
SV
9959}
9960
9961static struct cgroup_subsys_state *
2b01dfe3 9962cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 9963{
ec7dc8ac 9964 struct task_group *tg, *parent;
68318b8e 9965
2b01dfe3 9966 if (!cgrp->parent) {
68318b8e 9967 /* This is early initialization for the top cgroup */
68318b8e
SV
9968 return &init_task_group.css;
9969 }
9970
ec7dc8ac
DG
9971 parent = cgroup_tg(cgrp->parent);
9972 tg = sched_create_group(parent);
68318b8e
SV
9973 if (IS_ERR(tg))
9974 return ERR_PTR(-ENOMEM);
9975
68318b8e
SV
9976 return &tg->css;
9977}
9978
41a2d6cf
IM
9979static void
9980cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 9981{
2b01dfe3 9982 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
9983
9984 sched_destroy_group(tg);
9985}
9986
41a2d6cf
IM
9987static int
9988cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9989 struct task_struct *tsk)
68318b8e 9990{
b68aa230 9991#ifdef CONFIG_RT_GROUP_SCHED
54e99124 9992 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
b68aa230
PZ
9993 return -EINVAL;
9994#else
68318b8e
SV
9995 /* We don't support RT-tasks being in separate groups */
9996 if (tsk->sched_class != &fair_sched_class)
9997 return -EINVAL;
b68aa230 9998#endif
68318b8e
SV
9999
10000 return 0;
10001}
10002
10003static void
2b01dfe3 10004cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
68318b8e
SV
10005 struct cgroup *old_cont, struct task_struct *tsk)
10006{
10007 sched_move_task(tsk);
10008}
10009
052f1dc7 10010#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 10011static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 10012 u64 shareval)
68318b8e 10013{
2b01dfe3 10014 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
68318b8e
SV
10015}
10016
f4c753b7 10017static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 10018{
2b01dfe3 10019 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
10020
10021 return (u64) tg->shares;
10022}
6d6bc0ad 10023#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 10024
052f1dc7 10025#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 10026static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 10027 s64 val)
6f505b16 10028{
06ecb27c 10029 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
10030}
10031
06ecb27c 10032static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 10033{
06ecb27c 10034 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 10035}
d0b27fa7
PZ
10036
10037static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
10038 u64 rt_period_us)
10039{
10040 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
10041}
10042
10043static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
10044{
10045 return sched_group_rt_period(cgroup_tg(cgrp));
10046}
6d6bc0ad 10047#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 10048
fe5c7cc2 10049static struct cftype cpu_files[] = {
052f1dc7 10050#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
10051 {
10052 .name = "shares",
f4c753b7
PM
10053 .read_u64 = cpu_shares_read_u64,
10054 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 10055 },
052f1dc7
PZ
10056#endif
10057#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 10058 {
9f0c1e56 10059 .name = "rt_runtime_us",
06ecb27c
PM
10060 .read_s64 = cpu_rt_runtime_read,
10061 .write_s64 = cpu_rt_runtime_write,
6f505b16 10062 },
d0b27fa7
PZ
10063 {
10064 .name = "rt_period_us",
f4c753b7
PM
10065 .read_u64 = cpu_rt_period_read_uint,
10066 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 10067 },
052f1dc7 10068#endif
68318b8e
SV
10069};
10070
10071static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
10072{
fe5c7cc2 10073 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
68318b8e
SV
10074}
10075
10076struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
10077 .name = "cpu",
10078 .create = cpu_cgroup_create,
10079 .destroy = cpu_cgroup_destroy,
10080 .can_attach = cpu_cgroup_can_attach,
10081 .attach = cpu_cgroup_attach,
10082 .populate = cpu_cgroup_populate,
10083 .subsys_id = cpu_cgroup_subsys_id,
68318b8e
SV
10084 .early_init = 1,
10085};
10086
052f1dc7 10087#endif /* CONFIG_CGROUP_SCHED */
d842de87
SV
10088
10089#ifdef CONFIG_CGROUP_CPUACCT
10090
10091/*
10092 * CPU accounting code for task groups.
10093 *
10094 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
10095 * (balbir@in.ibm.com).
10096 */
10097
934352f2 10098/* track cpu usage of a group of tasks and its child groups */
d842de87
SV
10099struct cpuacct {
10100 struct cgroup_subsys_state css;
10101 /* cpuusage holds pointer to a u64-type object on every cpu */
10102 u64 *cpuusage;
ef12fefa 10103 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
934352f2 10104 struct cpuacct *parent;
d842de87
SV
10105};
10106
10107struct cgroup_subsys cpuacct_subsys;
10108
10109/* return cpu accounting group corresponding to this container */
32cd756a 10110static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
d842de87 10111{
32cd756a 10112 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
d842de87
SV
10113 struct cpuacct, css);
10114}
10115
10116/* return cpu accounting group to which this task belongs */
10117static inline struct cpuacct *task_ca(struct task_struct *tsk)
10118{
10119 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
10120 struct cpuacct, css);
10121}
10122
10123/* create a new cpu accounting group */
10124static struct cgroup_subsys_state *cpuacct_create(
32cd756a 10125 struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87
SV
10126{
10127 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ef12fefa 10128 int i;
d842de87
SV
10129
10130 if (!ca)
ef12fefa 10131 goto out;
d842de87
SV
10132
10133 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
10134 if (!ca->cpuusage)
10135 goto out_free_ca;
10136
10137 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
10138 if (percpu_counter_init(&ca->cpustat[i], 0))
10139 goto out_free_counters;
d842de87 10140
934352f2
BR
10141 if (cgrp->parent)
10142 ca->parent = cgroup_ca(cgrp->parent);
10143
d842de87 10144 return &ca->css;
ef12fefa
BR
10145
10146out_free_counters:
10147 while (--i >= 0)
10148 percpu_counter_destroy(&ca->cpustat[i]);
10149 free_percpu(ca->cpuusage);
10150out_free_ca:
10151 kfree(ca);
10152out:
10153 return ERR_PTR(-ENOMEM);
d842de87
SV
10154}
10155
10156/* destroy an existing cpu accounting group */
41a2d6cf 10157static void
32cd756a 10158cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 10159{
32cd756a 10160 struct cpuacct *ca = cgroup_ca(cgrp);
ef12fefa 10161 int i;
d842de87 10162
ef12fefa
BR
10163 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
10164 percpu_counter_destroy(&ca->cpustat[i]);
d842de87
SV
10165 free_percpu(ca->cpuusage);
10166 kfree(ca);
10167}
10168
720f5498
KC
10169static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10170{
b36128c8 10171 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
10172 u64 data;
10173
10174#ifndef CONFIG_64BIT
10175 /*
10176 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10177 */
10178 spin_lock_irq(&cpu_rq(cpu)->lock);
10179 data = *cpuusage;
10180 spin_unlock_irq(&cpu_rq(cpu)->lock);
10181#else
10182 data = *cpuusage;
10183#endif
10184
10185 return data;
10186}
10187
10188static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10189{
b36128c8 10190 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
10191
10192#ifndef CONFIG_64BIT
10193 /*
10194 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10195 */
10196 spin_lock_irq(&cpu_rq(cpu)->lock);
10197 *cpuusage = val;
10198 spin_unlock_irq(&cpu_rq(cpu)->lock);
10199#else
10200 *cpuusage = val;
10201#endif
10202}
10203
d842de87 10204/* return total cpu usage (in nanoseconds) of a group */
32cd756a 10205static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 10206{
32cd756a 10207 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
10208 u64 totalcpuusage = 0;
10209 int i;
10210
720f5498
KC
10211 for_each_present_cpu(i)
10212 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
10213
10214 return totalcpuusage;
10215}
10216
0297b803
DG
10217static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
10218 u64 reset)
10219{
10220 struct cpuacct *ca = cgroup_ca(cgrp);
10221 int err = 0;
10222 int i;
10223
10224 if (reset) {
10225 err = -EINVAL;
10226 goto out;
10227 }
10228
720f5498
KC
10229 for_each_present_cpu(i)
10230 cpuacct_cpuusage_write(ca, i, 0);
0297b803 10231
0297b803
DG
10232out:
10233 return err;
10234}
10235
e9515c3c
KC
10236static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
10237 struct seq_file *m)
10238{
10239 struct cpuacct *ca = cgroup_ca(cgroup);
10240 u64 percpu;
10241 int i;
10242
10243 for_each_present_cpu(i) {
10244 percpu = cpuacct_cpuusage_read(ca, i);
10245 seq_printf(m, "%llu ", (unsigned long long) percpu);
10246 }
10247 seq_printf(m, "\n");
10248 return 0;
10249}
10250
ef12fefa
BR
10251static const char *cpuacct_stat_desc[] = {
10252 [CPUACCT_STAT_USER] = "user",
10253 [CPUACCT_STAT_SYSTEM] = "system",
10254};
10255
10256static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
10257 struct cgroup_map_cb *cb)
10258{
10259 struct cpuacct *ca = cgroup_ca(cgrp);
10260 int i;
10261
10262 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
10263 s64 val = percpu_counter_read(&ca->cpustat[i]);
10264 val = cputime64_to_clock_t(val);
10265 cb->fill(cb, cpuacct_stat_desc[i], val);
10266 }
10267 return 0;
10268}
10269
d842de87
SV
10270static struct cftype files[] = {
10271 {
10272 .name = "usage",
f4c753b7
PM
10273 .read_u64 = cpuusage_read,
10274 .write_u64 = cpuusage_write,
d842de87 10275 },
e9515c3c
KC
10276 {
10277 .name = "usage_percpu",
10278 .read_seq_string = cpuacct_percpu_seq_read,
10279 },
ef12fefa
BR
10280 {
10281 .name = "stat",
10282 .read_map = cpuacct_stats_show,
10283 },
d842de87
SV
10284};
10285
32cd756a 10286static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 10287{
32cd756a 10288 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
d842de87
SV
10289}
10290
10291/*
10292 * charge this task's execution time to its accounting group.
10293 *
10294 * called with rq->lock held.
10295 */
10296static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
10297{
10298 struct cpuacct *ca;
934352f2 10299 int cpu;
d842de87 10300
c40c6f85 10301 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
10302 return;
10303
934352f2 10304 cpu = task_cpu(tsk);
a18b83b7
BR
10305
10306 rcu_read_lock();
10307
d842de87 10308 ca = task_ca(tsk);
d842de87 10309
934352f2 10310 for (; ca; ca = ca->parent) {
b36128c8 10311 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
10312 *cpuusage += cputime;
10313 }
a18b83b7
BR
10314
10315 rcu_read_unlock();
d842de87
SV
10316}
10317
ef12fefa
BR
10318/*
10319 * Charge the system/user time to the task's accounting group.
10320 */
10321static void cpuacct_update_stats(struct task_struct *tsk,
10322 enum cpuacct_stat_index idx, cputime_t val)
10323{
10324 struct cpuacct *ca;
10325
10326 if (unlikely(!cpuacct_subsys.active))
10327 return;
10328
10329 rcu_read_lock();
10330 ca = task_ca(tsk);
10331
10332 do {
10333 percpu_counter_add(&ca->cpustat[idx], val);
10334 ca = ca->parent;
10335 } while (ca);
10336 rcu_read_unlock();
10337}
10338
d842de87
SV
10339struct cgroup_subsys cpuacct_subsys = {
10340 .name = "cpuacct",
10341 .create = cpuacct_create,
10342 .destroy = cpuacct_destroy,
10343 .populate = cpuacct_populate,
10344 .subsys_id = cpuacct_subsys_id,
10345};
10346#endif /* CONFIG_CGROUP_CPUACCT */