1da177e4 1/*
391e43da 2 * kernel/sched/core.c
1da177e4
LT
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
c31f2e8a
IM
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
b9131769
IM
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
1da177e4
LT
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4 34#include <linux/highmem.h>
1da177e4
LT
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
c59ede7b 37#include <linux/capability.h>
1da177e4
LT
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
9a11b49a 40#include <linux/debug_locks.h>
cdd6c482 41#include <linux/perf_event.h>
1da177e4
LT
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
7dfb7103 45#include <linux/freezer.h>
198e2f18 46#include <linux/vmalloc.h>
1da177e4
LT
47#include <linux/blkdev.h>
48#include <linux/delay.h>
b488893a 49#include <linux/pid_namespace.h>
1da177e4
LT
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
b5aadf7f 57#include <linux/proc_fs.h>
1da177e4 58#include <linux/seq_file.h>
e692ab53 59#include <linux/sysctl.h>
1da177e4
LT
60#include <linux/syscalls.h>
61#include <linux/times.h>
8f0ab514 62#include <linux/tsacct_kern.h>
c6fd91f0 63#include <linux/kprobes.h>
0ff92245 64#include <linux/delayacct.h>
dff06c15 65#include <linux/unistd.h>
f5ff8422 66#include <linux/pagemap.h>
8f4d37ec 67#include <linux/hrtimer.h>
30914a58 68#include <linux/tick.h>
f00b45c1
PZ
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
6cd8a4bb 71#include <linux/ftrace.h>
5a0e3ad6 72#include <linux/slab.h>
f1c6f1a7 73#include <linux/init_task.h>
40401530 74#include <linux/binfmts.h>
91d1aa43 75#include <linux/context_tracking.h>
52f5684c 76#include <linux/compiler.h>
1da177e4 77
96f951ed 78#include <asm/switch_to.h>
5517d86b 79#include <asm/tlb.h>
838225b4 80#include <asm/irq_regs.h>
db7e527d 81#include <asm/mutex.h>
e6e6685a
GC
82#ifdef CONFIG_PARAVIRT
83#include <asm/paravirt.h>
84#endif
1da177e4 85
029632fb 86#include "sched.h"
ea138446 87#include "../workqueue_internal.h"
29d5e047 88#include "../smpboot.h"
6e0534f2 89
a8d154b0 90#define CREATE_TRACE_POINTS
ad8d75ff 91#include <trace/events/sched.h>
a8d154b0 92
029632fb
PZ
93DEFINE_MUTEX(sched_domains_mutex);
94DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
dc61b1d6 95
fe44d621 96static void update_rq_clock_task(struct rq *rq, s64 delta);
305e6835 97
029632fb 98void update_rq_clock(struct rq *rq)
3e51f33f 99{
fe44d621 100 s64 delta;
305e6835 101
9edfbfed
PZ
102 lockdep_assert_held(&rq->lock);
103
104 if (rq->clock_skip_update & RQCF_ACT_SKIP)
f26f9aff 105 return;
aa483808 106
fe44d621 107 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
4036ac15
MG
108 if (delta < 0)
109 return;
fe44d621
PZ
110 rq->clock += delta;
111 update_rq_clock_task(rq, delta);
3e51f33f
PZ
112}
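/*
 * Note: rq->clock above advances by raw sched_clock_cpu() time, while
 * update_rq_clock_task() subtracts irq and paravirt steal time (when the
 * corresponding config options are enabled) so that rq->clock_task only
 * accumulates time that was actually available to tasks.
 */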
113
bf5c91ba
IM
114/*
115 * Debugging: various feature bits
116 */
f00b45c1 117
f00b45c1
PZ
118#define SCHED_FEAT(name, enabled) \
119 (1UL << __SCHED_FEAT_##name) * enabled |
120
bf5c91ba 121const_debug unsigned int sysctl_sched_features =
391e43da 122#include "features.h"
f00b45c1
PZ
123 0;
124
125#undef SCHED_FEAT
126
127#ifdef CONFIG_SCHED_DEBUG
128#define SCHED_FEAT(name, enabled) \
129 #name ,
130
1292531f 131static const char * const sched_feat_names[] = {
391e43da 132#include "features.h"
f00b45c1
PZ
133};
134
135#undef SCHED_FEAT
136
34f3a814 137static int sched_feat_show(struct seq_file *m, void *v)
f00b45c1 138{
f00b45c1
PZ
139 int i;
140
f8b6d1cc 141 for (i = 0; i < __SCHED_FEAT_NR; i++) {
34f3a814
LZ
142 if (!(sysctl_sched_features & (1UL << i)))
143 seq_puts(m, "NO_");
144 seq_printf(m, "%s ", sched_feat_names[i]);
f00b45c1 145 }
34f3a814 146 seq_puts(m, "\n");
f00b45c1 147
34f3a814 148 return 0;
f00b45c1
PZ
149}
150
f8b6d1cc
PZ
151#ifdef HAVE_JUMP_LABEL
152
c5905afb
IM
153#define jump_label_key__true STATIC_KEY_INIT_TRUE
154#define jump_label_key__false STATIC_KEY_INIT_FALSE
f8b6d1cc
PZ
155
156#define SCHED_FEAT(name, enabled) \
157 jump_label_key__##enabled ,
158
c5905afb 159struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
f8b6d1cc
PZ
160#include "features.h"
161};
162
163#undef SCHED_FEAT
164
165static void sched_feat_disable(int i)
166{
e33886b3 167 static_key_disable(&sched_feat_keys[i]);
f8b6d1cc
PZ
168}
169
170static void sched_feat_enable(int i)
171{
e33886b3 172 static_key_enable(&sched_feat_keys[i]);
f8b6d1cc
PZ
173}
174#else
175static void sched_feat_disable(int i) { };
176static void sched_feat_enable(int i) { };
177#endif /* HAVE_JUMP_LABEL */
178
1a687c2e 179static int sched_feat_set(char *cmp)
f00b45c1 180{
f00b45c1 181 int i;
1a687c2e 182 int neg = 0;
f00b45c1 183
524429c3 184 if (strncmp(cmp, "NO_", 3) == 0) {
f00b45c1
PZ
185 neg = 1;
186 cmp += 3;
187 }
188
f8b6d1cc 189 for (i = 0; i < __SCHED_FEAT_NR; i++) {
7740191c 190 if (strcmp(cmp, sched_feat_names[i]) == 0) {
f8b6d1cc 191 if (neg) {
f00b45c1 192 sysctl_sched_features &= ~(1UL << i);
f8b6d1cc
PZ
193 sched_feat_disable(i);
194 } else {
f00b45c1 195 sysctl_sched_features |= (1UL << i);
f8b6d1cc
PZ
196 sched_feat_enable(i);
197 }
f00b45c1
PZ
198 break;
199 }
200 }
201
1a687c2e
MG
202 return i;
203}
204
205static ssize_t
206sched_feat_write(struct file *filp, const char __user *ubuf,
207 size_t cnt, loff_t *ppos)
208{
209 char buf[64];
210 char *cmp;
211 int i;
5cd08fbf 212 struct inode *inode;
1a687c2e
MG
213
214 if (cnt > 63)
215 cnt = 63;
216
217 if (copy_from_user(&buf, ubuf, cnt))
218 return -EFAULT;
219
220 buf[cnt] = 0;
221 cmp = strstrip(buf);
222
5cd08fbf
JB
223 /* Ensure the static_key remains in a consistent state */
224 inode = file_inode(filp);
225 mutex_lock(&inode->i_mutex);
1a687c2e 226 i = sched_feat_set(cmp);
5cd08fbf 227 mutex_unlock(&inode->i_mutex);
f8b6d1cc 228 if (i == __SCHED_FEAT_NR)
f00b45c1
PZ
229 return -EINVAL;
230
42994724 231 *ppos += cnt;
f00b45c1
PZ
232
233 return cnt;
234}
235
34f3a814
LZ
236static int sched_feat_open(struct inode *inode, struct file *filp)
237{
238 return single_open(filp, sched_feat_show, NULL);
239}
240
828c0950 241static const struct file_operations sched_feat_fops = {
34f3a814
LZ
242 .open = sched_feat_open,
243 .write = sched_feat_write,
244 .read = seq_read,
245 .llseek = seq_lseek,
246 .release = single_release,
f00b45c1
PZ
247};
248
249static __init int sched_init_debug(void)
250{
f00b45c1
PZ
251 debugfs_create_file("sched_features", 0644, NULL, NULL,
252 &sched_feat_fops);
253
254 return 0;
255}
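/*
 * Illustrative usage of the file created above (assuming debugfs is
 * mounted at the usual location):
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
 *
 * Reading lists every feature, prefixing disabled ones with "NO_";
 * writing "FEAT" or "NO_FEAT" flips one feature via sched_feat_set().
 */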
256late_initcall(sched_init_debug);
f8b6d1cc 257#endif /* CONFIG_SCHED_DEBUG */
bf5c91ba 258
b82d9fdd
PZ
259/*
260 * Number of tasks to iterate in a single balance run.
261 * Limited because this is done with IRQs disabled.
262 */
263const_debug unsigned int sysctl_sched_nr_migrate = 32;
264
e9e9250b
PZ
265/*
266 * period over which we average the RT time consumption, measured
267 * in ms.
268 *
269 * default: 1s
270 */
271const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
272
fa85ae24 273/*
9f0c1e56 274 * period over which we measure -rt task cpu usage in us.
fa85ae24
PZ
275 * default: 1s
276 */
9f0c1e56 277unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 278
029632fb 279__read_mostly int scheduler_running;
6892b75e 280
9f0c1e56
PZ
281/*
282 * part of the period that we allow rt tasks to run in us.
283 * default: 0.95s
284 */
285int sysctl_sched_rt_runtime = 950000;
fa85ae24 286
3fa0818b
RR
287/* cpus with isolated domains */
288cpumask_var_t cpu_isolated_map;
289
1da177e4 290/*
cc2a73b5 291 * this_rq_lock - lock this runqueue and disable interrupts.
1da177e4 292 */
a9957449 293static struct rq *this_rq_lock(void)
1da177e4
LT
294 __acquires(rq->lock)
295{
70b97a7f 296 struct rq *rq;
1da177e4
LT
297
298 local_irq_disable();
299 rq = this_rq();
05fa785c 300 raw_spin_lock(&rq->lock);
1da177e4
LT
301
302 return rq;
303}
304
8f4d37ec
PZ
305#ifdef CONFIG_SCHED_HRTICK
306/*
307 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 308 */
8f4d37ec 309
8f4d37ec
PZ
310static void hrtick_clear(struct rq *rq)
311{
312 if (hrtimer_active(&rq->hrtick_timer))
313 hrtimer_cancel(&rq->hrtick_timer);
314}
315
8f4d37ec
PZ
316/*
317 * High-resolution timer tick.
318 * Runs from hardirq context with interrupts disabled.
319 */
320static enum hrtimer_restart hrtick(struct hrtimer *timer)
321{
322 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
323
324 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
325
05fa785c 326 raw_spin_lock(&rq->lock);
3e51f33f 327 update_rq_clock(rq);
8f4d37ec 328 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
05fa785c 329 raw_spin_unlock(&rq->lock);
8f4d37ec
PZ
330
331 return HRTIMER_NORESTART;
332}
333
95e904c7 334#ifdef CONFIG_SMP
971ee28c 335
4961b6e1 336static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
337{
338 struct hrtimer *timer = &rq->hrtick_timer;
971ee28c 339
4961b6e1 340 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
971ee28c
PZ
341}
342
31656519
PZ
343/*
344 * called from hardirq (IPI) context
345 */
346static void __hrtick_start(void *arg)
b328ca18 347{
31656519 348 struct rq *rq = arg;
b328ca18 349
05fa785c 350 raw_spin_lock(&rq->lock);
971ee28c 351 __hrtick_restart(rq);
31656519 352 rq->hrtick_csd_pending = 0;
05fa785c 353 raw_spin_unlock(&rq->lock);
b328ca18
PZ
354}
355
31656519
PZ
356/*
357 * Called to set the hrtick timer state.
358 *
359 * called with rq->lock held and irqs disabled
360 */
029632fb 361void hrtick_start(struct rq *rq, u64 delay)
b328ca18 362{
31656519 363 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 364 ktime_t time;
365 s64 delta;
366
367 /*
368 * Don't schedule slices shorter than 10000ns, that just
369 * doesn't make sense and can cause timer DoS.
370 */
371 delta = max_t(s64, delay, 10000LL);
372 time = ktime_add_ns(timer->base->get_time(), delta);
b328ca18 373
cc584b21 374 hrtimer_set_expires(timer, time);
31656519
PZ
375
376 if (rq == this_rq()) {
971ee28c 377 __hrtick_restart(rq);
31656519 378 } else if (!rq->hrtick_csd_pending) {
c46fff2a 379 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
31656519
PZ
380 rq->hrtick_csd_pending = 1;
381 }
b328ca18
PZ
382}
383
384static int
385hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
386{
387 int cpu = (int)(long)hcpu;
388
389 switch (action) {
390 case CPU_UP_CANCELED:
391 case CPU_UP_CANCELED_FROZEN:
392 case CPU_DOWN_PREPARE:
393 case CPU_DOWN_PREPARE_FROZEN:
394 case CPU_DEAD:
395 case CPU_DEAD_FROZEN:
31656519 396 hrtick_clear(cpu_rq(cpu));
b328ca18
PZ
397 return NOTIFY_OK;
398 }
399
400 return NOTIFY_DONE;
401}
402
fa748203 403static __init void init_hrtick(void)
b328ca18
PZ
404{
405 hotcpu_notifier(hotplug_hrtick, 0);
406}
31656519
PZ
407#else
408/*
409 * Called to set the hrtick timer state.
410 *
411 * called with rq->lock held and irqs disabled
412 */
029632fb 413void hrtick_start(struct rq *rq, u64 delay)
31656519 414{
86893335
WL
415 /*
416 * Don't schedule slices shorter than 10000ns, that just
417 * doesn't make sense. Rely on vruntime for fairness.
418 */
419 delay = max_t(u64, delay, 10000LL);
4961b6e1
TG
420 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
421 HRTIMER_MODE_REL_PINNED);
31656519 422}
b328ca18 423
006c75f1 424static inline void init_hrtick(void)
8f4d37ec 425{
8f4d37ec 426}
31656519 427#endif /* CONFIG_SMP */
8f4d37ec 428
31656519 429static void init_rq_hrtick(struct rq *rq)
8f4d37ec 430{
31656519
PZ
431#ifdef CONFIG_SMP
432 rq->hrtick_csd_pending = 0;
8f4d37ec 433
31656519
PZ
434 rq->hrtick_csd.flags = 0;
435 rq->hrtick_csd.func = __hrtick_start;
436 rq->hrtick_csd.info = rq;
437#endif
8f4d37ec 438
31656519
PZ
439 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
440 rq->hrtick_timer.function = hrtick;
8f4d37ec 441}
006c75f1 442#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
443static inline void hrtick_clear(struct rq *rq)
444{
445}
446
8f4d37ec
PZ
447static inline void init_rq_hrtick(struct rq *rq)
448{
449}
450
b328ca18
PZ
451static inline void init_hrtick(void)
452{
453}
006c75f1 454#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 455
fd99f91a
PZ
456/*
457 * cmpxchg based fetch_or, macro so it works for different integer types
458 */
459#define fetch_or(ptr, val) \
460({ typeof(*(ptr)) __old, __val = *(ptr); \
461 for (;;) { \
462 __old = cmpxchg((ptr), __val, __val | (val)); \
463 if (__old == __val) \
464 break; \
465 __val = __old; \
466 } \
467 __old; \
468})
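/*
 * fetch_or() atomically ORs @val into *@ptr and returns the old value;
 * for example:
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *
 * is how set_nr_and_not_polling() below learns whether the remote task
 * was polling at the instant the resched bit was set.
 */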
469
e3baac47 470#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
fd99f91a
PZ
471/*
472 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
473 * this avoids any races wrt polling state changes and thereby avoids
474 * spurious IPIs.
475 */
476static bool set_nr_and_not_polling(struct task_struct *p)
477{
478 struct thread_info *ti = task_thread_info(p);
479 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
480}
e3baac47
PZ
481
482/*
483 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
484 *
485 * If this returns true, then the idle task promises to call
486 * sched_ttwu_pending() and reschedule soon.
487 */
488static bool set_nr_if_polling(struct task_struct *p)
489{
490 struct thread_info *ti = task_thread_info(p);
316c1608 491 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
e3baac47
PZ
492
493 for (;;) {
494 if (!(val & _TIF_POLLING_NRFLAG))
495 return false;
496 if (val & _TIF_NEED_RESCHED)
497 return true;
498 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
499 if (old == val)
500 break;
501 val = old;
502 }
503 return true;
504}
505
fd99f91a
PZ
506#else
507static bool set_nr_and_not_polling(struct task_struct *p)
508{
509 set_tsk_need_resched(p);
510 return true;
511}
e3baac47
PZ
512
513#ifdef CONFIG_SMP
514static bool set_nr_if_polling(struct task_struct *p)
515{
516 return false;
517}
518#endif
fd99f91a
PZ
519#endif
520
76751049
PZ
521void wake_q_add(struct wake_q_head *head, struct task_struct *task)
522{
523 struct wake_q_node *node = &task->wake_q;
524
525 /*
 526 * Atomically grab the task; if ->wake_q is !nil already it means
 527 * it's already queued (either by us or someone else) and will get the
528 * wakeup due to that.
529 *
530 * This cmpxchg() implies a full barrier, which pairs with the write
 531 * barrier implied by the wakeup in wake_up_q().
532 */
533 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
534 return;
535
536 get_task_struct(task);
537
538 /*
539 * The head is context local, there can be no concurrency.
540 */
541 *head->lastp = node;
542 head->lastp = &node->next;
543}
544
545void wake_up_q(struct wake_q_head *head)
546{
547 struct wake_q_node *node = head->first;
548
549 while (node != WAKE_Q_TAIL) {
550 struct task_struct *task;
551
552 task = container_of(node, struct task_struct, wake_q);
553 BUG_ON(!task);
554 /* task can safely be re-inserted now */
555 node = node->next;
556 task->wake_q.next = NULL;
557
558 /*
559 * wake_up_process() implies a wmb() to pair with the queueing
560 * in wake_q_add() so as not to miss wakeups.
561 */
562 wake_up_process(task);
563 put_task_struct(task);
564 }
565}
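/*
 * Typical calling pattern (sketch; WAKE_Q() is assumed to be the wake_q
 * declaration helper from sched.h): queue the wakeups while holding a
 * lock, issue them only after dropping it:
 *
 *	WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, p);
 *	spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 *
 * Queueing is cheap under the lock; the actual wake_up_process() calls
 * happen outside it.
 */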
566
c24d20db 567/*
8875125e 568 * resched_curr - mark rq's current task 'to be rescheduled now'.
c24d20db
IM
569 *
570 * On UP this means the setting of the need_resched flag, on SMP it
571 * might also involve a cross-CPU call to trigger the scheduler on
572 * the target CPU.
573 */
8875125e 574void resched_curr(struct rq *rq)
c24d20db 575{
8875125e 576 struct task_struct *curr = rq->curr;
c24d20db
IM
577 int cpu;
578
8875125e 579 lockdep_assert_held(&rq->lock);
c24d20db 580
8875125e 581 if (test_tsk_need_resched(curr))
c24d20db
IM
582 return;
583
8875125e 584 cpu = cpu_of(rq);
fd99f91a 585
f27dde8d 586 if (cpu == smp_processor_id()) {
8875125e 587 set_tsk_need_resched(curr);
f27dde8d 588 set_preempt_need_resched();
c24d20db 589 return;
f27dde8d 590 }
c24d20db 591
8875125e 592 if (set_nr_and_not_polling(curr))
c24d20db 593 smp_send_reschedule(cpu);
dfc68f29
AL
594 else
595 trace_sched_wake_idle_without_ipi(cpu);
c24d20db
IM
596}
597
029632fb 598void resched_cpu(int cpu)
c24d20db
IM
599{
600 struct rq *rq = cpu_rq(cpu);
601 unsigned long flags;
602
05fa785c 603 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
c24d20db 604 return;
8875125e 605 resched_curr(rq);
05fa785c 606 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 607}
06d8308c 608
b021fe3e 609#ifdef CONFIG_SMP
3451d024 610#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
611/*
 612 * In the semi-idle case, use the nearest busy cpu for migrating timers
 613 * from an idle cpu. This is good for power savings.
 614 *
 615 * We don't do a similar optimization for a completely idle system, as
 616 * selecting an idle cpu will add more delays to the timers than intended
 617 * (as that cpu's timer base may not be up to date w.r.t. jiffies etc.).
618 */
bc7a34b8 619int get_nohz_timer_target(void)
83cd4fe2 620{
bc7a34b8 621 int i, cpu = smp_processor_id();
83cd4fe2
VP
622 struct sched_domain *sd;
623
9642d18e 624 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
6201b4d6
VK
625 return cpu;
626
057f3fad 627 rcu_read_lock();
83cd4fe2 628 for_each_domain(cpu, sd) {
057f3fad 629 for_each_cpu(i, sched_domain_span(sd)) {
9642d18e 630 if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
057f3fad
PZ
631 cpu = i;
632 goto unlock;
633 }
634 }
83cd4fe2 635 }
9642d18e
VH
636
637 if (!is_housekeeping_cpu(cpu))
638 cpu = housekeeping_any_cpu();
057f3fad
PZ
639unlock:
640 rcu_read_unlock();
83cd4fe2
VP
641 return cpu;
642}
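/*
 * Selection order used above: keep the current CPU if it is busy and a
 * housekeeping CPU; otherwise walk the sched domains outwards for the
 * nearest busy housekeeping CPU; failing that, fall back to any
 * housekeeping CPU via housekeeping_any_cpu().
 */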
06d8308c
TG
643/*
644 * When add_timer_on() enqueues a timer into the timer wheel of an
645 * idle CPU then this timer might expire before the next timer event
646 * which is scheduled to wake up that CPU. In case of a completely
647 * idle system the next event might even be infinite time into the
648 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
649 * leaves the inner idle loop so the newly added timer is taken into
650 * account when the CPU goes back to idle and evaluates the timer
651 * wheel for the next timer event.
652 */
1c20091e 653static void wake_up_idle_cpu(int cpu)
06d8308c
TG
654{
655 struct rq *rq = cpu_rq(cpu);
656
657 if (cpu == smp_processor_id())
658 return;
659
67b9ca70 660 if (set_nr_and_not_polling(rq->idle))
06d8308c 661 smp_send_reschedule(cpu);
dfc68f29
AL
662 else
663 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
664}
665
c5bfece2 666static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 667{
53c5fa16
FW
668 /*
669 * We just need the target to call irq_exit() and re-evaluate
670 * the next tick. The nohz full kick at least implies that.
671 * If needed we can still optimize that later with an
672 * empty IRQ.
673 */
c5bfece2 674 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
675 if (cpu != smp_processor_id() ||
676 tick_nohz_tick_stopped())
53c5fa16 677 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
678 return true;
679 }
680
681 return false;
682}
683
684void wake_up_nohz_cpu(int cpu)
685{
c5bfece2 686 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
687 wake_up_idle_cpu(cpu);
688}
689
ca38062e 690static inline bool got_nohz_idle_kick(void)
45bf76df 691{
1c792db7 692 int cpu = smp_processor_id();
873b4c65
VG
693
694 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
695 return false;
696
697 if (idle_cpu(cpu) && !need_resched())
698 return true;
699
700 /*
 701 * We can't run the idle load balance on this CPU at this time, so we
 702 * cancel it and clear NOHZ_BALANCE_KICK.
703 */
704 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
705 return false;
45bf76df
IM
706}
707
3451d024 708#else /* CONFIG_NO_HZ_COMMON */
45bf76df 709
ca38062e 710static inline bool got_nohz_idle_kick(void)
2069dd75 711{
ca38062e 712 return false;
2069dd75
PZ
713}
714
3451d024 715#endif /* CONFIG_NO_HZ_COMMON */
d842de87 716
ce831b38
FW
717#ifdef CONFIG_NO_HZ_FULL
718bool sched_can_stop_tick(void)
719{
1e78cdbd
RR
720 /*
721 * FIFO realtime policy runs the highest priority task. Other runnable
722 * tasks are of a lower priority. The scheduler tick does nothing.
723 */
724 if (current->policy == SCHED_FIFO)
725 return true;
726
727 /*
728 * Round-robin realtime tasks time slice with other tasks at the same
729 * realtime priority. Is this task the only one at this priority?
730 */
731 if (current->policy == SCHED_RR) {
732 struct sched_rt_entity *rt_se = &current->rt;
733
734 return rt_se->run_list.prev == rt_se->run_list.next;
735 }
736
3882ec64
FW
737 /*
 738 * More than one running task needs preemption.
 739 * The nr_running update is assumed to be visible
 740 * after the IPI is sent from the wakers.
741 */
541b8264
VK
742 if (this_rq()->nr_running > 1)
743 return false;
ce831b38 744
541b8264 745 return true;
ce831b38
FW
746}
747#endif /* CONFIG_NO_HZ_FULL */
d842de87 748
029632fb 749void sched_avg_update(struct rq *rq)
18d95a28 750{
e9e9250b
PZ
751 s64 period = sched_avg_period();
752
78becc27 753 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
0d98bb26
WD
754 /*
755 * Inline assembly required to prevent the compiler
756 * optimising this loop into a divmod call.
757 * See __iter_div_u64_rem() for another example of this.
758 */
759 asm("" : "+rm" (rq->age_stamp));
e9e9250b
PZ
760 rq->age_stamp += period;
761 rq->rt_avg /= 2;
762 }
18d95a28
PZ
763}
764
6d6bc0ad 765#endif /* CONFIG_SMP */
18d95a28 766
a790de99
PT
767#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
768 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 769/*
8277434e
PT
770 * Iterate task_group tree rooted at *from, calling @down when first entering a
771 * node and @up when leaving it for the final time.
772 *
773 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 774 */
029632fb 775int walk_tg_tree_from(struct task_group *from,
8277434e 776 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
777{
778 struct task_group *parent, *child;
eb755805 779 int ret;
c09595f6 780
8277434e
PT
781 parent = from;
782
c09595f6 783down:
eb755805
PZ
784 ret = (*down)(parent, data);
785 if (ret)
8277434e 786 goto out;
c09595f6
PZ
787 list_for_each_entry_rcu(child, &parent->children, siblings) {
788 parent = child;
789 goto down;
790
791up:
792 continue;
793 }
eb755805 794 ret = (*up)(parent, data);
8277434e
PT
795 if (ret || parent == from)
796 goto out;
c09595f6
PZ
797
798 child = parent;
799 parent = parent->parent;
800 if (parent)
801 goto up;
8277434e 802out:
eb755805 803 return ret;
c09595f6
PZ
804}
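/*
 * The goto-based loop above is an iterative depth-first walk of the
 * task_group tree: @down is called when a node is first entered
 * (pre-order) and @up when the walk leaves it for the last time
 * (post-order), exactly as the function header describes.
 */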
805
029632fb 806int tg_nop(struct task_group *tg, void *data)
eb755805 807{
e2b245f8 808 return 0;
eb755805 809}
18d95a28
PZ
810#endif
811
45bf76df
IM
812static void set_load_weight(struct task_struct *p)
813{
f05998d4
NR
814 int prio = p->static_prio - MAX_RT_PRIO;
815 struct load_weight *load = &p->se.load;
816
dd41f596
IM
817 /*
818 * SCHED_IDLE tasks get minimal weight:
819 */
820 if (p->policy == SCHED_IDLE) {
c8b28116 821 load->weight = scale_load(WEIGHT_IDLEPRIO);
f05998d4 822 load->inv_weight = WMULT_IDLEPRIO;
dd41f596
IM
823 return;
824 }
71f8bd46 825
c8b28116 826 load->weight = scale_load(prio_to_weight[prio]);
f05998d4 827 load->inv_weight = prio_to_wmult[prio];
71f8bd46
IM
828}
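/*
 * prio_to_weight[] maps each nice level to a load weight (nice 0 maps to
 * 1024, and each nice step changes the weight by roughly 25%), so the
 * fair class can divide CPU time proportionally; prio_to_wmult[] holds
 * the precomputed 2^32/weight inverses used to avoid divisions.
 */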
829
371fd7e7 830static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 831{
a64692a3 832 update_rq_clock(rq);
43148951 833 sched_info_queued(rq, p);
371fd7e7 834 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
835}
836
371fd7e7 837static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 838{
a64692a3 839 update_rq_clock(rq);
43148951 840 sched_info_dequeued(rq, p);
371fd7e7 841 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
842}
843
029632fb 844void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
845{
846 if (task_contributes_to_load(p))
847 rq->nr_uninterruptible--;
848
371fd7e7 849 enqueue_task(rq, p, flags);
1e3c88bd
PZ
850}
851
029632fb 852void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
853{
854 if (task_contributes_to_load(p))
855 rq->nr_uninterruptible++;
856
371fd7e7 857 dequeue_task(rq, p, flags);
1e3c88bd
PZ
858}
859
fe44d621 860static void update_rq_clock_task(struct rq *rq, s64 delta)
aa483808 861{
095c0aa8
GC
862/*
 863 * In theory, the compiler should just see 0 here, and optimize out the call
864 * to sched_rt_avg_update. But I don't trust it...
865 */
866#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
867 s64 steal = 0, irq_delta = 0;
868#endif
869#ifdef CONFIG_IRQ_TIME_ACCOUNTING
8e92c201 870 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
fe44d621
PZ
871
872 /*
873 * Since irq_time is only updated on {soft,}irq_exit, we might run into
874 * this case when a previous update_rq_clock() happened inside a
875 * {soft,}irq region.
876 *
877 * When this happens, we stop ->clock_task and only update the
878 * prev_irq_time stamp to account for the part that fit, so that a next
879 * update will consume the rest. This ensures ->clock_task is
880 * monotonic.
881 *
 882 * It does however cause some slight misattribution of {soft,}irq
 883 * time; a more accurate solution would be to update the irq_time using
884 * the current rq->clock timestamp, except that would require using
885 * atomic ops.
886 */
887 if (irq_delta > delta)
888 irq_delta = delta;
889
890 rq->prev_irq_time += irq_delta;
891 delta -= irq_delta;
095c0aa8
GC
892#endif
893#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
c5905afb 894 if (static_key_false((&paravirt_steal_rq_enabled))) {
095c0aa8
GC
895 steal = paravirt_steal_clock(cpu_of(rq));
896 steal -= rq->prev_steal_time_rq;
897
898 if (unlikely(steal > delta))
899 steal = delta;
900
095c0aa8 901 rq->prev_steal_time_rq += steal;
095c0aa8
GC
902 delta -= steal;
903 }
904#endif
905
fe44d621
PZ
906 rq->clock_task += delta;
907
095c0aa8 908#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
5d4dfddd 909 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
095c0aa8
GC
910 sched_rt_avg_update(rq, irq_delta + steal);
911#endif
aa483808
VP
912}
913
34f971f6
PZ
914void sched_set_stop_task(int cpu, struct task_struct *stop)
915{
916 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
917 struct task_struct *old_stop = cpu_rq(cpu)->stop;
918
919 if (stop) {
920 /*
 921 * Make it appear like a SCHED_FIFO task, it's something
922 * userspace knows about and won't get confused about.
923 *
924 * Also, it will make PI more or less work without too
925 * much confusion -- but then, stop work should not
926 * rely on PI working anyway.
927 */
928 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
929
930 stop->sched_class = &stop_sched_class;
931 }
932
933 cpu_rq(cpu)->stop = stop;
934
935 if (old_stop) {
936 /*
937 * Reset it back to a normal scheduling class so that
938 * it can die in pieces.
939 */
940 old_stop->sched_class = &rt_sched_class;
941 }
942}
943
14531189 944/*
dd41f596 945 * __normal_prio - return the priority that is based on the static prio
14531189 946 */
14531189
IM
947static inline int __normal_prio(struct task_struct *p)
948{
dd41f596 949 return p->static_prio;
14531189
IM
950}
951
b29739f9
IM
952/*
953 * Calculate the expected normal priority: i.e. priority
954 * without taking RT-inheritance into account. Might be
955 * boosted by interactivity modifiers. Changes upon fork,
956 * setprio syscalls, and whenever the interactivity
957 * estimator recalculates.
958 */
36c8b586 959static inline int normal_prio(struct task_struct *p)
b29739f9
IM
960{
961 int prio;
962
aab03e05
DF
963 if (task_has_dl_policy(p))
964 prio = MAX_DL_PRIO-1;
965 else if (task_has_rt_policy(p))
b29739f9
IM
966 prio = MAX_RT_PRIO-1 - p->rt_priority;
967 else
968 prio = __normal_prio(p);
969 return prio;
970}
971
972/*
973 * Calculate the current priority, i.e. the priority
974 * taken into account by the scheduler. This value might
975 * be boosted by RT tasks, or might be boosted by
976 * interactivity modifiers. Will be RT if the task got
977 * RT-boosted. If not then it returns p->normal_prio.
978 */
36c8b586 979static int effective_prio(struct task_struct *p)
b29739f9
IM
980{
981 p->normal_prio = normal_prio(p);
982 /*
983 * If we are RT tasks or we were boosted to RT priority,
984 * keep the priority unchanged. Otherwise, update priority
985 * to the normal priority:
986 */
987 if (!rt_prio(p->prio))
988 return p->normal_prio;
989 return p->prio;
990}
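/*
 * Resulting numeric ranges (lower value means higher priority): deadline
 * tasks get MAX_DL_PRIO-1 (-1), realtime tasks map to
 * MAX_RT_PRIO-1 - rt_priority (0..98), and normal tasks keep their
 * static_prio of 100..139 (nice -20..19).
 */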
991
1da177e4
LT
992/**
993 * task_curr - is this task currently executing on a CPU?
994 * @p: the task in question.
e69f6186
YB
995 *
996 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 997 */
36c8b586 998inline int task_curr(const struct task_struct *p)
1da177e4
LT
999{
1000 return cpu_curr(task_cpu(p)) == p;
1001}
1002
67dfa1b7 1003/*
4c9a4bc8
PZ
1004 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1005 * use the balance_callback list if you want balancing.
1006 *
1007 * this means any call to check_class_changed() must be followed by a call to
1008 * balance_callback().
67dfa1b7 1009 */
cb469845
SR
1010static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1011 const struct sched_class *prev_class,
da7a735e 1012 int oldprio)
cb469845
SR
1013{
1014 if (prev_class != p->sched_class) {
1015 if (prev_class->switched_from)
da7a735e 1016 prev_class->switched_from(rq, p);
4c9a4bc8 1017
da7a735e 1018 p->sched_class->switched_to(rq, p);
2d3d891d 1019 } else if (oldprio != p->prio || dl_task(p))
da7a735e 1020 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
1021}
1022
029632fb 1023void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405
PZ
1024{
1025 const struct sched_class *class;
1026
1027 if (p->sched_class == rq->curr->sched_class) {
1028 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1029 } else {
1030 for_each_class(class) {
1031 if (class == rq->curr->sched_class)
1032 break;
1033 if (class == p->sched_class) {
8875125e 1034 resched_curr(rq);
1e5a7405
PZ
1035 break;
1036 }
1037 }
1038 }
1039
1040 /*
1041 * A queue event has occurred, and we're going to schedule. In
1042 * this case, we can save a useless back to back clock update.
1043 */
da0c1e65 1044 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
9edfbfed 1045 rq_clock_skip_update(rq, true);
1e5a7405
PZ
1046}
1047
1da177e4 1048#ifdef CONFIG_SMP
5cc389bc
PZ
1049/*
1050 * This is how migration works:
1051 *
1052 * 1) we invoke migration_cpu_stop() on the target CPU using
1053 * stop_one_cpu().
1054 * 2) stopper starts to run (implicitly forcing the migrated thread
1055 * off the CPU)
1056 * 3) it checks whether the migrated task is still in the wrong runqueue.
1057 * 4) if it's in the wrong runqueue then the migration thread removes
1058 * it and puts it into the right queue.
1059 * 5) stopper completes and stop_one_cpu() returns and the migration
1060 * is done.
1061 */
1062
1063/*
1064 * move_queued_task - move a queued task to new rq.
1065 *
1066 * Returns (locked) new rq. Old rq's lock is released.
1067 */
5e16bbc2 1068static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
5cc389bc 1069{
5cc389bc
PZ
1070 lockdep_assert_held(&rq->lock);
1071
1072 dequeue_task(rq, p, 0);
1073 p->on_rq = TASK_ON_RQ_MIGRATING;
1074 set_task_cpu(p, new_cpu);
1075 raw_spin_unlock(&rq->lock);
1076
1077 rq = cpu_rq(new_cpu);
1078
1079 raw_spin_lock(&rq->lock);
1080 BUG_ON(task_cpu(p) != new_cpu);
1081 p->on_rq = TASK_ON_RQ_QUEUED;
1082 enqueue_task(rq, p, 0);
1083 check_preempt_curr(rq, p, 0);
1084
1085 return rq;
1086}
1087
1088struct migration_arg {
1089 struct task_struct *task;
1090 int dest_cpu;
1091};
1092
1093/*
1094 * Move (not current) task off this cpu, onto dest cpu. We're doing
1095 * this because either it can't run here any more (set_cpus_allowed()
1096 * away from this CPU, or CPU going down), or because we're
1097 * attempting to rebalance this task on exec (sched_exec).
1098 *
1099 * So we race with normal scheduler movements, but that's OK, as long
1100 * as the task is no longer on this CPU.
5cc389bc 1101 */
5e16bbc2 1102static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
5cc389bc 1103{
5cc389bc 1104 if (unlikely(!cpu_active(dest_cpu)))
5e16bbc2 1105 return rq;
5cc389bc
PZ
1106
1107 /* Affinity changed (again). */
1108 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
5e16bbc2 1109 return rq;
5cc389bc 1110
5e16bbc2
PZ
1111 rq = move_queued_task(rq, p, dest_cpu);
1112
1113 return rq;
5cc389bc
PZ
1114}
1115
1116/*
1117 * migration_cpu_stop - this will be executed by a highprio stopper thread
1118 * and performs thread migration by bumping thread off CPU then
1119 * 'pushing' onto another runqueue.
1120 */
1121static int migration_cpu_stop(void *data)
1122{
1123 struct migration_arg *arg = data;
5e16bbc2
PZ
1124 struct task_struct *p = arg->task;
1125 struct rq *rq = this_rq();
5cc389bc
PZ
1126
1127 /*
1128 * The original target cpu might have gone down and we might
1129 * be on another cpu but it doesn't matter.
1130 */
1131 local_irq_disable();
1132 /*
1133 * We need to explicitly wake pending tasks before running
1134 * __migrate_task() such that we will not miss enforcing cpus_allowed
1135 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1136 */
1137 sched_ttwu_pending();
5e16bbc2
PZ
1138
1139 raw_spin_lock(&p->pi_lock);
1140 raw_spin_lock(&rq->lock);
1141 /*
1142 * If task_rq(p) != rq, it cannot be migrated here, because we're
1143 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1144 * we're holding p->pi_lock.
1145 */
1146 if (task_rq(p) == rq && task_on_rq_queued(p))
1147 rq = __migrate_task(rq, p, arg->dest_cpu);
1148 raw_spin_unlock(&rq->lock);
1149 raw_spin_unlock(&p->pi_lock);
1150
5cc389bc
PZ
1151 local_irq_enable();
1152 return 0;
1153}
1154
c5b28038
PZ
1155/*
1156 * sched_class::set_cpus_allowed must do the below, but is not required to
1157 * actually call this function.
1158 */
1159void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
5cc389bc 1160{
5cc389bc
PZ
1161 cpumask_copy(&p->cpus_allowed, new_mask);
1162 p->nr_cpus_allowed = cpumask_weight(new_mask);
1163}
1164
c5b28038
PZ
1165void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1166{
6c37067e
PZ
1167 struct rq *rq = task_rq(p);
1168 bool queued, running;
1169
c5b28038 1170 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
1171
1172 queued = task_on_rq_queued(p);
1173 running = task_current(rq, p);
1174
1175 if (queued) {
1176 /*
1177 * Because __kthread_bind() calls this on blocked tasks without
1178 * holding rq->lock.
1179 */
1180 lockdep_assert_held(&rq->lock);
1181 dequeue_task(rq, p, 0);
1182 }
1183 if (running)
1184 put_prev_task(rq, p);
1185
c5b28038 1186 p->sched_class->set_cpus_allowed(p, new_mask);
6c37067e
PZ
1187
1188 if (running)
1189 p->sched_class->set_curr_task(rq);
1190 if (queued)
1191 enqueue_task(rq, p, 0);
c5b28038
PZ
1192}
1193
5cc389bc
PZ
1194/*
1195 * Change a given task's CPU affinity. Migrate the thread to a
1196 * proper CPU and schedule it away if the CPU it's executing on
1197 * is removed from the allowed bitmask.
1198 *
1199 * NOTE: the caller must have a valid reference to the task, the
1200 * task must not exit() & deallocate itself prematurely. The
1201 * call is not atomic; no spinlocks may be held.
1202 */
25834c73
PZ
1203static int __set_cpus_allowed_ptr(struct task_struct *p,
1204 const struct cpumask *new_mask, bool check)
5cc389bc
PZ
1205{
1206 unsigned long flags;
1207 struct rq *rq;
1208 unsigned int dest_cpu;
1209 int ret = 0;
1210
1211 rq = task_rq_lock(p, &flags);
1212
25834c73
PZ
1213 /*
1214 * Must re-check here, to close a race against __kthread_bind(),
1215 * sched_setaffinity() is not guaranteed to observe the flag.
1216 */
1217 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1218 ret = -EINVAL;
1219 goto out;
1220 }
1221
5cc389bc
PZ
1222 if (cpumask_equal(&p->cpus_allowed, new_mask))
1223 goto out;
1224
1225 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1226 ret = -EINVAL;
1227 goto out;
1228 }
1229
1230 do_set_cpus_allowed(p, new_mask);
1231
1232 /* Can the task run on the task's current CPU? If so, we're done */
1233 if (cpumask_test_cpu(task_cpu(p), new_mask))
1234 goto out;
1235
1236 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
1237 if (task_running(rq, p) || p->state == TASK_WAKING) {
1238 struct migration_arg arg = { p, dest_cpu };
1239 /* Need help from migration thread: drop lock and wait. */
1240 task_rq_unlock(rq, p, &flags);
1241 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1242 tlb_migrate_finish(p->mm);
1243 return 0;
cbce1a68
PZ
1244 } else if (task_on_rq_queued(p)) {
1245 /*
1246 * OK, since we're going to drop the lock immediately
1247 * afterwards anyway.
1248 */
1249 lockdep_unpin_lock(&rq->lock);
5e16bbc2 1250 rq = move_queued_task(rq, p, dest_cpu);
cbce1a68
PZ
1251 lockdep_pin_lock(&rq->lock);
1252 }
5cc389bc
PZ
1253out:
1254 task_rq_unlock(rq, p, &flags);
1255
1256 return ret;
1257}
25834c73
PZ
1258
1259int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1260{
1261 return __set_cpus_allowed_ptr(p, new_mask, false);
1262}
5cc389bc
PZ
1263EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
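/*
 * Illustrative use: pinning a task to one CPU can be done with
 *
 *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *
 * which updates p->cpus_allowed and, through the migration stopper above,
 * moves @p off its current CPU if that CPU is no longer allowed.
 */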
1264
dd41f596 1265void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 1266{
e2912009
PZ
1267#ifdef CONFIG_SCHED_DEBUG
1268 /*
1269 * We should never call set_task_cpu() on a blocked task,
1270 * ttwu() will sort out the placement.
1271 */
077614ee 1272 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
e2336f6e 1273 !p->on_rq);
0122ec5b
PZ
1274
1275#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
1276 /*
1277 * The caller should hold either p->pi_lock or rq->lock, when changing
1278 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1279 *
1280 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 1281 * see task_group().
6c6c54e1
PZ
1282 *
1283 * Furthermore, all task_rq users should acquire both locks, see
1284 * task_rq_lock().
1285 */
0122ec5b
PZ
1286 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1287 lockdep_is_held(&task_rq(p)->lock)));
1288#endif
e2912009
PZ
1289#endif
1290
de1d7286 1291 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 1292
0c69774e 1293 if (task_cpu(p) != new_cpu) {
0a74bef8
PT
1294 if (p->sched_class->migrate_task_rq)
1295 p->sched_class->migrate_task_rq(p, new_cpu);
0c69774e 1296 p->se.nr_migrations++;
ff303e66 1297 perf_event_task_migrate(p);
0c69774e 1298 }
dd41f596
IM
1299
1300 __set_task_cpu(p, new_cpu);
c65cc870
IM
1301}
1302
ac66f547
PZ
1303static void __migrate_swap_task(struct task_struct *p, int cpu)
1304{
da0c1e65 1305 if (task_on_rq_queued(p)) {
ac66f547
PZ
1306 struct rq *src_rq, *dst_rq;
1307
1308 src_rq = task_rq(p);
1309 dst_rq = cpu_rq(cpu);
1310
1311 deactivate_task(src_rq, p, 0);
1312 set_task_cpu(p, cpu);
1313 activate_task(dst_rq, p, 0);
1314 check_preempt_curr(dst_rq, p, 0);
1315 } else {
1316 /*
1317 * Task isn't running anymore; make it appear like we migrated
1318 * it before it went to sleep. This means on wakeup we make the
 1319 * previous cpu our target instead of where it really is.
1320 */
1321 p->wake_cpu = cpu;
1322 }
1323}
1324
1325struct migration_swap_arg {
1326 struct task_struct *src_task, *dst_task;
1327 int src_cpu, dst_cpu;
1328};
1329
1330static int migrate_swap_stop(void *data)
1331{
1332 struct migration_swap_arg *arg = data;
1333 struct rq *src_rq, *dst_rq;
1334 int ret = -EAGAIN;
1335
1336 src_rq = cpu_rq(arg->src_cpu);
1337 dst_rq = cpu_rq(arg->dst_cpu);
1338
74602315
PZ
1339 double_raw_lock(&arg->src_task->pi_lock,
1340 &arg->dst_task->pi_lock);
ac66f547
PZ
1341 double_rq_lock(src_rq, dst_rq);
1342 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1343 goto unlock;
1344
1345 if (task_cpu(arg->src_task) != arg->src_cpu)
1346 goto unlock;
1347
1348 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1349 goto unlock;
1350
1351 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1352 goto unlock;
1353
1354 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1355 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1356
1357 ret = 0;
1358
1359unlock:
1360 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
1361 raw_spin_unlock(&arg->dst_task->pi_lock);
1362 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
1363
1364 return ret;
1365}
1366
1367/*
1368 * Cross migrate two tasks
1369 */
1370int migrate_swap(struct task_struct *cur, struct task_struct *p)
1371{
1372 struct migration_swap_arg arg;
1373 int ret = -EINVAL;
1374
ac66f547
PZ
1375 arg = (struct migration_swap_arg){
1376 .src_task = cur,
1377 .src_cpu = task_cpu(cur),
1378 .dst_task = p,
1379 .dst_cpu = task_cpu(p),
1380 };
1381
1382 if (arg.src_cpu == arg.dst_cpu)
1383 goto out;
1384
6acce3ef
PZ
1385 /*
1386 * These three tests are all lockless; this is OK since all of them
1387 * will be re-checked with proper locks held further down the line.
1388 */
ac66f547
PZ
1389 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1390 goto out;
1391
1392 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1393 goto out;
1394
1395 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1396 goto out;
1397
286549dc 1398 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
1399 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1400
1401out:
ac66f547
PZ
1402 return ret;
1403}
1404
1da177e4
LT
1405/*
1406 * wait_task_inactive - wait for a thread to unschedule.
1407 *
85ba2d86
RM
1408 * If @match_state is nonzero, it's the @p->state value just checked and
1409 * not expected to change. If it changes, i.e. @p might have woken up,
1410 * then return zero. When we succeed in waiting for @p to be off its CPU,
1411 * we return a positive number (its total switch count). If a second call
1412 * a short while later returns the same number, the caller can be sure that
1413 * @p has remained unscheduled the whole time.
1414 *
1da177e4
LT
1415 * The caller must ensure that the task *will* unschedule sometime soon,
1416 * else this function might spin for a *long* time. This function can't
1417 * be called with interrupts off, or it may introduce deadlock with
1418 * smp_call_function() if an IPI is sent by the same process we are
1419 * waiting to become inactive.
1420 */
85ba2d86 1421unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
1422{
1423 unsigned long flags;
da0c1e65 1424 int running, queued;
85ba2d86 1425 unsigned long ncsw;
70b97a7f 1426 struct rq *rq;
1da177e4 1427
3a5c359a
AK
1428 for (;;) {
1429 /*
1430 * We do the initial early heuristics without holding
1431 * any task-queue locks at all. We'll only try to get
1432 * the runqueue lock when things look like they will
1433 * work out!
1434 */
1435 rq = task_rq(p);
fa490cfd 1436
3a5c359a
AK
1437 /*
1438 * If the task is actively running on another CPU
1439 * still, just relax and busy-wait without holding
1440 * any locks.
1441 *
1442 * NOTE! Since we don't hold any locks, it's not
1443 * even sure that "rq" stays as the right runqueue!
1444 * But we don't care, since "task_running()" will
1445 * return false if the runqueue has changed and p
1446 * is actually now running somewhere else!
1447 */
85ba2d86
RM
1448 while (task_running(rq, p)) {
1449 if (match_state && unlikely(p->state != match_state))
1450 return 0;
3a5c359a 1451 cpu_relax();
85ba2d86 1452 }
fa490cfd 1453
3a5c359a
AK
1454 /*
1455 * Ok, time to look more closely! We need the rq
1456 * lock now, to be *sure*. If we're wrong, we'll
1457 * just go back and repeat.
1458 */
1459 rq = task_rq_lock(p, &flags);
27a9da65 1460 trace_sched_wait_task(p);
3a5c359a 1461 running = task_running(rq, p);
da0c1e65 1462 queued = task_on_rq_queued(p);
85ba2d86 1463 ncsw = 0;
f31e11d8 1464 if (!match_state || p->state == match_state)
93dcf55f 1465 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
0122ec5b 1466 task_rq_unlock(rq, p, &flags);
fa490cfd 1467
85ba2d86
RM
1468 /*
1469 * If it changed from the expected state, bail out now.
1470 */
1471 if (unlikely(!ncsw))
1472 break;
1473
3a5c359a
AK
1474 /*
1475 * Was it really running after all now that we
1476 * checked with the proper locks actually held?
1477 *
1478 * Oops. Go back and try again..
1479 */
1480 if (unlikely(running)) {
1481 cpu_relax();
1482 continue;
1483 }
fa490cfd 1484
3a5c359a
AK
1485 /*
1486 * It's not enough that it's not actively running,
1487 * it must be off the runqueue _entirely_, and not
1488 * preempted!
1489 *
80dd99b3 1490 * So if it was still runnable (but just not actively
3a5c359a
AK
1491 * running right now), it's preempted, and we should
1492 * yield - it could be a while.
1493 */
da0c1e65 1494 if (unlikely(queued)) {
8eb90c30
TG
1495 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1496
1497 set_current_state(TASK_UNINTERRUPTIBLE);
1498 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
1499 continue;
1500 }
fa490cfd 1501
3a5c359a
AK
1502 /*
1503 * Ahh, all good. It wasn't running, and it wasn't
1504 * runnable, which means that it will never become
1505 * running in the future either. We're all done!
1506 */
1507 break;
1508 }
85ba2d86
RM
1509
1510 return ncsw;
1da177e4
LT
1511}
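/*
 * Illustrative calling pattern: sample
 *
 *	ncsw = wait_task_inactive(p, state);
 *
 * once, do some work, then call it again; if the second call returns the
 * same non-zero switch count, @p has stayed off the CPU the whole time
 * (see the function header above).
 */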
1512
1513/***
1514 * kick_process - kick a running thread to enter/exit the kernel
1515 * @p: the to-be-kicked thread
1516 *
1517 * Cause a process which is running on another CPU to enter
1518 * kernel-mode, without any delay. (to get signals handled.)
1519 *
25985edc 1520 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
1521 * because all it wants to ensure is that the remote task enters
1522 * the kernel. If the IPI races and the task has been migrated
1523 * to another CPU then no harm is done and the purpose has been
1524 * achieved as well.
1525 */
36c8b586 1526void kick_process(struct task_struct *p)
1da177e4
LT
1527{
1528 int cpu;
1529
1530 preempt_disable();
1531 cpu = task_cpu(p);
1532 if ((cpu != smp_processor_id()) && task_curr(p))
1533 smp_send_reschedule(cpu);
1534 preempt_enable();
1535}
b43e3521 1536EXPORT_SYMBOL_GPL(kick_process);
1da177e4 1537
30da688e 1538/*
013fdb80 1539 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
30da688e 1540 */
5da9a0fb
PZ
1541static int select_fallback_rq(int cpu, struct task_struct *p)
1542{
aa00d89c
TC
1543 int nid = cpu_to_node(cpu);
1544 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
1545 enum { cpuset, possible, fail } state = cpuset;
1546 int dest_cpu;
5da9a0fb 1547
aa00d89c
TC
1548 /*
1549 * If the node that the cpu is on has been offlined, cpu_to_node()
1550 * will return -1. There is no cpu on the node, and we should
 1551 * select a cpu on another node.
1552 */
1553 if (nid != -1) {
1554 nodemask = cpumask_of_node(nid);
1555
1556 /* Look for allowed, online CPU in same node. */
1557 for_each_cpu(dest_cpu, nodemask) {
1558 if (!cpu_online(dest_cpu))
1559 continue;
1560 if (!cpu_active(dest_cpu))
1561 continue;
1562 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1563 return dest_cpu;
1564 }
2baab4e9 1565 }
5da9a0fb 1566
2baab4e9
PZ
1567 for (;;) {
1568 /* Any allowed, online CPU? */
e3831edd 1569 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
2baab4e9
PZ
1570 if (!cpu_online(dest_cpu))
1571 continue;
1572 if (!cpu_active(dest_cpu))
1573 continue;
1574 goto out;
1575 }
5da9a0fb 1576
2baab4e9
PZ
1577 switch (state) {
1578 case cpuset:
1579 /* No more Mr. Nice Guy. */
1580 cpuset_cpus_allowed_fallback(p);
1581 state = possible;
1582 break;
1583
1584 case possible:
1585 do_set_cpus_allowed(p, cpu_possible_mask);
1586 state = fail;
1587 break;
1588
1589 case fail:
1590 BUG();
1591 break;
1592 }
1593 }
1594
1595out:
1596 if (state != cpuset) {
1597 /*
1598 * Don't tell them about moving exiting tasks or
1599 * kernel threads (both mm NULL), since they never
1600 * leave kernel.
1601 */
1602 if (p->mm && printk_ratelimit()) {
aac74dc4 1603 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
1604 task_pid_nr(p), p->comm, cpu);
1605 }
5da9a0fb
PZ
1606 }
1607
1608 return dest_cpu;
1609}
1610
e2912009 1611/*
013fdb80 1612 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
e2912009 1613 */
970b13ba 1614static inline
ac66f547 1615int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
970b13ba 1616{
cbce1a68
PZ
1617 lockdep_assert_held(&p->pi_lock);
1618
6c1d9410
WL
1619 if (p->nr_cpus_allowed > 1)
1620 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
e2912009
PZ
1621
1622 /*
1623 * In order not to call set_task_cpu() on a blocking task we need
1624 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1625 * cpu.
1626 *
1627 * Since this is common to all placement strategies, this lives here.
1628 *
 1629 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
1630 * not worry about this generic constraint ]
1631 */
fa17b507 1632 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
70f11205 1633 !cpu_online(cpu)))
5da9a0fb 1634 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
1635
1636 return cpu;
970b13ba 1637}
09a40af5
MG
1638
1639static void update_avg(u64 *avg, u64 sample)
1640{
1641 s64 diff = sample - *avg;
1642 *avg += diff >> 3;
1643}
25834c73
PZ
1644
1645#else
1646
1647static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1648 const struct cpumask *new_mask, bool check)
1649{
1650 return set_cpus_allowed_ptr(p, new_mask);
1651}
1652
5cc389bc 1653#endif /* CONFIG_SMP */
970b13ba 1654
d7c01d27 1655static void
b84cb5df 1656ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 1657{
d7c01d27 1658#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
1659 struct rq *rq = this_rq();
1660
d7c01d27
PZ
1661#ifdef CONFIG_SMP
1662 int this_cpu = smp_processor_id();
1663
1664 if (cpu == this_cpu) {
1665 schedstat_inc(rq, ttwu_local);
1666 schedstat_inc(p, se.statistics.nr_wakeups_local);
1667 } else {
1668 struct sched_domain *sd;
1669
1670 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 1671 rcu_read_lock();
d7c01d27
PZ
1672 for_each_domain(this_cpu, sd) {
1673 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1674 schedstat_inc(sd, ttwu_wake_remote);
1675 break;
1676 }
1677 }
057f3fad 1678 rcu_read_unlock();
d7c01d27 1679 }
f339b9dc
PZ
1680
1681 if (wake_flags & WF_MIGRATED)
1682 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1683
d7c01d27
PZ
1684#endif /* CONFIG_SMP */
1685
1686 schedstat_inc(rq, ttwu_count);
9ed3811a 1687 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
1688
1689 if (wake_flags & WF_SYNC)
9ed3811a 1690 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27 1691
d7c01d27
PZ
1692#endif /* CONFIG_SCHEDSTATS */
1693}
1694
1695static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1696{
9ed3811a 1697 activate_task(rq, p, en_flags);
da0c1e65 1698 p->on_rq = TASK_ON_RQ_QUEUED;
c2f7115e
PZ
1699
1700 /* if a worker is waking up, notify workqueue */
1701 if (p->flags & PF_WQ_WORKER)
1702 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
1703}
1704
23f41eeb
PZ
1705/*
1706 * Mark the task runnable and perform wakeup-preemption.
1707 */
89363381 1708static void
23f41eeb 1709ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
9ed3811a 1710{
9ed3811a 1711 check_preempt_curr(rq, p, wake_flags);
9ed3811a 1712 p->state = TASK_RUNNING;
fbd705a0
PZ
1713 trace_sched_wakeup(p);
1714
9ed3811a 1715#ifdef CONFIG_SMP
4c9a4bc8
PZ
1716 if (p->sched_class->task_woken) {
1717 /*
cbce1a68
PZ
 1718 * Our task @p is fully woken up and running; so it's safe to
1719 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 1720 */
cbce1a68 1721 lockdep_unpin_lock(&rq->lock);
9ed3811a 1722 p->sched_class->task_woken(rq, p);
cbce1a68 1723 lockdep_pin_lock(&rq->lock);
4c9a4bc8 1724 }
9ed3811a 1725
e69c6341 1726 if (rq->idle_stamp) {
78becc27 1727 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 1728 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 1729
abfafa54
JL
1730 update_avg(&rq->avg_idle, delta);
1731
1732 if (rq->avg_idle > max)
9ed3811a 1733 rq->avg_idle = max;
abfafa54 1734
9ed3811a
TH
1735 rq->idle_stamp = 0;
1736 }
1737#endif
1738}
1739
c05fbafb
PZ
1740static void
1741ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1742{
cbce1a68
PZ
1743 lockdep_assert_held(&rq->lock);
1744
c05fbafb
PZ
1745#ifdef CONFIG_SMP
1746 if (p->sched_contributes_to_load)
1747 rq->nr_uninterruptible--;
1748#endif
1749
1750 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1751 ttwu_do_wakeup(rq, p, wake_flags);
1752}
1753
1754/*
1755 * Called in case the task @p isn't fully descheduled from its runqueue,
 1756 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1757 * since all we need to do is flip p->state to TASK_RUNNING, since
1758 * the task is still ->on_rq.
1759 */
1760static int ttwu_remote(struct task_struct *p, int wake_flags)
1761{
1762 struct rq *rq;
1763 int ret = 0;
1764
1765 rq = __task_rq_lock(p);
da0c1e65 1766 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
1767 /* check_preempt_curr() may use rq clock */
1768 update_rq_clock(rq);
c05fbafb
PZ
1769 ttwu_do_wakeup(rq, p, wake_flags);
1770 ret = 1;
1771 }
1772 __task_rq_unlock(rq);
1773
1774 return ret;
1775}
1776
317f3941 1777#ifdef CONFIG_SMP
e3baac47 1778void sched_ttwu_pending(void)
317f3941
PZ
1779{
1780 struct rq *rq = this_rq();
fa14ff4a
PZ
1781 struct llist_node *llist = llist_del_all(&rq->wake_list);
1782 struct task_struct *p;
e3baac47 1783 unsigned long flags;
317f3941 1784
e3baac47
PZ
1785 if (!llist)
1786 return;
1787
1788 raw_spin_lock_irqsave(&rq->lock, flags);
cbce1a68 1789 lockdep_pin_lock(&rq->lock);
317f3941 1790
fa14ff4a
PZ
1791 while (llist) {
1792 p = llist_entry(llist, struct task_struct, wake_entry);
1793 llist = llist_next(llist);
317f3941
PZ
1794 ttwu_do_activate(rq, p, 0);
1795 }
1796
cbce1a68 1797 lockdep_unpin_lock(&rq->lock);
e3baac47 1798 raw_spin_unlock_irqrestore(&rq->lock, flags);
317f3941
PZ
1799}
1800
1801void scheduler_ipi(void)
1802{
f27dde8d
PZ
1803 /*
1804 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1805 * TIF_NEED_RESCHED remotely (for the first time) will also send
1806 * this IPI.
1807 */
8cb75e0c 1808 preempt_fold_need_resched();
f27dde8d 1809
fd2ac4f4 1810 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
1811 return;
1812
1813 /*
1814 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1815 * traditionally all their work was done from the interrupt return
1816 * path. Now that we actually do some work, we need to make sure
1817 * we do call them.
1818 *
1819 * Some archs already do call them, luckily irq_enter/exit nest
1820 * properly.
1821 *
1822 * Arguably we should visit all archs and update all handlers,
1823 * however a fair share of IPIs are still resched only so this would
1824 * somewhat pessimize the simple resched case.
1825 */
1826 irq_enter();
fa14ff4a 1827 sched_ttwu_pending();
ca38062e
SS
1828
1829 /*
1830 * Check if someone kicked us for doing the nohz idle load balance.
1831 */
873b4c65 1832 if (unlikely(got_nohz_idle_kick())) {
6eb57e0d 1833 this_rq()->idle_balance = 1;
ca38062e 1834 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 1835 }
c5d753a5 1836 irq_exit();
317f3941
PZ
1837}
1838
1839static void ttwu_queue_remote(struct task_struct *p, int cpu)
1840{
e3baac47
PZ
1841 struct rq *rq = cpu_rq(cpu);
1842
1843 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1844 if (!set_nr_if_polling(rq->idle))
1845 smp_send_reschedule(cpu);
1846 else
1847 trace_sched_wake_idle_without_ipi(cpu);
1848 }
317f3941 1849}
d6aa8f85 1850
f6be8af1
CL
1851void wake_up_if_idle(int cpu)
1852{
1853 struct rq *rq = cpu_rq(cpu);
1854 unsigned long flags;
1855
fd7de1e8
AL
1856 rcu_read_lock();
1857
1858 if (!is_idle_task(rcu_dereference(rq->curr)))
1859 goto out;
f6be8af1
CL
1860
1861 if (set_nr_if_polling(rq->idle)) {
1862 trace_sched_wake_idle_without_ipi(cpu);
1863 } else {
1864 raw_spin_lock_irqsave(&rq->lock, flags);
1865 if (is_idle_task(rq->curr))
1866 smp_send_reschedule(cpu);
1867 /* Else cpu is not in idle, do nothing here */
1868 raw_spin_unlock_irqrestore(&rq->lock, flags);
1869 }
fd7de1e8
AL
1870
1871out:
1872 rcu_read_unlock();
f6be8af1
CL
1873}
1874
39be3501 1875bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
1876{
1877 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1878}
d6aa8f85 1879#endif /* CONFIG_SMP */
317f3941 1880
c05fbafb
PZ
1881static void ttwu_queue(struct task_struct *p, int cpu)
1882{
1883 struct rq *rq = cpu_rq(cpu);
1884
17d9f311 1885#if defined(CONFIG_SMP)
39be3501 1886 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
f01114cb 1887 sched_clock_cpu(cpu); /* sync clocks x-cpu */
317f3941
PZ
1888 ttwu_queue_remote(p, cpu);
1889 return;
1890 }
1891#endif
1892
c05fbafb 1893 raw_spin_lock(&rq->lock);
cbce1a68 1894 lockdep_pin_lock(&rq->lock);
c05fbafb 1895 ttwu_do_activate(rq, p, 0);
cbce1a68 1896 lockdep_unpin_lock(&rq->lock);
c05fbafb 1897 raw_spin_unlock(&rq->lock);
9ed3811a
TH
1898}
1899
1900/**
1da177e4 1901 * try_to_wake_up - wake up a thread
9ed3811a 1902 * @p: the thread to be awakened
1da177e4 1903 * @state: the mask of task states that can be woken
9ed3811a 1904 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
1905 *
1906 * Put it on the run-queue if it's not already there. The "current"
1907 * thread is always on the run-queue (except when the actual
1908 * re-schedule is in progress), and as such you're allowed to do
1909 * the simpler "current->state = TASK_RUNNING" to mark yourself
1910 * runnable without the overhead of this.
1911 *
e69f6186 1912 * Return: %true if @p was woken up, %false if it was already running
9ed3811a 1913 * or @state didn't match @p's state.
1da177e4 1914 */
e4a52bcb
PZ
1915static int
1916try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1917{
1da177e4 1918 unsigned long flags;
c05fbafb 1919 int cpu, success = 0;
2398f2c6 1920
e0acd0a6
ON
1921 /*
1922 * If we are going to wake up a thread waiting for CONDITION we
 1923 * need to ensure that CONDITION=1 done by the caller cannot be
 1924 * reordered with the p->state check below. This pairs with the mb()
 1925 * in set_current_state() that the waiting thread does.
1926 */
1927 smp_mb__before_spinlock();
013fdb80 1928 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 1929 if (!(p->state & state))
1da177e4
LT
1930 goto out;
1931
fbd705a0
PZ
1932 trace_sched_waking(p);
1933
c05fbafb 1934 success = 1; /* we're going to change ->state */
1da177e4 1935 cpu = task_cpu(p);
1da177e4 1936
c05fbafb
PZ
1937 if (p->on_rq && ttwu_remote(p, wake_flags))
1938 goto stat;
1da177e4 1939
1da177e4 1940#ifdef CONFIG_SMP
e9c84311 1941 /*
c05fbafb
PZ
1942 * If the owning (remote) cpu is still in the middle of schedule() with
 1943 * this task as prev, wait until it's done referencing the task.
e9c84311 1944 */
f3e94786 1945 while (p->on_cpu)
e4a52bcb 1946 cpu_relax();
0970d299 1947 /*
e4a52bcb 1948 * Pairs with the smp_wmb() in finish_lock_switch().
0970d299 1949 */
e4a52bcb 1950 smp_rmb();
1da177e4 1951
a8e4f2ea 1952 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 1953 p->state = TASK_WAKING;
e7693a36 1954
e4a52bcb 1955 if (p->sched_class->task_waking)
74f8e4b2 1956 p->sched_class->task_waking(p);
efbbd05a 1957
ac66f547 1958 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
1959 if (task_cpu(p) != cpu) {
1960 wake_flags |= WF_MIGRATED;
e4a52bcb 1961 set_task_cpu(p, cpu);
f339b9dc 1962 }
1da177e4 1963#endif /* CONFIG_SMP */
1da177e4 1964
c05fbafb
PZ
1965 ttwu_queue(p, cpu);
1966stat:
b84cb5df 1967 ttwu_stat(p, cpu, wake_flags);
1da177e4 1968out:
013fdb80 1969 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
1970
1971 return success;
1972}
1973
21aa9af0
TH
1974/**
1975 * try_to_wake_up_local - try to wake up a local task with rq lock held
1976 * @p: the thread to be awakened
1977 *
2acca55e 1978 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 1979 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 1980 * the current task.
21aa9af0
TH
1981 */
1982static void try_to_wake_up_local(struct task_struct *p)
1983{
1984 struct rq *rq = task_rq(p);
21aa9af0 1985
383efcd0
TH
1986 if (WARN_ON_ONCE(rq != this_rq()) ||
1987 WARN_ON_ONCE(p == current))
1988 return;
1989
21aa9af0
TH
1990 lockdep_assert_held(&rq->lock);
1991
2acca55e 1992 if (!raw_spin_trylock(&p->pi_lock)) {
cbce1a68
PZ
1993 /*
 1994 * This is OK because current is on_cpu, which avoids it being
 1995 * picked for load-balance; preemption/IRQs are still disabled,
 1996 * avoiding further scheduler activity on it, and we've not yet
 1997 * picked a replacement task.
1998 */
1999 lockdep_unpin_lock(&rq->lock);
2acca55e
PZ
2000 raw_spin_unlock(&rq->lock);
2001 raw_spin_lock(&p->pi_lock);
2002 raw_spin_lock(&rq->lock);
cbce1a68 2003 lockdep_pin_lock(&rq->lock);
2acca55e
PZ
2004 }
2005
21aa9af0 2006 if (!(p->state & TASK_NORMAL))
2acca55e 2007 goto out;
21aa9af0 2008
fbd705a0
PZ
2009 trace_sched_waking(p);
2010
da0c1e65 2011 if (!task_on_rq_queued(p))
d7c01d27
PZ
2012 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2013
23f41eeb 2014 ttwu_do_wakeup(rq, p, 0);
b84cb5df 2015 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
2016out:
2017 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
2018}
2019
50fa610a
DH
2020/**
2021 * wake_up_process - Wake up a specific process
2022 * @p: The process to be woken up.
2023 *
2024 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
2025 * processes.
2026 *
2027 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a
DH
2028 *
2029 * It may be assumed that this function implies a write memory barrier before
2030 * changing the task state if and only if any tasks are woken up.
2031 */
7ad5b3a5 2032int wake_up_process(struct task_struct *p)
1da177e4 2033{
9067ac85
ON
2034 WARN_ON(task_is_stopped_or_traced(p));
2035 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 2036}
1da177e4
LT
2037EXPORT_SYMBOL(wake_up_process);
2038
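/*
 * Illustrative sketch (not part of core.c): the canonical sleeper/waker
 * pairing that the barrier rule above refers to; my_cond and my_task are
 * hypothetical names.
 *
 * Sleeper:
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!my_cond) {
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * Waker:
 *	my_cond = 1;
 *	wake_up_process(my_task);
 *
 * The barrier in set_current_state() pairs with the one taken before the
 * p->state check in try_to_wake_up(), so either the waker sees the
 * sleeper's updated ->state or the sleeper sees my_cond == 1.
 */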
7ad5b3a5 2039int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2040{
2041 return try_to_wake_up(p, state, 0);
2042}
2043
a5e7be3b
JL
2044/*
2045 * This function clears the sched_dl_entity static params.
2046 */
2047void __dl_clear_params(struct task_struct *p)
2048{
2049 struct sched_dl_entity *dl_se = &p->dl;
2050
2051 dl_se->dl_runtime = 0;
2052 dl_se->dl_deadline = 0;
2053 dl_se->dl_period = 0;
2054 dl_se->flags = 0;
2055 dl_se->dl_bw = 0;
40767b0d
PZ
2056
2057 dl_se->dl_throttled = 0;
2058 dl_se->dl_new = 1;
2059 dl_se->dl_yielded = 0;
a5e7be3b
JL
2060}
2061
1da177e4
LT
2062/*
2063 * Perform scheduler related setup for a newly forked process p.
2064 * p is forked by current.
dd41f596
IM
2065 *
2066 * __sched_fork() is basic setup used by init_idle() too:
2067 */
5e1576ed 2068static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2069{
fd2f4419
PZ
2070 p->on_rq = 0;
2071
2072 p->se.on_rq = 0;
dd41f596
IM
2073 p->se.exec_start = 0;
2074 p->se.sum_exec_runtime = 0;
f6cf891c 2075 p->se.prev_sum_exec_runtime = 0;
6c594c21 2076 p->se.nr_migrations = 0;
da7a735e 2077 p->se.vruntime = 0;
fd2f4419 2078 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d
IM
2079
2080#ifdef CONFIG_SCHEDSTATS
41acab88 2081 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2082#endif
476d139c 2083
aab03e05 2084 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 2085 init_dl_task_timer(&p->dl);
a5e7be3b 2086 __dl_clear_params(p);
aab03e05 2087
fa717060 2088 INIT_LIST_HEAD(&p->rt.run_list);
476d139c 2089
e107be36
AK
2090#ifdef CONFIG_PREEMPT_NOTIFIERS
2091 INIT_HLIST_HEAD(&p->preempt_notifiers);
2092#endif
cbee9f88
PZ
2093
2094#ifdef CONFIG_NUMA_BALANCING
2095 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
7e8d16b6 2096 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
cbee9f88
PZ
2097 p->mm->numa_scan_seq = 0;
2098 }
2099
5e1576ed
RR
2100 if (clone_flags & CLONE_VM)
2101 p->numa_preferred_nid = current->numa_preferred_nid;
2102 else
2103 p->numa_preferred_nid = -1;
2104
cbee9f88
PZ
2105 p->node_stamp = 0ULL;
2106 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
4b96a29b 2107 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
cbee9f88 2108 p->numa_work.next = &p->numa_work;
44dba3d5 2109 p->numa_faults = NULL;
7e2703e6
RR
2110 p->last_task_numa_placement = 0;
2111 p->last_sum_exec_runtime = 0;
8c8a743c 2112
8c8a743c 2113 p->numa_group = NULL;
cbee9f88 2114#endif /* CONFIG_NUMA_BALANCING */
dd41f596
IM
2115}
2116
1a687c2e 2117#ifdef CONFIG_NUMA_BALANCING
3105b86a 2118#ifdef CONFIG_SCHED_DEBUG
1a687c2e
MG
2119void set_numabalancing_state(bool enabled)
2120{
2121 if (enabled)
2122 sched_feat_set("NUMA");
2123 else
2124 sched_feat_set("NO_NUMA");
2125}
3105b86a
MG
2126#else
2127__read_mostly bool numabalancing_enabled;
2128
2129void set_numabalancing_state(bool enabled)
2130{
2131 numabalancing_enabled = enabled;
dd41f596 2132}
3105b86a 2133#endif /* CONFIG_SCHED_DEBUG */
54a43d54
AK
2134
2135#ifdef CONFIG_PROC_SYSCTL
2136int sysctl_numa_balancing(struct ctl_table *table, int write,
2137 void __user *buffer, size_t *lenp, loff_t *ppos)
2138{
2139 struct ctl_table t;
2140 int err;
2141 int state = numabalancing_enabled;
2142
2143 if (write && !capable(CAP_SYS_ADMIN))
2144 return -EPERM;
2145
2146 t = *table;
2147 t.data = &state;
2148 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2149 if (err < 0)
2150 return err;
2151 if (write)
2152 set_numabalancing_state(state);
2153 return err;
2154}
2155#endif
2156#endif
dd41f596
IM
2157
2158/*
2159 * fork()/clone()-time setup:
2160 */
aab03e05 2161int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2162{
0122ec5b 2163 unsigned long flags;
dd41f596
IM
2164 int cpu = get_cpu();
2165
5e1576ed 2166 __sched_fork(clone_flags, p);
06b83b5f 2167 /*
0017d735 2168 * We mark the process as running here. This guarantees that
06b83b5f
PZ
2169 * nobody will actually run it, and a signal or other external
2170 * event cannot wake it up and insert it on the runqueue either.
2171 */
0017d735 2172 p->state = TASK_RUNNING;
dd41f596 2173
c350a04e
MG
2174 /*
2175 * Make sure we do not leak PI boosting priority to the child.
2176 */
2177 p->prio = current->normal_prio;
2178
b9dc29e7
MG
2179 /*
2180 * Revert to default priority/policy on fork if requested.
2181 */
2182 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 2183 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 2184 p->policy = SCHED_NORMAL;
6c697bdf 2185 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
2186 p->rt_priority = 0;
2187 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2188 p->static_prio = NICE_TO_PRIO(0);
2189
2190 p->prio = p->normal_prio = __normal_prio(p);
2191 set_load_weight(p);
6c697bdf 2192
b9dc29e7
MG
2193 /*
2194 * We don't need the reset flag anymore after the fork. It has
2195 * fulfilled its duty:
2196 */
2197 p->sched_reset_on_fork = 0;
2198 }
ca94c442 2199
aab03e05
DF
2200 if (dl_prio(p->prio)) {
2201 put_cpu();
2202 return -EAGAIN;
2203 } else if (rt_prio(p->prio)) {
2204 p->sched_class = &rt_sched_class;
2205 } else {
2ddbf952 2206 p->sched_class = &fair_sched_class;
aab03e05 2207 }
b29739f9 2208
cd29fe6f
PZ
2209 if (p->sched_class->task_fork)
2210 p->sched_class->task_fork(p);
2211
86951599
PZ
2212 /*
2213 * The child is not yet in the pid-hash so no cgroup attach races,
 2214 * and the cgroup is pinned to this child because cgroup_fork()
 2215 * is run before sched_fork().
2216 *
2217 * Silence PROVE_RCU.
2218 */
0122ec5b 2219 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 2220 set_task_cpu(p, cpu);
0122ec5b 2221 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2222
f6db8347 2223#ifdef CONFIG_SCHED_INFO
dd41f596 2224 if (likely(sched_info_on()))
52f17b6c 2225 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2226#endif
3ca7a440
PZ
2227#if defined(CONFIG_SMP)
2228 p->on_cpu = 0;
4866cde0 2229#endif
01028747 2230 init_task_preempt_count(p);
806c09a7 2231#ifdef CONFIG_SMP
917b627d 2232 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 2233 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 2234#endif
917b627d 2235
476d139c 2236 put_cpu();
aab03e05 2237 return 0;
1da177e4
LT
2238}
2239
332ac17e
DF
2240unsigned long to_ratio(u64 period, u64 runtime)
2241{
2242 if (runtime == RUNTIME_INF)
2243 return 1ULL << 20;
2244
2245 /*
2246 * Doing this here saves a lot of checks in all
2247 * the calling paths, and returning zero seems
2248 * safe for them anyway.
2249 */
2250 if (period == 0)
2251 return 0;
2252
2253 return div64_u64(runtime << 20, period);
2254}
2255
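/*
 * Worked example (illustrative, hypothetical values): a reservation of
 * runtime = 10ms out of every period = 100ms gives
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *		= (10000000 << 20) / 100000000
 *		= 104857
 *
 * i.e. roughly 0.1 in the <<20 fixed-point scale, where 1 << 20 stands
 * for 100% of a CPU and RUNTIME_INF maps to exactly 1 << 20.
 */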
2256#ifdef CONFIG_SMP
2257inline struct dl_bw *dl_bw_of(int i)
2258{
f78f5b90
PM
2259 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2260 "sched RCU must be held");
332ac17e
DF
2261 return &cpu_rq(i)->rd->dl_bw;
2262}
2263
de212f18 2264static inline int dl_bw_cpus(int i)
332ac17e 2265{
de212f18
PZ
2266 struct root_domain *rd = cpu_rq(i)->rd;
2267 int cpus = 0;
2268
f78f5b90
PM
2269 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2270 "sched RCU must be held");
de212f18
PZ
2271 for_each_cpu_and(i, rd->span, cpu_active_mask)
2272 cpus++;
2273
2274 return cpus;
332ac17e
DF
2275}
2276#else
2277inline struct dl_bw *dl_bw_of(int i)
2278{
2279 return &cpu_rq(i)->dl.dl_bw;
2280}
2281
de212f18 2282static inline int dl_bw_cpus(int i)
332ac17e
DF
2283{
2284 return 1;
2285}
2286#endif
2287
332ac17e
DF
2288/*
2289 * We must be sure that accepting a new task (or allowing changing the
2290 * parameters of an existing one) is consistent with the bandwidth
2291 * constraints. If yes, this function also accordingly updates the currently
2292 * allocated bandwidth to reflect the new situation.
2293 *
2294 * This function is called while holding p's rq->lock.
40767b0d
PZ
2295 *
2296 * XXX we should delay bw change until the task's 0-lag point, see
2297 * __setparam_dl().
332ac17e
DF
2298 */
2299static int dl_overflow(struct task_struct *p, int policy,
2300 const struct sched_attr *attr)
2301{
2302
2303 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
4df1638c 2304 u64 period = attr->sched_period ?: attr->sched_deadline;
332ac17e
DF
2305 u64 runtime = attr->sched_runtime;
2306 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
de212f18 2307 int cpus, err = -1;
332ac17e
DF
2308
2309 if (new_bw == p->dl.dl_bw)
2310 return 0;
2311
2312 /*
 2313 * Whether a task enters, leaves, or stays -deadline but changes
 2314 * its parameters, we may need to update the total allocated
 2315 * bandwidth of the container accordingly.
2316 */
2317 raw_spin_lock(&dl_b->lock);
de212f18 2318 cpus = dl_bw_cpus(task_cpu(p));
332ac17e
DF
2319 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2320 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2321 __dl_add(dl_b, new_bw);
2322 err = 0;
2323 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2324 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2325 __dl_clear(dl_b, p->dl.dl_bw);
2326 __dl_add(dl_b, new_bw);
2327 err = 0;
2328 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2329 __dl_clear(dl_b, p->dl.dl_bw);
2330 err = 0;
2331 }
2332 raw_spin_unlock(&dl_b->lock);
2333
2334 return err;
2335}
2336
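/*
 * Illustrative example (hypothetical numbers): on a 4-CPU root domain
 * with the default 95% deadline bandwidth per CPU, the pool is roughly
 * 4 * (0.95 << 20). A task asking for runtime = 30ms every period =
 * 100ms contributes new_bw = to_ratio(100ms, 30ms), about 0.3 << 20;
 * the request is admitted only if the bandwidth already allocated in
 * the root domain (minus the task's own old bandwidth, if it was
 * -deadline before) plus new_bw still fits in that pool. Otherwise
 * dl_overflow() keeps err == -1 and the policy change is rejected.
 */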
2337extern void init_dl_bw(struct dl_bw *dl_b);
2338
1da177e4
LT
2339/*
2340 * wake_up_new_task - wake up a newly created task for the first time.
2341 *
2342 * This function will do some initial scheduler statistics housekeeping
2343 * that must be done for every newly created context, then puts the task
2344 * on the runqueue and wakes it.
2345 */
3e51e3ed 2346void wake_up_new_task(struct task_struct *p)
1da177e4
LT
2347{
2348 unsigned long flags;
dd41f596 2349 struct rq *rq;
fabf318e 2350
ab2515c4 2351 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
2352#ifdef CONFIG_SMP
2353 /*
2354 * Fork balancing, do it here and not earlier because:
2355 * - cpus_allowed can change in the fork path
2356 * - any previously selected cpu might disappear through hotplug
fabf318e 2357 */
ac66f547 2358 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735
PZ
2359#endif
2360
a75cdaa9 2361 /* Initialize new task's runnable average */
540247fb 2362 init_entity_runnable_average(&p->se);
ab2515c4 2363 rq = __task_rq_lock(p);
cd29fe6f 2364 activate_task(rq, p, 0);
da0c1e65 2365 p->on_rq = TASK_ON_RQ_QUEUED;
fbd705a0 2366 trace_sched_wakeup_new(p);
a7558e01 2367 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2368#ifdef CONFIG_SMP
efbbd05a
PZ
2369 if (p->sched_class->task_woken)
2370 p->sched_class->task_woken(rq, p);
9a897c5a 2371#endif
0122ec5b 2372 task_rq_unlock(rq, p, &flags);
1da177e4
LT
2373}
2374
e107be36
AK
2375#ifdef CONFIG_PREEMPT_NOTIFIERS
2376
1cde2930
PZ
2377static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2378
2ecd9d29
PZ
2379void preempt_notifier_inc(void)
2380{
2381 static_key_slow_inc(&preempt_notifier_key);
2382}
2383EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2384
2385void preempt_notifier_dec(void)
2386{
2387 static_key_slow_dec(&preempt_notifier_key);
2388}
2389EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2390
e107be36 2391/**
80dd99b3 2392 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2393 * @notifier: notifier struct to register
e107be36
AK
2394 */
2395void preempt_notifier_register(struct preempt_notifier *notifier)
2396{
2ecd9d29
PZ
2397 if (!static_key_false(&preempt_notifier_key))
2398 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2399
e107be36
AK
2400 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2401}
2402EXPORT_SYMBOL_GPL(preempt_notifier_register);
2403
2404/**
2405 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2406 * @notifier: notifier struct to unregister
e107be36 2407 *
d84525a8 2408 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
2409 */
2410void preempt_notifier_unregister(struct preempt_notifier *notifier)
2411{
2412 hlist_del(&notifier->link);
2413}
2414EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2415
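/*
 * Usage sketch (illustrative; KVM is the main in-tree user): a caller
 * supplies a struct preempt_ops with sched_in/sched_out callbacks and
 * attaches a notifier to current. Names prefixed with my_ are
 * hypothetical.
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{ ... }
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{ ... }
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_preempt_ops);
 *	preempt_notifier_register(&my_notifier);
 *	...
 *	preempt_notifier_unregister(&my_notifier);
 *	preempt_notifier_dec();
 */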
1cde2930 2416static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2417{
2418 struct preempt_notifier *notifier;
e107be36 2419
b67bfe0d 2420 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2421 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2422}
2423
1cde2930
PZ
2424static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2425{
2426 if (static_key_false(&preempt_notifier_key))
2427 __fire_sched_in_preempt_notifiers(curr);
2428}
2429
e107be36 2430static void
1cde2930
PZ
2431__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2432 struct task_struct *next)
e107be36
AK
2433{
2434 struct preempt_notifier *notifier;
e107be36 2435
b67bfe0d 2436 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2437 notifier->ops->sched_out(notifier, next);
2438}
2439
1cde2930
PZ
2440static __always_inline void
2441fire_sched_out_preempt_notifiers(struct task_struct *curr,
2442 struct task_struct *next)
2443{
2444 if (static_key_false(&preempt_notifier_key))
2445 __fire_sched_out_preempt_notifiers(curr, next);
2446}
2447
6d6bc0ad 2448#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 2449
1cde2930 2450static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2451{
2452}
2453
1cde2930 2454static inline void
e107be36
AK
2455fire_sched_out_preempt_notifiers(struct task_struct *curr,
2456 struct task_struct *next)
2457{
2458}
2459
6d6bc0ad 2460#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2461
4866cde0
NP
2462/**
2463 * prepare_task_switch - prepare to switch tasks
2464 * @rq: the runqueue preparing to switch
421cee29 2465 * @prev: the current task that is being switched out
4866cde0
NP
2466 * @next: the task we are going to switch to.
2467 *
2468 * This is called with the rq lock held and interrupts off. It must
2469 * be paired with a subsequent finish_task_switch after the context
2470 * switch.
2471 *
2472 * prepare_task_switch sets up locking and calls architecture specific
2473 * hooks.
2474 */
e107be36
AK
2475static inline void
2476prepare_task_switch(struct rq *rq, struct task_struct *prev,
2477 struct task_struct *next)
4866cde0 2478{
895dd92c 2479 trace_sched_switch(prev, next);
43148951 2480 sched_info_switch(rq, prev, next);
fe4b04fa 2481 perf_event_task_sched_out(prev, next);
e107be36 2482 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2483 prepare_lock_switch(rq, next);
2484 prepare_arch_switch(next);
2485}
2486
1da177e4
LT
2487/**
2488 * finish_task_switch - clean up after a task-switch
2489 * @prev: the thread we just switched away from.
2490 *
4866cde0
NP
2491 * finish_task_switch must be called after the context switch, paired
2492 * with a prepare_task_switch call before the context switch.
2493 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2494 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2495 *
2496 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2497 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2498 * with the lock held can cause deadlocks; see schedule() for
2499 * details.)
dfa50b60
ON
2500 *
 2501 * The context switch has flipped the stack from under us and restored the
2502 * local variables which were saved when this task called schedule() in the
2503 * past. prev == current is still correct but we need to recalculate this_rq
2504 * because prev may have moved to another CPU.
1da177e4 2505 */
dfa50b60 2506static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
2507 __releases(rq->lock)
2508{
dfa50b60 2509 struct rq *rq = this_rq();
1da177e4 2510 struct mm_struct *mm = rq->prev_mm;
55a101f8 2511 long prev_state;
1da177e4
LT
2512
2513 rq->prev_mm = NULL;
2514
2515 /*
2516 * A task struct has one reference for the use as "current".
c394cc9f 2517 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2518 * schedule one last time. The schedule call will never return, and
2519 * the scheduled task must drop that reference.
95913d97
PZ
2520 *
2521 * We must observe prev->state before clearing prev->on_cpu (in
2522 * finish_lock_switch), otherwise a concurrent wakeup can get prev
 2523 * running on another CPU and we could race with its RUNNING -> DEAD
2524 * transition, resulting in a double drop.
1da177e4 2525 */
55a101f8 2526 prev_state = prev->state;
bf9fae9f 2527 vtime_task_switch(prev);
a8d757ef 2528 perf_event_task_sched_in(prev, current);
4866cde0 2529 finish_lock_switch(rq, prev);
01f23e16 2530 finish_arch_post_lock_switch();
e8fa1362 2531
e107be36 2532 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2533 if (mm)
2534 mmdrop(mm);
c394cc9f 2535 if (unlikely(prev_state == TASK_DEAD)) {
e6c390f2
DF
2536 if (prev->sched_class->task_dead)
2537 prev->sched_class->task_dead(prev);
2538
c6fd91f0 2539 /*
2540 * Remove function-return probe instances associated with this
2541 * task and put them back on the free list.
9761eea8 2542 */
c6fd91f0 2543 kprobe_flush_task(prev);
1da177e4 2544 put_task_struct(prev);
c6fd91f0 2545 }
99e5ada9 2546
de734f89 2547 tick_nohz_task_switch();
dfa50b60 2548 return rq;
1da177e4
LT
2549}
2550
3f029d3c
GH
2551#ifdef CONFIG_SMP
2552
3f029d3c 2553/* rq->lock is NOT held, but preemption is disabled */
e3fca9e7 2554static void __balance_callback(struct rq *rq)
3f029d3c 2555{
e3fca9e7
PZ
2556 struct callback_head *head, *next;
2557 void (*func)(struct rq *rq);
2558 unsigned long flags;
3f029d3c 2559
e3fca9e7
PZ
2560 raw_spin_lock_irqsave(&rq->lock, flags);
2561 head = rq->balance_callback;
2562 rq->balance_callback = NULL;
2563 while (head) {
2564 func = (void (*)(struct rq *))head->func;
2565 next = head->next;
2566 head->next = NULL;
2567 head = next;
3f029d3c 2568
e3fca9e7 2569 func(rq);
3f029d3c 2570 }
e3fca9e7
PZ
2571 raw_spin_unlock_irqrestore(&rq->lock, flags);
2572}
2573
2574static inline void balance_callback(struct rq *rq)
2575{
2576 if (unlikely(rq->balance_callback))
2577 __balance_callback(rq);
3f029d3c
GH
2578}
2579
2580#else
da19ab51 2581
e3fca9e7 2582static inline void balance_callback(struct rq *rq)
3f029d3c 2583{
1da177e4
LT
2584}
2585
3f029d3c
GH
2586#endif
2587
1da177e4
LT
2588/**
2589 * schedule_tail - first thing a freshly forked thread must call.
2590 * @prev: the thread we just switched away from.
2591 */
722a9f92 2592asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
2593 __releases(rq->lock)
2594{
1a43a14a 2595 struct rq *rq;
da19ab51 2596
1a43a14a
ON
 2597 /* finish_task_switch() drops rq->lock and enables preemption */
2598 preempt_disable();
dfa50b60 2599 rq = finish_task_switch(prev);
e3fca9e7 2600 balance_callback(rq);
1a43a14a 2601 preempt_enable();
70b97a7f 2602
1da177e4 2603 if (current->set_child_tid)
b488893a 2604 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2605}
2606
2607/*
dfa50b60 2608 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 2609 */
dfa50b60 2610static inline struct rq *
70b97a7f 2611context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2612 struct task_struct *next)
1da177e4 2613{
dd41f596 2614 struct mm_struct *mm, *oldmm;
1da177e4 2615
e107be36 2616 prepare_task_switch(rq, prev, next);
fe4b04fa 2617
dd41f596
IM
2618 mm = next->mm;
2619 oldmm = prev->active_mm;
9226d125
ZA
2620 /*
2621 * For paravirt, this is coupled with an exit in switch_to to
2622 * combine the page table reload and the switch backend into
2623 * one hypercall.
2624 */
224101ed 2625 arch_start_context_switch(prev);
9226d125 2626
31915ab4 2627 if (!mm) {
1da177e4
LT
2628 next->active_mm = oldmm;
2629 atomic_inc(&oldmm->mm_count);
2630 enter_lazy_tlb(oldmm, next);
2631 } else
2632 switch_mm(oldmm, mm, next);
2633
31915ab4 2634 if (!prev->mm) {
1da177e4 2635 prev->active_mm = NULL;
1da177e4
LT
2636 rq->prev_mm = oldmm;
2637 }
3a5f5e48
IM
2638 /*
 2639 * The runqueue lock will be released by the next
2640 * task (which is an invalid locking op but in the case
2641 * of the scheduler it's an obvious special-case), so we
2642 * do an early lockdep release here:
2643 */
cbce1a68 2644 lockdep_unpin_lock(&rq->lock);
8a25d5de 2645 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1da177e4
LT
2646
2647 /* Here we just switch the register state and the stack. */
2648 switch_to(prev, next, prev);
dd41f596 2649 barrier();
dfa50b60
ON
2650
2651 return finish_task_switch(prev);
1da177e4
LT
2652}
2653
2654/*
1c3e8264 2655 * nr_running and nr_context_switches:
1da177e4
LT
2656 *
2657 * externally visible scheduler statistics: current number of runnable
1c3e8264 2658 * threads, total number of context switches performed since bootup.
1da177e4
LT
2659 */
2660unsigned long nr_running(void)
2661{
2662 unsigned long i, sum = 0;
2663
2664 for_each_online_cpu(i)
2665 sum += cpu_rq(i)->nr_running;
2666
2667 return sum;
f711f609 2668}
1da177e4 2669
2ee507c4
TC
2670/*
2671 * Check if only the current task is running on the cpu.
00cc1633
DD
2672 *
2673 * Caution: this function does not check that the caller has disabled
2674 * preemption, thus the result might have a time-of-check-to-time-of-use
 2675 * race. The caller is responsible for using it correctly, for example:
2676 *
2677 * - from a non-preemptable section (of course)
2678 *
2679 * - from a thread that is bound to a single CPU
2680 *
2681 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
2682 */
2683bool single_task_running(void)
2684{
00cc1633 2685 return raw_rq()->nr_running == 1;
2ee507c4
TC
2686}
2687EXPORT_SYMBOL(single_task_running);
2688
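/*
 * Illustrative use (hypothetical; KVM's halt polling takes a similar
 * hint): keep busy-polling only while nothing else is runnable on this
 * CPU. my_event_pending() is a made-up helper.
 *
 *	while (!my_event_pending() && single_task_running())
 *		cpu_relax();
 *
 * The caveats above still apply: the caller should be CPU-bound,
 * non-preemptible, or in a tight loop for the hint to stay meaningful.
 */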
1da177e4 2689unsigned long long nr_context_switches(void)
46cb4b7c 2690{
cc94abfc
SR
2691 int i;
2692 unsigned long long sum = 0;
46cb4b7c 2693
0a945022 2694 for_each_possible_cpu(i)
1da177e4 2695 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2696
1da177e4
LT
2697 return sum;
2698}
483b4ee6 2699
1da177e4
LT
2700unsigned long nr_iowait(void)
2701{
2702 unsigned long i, sum = 0;
483b4ee6 2703
0a945022 2704 for_each_possible_cpu(i)
1da177e4 2705 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2706
1da177e4
LT
2707 return sum;
2708}
483b4ee6 2709
8c215bd3 2710unsigned long nr_iowait_cpu(int cpu)
69d25870 2711{
8c215bd3 2712 struct rq *this = cpu_rq(cpu);
69d25870
AV
2713 return atomic_read(&this->nr_iowait);
2714}
46cb4b7c 2715
372ba8cb
MG
2716void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2717{
3289bdb4
PZ
2718 struct rq *rq = this_rq();
2719 *nr_waiters = atomic_read(&rq->nr_iowait);
2720 *load = rq->load.weight;
372ba8cb
MG
2721}
2722
dd41f596 2723#ifdef CONFIG_SMP
8a0be9ef 2724
46cb4b7c 2725/*
38022906
PZ
2726 * sched_exec - execve() is a valuable balancing opportunity, because at
2727 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2728 */
38022906 2729void sched_exec(void)
46cb4b7c 2730{
38022906 2731 struct task_struct *p = current;
1da177e4 2732 unsigned long flags;
0017d735 2733 int dest_cpu;
46cb4b7c 2734
8f42ced9 2735 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 2736 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
2737 if (dest_cpu == smp_processor_id())
2738 goto unlock;
38022906 2739
8f42ced9 2740 if (likely(cpu_active(dest_cpu))) {
969c7921 2741 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2742
8f42ced9
PZ
2743 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2744 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2745 return;
2746 }
0017d735 2747unlock:
8f42ced9 2748 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2749}
dd41f596 2750
1da177e4
LT
2751#endif
2752
1da177e4 2753DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2754DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2755
2756EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2757EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 2758
c5f8d995
HS
2759/*
2760 * Return accounted runtime for the task.
2761 * In case the task is currently running, return the runtime plus current's
 2762 * pending runtime that has not been accounted yet.
2763 */
2764unsigned long long task_sched_runtime(struct task_struct *p)
2765{
2766 unsigned long flags;
2767 struct rq *rq;
6e998916 2768 u64 ns;
c5f8d995 2769
911b2898
PZ
2770#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2771 /*
2772 * 64-bit doesn't need locks to atomically read a 64bit value.
 2773 * So we have an optimization chance when the task's delta_exec is 0.
2774 * Reading ->on_cpu is racy, but this is ok.
2775 *
2776 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2777 * If we race with it entering cpu, unaccounted time is 0. This is
2778 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
2779 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2780 * been accounted, so we're correct here as well.
911b2898 2781 */
da0c1e65 2782 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
2783 return p->se.sum_exec_runtime;
2784#endif
2785
c5f8d995 2786 rq = task_rq_lock(p, &flags);
6e998916
SG
2787 /*
2788 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2789 * project cycles that may never be accounted to this
2790 * thread, breaking clock_gettime().
2791 */
2792 if (task_current(rq, p) && task_on_rq_queued(p)) {
2793 update_rq_clock(rq);
2794 p->sched_class->update_curr(rq);
2795 }
2796 ns = p->se.sum_exec_runtime;
0122ec5b 2797 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2798
2799 return ns;
2800}
48f24c4d 2801
7835b98b
CL
2802/*
2803 * This function gets called by the timer code, with HZ frequency.
2804 * We call it with interrupts disabled.
7835b98b
CL
2805 */
2806void scheduler_tick(void)
2807{
7835b98b
CL
2808 int cpu = smp_processor_id();
2809 struct rq *rq = cpu_rq(cpu);
dd41f596 2810 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2811
2812 sched_clock_tick();
dd41f596 2813
05fa785c 2814 raw_spin_lock(&rq->lock);
3e51f33f 2815 update_rq_clock(rq);
fa85ae24 2816 curr->sched_class->task_tick(rq, curr, 0);
83dfd523 2817 update_cpu_load_active(rq);
3289bdb4 2818 calc_global_load_tick(rq);
05fa785c 2819 raw_spin_unlock(&rq->lock);
7835b98b 2820
e9d2b064 2821 perf_event_task_tick();
e220d2dc 2822
e418e1c2 2823#ifdef CONFIG_SMP
6eb57e0d 2824 rq->idle_balance = idle_cpu(cpu);
7caff66f 2825 trigger_load_balance(rq);
e418e1c2 2826#endif
265f22a9 2827 rq_last_tick_reset(rq);
1da177e4
LT
2828}
2829
265f22a9
FW
2830#ifdef CONFIG_NO_HZ_FULL
2831/**
2832 * scheduler_tick_max_deferment
2833 *
2834 * Keep at least one tick per second when a single
2835 * active task is running because the scheduler doesn't
 2836 * yet completely support a full dynticks environment.
2837 *
2838 * This makes sure that uptime, CFS vruntime, load
2839 * balancing, etc... continue to move forward, even
2840 * with a very low granularity.
e69f6186
YB
2841 *
2842 * Return: Maximum deferment in nanoseconds.
265f22a9
FW
2843 */
2844u64 scheduler_tick_max_deferment(void)
2845{
2846 struct rq *rq = this_rq();
316c1608 2847 unsigned long next, now = READ_ONCE(jiffies);
265f22a9
FW
2848
2849 next = rq->last_sched_tick + HZ;
2850
2851 if (time_before_eq(next, now))
2852 return 0;
2853
8fe8ff09 2854 return jiffies_to_nsecs(next - now);
1da177e4 2855}
265f22a9 2856#endif
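/*
 * Worked example (illustrative): with HZ == 1000, if the last tick on
 * this runqueue was 600 jiffies ago then next - now == 400, so the tick
 * may be deferred by jiffies_to_nsecs(400) == 400ms. Once a full second
 * has passed since the last tick the function returns 0 and the tick
 * can no longer be deferred.
 */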
1da177e4 2857
132380a0 2858notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2859{
2860 if (in_lock_functions(addr)) {
2861 addr = CALLER_ADDR2;
2862 if (in_lock_functions(addr))
2863 addr = CALLER_ADDR3;
2864 }
2865 return addr;
2866}
1da177e4 2867
7e49fcce
SR
2868#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2869 defined(CONFIG_PREEMPT_TRACER))
2870
edafe3a5 2871void preempt_count_add(int val)
1da177e4 2872{
6cd8a4bb 2873#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2874 /*
2875 * Underflow?
2876 */
9a11b49a
IM
2877 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2878 return;
6cd8a4bb 2879#endif
bdb43806 2880 __preempt_count_add(val);
6cd8a4bb 2881#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2882 /*
2883 * Spinlock count overflowing soon?
2884 */
33859f7f
MOS
2885 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2886 PREEMPT_MASK - 10);
6cd8a4bb 2887#endif
8f47b187
TG
2888 if (preempt_count() == val) {
2889 unsigned long ip = get_parent_ip(CALLER_ADDR1);
2890#ifdef CONFIG_DEBUG_PREEMPT
2891 current->preempt_disable_ip = ip;
2892#endif
2893 trace_preempt_off(CALLER_ADDR0, ip);
2894 }
1da177e4 2895}
bdb43806 2896EXPORT_SYMBOL(preempt_count_add);
edafe3a5 2897NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 2898
edafe3a5 2899void preempt_count_sub(int val)
1da177e4 2900{
6cd8a4bb 2901#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2902 /*
2903 * Underflow?
2904 */
01e3eb82 2905 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 2906 return;
1da177e4
LT
2907 /*
2908 * Is the spinlock portion underflowing?
2909 */
9a11b49a
IM
2910 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2911 !(preempt_count() & PREEMPT_MASK)))
2912 return;
6cd8a4bb 2913#endif
9a11b49a 2914
6cd8a4bb
SR
2915 if (preempt_count() == val)
2916 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
bdb43806 2917 __preempt_count_sub(val);
1da177e4 2918}
bdb43806 2919EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 2920NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4
LT
2921
2922#endif
2923
2924/*
dd41f596 2925 * Print scheduling while atomic bug:
1da177e4 2926 */
dd41f596 2927static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 2928{
664dfa65
DJ
2929 if (oops_in_progress)
2930 return;
2931
3df0fc5b
PZ
2932 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2933 prev->comm, prev->pid, preempt_count());
838225b4 2934
dd41f596 2935 debug_show_held_locks(prev);
e21f5b15 2936 print_modules();
dd41f596
IM
2937 if (irqs_disabled())
2938 print_irqtrace_events(prev);
8f47b187
TG
2939#ifdef CONFIG_DEBUG_PREEMPT
2940 if (in_atomic_preempt_off()) {
2941 pr_err("Preemption disabled at:");
2942 print_ip_sym(current->preempt_disable_ip);
2943 pr_cont("\n");
2944 }
2945#endif
6135fc1e 2946 dump_stack();
373d4d09 2947 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 2948}
1da177e4 2949
dd41f596
IM
2950/*
2951 * Various schedule()-time debugging checks and statistics:
2952 */
2953static inline void schedule_debug(struct task_struct *prev)
2954{
0d9e2632
AT
2955#ifdef CONFIG_SCHED_STACK_END_CHECK
2956 BUG_ON(unlikely(task_stack_end_corrupted(prev)));
2957#endif
1da177e4 2958 /*
41a2d6cf 2959 * Test if we are atomic. Since do_exit() needs to call into
192301e7
ON
2960 * schedule() atomically, we ignore that path. Otherwise whine
2961 * if we are scheduling when we should not.
1da177e4 2962 */
192301e7 2963 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
dd41f596 2964 __schedule_bug(prev);
b3fbab05 2965 rcu_sleep_check();
dd41f596 2966
1da177e4
LT
2967 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2968
2d72376b 2969 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
2970}
2971
2972/*
2973 * Pick up the highest-prio task:
2974 */
2975static inline struct task_struct *
606dba2e 2976pick_next_task(struct rq *rq, struct task_struct *prev)
dd41f596 2977{
37e117c0 2978 const struct sched_class *class = &fair_sched_class;
dd41f596 2979 struct task_struct *p;
1da177e4
LT
2980
2981 /*
dd41f596
IM
2982 * Optimization: we know that if all tasks are in
2983 * the fair class we can call that function directly:
1da177e4 2984 */
37e117c0 2985 if (likely(prev->sched_class == class &&
38033c37 2986 rq->nr_running == rq->cfs.h_nr_running)) {
606dba2e 2987 p = fair_sched_class.pick_next_task(rq, prev);
6ccdc84b
PZ
2988 if (unlikely(p == RETRY_TASK))
2989 goto again;
2990
2991 /* assumes fair_sched_class->next == idle_sched_class */
2992 if (unlikely(!p))
2993 p = idle_sched_class.pick_next_task(rq, prev);
2994
2995 return p;
1da177e4
LT
2996 }
2997
37e117c0 2998again:
34f971f6 2999 for_each_class(class) {
606dba2e 3000 p = class->pick_next_task(rq, prev);
37e117c0
PZ
3001 if (p) {
3002 if (unlikely(p == RETRY_TASK))
3003 goto again;
dd41f596 3004 return p;
37e117c0 3005 }
dd41f596 3006 }
34f971f6
PZ
3007
3008 BUG(); /* the idle class will always have a runnable task */
dd41f596 3009}
1da177e4 3010
dd41f596 3011/*
c259e01a 3012 * __schedule() is the main scheduler function.
edde96ea
PE
3013 *
3014 * The main means of driving the scheduler and thus entering this function are:
3015 *
3016 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3017 *
3018 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3019 * paths. For example, see arch/x86/entry_64.S.
3020 *
3021 * To drive preemption between tasks, the scheduler sets the flag in timer
3022 * interrupt handler scheduler_tick().
3023 *
3024 * 3. Wakeups don't really cause entry into schedule(). They add a
3025 * task to the run-queue and that's it.
3026 *
3027 * Now, if the new task added to the run-queue preempts the current
3028 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3029 * called on the nearest possible occasion:
3030 *
3031 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3032 *
 3033 * - in syscall or exception context, at the next outermost
3034 * preempt_enable(). (this might be as soon as the wake_up()'s
3035 * spin_unlock()!)
3036 *
3037 * - in IRQ context, return from interrupt-handler to
3038 * preemptible context
3039 *
3040 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3041 * then at the next:
3042 *
3043 * - cond_resched() call
3044 * - explicit schedule() call
3045 * - return from syscall or exception to user-space
3046 * - return from interrupt-handler to user-space
bfd9b2b5 3047 *
b30f0e3f 3048 * WARNING: must be called with preemption disabled!
dd41f596 3049 */
c259e01a 3050static void __sched __schedule(void)
dd41f596
IM
3051{
3052 struct task_struct *prev, *next;
67ca7bde 3053 unsigned long *switch_count;
dd41f596 3054 struct rq *rq;
31656519 3055 int cpu;
dd41f596 3056
dd41f596
IM
3057 cpu = smp_processor_id();
3058 rq = cpu_rq(cpu);
38200cf2 3059 rcu_note_context_switch();
dd41f596 3060 prev = rq->curr;
dd41f596 3061
dd41f596 3062 schedule_debug(prev);
1da177e4 3063
31656519 3064 if (sched_feat(HRTICK))
f333fdc9 3065 hrtick_clear(rq);
8f4d37ec 3066
e0acd0a6
ON
3067 /*
3068 * Make sure that signal_pending_state()->signal_pending() below
3069 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3070 * done by the caller to avoid the race with signal_wake_up().
3071 */
3072 smp_mb__before_spinlock();
05fa785c 3073 raw_spin_lock_irq(&rq->lock);
cbce1a68 3074 lockdep_pin_lock(&rq->lock);
1da177e4 3075
9edfbfed
PZ
3076 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3077
246d86b5 3078 switch_count = &prev->nivcsw;
1da177e4 3079 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 3080 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3081 prev->state = TASK_RUNNING;
21aa9af0 3082 } else {
2acca55e
PZ
3083 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3084 prev->on_rq = 0;
3085
21aa9af0 3086 /*
2acca55e
PZ
3087 * If a worker went to sleep, notify and ask workqueue
3088 * whether it wants to wake up a task to maintain
3089 * concurrency.
21aa9af0
TH
3090 */
3091 if (prev->flags & PF_WQ_WORKER) {
3092 struct task_struct *to_wakeup;
3093
3094 to_wakeup = wq_worker_sleeping(prev, cpu);
3095 if (to_wakeup)
3096 try_to_wake_up_local(to_wakeup);
3097 }
21aa9af0 3098 }
dd41f596 3099 switch_count = &prev->nvcsw;
1da177e4
LT
3100 }
3101
9edfbfed 3102 if (task_on_rq_queued(prev))
606dba2e
PZ
3103 update_rq_clock(rq);
3104
3105 next = pick_next_task(rq, prev);
f26f9aff 3106 clear_tsk_need_resched(prev);
f27dde8d 3107 clear_preempt_need_resched();
9edfbfed 3108 rq->clock_skip_update = 0;
1da177e4 3109
1da177e4 3110 if (likely(prev != next)) {
1da177e4
LT
3111 rq->nr_switches++;
3112 rq->curr = next;
3113 ++*switch_count;
3114
dfa50b60
ON
3115 rq = context_switch(rq, prev, next); /* unlocks the rq */
3116 cpu = cpu_of(rq);
cbce1a68
PZ
3117 } else {
3118 lockdep_unpin_lock(&rq->lock);
05fa785c 3119 raw_spin_unlock_irq(&rq->lock);
cbce1a68 3120 }
1da177e4 3121
e3fca9e7 3122 balance_callback(rq);
1da177e4 3123}
c259e01a 3124
9c40cef2
TG
3125static inline void sched_submit_work(struct task_struct *tsk)
3126{
3c7d5184 3127 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
3128 return;
3129 /*
3130 * If we are going to sleep and we have plugged IO queued,
3131 * make sure to submit it to avoid deadlocks.
3132 */
3133 if (blk_needs_flush_plug(tsk))
3134 blk_schedule_flush_plug(tsk);
3135}
3136
722a9f92 3137asmlinkage __visible void __sched schedule(void)
c259e01a 3138{
9c40cef2
TG
3139 struct task_struct *tsk = current;
3140
3141 sched_submit_work(tsk);
bfd9b2b5 3142 do {
b30f0e3f 3143 preempt_disable();
bfd9b2b5 3144 __schedule();
b30f0e3f 3145 sched_preempt_enable_no_resched();
bfd9b2b5 3146 } while (need_resched());
c259e01a 3147}
1da177e4
LT
3148EXPORT_SYMBOL(schedule);
3149
91d1aa43 3150#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 3151asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
3152{
3153 /*
3154 * If we come here after a random call to set_need_resched(),
3155 * or we have been woken up remotely but the IPI has not yet arrived,
3156 * we haven't yet exited the RCU idle mode. Do it here manually until
3157 * we find a better solution.
7cc78f8f
AL
3158 *
3159 * NB: There are buggy callers of this function. Ideally we
c467ea76 3160 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 3161 * too frequently to make sense yet.
20ab65e3 3162 */
7cc78f8f 3163 enum ctx_state prev_state = exception_enter();
20ab65e3 3164 schedule();
7cc78f8f 3165 exception_exit(prev_state);
20ab65e3
FW
3166}
3167#endif
3168
c5491ea7
TG
3169/**
3170 * schedule_preempt_disabled - called with preemption disabled
3171 *
3172 * Returns with preemption disabled. Note: preempt_count must be 1
3173 */
3174void __sched schedule_preempt_disabled(void)
3175{
ba74c144 3176 sched_preempt_enable_no_resched();
c5491ea7
TG
3177 schedule();
3178 preempt_disable();
3179}
3180
06b1f808 3181static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
3182{
3183 do {
b30f0e3f 3184 preempt_active_enter();
a18b5d01 3185 __schedule();
b30f0e3f 3186 preempt_active_exit();
a18b5d01
FW
3187
3188 /*
3189 * Check again in case we missed a preemption opportunity
3190 * between schedule and now.
3191 */
a18b5d01
FW
3192 } while (need_resched());
3193}
3194
1da177e4
LT
3195#ifdef CONFIG_PREEMPT
3196/*
2ed6e34f 3197 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3198 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3199 * occur there and call schedule directly.
3200 */
722a9f92 3201asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 3202{
1da177e4
LT
3203 /*
3204 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3205 * we do not want to preempt the current task. Just return.
1da177e4 3206 */
fbb00b56 3207 if (likely(!preemptible()))
1da177e4
LT
3208 return;
3209
a18b5d01 3210 preempt_schedule_common();
1da177e4 3211}
376e2424 3212NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 3213EXPORT_SYMBOL(preempt_schedule);
009f60e2 3214
009f60e2 3215/**
4eaca0a8 3216 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
3217 *
3218 * The tracing infrastructure uses preempt_enable_notrace to prevent
3219 * recursion and tracing preempt enabling caused by the tracing
3220 * infrastructure itself. But as tracing can happen in areas coming
3221 * from userspace or just about to enter userspace, a preempt enable
3222 * can occur before user_exit() is called. This will cause the scheduler
3223 * to be called when the system is still in usermode.
3224 *
3225 * To prevent this, the preempt_enable_notrace will use this function
3226 * instead of preempt_schedule() to exit user context if needed before
3227 * calling the scheduler.
3228 */
4eaca0a8 3229asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
3230{
3231 enum ctx_state prev_ctx;
3232
3233 if (likely(!preemptible()))
3234 return;
3235
3236 do {
be690035
FW
3237 /*
 3238 * Use raw __preempt_count() ops that don't call any functions.
 3239 * We can't call functions before disabling preemption, which
 3240 * disarms preemption tracing recursions.
3241 */
3242 __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
3243 barrier();
009f60e2
ON
3244 /*
3245 * Needs preempt disabled in case user_exit() is traced
3246 * and the tracer calls preempt_enable_notrace() causing
3247 * an infinite recursion.
3248 */
3249 prev_ctx = exception_enter();
3250 __schedule();
3251 exception_exit(prev_ctx);
3252
009f60e2 3253 barrier();
be690035 3254 __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
009f60e2
ON
3255 } while (need_resched());
3256}
4eaca0a8 3257EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 3258
32e475d7 3259#endif /* CONFIG_PREEMPT */
1da177e4
LT
3260
3261/*
2ed6e34f 3262 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3263 * off of irq context.
 3264 * Note that this is called and returns with irqs disabled. This will
3265 * protect us against recursive calling from irq.
3266 */
722a9f92 3267asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 3268{
b22366cd 3269 enum ctx_state prev_state;
6478d880 3270
2ed6e34f 3271 /* Catch callers which need to be fixed */
f27dde8d 3272 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 3273
b22366cd
FW
3274 prev_state = exception_enter();
3275
3a5c359a 3276 do {
b30f0e3f 3277 preempt_active_enter();
3a5c359a 3278 local_irq_enable();
c259e01a 3279 __schedule();
3a5c359a 3280 local_irq_disable();
b30f0e3f 3281 preempt_active_exit();
5ed0cec0 3282 } while (need_resched());
b22366cd
FW
3283
3284 exception_exit(prev_state);
1da177e4
LT
3285}
3286
63859d4f 3287int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3288 void *key)
1da177e4 3289{
63859d4f 3290 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3291}
1da177e4
LT
3292EXPORT_SYMBOL(default_wake_function);
3293
b29739f9
IM
3294#ifdef CONFIG_RT_MUTEXES
3295
3296/*
3297 * rt_mutex_setprio - set the current priority of a task
3298 * @p: task
3299 * @prio: prio value (kernel-internal form)
3300 *
3301 * This function changes the 'effective' priority of a task. It does
3302 * not touch ->normal_prio like __setscheduler().
3303 *
c365c292
TG
3304 * Used by the rt_mutex code to implement priority inheritance
3305 * logic. Call site only calls if the priority of the task changed.
b29739f9 3306 */
36c8b586 3307void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3308{
da0c1e65 3309 int oldprio, queued, running, enqueue_flag = 0;
70b97a7f 3310 struct rq *rq;
83ab0aa0 3311 const struct sched_class *prev_class;
b29739f9 3312
aab03e05 3313 BUG_ON(prio > MAX_PRIO);
b29739f9 3314
0122ec5b 3315 rq = __task_rq_lock(p);
b29739f9 3316
1c4dd99b
TG
3317 /*
 3318 * Idle task boosting is a no-no in general. There is one
 3319 * exception, when PREEMPT_RT and NOHZ are active:
3320 *
3321 * The idle task calls get_next_timer_interrupt() and holds
3322 * the timer wheel base->lock on the CPU and another CPU wants
3323 * to access the timer (probably to cancel it). We can safely
3324 * ignore the boosting request, as the idle CPU runs this code
3325 * with interrupts disabled and will complete the lock
3326 * protected section without being interrupted. So there is no
3327 * real need to boost.
3328 */
3329 if (unlikely(p == rq->idle)) {
3330 WARN_ON(p != rq->curr);
3331 WARN_ON(p->pi_blocked_on);
3332 goto out_unlock;
3333 }
3334
a8027073 3335 trace_sched_pi_setprio(p, prio);
d5f9f942 3336 oldprio = p->prio;
83ab0aa0 3337 prev_class = p->sched_class;
da0c1e65 3338 queued = task_on_rq_queued(p);
051a1d1a 3339 running = task_current(rq, p);
da0c1e65 3340 if (queued)
69be72c1 3341 dequeue_task(rq, p, 0);
0e1f3483 3342 if (running)
f3cd1c4e 3343 put_prev_task(rq, p);
dd41f596 3344
2d3d891d
DF
3345 /*
 3346 * Boosting conditions are:
3347 * 1. -rt task is running and holds mutex A
3348 * --> -dl task blocks on mutex A
3349 *
3350 * 2. -dl task is running and holds mutex A
3351 * --> -dl task blocks on mutex A and could preempt the
3352 * running task
3353 */
3354 if (dl_prio(prio)) {
466af29b
ON
3355 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3356 if (!dl_prio(p->normal_prio) ||
3357 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 3358 p->dl.dl_boosted = 1;
2d3d891d
DF
3359 enqueue_flag = ENQUEUE_REPLENISH;
3360 } else
3361 p->dl.dl_boosted = 0;
aab03e05 3362 p->sched_class = &dl_sched_class;
2d3d891d
DF
3363 } else if (rt_prio(prio)) {
3364 if (dl_prio(oldprio))
3365 p->dl.dl_boosted = 0;
3366 if (oldprio < prio)
3367 enqueue_flag = ENQUEUE_HEAD;
dd41f596 3368 p->sched_class = &rt_sched_class;
2d3d891d
DF
3369 } else {
3370 if (dl_prio(oldprio))
3371 p->dl.dl_boosted = 0;
746db944
BS
3372 if (rt_prio(oldprio))
3373 p->rt.timeout = 0;
dd41f596 3374 p->sched_class = &fair_sched_class;
2d3d891d 3375 }
dd41f596 3376
b29739f9
IM
3377 p->prio = prio;
3378
0e1f3483
HS
3379 if (running)
3380 p->sched_class->set_curr_task(rq);
da0c1e65 3381 if (queued)
2d3d891d 3382 enqueue_task(rq, p, enqueue_flag);
cb469845 3383
da7a735e 3384 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3385out_unlock:
4c9a4bc8 3386 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 3387 __task_rq_unlock(rq);
4c9a4bc8
PZ
3388
3389 balance_callback(rq);
3390 preempt_enable();
b29739f9 3391}
b29739f9 3392#endif
d50dde5a 3393
36c8b586 3394void set_user_nice(struct task_struct *p, long nice)
1da177e4 3395{
da0c1e65 3396 int old_prio, delta, queued;
1da177e4 3397 unsigned long flags;
70b97a7f 3398 struct rq *rq;
1da177e4 3399
75e45d51 3400 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
3401 return;
3402 /*
 3403 * We have to be careful: if called from sys_setpriority(),
3404 * the task might be in the middle of scheduling on another CPU.
3405 */
3406 rq = task_rq_lock(p, &flags);
3407 /*
3408 * The RT priorities are set via sched_setscheduler(), but we still
3409 * allow the 'normal' nice value to be set - but as expected
 3410 * it won't have any effect on scheduling until the task is
aab03e05 3411 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 3412 */
aab03e05 3413 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
3414 p->static_prio = NICE_TO_PRIO(nice);
3415 goto out_unlock;
3416 }
da0c1e65
KT
3417 queued = task_on_rq_queued(p);
3418 if (queued)
69be72c1 3419 dequeue_task(rq, p, 0);
1da177e4 3420
1da177e4 3421 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3422 set_load_weight(p);
b29739f9
IM
3423 old_prio = p->prio;
3424 p->prio = effective_prio(p);
3425 delta = p->prio - old_prio;
1da177e4 3426
da0c1e65 3427 if (queued) {
371fd7e7 3428 enqueue_task(rq, p, 0);
1da177e4 3429 /*
d5f9f942
AM
3430 * If the task increased its priority or is running and
3431 * lowered its priority, then reschedule its CPU:
1da177e4 3432 */
d5f9f942 3433 if (delta < 0 || (delta > 0 && task_running(rq, p)))
8875125e 3434 resched_curr(rq);
1da177e4
LT
3435 }
3436out_unlock:
0122ec5b 3437 task_rq_unlock(rq, p, &flags);
1da177e4 3438}
1da177e4
LT
3439EXPORT_SYMBOL(set_user_nice);
3440
e43379f1
MM
3441/*
3442 * can_nice - check if a task can reduce its nice value
3443 * @p: task
3444 * @nice: nice value
3445 */
36c8b586 3446int can_nice(const struct task_struct *p, const int nice)
e43379f1 3447{
024f4747 3448 /* convert nice value [19,-20] to rlimit style value [1,40] */
7aa2c016 3449 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 3450
78d7d407 3451 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3452 capable(CAP_SYS_NICE));
3453}
3454
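/*
 * Worked example (illustrative): lowering nice to -10 maps to
 * nice_to_rlimit(-10) == 30, so the change is only allowed if the
 * task's RLIMIT_NICE is at least 30 or the caller has CAP_SYS_NICE.
 * Callers only consult can_nice() when the nice value is being
 * lowered; raising it (becoming "nicer") needs no privilege.
 */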
1da177e4
LT
3455#ifdef __ARCH_WANT_SYS_NICE
3456
3457/*
3458 * sys_nice - change the priority of the current process.
3459 * @increment: priority increment
3460 *
3461 * sys_setpriority is a more generic, but much slower function that
3462 * does similar things.
3463 */
5add95d4 3464SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3465{
48f24c4d 3466 long nice, retval;
1da177e4
LT
3467
3468 /*
3469 * Setpriority might change our priority at the same moment.
3470 * We don't have to worry. Conceptually one call occurs first
3471 * and we have a single winner.
3472 */
a9467fa3 3473 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 3474 nice = task_nice(current) + increment;
1da177e4 3475
a9467fa3 3476 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
3477 if (increment < 0 && !can_nice(current, nice))
3478 return -EPERM;
3479
1da177e4
LT
3480 retval = security_task_setnice(current, nice);
3481 if (retval)
3482 return retval;
3483
3484 set_user_nice(current, nice);
3485 return 0;
3486}
3487
3488#endif
3489
3490/**
3491 * task_prio - return the priority value of a given task.
3492 * @p: the task in question.
3493 *
e69f6186 3494 * Return: The priority value as seen by users in /proc.
1da177e4
LT
3495 * RT tasks are offset by -200. Normal tasks are centered
3496 * around 0, value goes from -16 to +15.
3497 */
36c8b586 3498int task_prio(const struct task_struct *p)
1da177e4
LT
3499{
3500 return p->prio - MAX_RT_PRIO;
3501}
3502
1da177e4
LT
3503/**
3504 * idle_cpu - is a given cpu idle currently?
3505 * @cpu: the processor in question.
e69f6186
YB
3506 *
3507 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
3508 */
3509int idle_cpu(int cpu)
3510{
908a3283
TG
3511 struct rq *rq = cpu_rq(cpu);
3512
3513 if (rq->curr != rq->idle)
3514 return 0;
3515
3516 if (rq->nr_running)
3517 return 0;
3518
3519#ifdef CONFIG_SMP
3520 if (!llist_empty(&rq->wake_list))
3521 return 0;
3522#endif
3523
3524 return 1;
1da177e4
LT
3525}
3526
1da177e4
LT
3527/**
3528 * idle_task - return the idle task for a given cpu.
3529 * @cpu: the processor in question.
e69f6186
YB
3530 *
3531 * Return: The idle task for the cpu @cpu.
1da177e4 3532 */
36c8b586 3533struct task_struct *idle_task(int cpu)
1da177e4
LT
3534{
3535 return cpu_rq(cpu)->idle;
3536}
3537
3538/**
3539 * find_process_by_pid - find a process with a matching PID value.
3540 * @pid: the pid in question.
e69f6186
YB
3541 *
3542 * The task of @pid, if found. %NULL otherwise.
1da177e4 3543 */
a9957449 3544static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3545{
228ebcbe 3546 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3547}
3548
aab03e05
DF
3549/*
3550 * This function initializes the sched_dl_entity of a task that is
3551 * becoming a SCHED_DEADLINE task.
3552 *
3553 * Only the static values are considered here, the actual runtime and the
3554 * absolute deadline will be properly calculated when the task is enqueued
3555 * for the first time with its new policy.
3556 */
3557static void
3558__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3559{
3560 struct sched_dl_entity *dl_se = &p->dl;
3561
aab03e05
DF
3562 dl_se->dl_runtime = attr->sched_runtime;
3563 dl_se->dl_deadline = attr->sched_deadline;
755378a4 3564 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
aab03e05 3565 dl_se->flags = attr->sched_flags;
332ac17e 3566 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
40767b0d
PZ
3567
3568 /*
3569 * Changing the parameters of a task is 'tricky' and we're not doing
3570 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3571 *
3572 * What we SHOULD do is delay the bandwidth release until the 0-lag
3573 * point. This would include retaining the task_struct until that time
3574 * and change dl_overflow() to not immediately decrement the current
3575 * amount.
3576 *
3577 * Instead we retain the current runtime/deadline and let the new
3578 * parameters take effect after the current reservation period lapses.
3579 * This is safe (albeit pessimistic) because the 0-lag point is always
3580 * before the current scheduling deadline.
3581 *
3582 * We can still have temporary overloads because we do not delay the
3583 * change in bandwidth until that time; so admission control is
3584 * not on the safe side. It does however guarantee tasks will never
3585 * consume more than promised.
3586 */
aab03e05
DF
3587}
3588
c13db6b1
SR
3589/*
3590 * sched_setparam() passes in -1 for its policy, to let the functions
3591 * it calls know not to change it.
3592 */
3593#define SETPARAM_POLICY -1
3594
c365c292
TG
3595static void __setscheduler_params(struct task_struct *p,
3596 const struct sched_attr *attr)
1da177e4 3597{
d50dde5a
DF
3598 int policy = attr->sched_policy;
3599
c13db6b1 3600 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
3601 policy = p->policy;
3602
1da177e4 3603 p->policy = policy;
d50dde5a 3604
aab03e05
DF
3605 if (dl_policy(policy))
3606 __setparam_dl(p, attr);
39fd8fd2 3607 else if (fair_policy(policy))
d50dde5a
DF
3608 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3609
39fd8fd2
PZ
3610 /*
3611 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3612 * !rt_policy. Always setting this ensures that things like
3613 * getparam()/getattr() don't report silly values for !rt tasks.
3614 */
3615 p->rt_priority = attr->sched_priority;
383afd09 3616 p->normal_prio = normal_prio(p);
c365c292
TG
3617 set_load_weight(p);
3618}
39fd8fd2 3619
c365c292
TG
3620/* Actually do priority change: must hold pi & rq lock. */
3621static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 3622 const struct sched_attr *attr, bool keep_boost)
c365c292
TG
3623{
3624 __setscheduler_params(p, attr);
d50dde5a 3625
383afd09 3626 /*
0782e63b
TG
3627 * Keep a potential priority boosting if called from
3628 * sched_setscheduler().
383afd09 3629 */
0782e63b
TG
3630 if (keep_boost)
3631 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3632 else
3633 p->prio = normal_prio(p);
383afd09 3634
aab03e05
DF
3635 if (dl_prio(p->prio))
3636 p->sched_class = &dl_sched_class;
3637 else if (rt_prio(p->prio))
ffd44db5
PZ
3638 p->sched_class = &rt_sched_class;
3639 else
3640 p->sched_class = &fair_sched_class;
1da177e4 3641}
aab03e05
DF
3642
3643static void
3644__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3645{
3646 struct sched_dl_entity *dl_se = &p->dl;
3647
3648 attr->sched_priority = p->rt_priority;
3649 attr->sched_runtime = dl_se->dl_runtime;
3650 attr->sched_deadline = dl_se->dl_deadline;
755378a4 3651 attr->sched_period = dl_se->dl_period;
aab03e05
DF
3652 attr->sched_flags = dl_se->flags;
3653}
3654
3655/*
3656 * This function validates the new parameters of a -deadline task.
3657 * We require the deadline to be non-zero and greater than or equal
755378a4 3658 * to the runtime, and the period to be either zero or greater than
332ac17e 3659 * or equal to the deadline. Furthermore, we have to be sure that
b0827819
JL
3660 * user parameters are above the internal resolution of 1us (we
3661 * check sched_runtime only since it is always the smaller one) and
3662 * below 2^63 ns (we have to check both sched_deadline and
3663 * sched_period, as the latter can be zero).
aab03e05
DF
3664 */
3665static bool
3666__checkparam_dl(const struct sched_attr *attr)
3667{
b0827819
JL
3668 /* deadline != 0 */
3669 if (attr->sched_deadline == 0)
3670 return false;
3671
3672 /*
3673 * Since we truncate DL_SCALE bits, make sure we're at least
3674 * that big.
3675 */
3676 if (attr->sched_runtime < (1ULL << DL_SCALE))
3677 return false;
3678
3679 /*
3680 * Since we use the MSB for wrap-around and sign issues, make
3681 * sure it's not set (mind that period can be equal to zero).
3682 */
3683 if (attr->sched_deadline & (1ULL << 63) ||
3684 attr->sched_period & (1ULL << 63))
3685 return false;
3686
3687 /* runtime <= deadline <= period (if period != 0) */
3688 if ((attr->sched_period != 0 &&
3689 attr->sched_period < attr->sched_deadline) ||
3690 attr->sched_deadline < attr->sched_runtime)
3691 return false;
3692
3693 return true;
aab03e05
DF
3694}
3695
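
As a concrete illustration of the constraints above, here is a hedged sketch of deadline parameters that would pass __checkparam_dl(); the numbers are arbitrary examples, not recommended defaults.

	/*
	 * 10ms of runtime every 100ms with a 30ms relative deadline.
	 * All values are in nanoseconds, above the 1us resolution,
	 * below 2^63, and satisfy runtime <= deadline <= period.
	 */
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 30 * 1000 * 1000,	/* 30 ms */
		.sched_period	= 100 * 1000 * 1000,	/* 100 ms */
	};
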
c69e8d9c
DH
3696/*
3697 * check the target process has a UID that matches the current process's
3698 */
3699static bool check_same_owner(struct task_struct *p)
3700{
3701 const struct cred *cred = current_cred(), *pcred;
3702 bool match;
3703
3704 rcu_read_lock();
3705 pcred = __task_cred(p);
9c806aa0
EB
3706 match = (uid_eq(cred->euid, pcred->euid) ||
3707 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
3708 rcu_read_unlock();
3709 return match;
3710}
3711
75381608
WL
3712static bool dl_param_changed(struct task_struct *p,
3713 const struct sched_attr *attr)
3714{
3715 struct sched_dl_entity *dl_se = &p->dl;
3716
3717 if (dl_se->dl_runtime != attr->sched_runtime ||
3718 dl_se->dl_deadline != attr->sched_deadline ||
3719 dl_se->dl_period != attr->sched_period ||
3720 dl_se->flags != attr->sched_flags)
3721 return true;
3722
3723 return false;
3724}
3725
d50dde5a
DF
3726static int __sched_setscheduler(struct task_struct *p,
3727 const struct sched_attr *attr,
dbc7f069 3728 bool user, bool pi)
1da177e4 3729{
383afd09
SR
3730 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3731 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 3732 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 3733 int new_effective_prio, policy = attr->sched_policy;
1da177e4 3734 unsigned long flags;
83ab0aa0 3735 const struct sched_class *prev_class;
70b97a7f 3736 struct rq *rq;
ca94c442 3737 int reset_on_fork;
1da177e4 3738
66e5393a
SR
3739 /* may grab non-irq protected spin_locks */
3740 BUG_ON(in_interrupt());
1da177e4
LT
3741recheck:
3742 /* double check policy once rq lock held */
ca94c442
LP
3743 if (policy < 0) {
3744 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3745 policy = oldpolicy = p->policy;
ca94c442 3746 } else {
7479f3c9 3747 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 3748
aab03e05
DF
3749 if (policy != SCHED_DEADLINE &&
3750 policy != SCHED_FIFO && policy != SCHED_RR &&
ca94c442
LP
3751 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3752 policy != SCHED_IDLE)
3753 return -EINVAL;
3754 }
3755
7479f3c9
PZ
3756 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3757 return -EINVAL;
3758
1da177e4
LT
3759 /*
3760 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3761 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3762 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 3763 */
0bb040a4 3764 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 3765 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 3766 return -EINVAL;
aab03e05
DF
3767 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3768 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
3769 return -EINVAL;
3770
37e4ab3f
OC
3771 /*
3772 * Allow unprivileged RT tasks to decrease priority:
3773 */
961ccddd 3774 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 3775 if (fair_policy(policy)) {
d0ea0268 3776 if (attr->sched_nice < task_nice(p) &&
eaad4513 3777 !can_nice(p, attr->sched_nice))
d50dde5a
DF
3778 return -EPERM;
3779 }
3780
e05606d3 3781 if (rt_policy(policy)) {
a44702e8
ON
3782 unsigned long rlim_rtprio =
3783 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
3784
3785 /* can't set/change the rt policy */
3786 if (policy != p->policy && !rlim_rtprio)
3787 return -EPERM;
3788
3789 /* can't increase priority */
d50dde5a
DF
3790 if (attr->sched_priority > p->rt_priority &&
3791 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
3792 return -EPERM;
3793 }
c02aa73b 3794
d44753b8
JL
3795 /*
3796 * Can't set/change SCHED_DEADLINE policy at all for now
3797 * (safest behavior); in the future we would like to allow
3798 * unprivileged DL tasks to increase their relative deadline
3799 * or reduce their runtime (both ways reducing utilization)
3800 */
3801 if (dl_policy(policy))
3802 return -EPERM;
3803
dd41f596 3804 /*
c02aa73b
DH
3805 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3806 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 3807 */
c02aa73b 3808 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
d0ea0268 3809 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
3810 return -EPERM;
3811 }
5fe1d75f 3812
37e4ab3f 3813 /* can't change other user's priorities */
c69e8d9c 3814 if (!check_same_owner(p))
37e4ab3f 3815 return -EPERM;
ca94c442
LP
3816
3817 /* Normal users shall not reset the sched_reset_on_fork flag */
3818 if (p->sched_reset_on_fork && !reset_on_fork)
3819 return -EPERM;
37e4ab3f 3820 }
1da177e4 3821
725aad24 3822 if (user) {
b0ae1981 3823 retval = security_task_setscheduler(p);
725aad24
JF
3824 if (retval)
3825 return retval;
3826 }
3827
b29739f9
IM
3828 /*
3829 * make sure no PI-waiters arrive (or leave) while we are
3830 * changing the priority of the task:
0122ec5b 3831 *
25985edc 3832 * To be able to change p->policy safely, the appropriate
1da177e4
LT
3833 * runqueue lock must be held.
3834 */
0122ec5b 3835 rq = task_rq_lock(p, &flags);
dc61b1d6 3836
34f971f6
PZ
3837 /*
3838 * Changing the policy of the stop threads is a very bad idea
3839 */
3840 if (p == rq->stop) {
0122ec5b 3841 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
3842 return -EINVAL;
3843 }
3844
a51e9198 3845 /*
d6b1e911
TG
3846 * If not changing anything there's no need to proceed further,
3847 * but store a possible modification of reset_on_fork.
a51e9198 3848 */
d50dde5a 3849 if (unlikely(policy == p->policy)) {
d0ea0268 3850 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
3851 goto change;
3852 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3853 goto change;
75381608 3854 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 3855 goto change;
d50dde5a 3856
d6b1e911 3857 p->sched_reset_on_fork = reset_on_fork;
45afb173 3858 task_rq_unlock(rq, p, &flags);
a51e9198
DF
3859 return 0;
3860 }
d50dde5a 3861change:
a51e9198 3862
dc61b1d6 3863 if (user) {
332ac17e 3864#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
3865 /*
3866 * Do not allow realtime tasks into groups that have no runtime
3867 * assigned.
3868 */
3869 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
3870 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3871 !task_group_is_autogroup(task_group(p))) {
0122ec5b 3872 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
3873 return -EPERM;
3874 }
dc61b1d6 3875#endif
332ac17e
DF
3876#ifdef CONFIG_SMP
3877 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3878 cpumask_t *span = rq->rd->span;
332ac17e
DF
3879
3880 /*
3881 * Don't allow tasks with an affinity mask smaller than
3882 * the entire root_domain to become SCHED_DEADLINE. We
3883 * will also fail if there's no bandwidth available.
3884 */
e4099a5e
PZ
3885 if (!cpumask_subset(span, &p->cpus_allowed) ||
3886 rq->rd->dl_bw.bw == 0) {
332ac17e
DF
3887 task_rq_unlock(rq, p, &flags);
3888 return -EPERM;
3889 }
3890 }
3891#endif
3892 }
dc61b1d6 3893
1da177e4
LT
3894 /* recheck policy now with rq lock held */
3895 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3896 policy = oldpolicy = -1;
0122ec5b 3897 task_rq_unlock(rq, p, &flags);
1da177e4
LT
3898 goto recheck;
3899 }
332ac17e
DF
3900
3901 /*
3902 * If setscheduling to SCHED_DEADLINE (or changing the parameters
3903 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
3904 * is available.
3905 */
e4099a5e 3906 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
332ac17e
DF
3907 task_rq_unlock(rq, p, &flags);
3908 return -EBUSY;
3909 }
3910
c365c292
TG
3911 p->sched_reset_on_fork = reset_on_fork;
3912 oldprio = p->prio;
3913
dbc7f069
PZ
3914 if (pi) {
3915 /*
3916 * Take priority boosted tasks into account. If the new
3917 * effective priority is unchanged, we just store the new
3918 * normal parameters and do not touch the scheduler class and
3919 * the runqueue. This will be done when the task deboosts
3920 * itself.
3921 */
3922 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
3923 if (new_effective_prio == oldprio) {
3924 __setscheduler_params(p, attr);
3925 task_rq_unlock(rq, p, &flags);
3926 return 0;
3927 }
c365c292
TG
3928 }
3929
da0c1e65 3930 queued = task_on_rq_queued(p);
051a1d1a 3931 running = task_current(rq, p);
da0c1e65 3932 if (queued)
4ca9b72b 3933 dequeue_task(rq, p, 0);
0e1f3483 3934 if (running)
f3cd1c4e 3935 put_prev_task(rq, p);
f6b53205 3936
83ab0aa0 3937 prev_class = p->sched_class;
dbc7f069 3938 __setscheduler(rq, p, attr, pi);
f6b53205 3939
0e1f3483
HS
3940 if (running)
3941 p->sched_class->set_curr_task(rq);
da0c1e65 3942 if (queued) {
81a44c54
TG
3943 /*
3944 * We enqueue to tail when the priority of a task is
3945 * increased (user space view).
3946 */
3947 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
3948 }
cb469845 3949
da7a735e 3950 check_class_changed(rq, p, prev_class, oldprio);
4c9a4bc8 3951 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 3952 task_rq_unlock(rq, p, &flags);
b29739f9 3953
dbc7f069
PZ
3954 if (pi)
3955 rt_mutex_adjust_pi(p);
95e02ca9 3956
4c9a4bc8
PZ
3957 /*
3958 * Run balance callbacks after we've adjusted the PI chain.
3959 */
3960 balance_callback(rq);
3961 preempt_enable();
95e02ca9 3962
1da177e4
LT
3963 return 0;
3964}
961ccddd 3965
7479f3c9
PZ
3966static int _sched_setscheduler(struct task_struct *p, int policy,
3967 const struct sched_param *param, bool check)
3968{
3969 struct sched_attr attr = {
3970 .sched_policy = policy,
3971 .sched_priority = param->sched_priority,
3972 .sched_nice = PRIO_TO_NICE(p->static_prio),
3973 };
3974
c13db6b1
SR
3975 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
3976 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
3977 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
3978 policy &= ~SCHED_RESET_ON_FORK;
3979 attr.sched_policy = policy;
3980 }
3981
dbc7f069 3982 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 3983}
961ccddd
RR
3984/**
3985 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3986 * @p: the task in question.
3987 * @policy: new policy.
3988 * @param: structure containing the new RT priority.
3989 *
e69f6186
YB
3990 * Return: 0 on success. An error code otherwise.
3991 *
961ccddd
RR
3992 * NOTE that the task may be already dead.
3993 */
3994int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 3995 const struct sched_param *param)
961ccddd 3996{
7479f3c9 3997 return _sched_setscheduler(p, policy, param, true);
961ccddd 3998}
1da177e4
LT
3999EXPORT_SYMBOL_GPL(sched_setscheduler);
4000
d50dde5a
DF
4001int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4002{
dbc7f069 4003 return __sched_setscheduler(p, attr, true, true);
d50dde5a
DF
4004}
4005EXPORT_SYMBOL_GPL(sched_setattr);
4006
961ccddd
RR
4007/**
4008 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4009 * @p: the task in question.
4010 * @policy: new policy.
4011 * @param: structure containing the new RT priority.
4012 *
4013 * Just like sched_setscheduler, only don't bother checking if the
4014 * current context has permission. For example, this is needed in
4015 * stop_machine(): we create temporary high priority worker threads,
4016 * but our caller might not have that capability.
e69f6186
YB
4017 *
4018 * Return: 0 on success. An error code otherwise.
961ccddd
RR
4019 */
4020int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4021 const struct sched_param *param)
961ccddd 4022{
7479f3c9 4023 return _sched_setscheduler(p, policy, param, false);
961ccddd
RR
4024}
4025
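
For illustration, a hedged kernel-side sketch of how a subsystem might use sched_setscheduler_nocheck() on one of its kthreads, in the same spirit as the stop_machine() example in the comment above; 'worker_task' is a hypothetical task pointer, not something defined in this file.

	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 };

	/*
	 * Promote the (hypothetical) worker thread to SCHED_FIFO without a
	 * CAP_SYS_NICE check, since the caller's credentials are irrelevant.
	 */
	sched_setscheduler_nocheck(worker_task, SCHED_FIFO, &param);
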
95cdf3b7
IM
4026static int
4027do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4028{
1da177e4
LT
4029 struct sched_param lparam;
4030 struct task_struct *p;
36c8b586 4031 int retval;
1da177e4
LT
4032
4033 if (!param || pid < 0)
4034 return -EINVAL;
4035 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4036 return -EFAULT;
5fe1d75f
ON
4037
4038 rcu_read_lock();
4039 retval = -ESRCH;
1da177e4 4040 p = find_process_by_pid(pid);
5fe1d75f
ON
4041 if (p != NULL)
4042 retval = sched_setscheduler(p, policy, &lparam);
4043 rcu_read_unlock();
36c8b586 4044
1da177e4
LT
4045 return retval;
4046}
4047
d50dde5a
DF
4048/*
4049 * Mimics kernel/events/core.c perf_copy_attr().
4050 */
4051static int sched_copy_attr(struct sched_attr __user *uattr,
4052 struct sched_attr *attr)
4053{
4054 u32 size;
4055 int ret;
4056
4057 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4058 return -EFAULT;
4059
4060 /*
4061 * zero the full structure, so that a short copy will be nice.
4062 */
4063 memset(attr, 0, sizeof(*attr));
4064
4065 ret = get_user(size, &uattr->size);
4066 if (ret)
4067 return ret;
4068
4069 if (size > PAGE_SIZE) /* silly large */
4070 goto err_size;
4071
4072 if (!size) /* abi compat */
4073 size = SCHED_ATTR_SIZE_VER0;
4074
4075 if (size < SCHED_ATTR_SIZE_VER0)
4076 goto err_size;
4077
4078 /*
4079 * If we're handed a bigger struct than we know of,
4080 * ensure all the unknown bits are 0 - i.e. new
4081 * user-space does not rely on any kernel feature
4082 * extensions we don't know about yet.
4083 */
4084 if (size > sizeof(*attr)) {
4085 unsigned char __user *addr;
4086 unsigned char __user *end;
4087 unsigned char val;
4088
4089 addr = (void __user *)uattr + sizeof(*attr);
4090 end = (void __user *)uattr + size;
4091
4092 for (; addr < end; addr++) {
4093 ret = get_user(val, addr);
4094 if (ret)
4095 return ret;
4096 if (val)
4097 goto err_size;
4098 }
4099 size = sizeof(*attr);
4100 }
4101
4102 ret = copy_from_user(attr, uattr, size);
4103 if (ret)
4104 return -EFAULT;
4105
4106 /*
4107 * XXX: do we want to be lenient like existing syscalls; or do we want
4108 * to be strict and return an error on out-of-bounds values?
4109 */
75e45d51 4110 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 4111
e78c7bca 4112 return 0;
d50dde5a
DF
4113
4114err_size:
4115 put_user(sizeof(*attr), &uattr->size);
e78c7bca 4116 return -E2BIG;
d50dde5a
DF
4117}
4118
1da177e4
LT
4119/**
4120 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4121 * @pid: the pid in question.
4122 * @policy: new policy.
4123 * @param: structure containing the new RT priority.
e69f6186
YB
4124 *
4125 * Return: 0 on success. An error code otherwise.
1da177e4 4126 */
5add95d4
HC
4127SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4128 struct sched_param __user *, param)
1da177e4 4129{
c21761f1
JB
4130 /* negative values for policy are not valid */
4131 if (policy < 0)
4132 return -EINVAL;
4133
1da177e4
LT
4134 return do_sched_setscheduler(pid, policy, param);
4135}
4136
4137/**
4138 * sys_sched_setparam - set/change the RT priority of a thread
4139 * @pid: the pid in question.
4140 * @param: structure containing the new RT priority.
e69f6186
YB
4141 *
4142 * Return: 0 on success. An error code otherwise.
1da177e4 4143 */
5add95d4 4144SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4145{
c13db6b1 4146 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
4147}
4148
d50dde5a
DF
4149/**
4150 * sys_sched_setattr - same as above, but with extended sched_attr
4151 * @pid: the pid in question.
5778fccf 4152 * @uattr: structure containing the extended parameters.
db66d756 4153 * @flags: for future extension.
d50dde5a 4154 */
6d35ab48
PZ
4155SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4156 unsigned int, flags)
d50dde5a
DF
4157{
4158 struct sched_attr attr;
4159 struct task_struct *p;
4160 int retval;
4161
6d35ab48 4162 if (!uattr || pid < 0 || flags)
d50dde5a
DF
4163 return -EINVAL;
4164
143cf23d
MK
4165 retval = sched_copy_attr(uattr, &attr);
4166 if (retval)
4167 return retval;
d50dde5a 4168
b14ed2c2 4169 if ((int)attr.sched_policy < 0)
dbdb2275 4170 return -EINVAL;
d50dde5a
DF
4171
4172 rcu_read_lock();
4173 retval = -ESRCH;
4174 p = find_process_by_pid(pid);
4175 if (p != NULL)
4176 retval = sched_setattr(p, &attr);
4177 rcu_read_unlock();
4178
4179 return retval;
4180}
4181
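
A minimal userspace sketch of invoking this syscall, assuming a libc without a dedicated wrapper; the structure below mirrors the VER0 layout of struct sched_attr, and SYS_sched_setattr is assumed to be provided by the toolchain's headers.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

struct sched_attr_user {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int set_deadline(pid_t pid)
{
	struct sched_attr_user attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = 6;		/* SCHED_DEADLINE */
	attr.sched_runtime  = 10000000ULL;	/* 10 ms */
	attr.sched_deadline = 30000000ULL;	/* 30 ms */
	attr.sched_period   = 100000000ULL;	/* 100 ms */

	/* flags must be 0, as checked by the syscall above */
	return syscall(SYS_sched_setattr, pid, &attr, 0);
}
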
1da177e4
LT
4182/**
4183 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4184 * @pid: the pid in question.
e69f6186
YB
4185 *
4186 * Return: On success, the policy of the thread. Otherwise, a negative error
4187 * code.
1da177e4 4188 */
5add95d4 4189SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4190{
36c8b586 4191 struct task_struct *p;
3a5c359a 4192 int retval;
1da177e4
LT
4193
4194 if (pid < 0)
3a5c359a 4195 return -EINVAL;
1da177e4
LT
4196
4197 retval = -ESRCH;
5fe85be0 4198 rcu_read_lock();
1da177e4
LT
4199 p = find_process_by_pid(pid);
4200 if (p) {
4201 retval = security_task_getscheduler(p);
4202 if (!retval)
ca94c442
LP
4203 retval = p->policy
4204 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4205 }
5fe85be0 4206 rcu_read_unlock();
1da177e4
LT
4207 return retval;
4208}
4209
4210/**
ca94c442 4211 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4212 * @pid: the pid in question.
4213 * @param: structure containing the RT priority.
e69f6186
YB
4214 *
4215 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4216 * code.
1da177e4 4217 */
5add95d4 4218SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4219{
ce5f7f82 4220 struct sched_param lp = { .sched_priority = 0 };
36c8b586 4221 struct task_struct *p;
3a5c359a 4222 int retval;
1da177e4
LT
4223
4224 if (!param || pid < 0)
3a5c359a 4225 return -EINVAL;
1da177e4 4226
5fe85be0 4227 rcu_read_lock();
1da177e4
LT
4228 p = find_process_by_pid(pid);
4229 retval = -ESRCH;
4230 if (!p)
4231 goto out_unlock;
4232
4233 retval = security_task_getscheduler(p);
4234 if (retval)
4235 goto out_unlock;
4236
ce5f7f82
PZ
4237 if (task_has_rt_policy(p))
4238 lp.sched_priority = p->rt_priority;
5fe85be0 4239 rcu_read_unlock();
1da177e4
LT
4240
4241 /*
4242 * This one might sleep; we cannot do it with a spinlock held ...
4243 */
4244 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4245
1da177e4
LT
4246 return retval;
4247
4248out_unlock:
5fe85be0 4249 rcu_read_unlock();
1da177e4
LT
4250 return retval;
4251}
4252
d50dde5a
DF
4253static int sched_read_attr(struct sched_attr __user *uattr,
4254 struct sched_attr *attr,
4255 unsigned int usize)
4256{
4257 int ret;
4258
4259 if (!access_ok(VERIFY_WRITE, uattr, usize))
4260 return -EFAULT;
4261
4262 /*
4263 * If we're handed a smaller struct than we know of,
4264 * ensure all the unknown bits are 0 - i.e. old
4265 * user-space does not get incomplete information.
4266 */
4267 if (usize < sizeof(*attr)) {
4268 unsigned char *addr;
4269 unsigned char *end;
4270
4271 addr = (void *)attr + usize;
4272 end = (void *)attr + sizeof(*attr);
4273
4274 for (; addr < end; addr++) {
4275 if (*addr)
22400674 4276 return -EFBIG;
d50dde5a
DF
4277 }
4278
4279 attr->size = usize;
4280 }
4281
4efbc454 4282 ret = copy_to_user(uattr, attr, attr->size);
d50dde5a
DF
4283 if (ret)
4284 return -EFAULT;
4285
22400674 4286 return 0;
d50dde5a
DF
4287}
4288
4289/**
aab03e05 4290 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 4291 * @pid: the pid in question.
5778fccf 4292 * @uattr: structure containing the extended parameters.
d50dde5a 4293 * @size: sizeof(attr) for fwd/bwd comp.
db66d756 4294 * @flags: for future extension.
d50dde5a 4295 */
6d35ab48
PZ
4296SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4297 unsigned int, size, unsigned int, flags)
d50dde5a
DF
4298{
4299 struct sched_attr attr = {
4300 .size = sizeof(struct sched_attr),
4301 };
4302 struct task_struct *p;
4303 int retval;
4304
4305 if (!uattr || pid < 0 || size > PAGE_SIZE ||
6d35ab48 4306 size < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
4307 return -EINVAL;
4308
4309 rcu_read_lock();
4310 p = find_process_by_pid(pid);
4311 retval = -ESRCH;
4312 if (!p)
4313 goto out_unlock;
4314
4315 retval = security_task_getscheduler(p);
4316 if (retval)
4317 goto out_unlock;
4318
4319 attr.sched_policy = p->policy;
7479f3c9
PZ
4320 if (p->sched_reset_on_fork)
4321 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05
DF
4322 if (task_has_dl_policy(p))
4323 __getparam_dl(p, &attr);
4324 else if (task_has_rt_policy(p))
d50dde5a
DF
4325 attr.sched_priority = p->rt_priority;
4326 else
d0ea0268 4327 attr.sched_nice = task_nice(p);
d50dde5a
DF
4328
4329 rcu_read_unlock();
4330
4331 retval = sched_read_attr(uattr, &attr, size);
4332 return retval;
4333
4334out_unlock:
4335 rcu_read_unlock();
4336 return retval;
4337}
4338
96f874e2 4339long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4340{
5a16f3d3 4341 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4342 struct task_struct *p;
4343 int retval;
1da177e4 4344
23f5d142 4345 rcu_read_lock();
1da177e4
LT
4346
4347 p = find_process_by_pid(pid);
4348 if (!p) {
23f5d142 4349 rcu_read_unlock();
1da177e4
LT
4350 return -ESRCH;
4351 }
4352
23f5d142 4353 /* Prevent p going away */
1da177e4 4354 get_task_struct(p);
23f5d142 4355 rcu_read_unlock();
1da177e4 4356
14a40ffc
TH
4357 if (p->flags & PF_NO_SETAFFINITY) {
4358 retval = -EINVAL;
4359 goto out_put_task;
4360 }
5a16f3d3
RR
4361 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4362 retval = -ENOMEM;
4363 goto out_put_task;
4364 }
4365 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4366 retval = -ENOMEM;
4367 goto out_free_cpus_allowed;
4368 }
1da177e4 4369 retval = -EPERM;
4c44aaaf
EB
4370 if (!check_same_owner(p)) {
4371 rcu_read_lock();
4372 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4373 rcu_read_unlock();
16303ab2 4374 goto out_free_new_mask;
4c44aaaf
EB
4375 }
4376 rcu_read_unlock();
4377 }
1da177e4 4378
b0ae1981 4379 retval = security_task_setscheduler(p);
e7834f8f 4380 if (retval)
16303ab2 4381 goto out_free_new_mask;
e7834f8f 4382
e4099a5e
PZ
4383
4384 cpuset_cpus_allowed(p, cpus_allowed);
4385 cpumask_and(new_mask, in_mask, cpus_allowed);
4386
332ac17e
DF
4387 /*
4388 * Since bandwidth control happens on root_domain basis,
4389 * if admission test is enabled, we only admit -deadline
4390 * tasks allowed to run on all the CPUs in the task's
4391 * root_domain.
4392 */
4393#ifdef CONFIG_SMP
f1e3a093
KT
4394 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4395 rcu_read_lock();
4396 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 4397 retval = -EBUSY;
f1e3a093 4398 rcu_read_unlock();
16303ab2 4399 goto out_free_new_mask;
332ac17e 4400 }
f1e3a093 4401 rcu_read_unlock();
332ac17e
DF
4402 }
4403#endif
49246274 4404again:
25834c73 4405 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 4406
8707d8b8 4407 if (!retval) {
5a16f3d3
RR
4408 cpuset_cpus_allowed(p, cpus_allowed);
4409 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4410 /*
4411 * We must have raced with a concurrent cpuset
4412 * update. Just reset the cpus_allowed to the
4413 * cpuset's cpus_allowed
4414 */
5a16f3d3 4415 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4416 goto again;
4417 }
4418 }
16303ab2 4419out_free_new_mask:
5a16f3d3
RR
4420 free_cpumask_var(new_mask);
4421out_free_cpus_allowed:
4422 free_cpumask_var(cpus_allowed);
4423out_put_task:
1da177e4 4424 put_task_struct(p);
1da177e4
LT
4425 return retval;
4426}
4427
4428static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4429 struct cpumask *new_mask)
1da177e4 4430{
96f874e2
RR
4431 if (len < cpumask_size())
4432 cpumask_clear(new_mask);
4433 else if (len > cpumask_size())
4434 len = cpumask_size();
4435
1da177e4
LT
4436 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4437}
4438
4439/**
4440 * sys_sched_setaffinity - set the cpu affinity of a process
4441 * @pid: pid of the process
4442 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4443 * @user_mask_ptr: user-space pointer to the new cpu mask
e69f6186
YB
4444 *
4445 * Return: 0 on success. An error code otherwise.
1da177e4 4446 */
5add95d4
HC
4447SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4448 unsigned long __user *, user_mask_ptr)
1da177e4 4449{
5a16f3d3 4450 cpumask_var_t new_mask;
1da177e4
LT
4451 int retval;
4452
5a16f3d3
RR
4453 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4454 return -ENOMEM;
1da177e4 4455
5a16f3d3
RR
4456 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4457 if (retval == 0)
4458 retval = sched_setaffinity(pid, new_mask);
4459 free_cpumask_var(new_mask);
4460 return retval;
1da177e4
LT
4461}
4462
96f874e2 4463long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4464{
36c8b586 4465 struct task_struct *p;
31605683 4466 unsigned long flags;
1da177e4 4467 int retval;
1da177e4 4468
23f5d142 4469 rcu_read_lock();
1da177e4
LT
4470
4471 retval = -ESRCH;
4472 p = find_process_by_pid(pid);
4473 if (!p)
4474 goto out_unlock;
4475
e7834f8f
DQ
4476 retval = security_task_getscheduler(p);
4477 if (retval)
4478 goto out_unlock;
4479
013fdb80 4480 raw_spin_lock_irqsave(&p->pi_lock, flags);
6acce3ef 4481 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
013fdb80 4482 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4483
4484out_unlock:
23f5d142 4485 rcu_read_unlock();
1da177e4 4486
9531b62f 4487 return retval;
1da177e4
LT
4488}
4489
4490/**
4491 * sys_sched_getaffinity - get the cpu affinity of a process
4492 * @pid: pid of the process
4493 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4494 * @user_mask_ptr: user-space pointer to hold the current cpu mask
e69f6186
YB
4495 *
4496 * Return: 0 on success. An error code otherwise.
1da177e4 4497 */
5add95d4
HC
4498SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4499 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4500{
4501 int ret;
f17c8607 4502 cpumask_var_t mask;
1da177e4 4503
84fba5ec 4504 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4505 return -EINVAL;
4506 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4507 return -EINVAL;
4508
f17c8607
RR
4509 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4510 return -ENOMEM;
1da177e4 4511
f17c8607
RR
4512 ret = sched_getaffinity(pid, mask);
4513 if (ret == 0) {
8bc037fb 4514 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4515
4516 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4517 ret = -EFAULT;
4518 else
cd3d8031 4519 ret = retlen;
f17c8607
RR
4520 }
4521 free_cpumask_var(mask);
1da177e4 4522
f17c8607 4523 return ret;
1da177e4
LT
4524}
4525
4526/**
4527 * sys_sched_yield - yield the current processor to other threads.
4528 *
dd41f596
IM
4529 * This function yields the current CPU to other tasks. If there are no
4530 * other threads running on this CPU then this function will return.
e69f6186
YB
4531 *
4532 * Return: 0.
1da177e4 4533 */
5add95d4 4534SYSCALL_DEFINE0(sched_yield)
1da177e4 4535{
70b97a7f 4536 struct rq *rq = this_rq_lock();
1da177e4 4537
2d72376b 4538 schedstat_inc(rq, yld_count);
4530d7ab 4539 current->sched_class->yield_task(rq);
1da177e4
LT
4540
4541 /*
4542 * Since we are going to call schedule() anyway, there's
4543 * no need to preempt or enable interrupts:
4544 */
4545 __release(rq->lock);
8a25d5de 4546 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4547 do_raw_spin_unlock(&rq->lock);
ba74c144 4548 sched_preempt_enable_no_resched();
1da177e4
LT
4549
4550 schedule();
4551
4552 return 0;
4553}
4554
02b67cc3 4555int __sched _cond_resched(void)
1da177e4 4556{
fe32d3cd 4557 if (should_resched(0)) {
a18b5d01 4558 preempt_schedule_common();
1da177e4
LT
4559 return 1;
4560 }
4561 return 0;
4562}
02b67cc3 4563EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
4564
4565/*
613afbf8 4566 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4567 * call schedule, and on return reacquire the lock.
4568 *
41a2d6cf 4569 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4570 * operations here to prevent schedule() from being called twice (once via
4571 * spin_unlock(), once by hand).
4572 */
613afbf8 4573int __cond_resched_lock(spinlock_t *lock)
1da177e4 4574{
fe32d3cd 4575 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
4576 int ret = 0;
4577
f607c668
PZ
4578 lockdep_assert_held(lock);
4579
4a81e832 4580 if (spin_needbreak(lock) || resched) {
1da177e4 4581 spin_unlock(lock);
d86ee480 4582 if (resched)
a18b5d01 4583 preempt_schedule_common();
95c354fe
NP
4584 else
4585 cpu_relax();
6df3cecb 4586 ret = 1;
1da177e4 4587 spin_lock(lock);
1da177e4 4588 }
6df3cecb 4589 return ret;
1da177e4 4590}
613afbf8 4591EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4592
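
An illustrative sketch of the intended usage pattern, via the cond_resched_lock() wrapper around the helper above; the lock, array, and per-item work are hypothetical, and the sketch assumes the data stays valid if the lock is briefly dropped.

	spin_lock(&my_lock);
	for (i = 0; i < nr_items; i++) {
		handle_item(&items[i]);		/* hypothetical per-item work */
		cond_resched_lock(&my_lock);	/* may drop and re-take the lock */
	}
	spin_unlock(&my_lock);
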
613afbf8 4593int __sched __cond_resched_softirq(void)
1da177e4
LT
4594{
4595 BUG_ON(!in_softirq());
4596
fe32d3cd 4597 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
98d82567 4598 local_bh_enable();
a18b5d01 4599 preempt_schedule_common();
1da177e4
LT
4600 local_bh_disable();
4601 return 1;
4602 }
4603 return 0;
4604}
613afbf8 4605EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4606
1da177e4
LT
4607/**
4608 * yield - yield the current processor to other threads.
4609 *
8e3fabfd
PZ
4610 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4611 *
4612 * The scheduler is at all times free to pick the calling task as the most
4613 * eligible task to run, if removing the yield() call from your code breaks
4614 * it, it's already broken.
4615 *
4616 * Typical broken usage is:
4617 *
4618 * while (!event)
4619 * yield();
4620 *
4621 * where one assumes that yield() will let 'the other' process run that will
4622 * make event true. If the current task is a SCHED_FIFO task that will never
4623 * happen. Never use yield() as a progress guarantee!!
4624 *
4625 * If you want to use yield() to wait for something, use wait_event().
4626 * If you want to use yield() to be 'nice' for others, use cond_resched().
4627 * If you still want to use yield(), do not!
1da177e4
LT
4628 */
4629void __sched yield(void)
4630{
4631 set_current_state(TASK_RUNNING);
4632 sys_sched_yield();
4633}
1da177e4
LT
4634EXPORT_SYMBOL(yield);
4635
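
A hedged sketch of the replacement suggested in the comment above, with hypothetical names ('event_flag', 'my_wq'): the waiter blocks properly instead of spinning on yield(), and the producer wakes it.

	/* Waiter: instead of "while (!event) yield();" */
	wait_event(my_wq, event_flag);

	/* Producer: set the condition, then wake the waiter */
	event_flag = true;
	wake_up(&my_wq);
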
d95f4122
MG
4636/**
4637 * yield_to - yield the current processor to another thread in
4638 * your thread group, or accelerate that thread toward the
4639 * processor it's on.
16addf95
RD
4640 * @p: target task
4641 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4642 *
4643 * It's the caller's job to ensure that the target task struct
4644 * can't go away on us before we can do any checks.
4645 *
e69f6186 4646 * Return:
7b270f60
PZ
4647 * true (>0) if we indeed boosted the target task.
4648 * false (0) if we failed to boost the target.
4649 * -ESRCH if there's no task to yield to.
d95f4122 4650 */
fa93384f 4651int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
4652{
4653 struct task_struct *curr = current;
4654 struct rq *rq, *p_rq;
4655 unsigned long flags;
c3c18640 4656 int yielded = 0;
d95f4122
MG
4657
4658 local_irq_save(flags);
4659 rq = this_rq();
4660
4661again:
4662 p_rq = task_rq(p);
7b270f60
PZ
4663 /*
4664 * If we're the only runnable task on the rq and target rq also
4665 * has only one task, there's absolutely no point in yielding.
4666 */
4667 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4668 yielded = -ESRCH;
4669 goto out_irq;
4670 }
4671
d95f4122 4672 double_rq_lock(rq, p_rq);
39e24d8f 4673 if (task_rq(p) != p_rq) {
d95f4122
MG
4674 double_rq_unlock(rq, p_rq);
4675 goto again;
4676 }
4677
4678 if (!curr->sched_class->yield_to_task)
7b270f60 4679 goto out_unlock;
d95f4122
MG
4680
4681 if (curr->sched_class != p->sched_class)
7b270f60 4682 goto out_unlock;
d95f4122
MG
4683
4684 if (task_running(p_rq, p) || p->state)
7b270f60 4685 goto out_unlock;
d95f4122
MG
4686
4687 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4688 if (yielded) {
d95f4122 4689 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4690 /*
4691 * Make p's CPU reschedule; pick_next_entity takes care of
4692 * fairness.
4693 */
4694 if (preempt && rq != p_rq)
8875125e 4695 resched_curr(p_rq);
6d1cafd8 4696 }
d95f4122 4697
7b270f60 4698out_unlock:
d95f4122 4699 double_rq_unlock(rq, p_rq);
7b270f60 4700out_irq:
d95f4122
MG
4701 local_irq_restore(flags);
4702
7b270f60 4703 if (yielded > 0)
d95f4122
MG
4704 schedule();
4705
4706 return yielded;
4707}
4708EXPORT_SYMBOL_GPL(yield_to);
4709
1da177e4 4710/*
41a2d6cf 4711 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4712 * that process accounting knows that this is a task in IO wait state.
1da177e4 4713 */
1da177e4
LT
4714long __sched io_schedule_timeout(long timeout)
4715{
9cff8ade
N
4716 int old_iowait = current->in_iowait;
4717 struct rq *rq;
1da177e4
LT
4718 long ret;
4719
9cff8ade 4720 current->in_iowait = 1;
10d784ea 4721 blk_schedule_flush_plug(current);
9cff8ade 4722
0ff92245 4723 delayacct_blkio_start();
9cff8ade 4724 rq = raw_rq();
1da177e4
LT
4725 atomic_inc(&rq->nr_iowait);
4726 ret = schedule_timeout(timeout);
9cff8ade 4727 current->in_iowait = old_iowait;
1da177e4 4728 atomic_dec(&rq->nr_iowait);
0ff92245 4729 delayacct_blkio_end();
9cff8ade 4730
1da177e4
LT
4731 return ret;
4732}
9cff8ade 4733EXPORT_SYMBOL(io_schedule_timeout);
1da177e4
LT
4734
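
An illustrative waiter that sleeps as iowait for a bounded time; 'wq' and 'done' are hypothetical, and the sketch only shows where io_schedule_timeout() slots into a prepare_to_wait()/finish_wait() sequence.

	DEFINE_WAIT(wait);

	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!done)
		io_schedule_timeout(HZ);	/* charged as iowait, up to 1s */
	finish_wait(&wq, &wait);
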
4735/**
4736 * sys_sched_get_priority_max - return maximum RT priority.
4737 * @policy: scheduling class.
4738 *
e69f6186
YB
4739 * Return: On success, this syscall returns the maximum
4740 * rt_priority that can be used by a given scheduling class.
4741 * On failure, a negative error code is returned.
1da177e4 4742 */
5add95d4 4743SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4744{
4745 int ret = -EINVAL;
4746
4747 switch (policy) {
4748 case SCHED_FIFO:
4749 case SCHED_RR:
4750 ret = MAX_USER_RT_PRIO-1;
4751 break;
aab03e05 4752 case SCHED_DEADLINE:
1da177e4 4753 case SCHED_NORMAL:
b0a9499c 4754 case SCHED_BATCH:
dd41f596 4755 case SCHED_IDLE:
1da177e4
LT
4756 ret = 0;
4757 break;
4758 }
4759 return ret;
4760}
4761
4762/**
4763 * sys_sched_get_priority_min - return minimum RT priority.
4764 * @policy: scheduling class.
4765 *
e69f6186
YB
4766 * Return: On success, this syscall returns the minimum
4767 * rt_priority that can be used by a given scheduling class.
4768 * On failure, a negative error code is returned.
1da177e4 4769 */
5add95d4 4770SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4771{
4772 int ret = -EINVAL;
4773
4774 switch (policy) {
4775 case SCHED_FIFO:
4776 case SCHED_RR:
4777 ret = 1;
4778 break;
aab03e05 4779 case SCHED_DEADLINE:
1da177e4 4780 case SCHED_NORMAL:
b0a9499c 4781 case SCHED_BATCH:
dd41f596 4782 case SCHED_IDLE:
1da177e4
LT
4783 ret = 0;
4784 }
4785 return ret;
4786}
4787
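
A minimal userspace sketch querying the valid static priority range for SCHED_FIFO via the glibc wrappers for the two syscalls above:

#include <sched.h>
#include <stdio.h>

int main(void)
{
	int lo = sched_get_priority_min(SCHED_FIFO);
	int hi = sched_get_priority_max(SCHED_FIFO);

	printf("SCHED_FIFO static priorities: %d..%d\n", lo, hi);
	return 0;
}
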
4788/**
4789 * sys_sched_rr_get_interval - return the default timeslice of a process.
4790 * @pid: pid of the process.
4791 * @interval: userspace pointer to the timeslice value.
4792 *
4793 * this syscall writes the default timeslice value of a given process
4794 * into the user-space timespec buffer. A value of '0' means infinity.
e69f6186
YB
4795 *
4796 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4797 * an error code.
1da177e4 4798 */
17da2bd9 4799SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4800 struct timespec __user *, interval)
1da177e4 4801{
36c8b586 4802 struct task_struct *p;
a4ec24b4 4803 unsigned int time_slice;
dba091b9
TG
4804 unsigned long flags;
4805 struct rq *rq;
3a5c359a 4806 int retval;
1da177e4 4807 struct timespec t;
1da177e4
LT
4808
4809 if (pid < 0)
3a5c359a 4810 return -EINVAL;
1da177e4
LT
4811
4812 retval = -ESRCH;
1a551ae7 4813 rcu_read_lock();
1da177e4
LT
4814 p = find_process_by_pid(pid);
4815 if (!p)
4816 goto out_unlock;
4817
4818 retval = security_task_getscheduler(p);
4819 if (retval)
4820 goto out_unlock;
4821
dba091b9 4822 rq = task_rq_lock(p, &flags);
a57beec5
PZ
4823 time_slice = 0;
4824 if (p->sched_class->get_rr_interval)
4825 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4826 task_rq_unlock(rq, p, &flags);
a4ec24b4 4827
1a551ae7 4828 rcu_read_unlock();
a4ec24b4 4829 jiffies_to_timespec(time_slice, &t);
1da177e4 4830 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4831 return retval;
3a5c359a 4832
1da177e4 4833out_unlock:
1a551ae7 4834 rcu_read_unlock();
1da177e4
LT
4835 return retval;
4836}
4837
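
A minimal userspace sketch using the glibc wrapper for the syscall above (pid 0 means the calling thread):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts))
		return 1;

	printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
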
7c731e0a 4838static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4839
82a1fcb9 4840void sched_show_task(struct task_struct *p)
1da177e4 4841{
1da177e4 4842 unsigned long free = 0;
4e79752c 4843 int ppid;
1f8a7633 4844 unsigned long state = p->state;
1da177e4 4845
1f8a7633
TH
4846 if (state)
4847 state = __ffs(state) + 1;
28d0686c 4848 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4849 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4850#if BITS_PER_LONG == 32
1da177e4 4851 if (state == TASK_RUNNING)
3df0fc5b 4852 printk(KERN_CONT " running ");
1da177e4 4853 else
3df0fc5b 4854 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4855#else
4856 if (state == TASK_RUNNING)
3df0fc5b 4857 printk(KERN_CONT " running task ");
1da177e4 4858 else
3df0fc5b 4859 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4860#endif
4861#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4862 free = stack_not_used(p);
1da177e4 4863#endif
a90e984c 4864 ppid = 0;
4e79752c 4865 rcu_read_lock();
a90e984c
ON
4866 if (pid_alive(p))
4867 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 4868 rcu_read_unlock();
3df0fc5b 4869 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 4870 task_pid_nr(p), ppid,
aa47b7e0 4871 (unsigned long)task_thread_info(p)->flags);
1da177e4 4872
3d1cb205 4873 print_worker_info(KERN_INFO, p);
5fb5e6de 4874 show_stack(p, NULL);
1da177e4
LT
4875}
4876
e59e2ae2 4877void show_state_filter(unsigned long state_filter)
1da177e4 4878{
36c8b586 4879 struct task_struct *g, *p;
1da177e4 4880
4bd77321 4881#if BITS_PER_LONG == 32
3df0fc5b
PZ
4882 printk(KERN_INFO
4883 " task PC stack pid father\n");
1da177e4 4884#else
3df0fc5b
PZ
4885 printk(KERN_INFO
4886 " task PC stack pid father\n");
1da177e4 4887#endif
510f5acc 4888 rcu_read_lock();
5d07f420 4889 for_each_process_thread(g, p) {
1da177e4
LT
4890 /*
4891 * reset the NMI-timeout; listing all tasks on a slow
25985edc 4892 * console might take a lot of time:
1da177e4
LT
4893 */
4894 touch_nmi_watchdog();
39bc89fd 4895 if (!state_filter || (p->state & state_filter))
82a1fcb9 4896 sched_show_task(p);
5d07f420 4897 }
1da177e4 4898
04c9167f
JF
4899 touch_all_softlockup_watchdogs();
4900
dd41f596
IM
4901#ifdef CONFIG_SCHED_DEBUG
4902 sysrq_sched_debug_show();
4903#endif
510f5acc 4904 rcu_read_unlock();
e59e2ae2
IM
4905 /*
4906 * Only show locks if all tasks are dumped:
4907 */
93335a21 4908 if (!state_filter)
e59e2ae2 4909 debug_show_all_locks();
1da177e4
LT
4910}
4911
0db0628d 4912void init_idle_bootup_task(struct task_struct *idle)
1df21055 4913{
dd41f596 4914 idle->sched_class = &idle_sched_class;
1df21055
IM
4915}
4916
f340c0d1
IM
4917/**
4918 * init_idle - set up an idle thread for a given CPU
4919 * @idle: task in question
4920 * @cpu: cpu the idle task belongs to
4921 *
4922 * NOTE: this function does not set the idle thread's NEED_RESCHED
4923 * flag, to make booting more robust.
4924 */
0db0628d 4925void init_idle(struct task_struct *idle, int cpu)
1da177e4 4926{
70b97a7f 4927 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4928 unsigned long flags;
4929
25834c73
PZ
4930 raw_spin_lock_irqsave(&idle->pi_lock, flags);
4931 raw_spin_lock(&rq->lock);
5cbd54ef 4932
5e1576ed 4933 __sched_fork(0, idle);
06b83b5f 4934 idle->state = TASK_RUNNING;
dd41f596
IM
4935 idle->se.exec_start = sched_clock();
4936
de9b8f5d
PZ
4937#ifdef CONFIG_SMP
4938 /*
4939 * It's possible that init_idle() gets called multiple times on a task,
4940 * in that case do_set_cpus_allowed() will not do the right thing.
4941 *
4942 * And since this is boot we can forgo the serialization.
4943 */
4944 set_cpus_allowed_common(idle, cpumask_of(cpu));
4945#endif
6506cf6c
PZ
4946 /*
4947 * We're having a chicken and egg problem: even though we are
4948 * holding rq->lock, the cpu isn't yet set to this cpu so the
4949 * lockdep check in task_group() will fail.
4950 *
4951 * Similar case to sched_fork(). / Alternatively we could
4952 * use task_rq_lock() here and obtain the other rq->lock.
4953 *
4954 * Silence PROVE_RCU
4955 */
4956 rcu_read_lock();
dd41f596 4957 __set_task_cpu(idle, cpu);
6506cf6c 4958 rcu_read_unlock();
1da177e4 4959
1da177e4 4960 rq->curr = rq->idle = idle;
da0c1e65 4961 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 4962#ifdef CONFIG_SMP
3ca7a440 4963 idle->on_cpu = 1;
4866cde0 4964#endif
25834c73
PZ
4965 raw_spin_unlock(&rq->lock);
4966 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
4967
4968 /* Set the preempt count _outside_ the spinlocks! */
01028747 4969 init_idle_preempt_count(idle, cpu);
55cd5340 4970
dd41f596
IM
4971 /*
4972 * The idle tasks have their own, simple scheduling class:
4973 */
4974 idle->sched_class = &idle_sched_class;
868baf07 4975 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 4976 vtime_init_idle(idle, cpu);
de9b8f5d 4977#ifdef CONFIG_SMP
f1c6f1a7
CE
4978 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4979#endif
19978ca6
IM
4980}
4981
f82f8042
JL
4982int cpuset_cpumask_can_shrink(const struct cpumask *cur,
4983 const struct cpumask *trial)
4984{
4985 int ret = 1, trial_cpus;
4986 struct dl_bw *cur_dl_b;
4987 unsigned long flags;
4988
bb2bc55a
MG
4989 if (!cpumask_weight(cur))
4990 return ret;
4991
75e23e49 4992 rcu_read_lock_sched();
f82f8042
JL
4993 cur_dl_b = dl_bw_of(cpumask_any(cur));
4994 trial_cpus = cpumask_weight(trial);
4995
4996 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
4997 if (cur_dl_b->bw != -1 &&
4998 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
4999 ret = 0;
5000 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
75e23e49 5001 rcu_read_unlock_sched();
f82f8042
JL
5002
5003 return ret;
5004}
5005
7f51412a
JL
5006int task_can_attach(struct task_struct *p,
5007 const struct cpumask *cs_cpus_allowed)
5008{
5009 int ret = 0;
5010
5011 /*
5012 * Kthreads which disallow setaffinity shouldn't be moved
5013 * to a new cpuset; we don't want to change their cpu
5014 * affinity and isolating such threads by their set of
5015 * allowed nodes is unnecessary. Thus, cpusets are not
5016 * applicable for such threads. This prevents checking for
5017 * success of set_cpus_allowed_ptr() on all attached tasks
5018 * before cpus_allowed may be changed.
5019 */
5020 if (p->flags & PF_NO_SETAFFINITY) {
5021 ret = -EINVAL;
5022 goto out;
5023 }
5024
5025#ifdef CONFIG_SMP
5026 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5027 cs_cpus_allowed)) {
5028 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5029 cs_cpus_allowed);
75e23e49 5030 struct dl_bw *dl_b;
7f51412a
JL
5031 bool overflow;
5032 int cpus;
5033 unsigned long flags;
5034
75e23e49
JL
5035 rcu_read_lock_sched();
5036 dl_b = dl_bw_of(dest_cpu);
7f51412a
JL
5037 raw_spin_lock_irqsave(&dl_b->lock, flags);
5038 cpus = dl_bw_cpus(dest_cpu);
5039 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5040 if (overflow)
5041 ret = -EBUSY;
5042 else {
5043 /*
5044 * We reserve space for this task in the destination
5045 * root_domain, as we can't fail after this point.
5046 * We will free resources in the source root_domain
5047 * later on (see set_cpus_allowed_dl()).
5048 */
5049 __dl_add(dl_b, p->dl.dl_bw);
5050 }
5051 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
75e23e49 5052 rcu_read_unlock_sched();
7f51412a
JL
5053
5054 }
5055#endif
5056out:
5057 return ret;
5058}
5059
1da177e4 5060#ifdef CONFIG_SMP
1da177e4 5061
e6628d5b
MG
5062#ifdef CONFIG_NUMA_BALANCING
5063/* Migrate current task p to target_cpu */
5064int migrate_task_to(struct task_struct *p, int target_cpu)
5065{
5066 struct migration_arg arg = { p, target_cpu };
5067 int curr_cpu = task_cpu(p);
5068
5069 if (curr_cpu == target_cpu)
5070 return 0;
5071
5072 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5073 return -EINVAL;
5074
5075 /* TODO: This is not properly updating schedstats */
5076
286549dc 5077 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
5078 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5079}
0ec8aa00
PZ
5080
5081/*
5082 * Requeue a task on a given node and accurately track the number of NUMA
5083 * tasks on the runqueues
5084 */
5085void sched_setnuma(struct task_struct *p, int nid)
5086{
5087 struct rq *rq;
5088 unsigned long flags;
da0c1e65 5089 bool queued, running;
0ec8aa00
PZ
5090
5091 rq = task_rq_lock(p, &flags);
da0c1e65 5092 queued = task_on_rq_queued(p);
0ec8aa00
PZ
5093 running = task_current(rq, p);
5094
da0c1e65 5095 if (queued)
0ec8aa00
PZ
5096 dequeue_task(rq, p, 0);
5097 if (running)
f3cd1c4e 5098 put_prev_task(rq, p);
0ec8aa00
PZ
5099
5100 p->numa_preferred_nid = nid;
0ec8aa00
PZ
5101
5102 if (running)
5103 p->sched_class->set_curr_task(rq);
da0c1e65 5104 if (queued)
0ec8aa00
PZ
5105 enqueue_task(rq, p, 0);
5106 task_rq_unlock(rq, p, &flags);
5107}
5cc389bc 5108#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 5109
1da177e4 5110#ifdef CONFIG_HOTPLUG_CPU
054b9108 5111/*
48c5ccae
PZ
5112 * Ensures that the idle task is using init_mm right before its cpu goes
5113 * offline.
054b9108 5114 */
48c5ccae 5115void idle_task_exit(void)
1da177e4 5116{
48c5ccae 5117 struct mm_struct *mm = current->active_mm;
e76bd8d9 5118
48c5ccae 5119 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 5120
a53efe5f 5121 if (mm != &init_mm) {
48c5ccae 5122 switch_mm(mm, &init_mm, current);
a53efe5f
MS
5123 finish_arch_post_lock_switch();
5124 }
48c5ccae 5125 mmdrop(mm);
1da177e4
LT
5126}
5127
5128/*
5d180232
PZ
5129 * Since this CPU is going 'away' for a while, fold any nr_active delta
5130 * we might have. Assumes we're called after migrate_tasks() so that the
5131 * nr_active count is stable.
5132 *
5133 * Also see the comment "Global load-average calculations".
1da177e4 5134 */
5d180232 5135static void calc_load_migrate(struct rq *rq)
1da177e4 5136{
5d180232
PZ
5137 long delta = calc_load_fold_active(rq);
5138 if (delta)
5139 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
5140}
5141
3f1d2a31
PZ
5142static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5143{
5144}
5145
5146static const struct sched_class fake_sched_class = {
5147 .put_prev_task = put_prev_task_fake,
5148};
5149
5150static struct task_struct fake_task = {
5151 /*
5152 * Avoid pull_{rt,dl}_task()
5153 */
5154 .prio = MAX_PRIO + 1,
5155 .sched_class = &fake_sched_class,
5156};
5157
48f24c4d 5158/*
48c5ccae
PZ
5159 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5160 * try_to_wake_up()->select_task_rq().
5161 *
5162 * Called with rq->lock held even though we're in stop_machine() and
5163 * there's no concurrency possible, we hold the required locks anyway
5164 * because of lock validation efforts.
1da177e4 5165 */
5e16bbc2 5166static void migrate_tasks(struct rq *dead_rq)
1da177e4 5167{
5e16bbc2 5168 struct rq *rq = dead_rq;
48c5ccae
PZ
5169 struct task_struct *next, *stop = rq->stop;
5170 int dest_cpu;
1da177e4
LT
5171
5172 /*
48c5ccae
PZ
5173 * Fudge the rq selection such that the below task selection loop
5174 * doesn't get stuck on the currently eligible stop task.
5175 *
5176 * We're currently inside stop_machine() and the rq is either stuck
5177 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5178 * either way we should never end up calling schedule() until we're
5179 * done here.
1da177e4 5180 */
48c5ccae 5181 rq->stop = NULL;
48f24c4d 5182
77bd3970
FW
5183 /*
5184 * put_prev_task() and pick_next_task() sched
5185 * class method both need to have an up-to-date
5186 * value of rq->clock[_task]
5187 */
5188 update_rq_clock(rq);
5189
5e16bbc2 5190 for (;;) {
48c5ccae
PZ
5191 /*
5192 * There's this thread running, bail when that's the only
5193 * remaining thread.
5194 */
5195 if (rq->nr_running == 1)
dd41f596 5196 break;
48c5ccae 5197
cbce1a68 5198 /*
5473e0cc 5199 * pick_next_task assumes pinned rq->lock.
cbce1a68
PZ
5200 */
5201 lockdep_pin_lock(&rq->lock);
3f1d2a31 5202 next = pick_next_task(rq, &fake_task);
48c5ccae 5203 BUG_ON(!next);
79c53799 5204 next->sched_class->put_prev_task(rq, next);
e692ab53 5205
5473e0cc
WL
5206 /*
5207 * Rules for changing task_struct::cpus_allowed are holding
5208 * both pi_lock and rq->lock, such that holding either
5209 * stabilizes the mask.
5210 *
5211 * Dropping rq->lock is not quite as disastrous as it usually is
5212 * because !cpu_active at this point, which means load-balance
5213 * will not interfere. Also, stop-machine.
5214 */
5215 lockdep_unpin_lock(&rq->lock);
5216 raw_spin_unlock(&rq->lock);
5217 raw_spin_lock(&next->pi_lock);
5218 raw_spin_lock(&rq->lock);
5219
5220 /*
5221 * Since we're inside stop-machine, _nothing_ should have
5222 * changed the task, WARN if weird stuff happened, because in
5223 * that case the above rq->lock drop is a fail too.
5224 */
5225 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5226 raw_spin_unlock(&next->pi_lock);
5227 continue;
5228 }
5229
48c5ccae 5230 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 5231 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
48c5ccae 5232
5e16bbc2
PZ
5233 rq = __migrate_task(rq, next, dest_cpu);
5234 if (rq != dead_rq) {
5235 raw_spin_unlock(&rq->lock);
5236 rq = dead_rq;
5237 raw_spin_lock(&rq->lock);
5238 }
5473e0cc 5239 raw_spin_unlock(&next->pi_lock);
1da177e4 5240 }
dce48a84 5241
48c5ccae 5242 rq->stop = stop;
dce48a84 5243}
1da177e4
LT
5244#endif /* CONFIG_HOTPLUG_CPU */
5245
e692ab53
NP
5246#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5247
5248static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5249 {
5250 .procname = "sched_domain",
c57baf1e 5251 .mode = 0555,
e0361851 5252 },
56992309 5253 {}
e692ab53
NP
5254};
5255
5256static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5257 {
5258 .procname = "kernel",
c57baf1e 5259 .mode = 0555,
e0361851
AD
5260 .child = sd_ctl_dir,
5261 },
56992309 5262 {}
e692ab53
NP
5263};
5264
5265static struct ctl_table *sd_alloc_ctl_entry(int n)
5266{
5267 struct ctl_table *entry =
5cf9f062 5268 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5269
e692ab53
NP
5270 return entry;
5271}
5272
6382bc90
MM
5273static void sd_free_ctl_entry(struct ctl_table **tablep)
5274{
cd790076 5275 struct ctl_table *entry;
6382bc90 5276
cd790076
MM
5277 /*
5278 * In the intermediate directories, both the child directory and
5279 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5280 * will always be set. In the lowest directory the names are
cd790076
MM
5281 * static strings and all have proc handlers.
5282 */
5283 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5284 if (entry->child)
5285 sd_free_ctl_entry(&entry->child);
cd790076
MM
5286 if (entry->proc_handler == NULL)
5287 kfree(entry->procname);
5288 }
6382bc90
MM
5289
5290 kfree(*tablep);
5291 *tablep = NULL;
5292}
5293
201c373e 5294static int min_load_idx = 0;
fd9b86d3 5295static int max_load_idx = CPU_LOAD_IDX_MAX-1;
201c373e 5296
e692ab53 5297static void
e0361851 5298set_table_entry(struct ctl_table *entry,
e692ab53 5299 const char *procname, void *data, int maxlen,
201c373e
NK
5300 umode_t mode, proc_handler *proc_handler,
5301 bool load_idx)
e692ab53 5302{
e692ab53
NP
5303 entry->procname = procname;
5304 entry->data = data;
5305 entry->maxlen = maxlen;
5306 entry->mode = mode;
5307 entry->proc_handler = proc_handler;
201c373e
NK
5308
5309 if (load_idx) {
5310 entry->extra1 = &min_load_idx;
5311 entry->extra2 = &max_load_idx;
5312 }
e692ab53
NP
5313}
5314
5315static struct ctl_table *
5316sd_alloc_ctl_domain_table(struct sched_domain *sd)
5317{
37e6bae8 5318 struct ctl_table *table = sd_alloc_ctl_entry(14);
e692ab53 5319
ad1cdc1d
MM
5320 if (table == NULL)
5321 return NULL;
5322
e0361851 5323 set_table_entry(&table[0], "min_interval", &sd->min_interval,
201c373e 5324 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5325 set_table_entry(&table[1], "max_interval", &sd->max_interval,
201c373e 5326 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5327 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
201c373e 5328 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5329 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
201c373e 5330 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5331 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
201c373e 5332 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5333 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
201c373e 5334 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5335 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
201c373e 5336 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5337 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
201c373e 5338 sizeof(int), 0644, proc_dointvec_minmax, false);
e0361851 5339 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
201c373e 5340 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5341 set_table_entry(&table[9], "cache_nice_tries",
e692ab53 5342 &sd->cache_nice_tries,
201c373e 5343 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5344 set_table_entry(&table[10], "flags", &sd->flags,
201c373e 5345 sizeof(int), 0644, proc_dointvec_minmax, false);
37e6bae8
AS
5346 set_table_entry(&table[11], "max_newidle_lb_cost",
5347 &sd->max_newidle_lb_cost,
5348 sizeof(long), 0644, proc_doulongvec_minmax, false);
5349 set_table_entry(&table[12], "name", sd->name,
201c373e 5350 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
37e6bae8 5351 /* &table[13] is terminator */
e692ab53
NP
5352
5353 return table;
5354}
5355
be7002e6 5356static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5357{
5358 struct ctl_table *entry, *table;
5359 struct sched_domain *sd;
5360 int domain_num = 0, i;
5361 char buf[32];
5362
5363 for_each_domain(cpu, sd)
5364 domain_num++;
5365 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5366 if (table == NULL)
5367 return NULL;
e692ab53
NP
5368
5369 i = 0;
5370 for_each_domain(cpu, sd) {
5371 snprintf(buf, 32, "domain%d", i);
e692ab53 5372 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5373 entry->mode = 0555;
e692ab53
NP
5374 entry->child = sd_alloc_ctl_domain_table(sd);
5375 entry++;
5376 i++;
5377 }
5378 return table;
5379}
5380
5381static struct ctl_table_header *sd_sysctl_header;
6382bc90 5382static void register_sched_domain_sysctl(void)
e692ab53 5383{
6ad4c188 5384 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5385 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5386 char buf[32];
5387
7378547f
MM
5388 WARN_ON(sd_ctl_dir[0].child);
5389 sd_ctl_dir[0].child = entry;
5390
ad1cdc1d
MM
5391 if (entry == NULL)
5392 return;
5393
6ad4c188 5394 for_each_possible_cpu(i) {
e692ab53 5395 snprintf(buf, 32, "cpu%d", i);
e692ab53 5396 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5397 entry->mode = 0555;
e692ab53 5398 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5399 entry++;
e692ab53 5400 }
7378547f
MM
5401
5402 WARN_ON(sd_sysctl_header);
e692ab53
NP
5403 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5404}
6382bc90 5405
7378547f 5406/* may be called multiple times per register */
6382bc90
MM
5407static void unregister_sched_domain_sysctl(void)
5408{
781b0203 5409 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5410 sd_sysctl_header = NULL;
7378547f
MM
5411 if (sd_ctl_dir[0].child)
5412 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5413}
e692ab53 5414#else
6382bc90
MM
5415static void register_sched_domain_sysctl(void)
5416{
5417}
5418static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5419{
5420}
5cc389bc 5421#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
e692ab53 5422
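/*
 * Illustrative sketch, not part of core.c: the tables registered above show
 * up as /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/<knob>.  A userspace
 * reader could fetch one knob like this; the helper name and error handling
 * are assumptions made for the example only.
 */
#include <stdio.h>

static long read_sd_min_interval(int cpu, int domain)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/kernel/sched_domain/cpu%d/domain%d/min_interval",
		 cpu, domain);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}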
1f11eb6a
GH
5423static void set_rq_online(struct rq *rq)
5424{
5425 if (!rq->online) {
5426 const struct sched_class *class;
5427
c6c4927b 5428 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5429 rq->online = 1;
5430
5431 for_each_class(class) {
5432 if (class->rq_online)
5433 class->rq_online(rq);
5434 }
5435 }
5436}
5437
5438static void set_rq_offline(struct rq *rq)
5439{
5440 if (rq->online) {
5441 const struct sched_class *class;
5442
5443 for_each_class(class) {
5444 if (class->rq_offline)
5445 class->rq_offline(rq);
5446 }
5447
c6c4927b 5448 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5449 rq->online = 0;
5450 }
5451}
5452
1da177e4
LT
5453/*
5454 * migration_call - callback that gets triggered when a CPU is added.
5455 * Here we can start up the necessary migration thread for the new CPU.
5456 */
0db0628d 5457static int
48f24c4d 5458migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5459{
48f24c4d 5460 int cpu = (long)hcpu;
1da177e4 5461 unsigned long flags;
969c7921 5462 struct rq *rq = cpu_rq(cpu);
1da177e4 5463
48c5ccae 5464 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5465
1da177e4 5466 case CPU_UP_PREPARE:
a468d389 5467 rq->calc_load_update = calc_load_update;
1da177e4 5468 break;
48f24c4d 5469
1da177e4 5470 case CPU_ONLINE:
1f94ef59 5471 /* Update our root-domain */
05fa785c 5472 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5473 if (rq->rd) {
c6c4927b 5474 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5475
5476 set_rq_online(rq);
1f94ef59 5477 }
05fa785c 5478 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5479 break;
48f24c4d 5480
1da177e4 5481#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5482 case CPU_DYING:
317f3941 5483 sched_ttwu_pending();
57d885fe 5484 /* Update our root-domain */
05fa785c 5485 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5486 if (rq->rd) {
c6c4927b 5487 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5488 set_rq_offline(rq);
57d885fe 5489 }
5e16bbc2 5490 migrate_tasks(rq);
48c5ccae 5491 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5492 raw_spin_unlock_irqrestore(&rq->lock, flags);
5d180232 5493 break;
48c5ccae 5494
5d180232 5495 case CPU_DEAD:
f319da0c 5496 calc_load_migrate(rq);
57d885fe 5497 break;
1da177e4
LT
5498#endif
5499 }
49c022e6
PZ
5500
5501 update_max_interval();
5502
1da177e4
LT
5503 return NOTIFY_OK;
5504}
5505
f38b0820
PM
5506/*
5507 * Register at high priority so that task migration (migrate_all_tasks)
5508 * happens before everything else. This has to be lower priority than
cdd6c482 5509 * the notifier in the perf_event subsystem, though.
1da177e4 5510 */
0db0628d 5511static struct notifier_block migration_notifier = {
1da177e4 5512 .notifier_call = migration_call,
50a323b7 5513 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5514};
5515
6a82b60d 5516static void set_cpu_rq_start_time(void)
a803f026
CM
5517{
5518 int cpu = smp_processor_id();
5519 struct rq *rq = cpu_rq(cpu);
5520 rq->age_stamp = sched_clock_cpu(cpu);
5521}
5522
0db0628d 5523static int sched_cpu_active(struct notifier_block *nfb,
3a101d05
TH
5524 unsigned long action, void *hcpu)
5525{
5526 switch (action & ~CPU_TASKS_FROZEN) {
a803f026
CM
5527 case CPU_STARTING:
5528 set_cpu_rq_start_time();
5529 return NOTIFY_OK;
dd9d3843
JS
5530 case CPU_ONLINE:
5531 /*
5532 * At this point a starting CPU has marked itself as online via
5533 * set_cpu_online(). But it might not yet have marked itself
5534 * as active, which is essential from here on.
5535 *
5536 * Thus, fall through and help the starting CPU along.
5537 */
3a101d05
TH
5538 case CPU_DOWN_FAILED:
5539 set_cpu_active((long)hcpu, true);
5540 return NOTIFY_OK;
5541 default:
5542 return NOTIFY_DONE;
5543 }
5544}
5545
0db0628d 5546static int sched_cpu_inactive(struct notifier_block *nfb,
3a101d05
TH
5547 unsigned long action, void *hcpu)
5548{
5549 switch (action & ~CPU_TASKS_FROZEN) {
5550 case CPU_DOWN_PREPARE:
3c18d447 5551 set_cpu_active((long)hcpu, false);
3a101d05 5552 return NOTIFY_OK;
3c18d447
JL
5553 default:
5554 return NOTIFY_DONE;
3a101d05
TH
5555 }
5556}
5557
7babe8db 5558static int __init migration_init(void)
1da177e4
LT
5559{
5560 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5561 int err;
48f24c4d 5562
3a101d05 5563 /* Initialize migration for the boot CPU */
07dccf33
AM
5564 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5565 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5566 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5567 register_cpu_notifier(&migration_notifier);
7babe8db 5568
3a101d05
TH
5569 /* Register cpu active notifiers */
5570 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5571 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5572
a004cd42 5573 return 0;
1da177e4 5574}
7babe8db 5575early_initcall(migration_init);
476f3534 5576
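/*
 * Illustrative sketch, not part of core.c: a minimal user of the same
 * legacy hotplug-notifier plumbing that migration_init() sets up above.
 * The callback, its messages and the chosen priority (0) are hypothetical;
 * only the cpu_notifier()/notifier_call pattern mirrors the code above.
 */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %d came online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: cpu %d is gone\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static int __init example_hotplug_init(void)
{
	cpu_notifier(example_cpu_callback, 0);
	return 0;
}
early_initcall(example_hotplug_init);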
4cb98839
PZ
5577static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5578
3e9830dc 5579#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5580
d039ac60 5581static __read_mostly int sched_debug_enabled;
f6630114 5582
d039ac60 5583static int __init sched_debug_setup(char *str)
f6630114 5584{
d039ac60 5585 sched_debug_enabled = 1;
f6630114
MT
5586
5587 return 0;
5588}
d039ac60
PZ
5589early_param("sched_debug", sched_debug_setup);
5590
5591static inline bool sched_debug(void)
5592{
5593 return sched_debug_enabled;
5594}
f6630114 5595
7c16ec58 5596static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5597 struct cpumask *groupmask)
1da177e4 5598{
4dcf6aff 5599 struct sched_group *group = sd->groups;
1da177e4 5600
96f874e2 5601 cpumask_clear(groupmask);
4dcf6aff
IM
5602
5603 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5604
5605 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5606 printk("does not load-balance\n");
4dcf6aff 5607 if (sd->parent)
3df0fc5b
PZ
5608 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5609 " has parent");
4dcf6aff 5610 return -1;
41c7ce9a
NP
5611 }
5612
333470ee
TH
5613 printk(KERN_CONT "span %*pbl level %s\n",
5614 cpumask_pr_args(sched_domain_span(sd)), sd->name);
4dcf6aff 5615
758b2cdc 5616 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5617 printk(KERN_ERR "ERROR: domain->span does not contain "
5618 "CPU%d\n", cpu);
4dcf6aff 5619 }
758b2cdc 5620 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5621 printk(KERN_ERR "ERROR: domain->groups does not contain"
5622 " CPU%d\n", cpu);
4dcf6aff 5623 }
1da177e4 5624
4dcf6aff 5625 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5626 do {
4dcf6aff 5627 if (!group) {
3df0fc5b
PZ
5628 printk("\n");
5629 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5630 break;
5631 }
5632
758b2cdc 5633 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5634 printk(KERN_CONT "\n");
5635 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5636 break;
5637 }
1da177e4 5638
cb83b629
PZ
5639 if (!(sd->flags & SD_OVERLAP) &&
5640 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5641 printk(KERN_CONT "\n");
5642 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5643 break;
5644 }
1da177e4 5645
758b2cdc 5646 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5647
333470ee
TH
5648 printk(KERN_CONT " %*pbl",
5649 cpumask_pr_args(sched_group_cpus(group)));
ca8ce3d0 5650 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
63b2ca30
NP
5651 printk(KERN_CONT " (cpu_capacity = %d)",
5652 group->sgc->capacity);
381512cf 5653 }
1da177e4 5654
4dcf6aff
IM
5655 group = group->next;
5656 } while (group != sd->groups);
3df0fc5b 5657 printk(KERN_CONT "\n");
1da177e4 5658
758b2cdc 5659 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5660 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5661
758b2cdc
RR
5662 if (sd->parent &&
5663 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5664 printk(KERN_ERR "ERROR: parent span is not a superset "
5665 "of domain->span\n");
4dcf6aff
IM
5666 return 0;
5667}
1da177e4 5668
4dcf6aff
IM
5669static void sched_domain_debug(struct sched_domain *sd, int cpu)
5670{
5671 int level = 0;
1da177e4 5672
d039ac60 5673 if (!sched_debug_enabled)
f6630114
MT
5674 return;
5675
4dcf6aff
IM
5676 if (!sd) {
5677 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5678 return;
5679 }
1da177e4 5680
4dcf6aff
IM
5681 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5682
5683 for (;;) {
4cb98839 5684 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5685 break;
1da177e4
LT
5686 level++;
5687 sd = sd->parent;
33859f7f 5688 if (!sd)
4dcf6aff
IM
5689 break;
5690 }
1da177e4 5691}
6d6bc0ad 5692#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5693# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5694static inline bool sched_debug(void)
5695{
5696 return false;
5697}
6d6bc0ad 5698#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5699
1a20ff27 5700static int sd_degenerate(struct sched_domain *sd)
245af2c7 5701{
758b2cdc 5702 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5703 return 1;
5704
5705 /* Following flags need at least 2 groups */
5706 if (sd->flags & (SD_LOAD_BALANCE |
5707 SD_BALANCE_NEWIDLE |
5708 SD_BALANCE_FORK |
89c4710e 5709 SD_BALANCE_EXEC |
5d4dfddd 5710 SD_SHARE_CPUCAPACITY |
d77b3ed5
VG
5711 SD_SHARE_PKG_RESOURCES |
5712 SD_SHARE_POWERDOMAIN)) {
245af2c7
SS
5713 if (sd->groups != sd->groups->next)
5714 return 0;
5715 }
5716
5717 /* Following flags don't use groups */
c88d5910 5718 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5719 return 0;
5720
5721 return 1;
5722}
5723
48f24c4d
IM
5724static int
5725sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5726{
5727 unsigned long cflags = sd->flags, pflags = parent->flags;
5728
5729 if (sd_degenerate(parent))
5730 return 1;
5731
758b2cdc 5732 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5733 return 0;
5734
245af2c7
SS
5735 /* Flags needing groups don't count if only 1 group in parent */
5736 if (parent->groups == parent->groups->next) {
5737 pflags &= ~(SD_LOAD_BALANCE |
5738 SD_BALANCE_NEWIDLE |
5739 SD_BALANCE_FORK |
89c4710e 5740 SD_BALANCE_EXEC |
5d4dfddd 5741 SD_SHARE_CPUCAPACITY |
10866e62 5742 SD_SHARE_PKG_RESOURCES |
d77b3ed5
VG
5743 SD_PREFER_SIBLING |
5744 SD_SHARE_POWERDOMAIN);
5436499e
KC
5745 if (nr_node_ids == 1)
5746 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5747 }
5748 if (~cflags & pflags)
5749 return 0;
5750
5751 return 1;
5752}
5753
dce840a0 5754static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5755{
dce840a0 5756 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5757
68e74568 5758 cpupri_cleanup(&rd->cpupri);
6bfd6d72 5759 cpudl_cleanup(&rd->cpudl);
1baca4ce 5760 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5761 free_cpumask_var(rd->rto_mask);
5762 free_cpumask_var(rd->online);
5763 free_cpumask_var(rd->span);
5764 kfree(rd);
5765}
5766
57d885fe
GH
5767static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5768{
a0490fa3 5769 struct root_domain *old_rd = NULL;
57d885fe 5770 unsigned long flags;
57d885fe 5771
05fa785c 5772 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5773
5774 if (rq->rd) {
a0490fa3 5775 old_rd = rq->rd;
57d885fe 5776
c6c4927b 5777 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5778 set_rq_offline(rq);
57d885fe 5779
c6c4927b 5780 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5781
a0490fa3 5782 /*
0515973f 5783 * If we don't want to free the old_rd yet then
a0490fa3
IM
5784 * set old_rd to NULL to skip the freeing later
5785 * in this function:
5786 */
5787 if (!atomic_dec_and_test(&old_rd->refcount))
5788 old_rd = NULL;
57d885fe
GH
5789 }
5790
5791 atomic_inc(&rd->refcount);
5792 rq->rd = rd;
5793
c6c4927b 5794 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5795 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5796 set_rq_online(rq);
57d885fe 5797
05fa785c 5798 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5799
5800 if (old_rd)
dce840a0 5801 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5802}
5803
68c38fc3 5804static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5805{
5806 memset(rd, 0, sizeof(*rd));
5807
68c38fc3 5808 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5809 goto out;
68c38fc3 5810 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5811 goto free_span;
1baca4ce 5812 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
c6c4927b 5813 goto free_online;
1baca4ce
JL
5814 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5815 goto free_dlo_mask;
6e0534f2 5816
332ac17e 5817 init_dl_bw(&rd->dl_bw);
6bfd6d72
JL
5818 if (cpudl_init(&rd->cpudl) != 0)
5819 goto free_dlo_mask;
332ac17e 5820
68c38fc3 5821 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5822 goto free_rto_mask;
c6c4927b 5823 return 0;
6e0534f2 5824
68e74568
RR
5825free_rto_mask:
5826 free_cpumask_var(rd->rto_mask);
1baca4ce
JL
5827free_dlo_mask:
5828 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5829free_online:
5830 free_cpumask_var(rd->online);
5831free_span:
5832 free_cpumask_var(rd->span);
0c910d28 5833out:
c6c4927b 5834 return -ENOMEM;
57d885fe
GH
5835}
5836
029632fb
PZ
5837/*
5838 * By default the system creates a single root-domain with all cpus as
5839 * members (mimicking the global state we have today).
5840 */
5841struct root_domain def_root_domain;
5842
57d885fe
GH
5843static void init_defrootdomain(void)
5844{
68c38fc3 5845 init_rootdomain(&def_root_domain);
c6c4927b 5846
57d885fe
GH
5847 atomic_set(&def_root_domain.refcount, 1);
5848}
5849
dc938520 5850static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5851{
5852 struct root_domain *rd;
5853
5854 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5855 if (!rd)
5856 return NULL;
5857
68c38fc3 5858 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5859 kfree(rd);
5860 return NULL;
5861 }
57d885fe
GH
5862
5863 return rd;
5864}
5865
63b2ca30 5866static void free_sched_groups(struct sched_group *sg, int free_sgc)
e3589f6c
PZ
5867{
5868 struct sched_group *tmp, *first;
5869
5870 if (!sg)
5871 return;
5872
5873 first = sg;
5874 do {
5875 tmp = sg->next;
5876
63b2ca30
NP
5877 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5878 kfree(sg->sgc);
e3589f6c
PZ
5879
5880 kfree(sg);
5881 sg = tmp;
5882 } while (sg != first);
5883}
5884
dce840a0
PZ
5885static void free_sched_domain(struct rcu_head *rcu)
5886{
5887 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5888
5889 /*
5890 * If its an overlapping domain it has private groups, iterate and
5891 * nuke them all.
5892 */
5893 if (sd->flags & SD_OVERLAP) {
5894 free_sched_groups(sd->groups, 1);
5895 } else if (atomic_dec_and_test(&sd->groups->ref)) {
63b2ca30 5896 kfree(sd->groups->sgc);
dce840a0 5897 kfree(sd->groups);
9c3f75cb 5898 }
dce840a0
PZ
5899 kfree(sd);
5900}
5901
5902static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5903{
5904 call_rcu(&sd->rcu, free_sched_domain);
5905}
5906
5907static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5908{
5909 for (; sd; sd = sd->parent)
5910 destroy_sched_domain(sd, cpu);
5911}
5912
518cd623
PZ
5913/*
5914 * Keep a special pointer to the highest sched_domain that has
5915 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5916 * allows us to avoid some pointer chasing in select_idle_sibling().
5917 *
5918 * Also keep a unique ID per domain (we use the first cpu number in
5919 * the cpumask of the domain); this allows us to quickly tell if
39be3501 5920 * two cpus are in the same cache domain, see cpus_share_cache().
518cd623
PZ
5921 */
5922DEFINE_PER_CPU(struct sched_domain *, sd_llc);
7d9ffa89 5923DEFINE_PER_CPU(int, sd_llc_size);
518cd623 5924DEFINE_PER_CPU(int, sd_llc_id);
fb13c7ee 5925DEFINE_PER_CPU(struct sched_domain *, sd_numa);
37dc6b50
PM
5926DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5927DEFINE_PER_CPU(struct sched_domain *, sd_asym);
518cd623
PZ
5928
5929static void update_top_cache_domain(int cpu)
5930{
5931 struct sched_domain *sd;
5d4cf996 5932 struct sched_domain *busy_sd = NULL;
518cd623 5933 int id = cpu;
7d9ffa89 5934 int size = 1;
518cd623
PZ
5935
5936 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
7d9ffa89 5937 if (sd) {
518cd623 5938 id = cpumask_first(sched_domain_span(sd));
7d9ffa89 5939 size = cpumask_weight(sched_domain_span(sd));
5d4cf996 5940 busy_sd = sd->parent; /* sd_busy */
7d9ffa89 5941 }
5d4cf996 5942 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
518cd623
PZ
5943
5944 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
7d9ffa89 5945 per_cpu(sd_llc_size, cpu) = size;
518cd623 5946 per_cpu(sd_llc_id, cpu) = id;
fb13c7ee
MG
5947
5948 sd = lowest_flag_domain(cpu, SD_NUMA);
5949 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
37dc6b50
PM
5950
5951 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5952 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
518cd623
PZ
5953}
5954
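/*
 * Illustrative sketch, not part of core.c: with sd_llc_id cached per cpu by
 * update_top_cache_domain() above, "do these two cpus share a last-level
 * cache?" reduces to an integer compare, which is what cpus_share_cache()
 * relies on.  The helper name below is hypothetical.
 */
static inline bool example_cpus_share_llc(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}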
1da177e4 5955/*
0eab9146 5956 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5957 * hold the hotplug lock.
5958 */
0eab9146
IM
5959static void
5960cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5961{
70b97a7f 5962 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5963 struct sched_domain *tmp;
5964
5965 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5966 for (tmp = sd; tmp; ) {
245af2c7
SS
5967 struct sched_domain *parent = tmp->parent;
5968 if (!parent)
5969 break;
f29c9b1c 5970
1a848870 5971 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5972 tmp->parent = parent->parent;
1a848870
SS
5973 if (parent->parent)
5974 parent->parent->child = tmp;
10866e62
PZ
5975 /*
5976 * Transfer SD_PREFER_SIBLING down in case of a
5977 * degenerate parent; the spans match for this
5978 * so the property transfers.
5979 */
5980 if (parent->flags & SD_PREFER_SIBLING)
5981 tmp->flags |= SD_PREFER_SIBLING;
dce840a0 5982 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
5983 } else
5984 tmp = tmp->parent;
245af2c7
SS
5985 }
5986
1a848870 5987 if (sd && sd_degenerate(sd)) {
dce840a0 5988 tmp = sd;
245af2c7 5989 sd = sd->parent;
dce840a0 5990 destroy_sched_domain(tmp, cpu);
1a848870
SS
5991 if (sd)
5992 sd->child = NULL;
5993 }
1da177e4 5994
4cb98839 5995 sched_domain_debug(sd, cpu);
1da177e4 5996
57d885fe 5997 rq_attach_root(rq, rd);
dce840a0 5998 tmp = rq->sd;
674311d5 5999 rcu_assign_pointer(rq->sd, sd);
dce840a0 6000 destroy_sched_domains(tmp, cpu);
518cd623
PZ
6001
6002 update_top_cache_domain(cpu);
1da177e4
LT
6003}
6004
1da177e4
LT
6005/* Set up the mask of cpus configured for isolated domains */
6006static int __init isolated_cpu_setup(char *str)
6007{
bdddd296 6008 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6009 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6010 return 1;
6011}
6012
8927f494 6013__setup("isolcpus=", isolated_cpu_setup);
1da177e4 6014
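/*
 * Usage note (illustrative): cpulist_parse() above accepts the usual
 * cpu-list syntax on the kernel command line, for example
 *
 *	isolcpus=2,3		isolate cpus 2 and 3
 *	isolcpus=1-3,7		isolate cpus 1, 2, 3 and 7
 *
 * Isolated cpus are excluded from the scheduler domains built below, so
 * only explicit affinity (sched_setaffinity()/cpusets) places tasks there.
 */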
49a02c51 6015struct s_data {
21d42ccf 6016 struct sched_domain ** __percpu sd;
49a02c51
AH
6017 struct root_domain *rd;
6018};
6019
2109b99e 6020enum s_alloc {
2109b99e 6021 sa_rootdomain,
21d42ccf 6022 sa_sd,
dce840a0 6023 sa_sd_storage,
2109b99e
AH
6024 sa_none,
6025};
6026
c1174876
PZ
6027/*
6028 * Build an iteration mask that can exclude certain CPUs from the upwards
6029 * domain traversal.
6030 *
6031 * Asymmetric node setups can result in situations where the domain tree is of
6032 * unequal depth; make sure to skip domains that already cover the entire
6033 * range.
6034 *
6035 * In that case build_sched_domains() will have terminated the iteration early
6036 * and our sibling sd spans will be empty. Domains should always include the
6037 * cpu they're built on, so check that.
6038 *
6039 */
6040static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6041{
6042 const struct cpumask *span = sched_domain_span(sd);
6043 struct sd_data *sdd = sd->private;
6044 struct sched_domain *sibling;
6045 int i;
6046
6047 for_each_cpu(i, span) {
6048 sibling = *per_cpu_ptr(sdd->sd, i);
6049 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6050 continue;
6051
6052 cpumask_set_cpu(i, sched_group_mask(sg));
6053 }
6054}
6055
6056/*
6057 * Return the canonical balance cpu for this group, this is the first cpu
6058 * of this group that's also in the iteration mask.
6059 */
6060int group_balance_cpu(struct sched_group *sg)
6061{
6062 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6063}
6064
e3589f6c
PZ
6065static int
6066build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6067{
6068 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6069 const struct cpumask *span = sched_domain_span(sd);
6070 struct cpumask *covered = sched_domains_tmpmask;
6071 struct sd_data *sdd = sd->private;
aaecac4a 6072 struct sched_domain *sibling;
e3589f6c
PZ
6073 int i;
6074
6075 cpumask_clear(covered);
6076
6077 for_each_cpu(i, span) {
6078 struct cpumask *sg_span;
6079
6080 if (cpumask_test_cpu(i, covered))
6081 continue;
6082
aaecac4a 6083 sibling = *per_cpu_ptr(sdd->sd, i);
c1174876
PZ
6084
6085 /* See the comment near build_group_mask(). */
aaecac4a 6086 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
c1174876
PZ
6087 continue;
6088
e3589f6c 6089 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 6090 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
6091
6092 if (!sg)
6093 goto fail;
6094
6095 sg_span = sched_group_cpus(sg);
aaecac4a
ZZ
6096 if (sibling->child)
6097 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6098 else
e3589f6c
PZ
6099 cpumask_set_cpu(i, sg_span);
6100
6101 cpumask_or(covered, covered, sg_span);
6102
63b2ca30
NP
6103 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6104 if (atomic_inc_return(&sg->sgc->ref) == 1)
c1174876
PZ
6105 build_group_mask(sd, sg);
6106
c3decf0d 6107 /*
63b2ca30 6108 * Initialize sgc->capacity such that even if we mess up the
c3decf0d
PZ
6109 * domains and no possible iteration will get us here, we won't
6110 * die on a /0 trap.
6111 */
ca8ce3d0 6112 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
e3589f6c 6113
c1174876
PZ
6114 /*
6115 * Make sure the first group of this domain contains the
6116 * canonical balance cpu. Otherwise the sched_domain iteration
6117 * breaks. See update_sg_lb_stats().
6118 */
74a5ce20 6119 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 6120 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
6121 groups = sg;
6122
6123 if (!first)
6124 first = sg;
6125 if (last)
6126 last->next = sg;
6127 last = sg;
6128 last->next = first;
6129 }
6130 sd->groups = groups;
6131
6132 return 0;
6133
6134fail:
6135 free_sched_groups(first, 0);
6136
6137 return -ENOMEM;
6138}
6139
dce840a0 6140static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6141{
dce840a0
PZ
6142 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6143 struct sched_domain *child = sd->child;
1da177e4 6144
dce840a0
PZ
6145 if (child)
6146 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6147
9c3f75cb 6148 if (sg) {
dce840a0 6149 *sg = *per_cpu_ptr(sdd->sg, cpu);
63b2ca30
NP
6150 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6151 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
9c3f75cb 6152 }
dce840a0
PZ
6153
6154 return cpu;
1e9f28fa 6155}
1e9f28fa 6156
01a08546 6157/*
dce840a0
PZ
6158 * build_sched_groups will build a circular linked list of the groups
6159 * covered by the given span, set each group's ->cpumask correctly,
ced549fa 6160 * and initialize ->cpu_capacity to 0.
e3589f6c
PZ
6161 *
6162 * Assumes the sched_domain tree is fully constructed
01a08546 6163 */
e3589f6c
PZ
6164static int
6165build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 6166{
dce840a0
PZ
6167 struct sched_group *first = NULL, *last = NULL;
6168 struct sd_data *sdd = sd->private;
6169 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6170 struct cpumask *covered;
dce840a0 6171 int i;
9c1cfda2 6172
e3589f6c
PZ
6173 get_group(cpu, sdd, &sd->groups);
6174 atomic_inc(&sd->groups->ref);
6175
0936629f 6176 if (cpu != cpumask_first(span))
e3589f6c
PZ
6177 return 0;
6178
f96225fd
PZ
6179 lockdep_assert_held(&sched_domains_mutex);
6180 covered = sched_domains_tmpmask;
6181
dce840a0 6182 cpumask_clear(covered);
6711cab4 6183
dce840a0
PZ
6184 for_each_cpu(i, span) {
6185 struct sched_group *sg;
cd08e923 6186 int group, j;
6711cab4 6187
dce840a0
PZ
6188 if (cpumask_test_cpu(i, covered))
6189 continue;
6711cab4 6190
cd08e923 6191 group = get_group(i, sdd, &sg);
c1174876 6192 cpumask_setall(sched_group_mask(sg));
0601a88d 6193
dce840a0
PZ
6194 for_each_cpu(j, span) {
6195 if (get_group(j, sdd, NULL) != group)
6196 continue;
0601a88d 6197
dce840a0
PZ
6198 cpumask_set_cpu(j, covered);
6199 cpumask_set_cpu(j, sched_group_cpus(sg));
6200 }
0601a88d 6201
dce840a0
PZ
6202 if (!first)
6203 first = sg;
6204 if (last)
6205 last->next = sg;
6206 last = sg;
6207 }
6208 last->next = first;
e3589f6c
PZ
6209
6210 return 0;
0601a88d 6211}
51888ca2 6212
89c4710e 6213/*
63b2ca30 6214 * Initialize sched groups cpu_capacity.
89c4710e 6215 *
63b2ca30 6216 * cpu_capacity indicates the capacity of sched group, which is used while
89c4710e 6217 * distributing the load between different sched groups in a sched domain.
63b2ca30
NP
6218 * Typically cpu_capacity for all the groups in a sched domain will be the same
6219 * unless there are asymmetries in the topology. If there are asymmetries,
6220 * a group having more cpu_capacity will pick up more load compared to a
6221 * group having less cpu_capacity.
89c4710e 6222 */
63b2ca30 6223static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
89c4710e 6224{
e3589f6c 6225 struct sched_group *sg = sd->groups;
89c4710e 6226
94c95ba6 6227 WARN_ON(!sg);
e3589f6c
PZ
6228
6229 do {
6230 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6231 sg = sg->next;
6232 } while (sg != sd->groups);
89c4710e 6233
c1174876 6234 if (cpu != group_balance_cpu(sg))
e3589f6c 6235 return;
aae6d3dd 6236
63b2ca30
NP
6237 update_group_capacity(sd, cpu);
6238 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6239}
6240
7c16ec58
MT
6241/*
6242 * Initializers for sched domains
6243 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6244 */
6245
1d3504fc 6246static int default_relax_domain_level = -1;
60495e77 6247int sched_domain_level_max;
1d3504fc
HS
6248
6249static int __init setup_relax_domain_level(char *str)
6250{
a841f8ce
DS
6251 if (kstrtoint(str, 0, &default_relax_domain_level))
6252 pr_warn("Unable to set relax_domain_level\n");
30e0e178 6253
1d3504fc
HS
6254 return 1;
6255}
6256__setup("relax_domain_level=", setup_relax_domain_level);
6257
6258static void set_domain_attribute(struct sched_domain *sd,
6259 struct sched_domain_attr *attr)
6260{
6261 int request;
6262
6263 if (!attr || attr->relax_domain_level < 0) {
6264 if (default_relax_domain_level < 0)
6265 return;
6266 else
6267 request = default_relax_domain_level;
6268 } else
6269 request = attr->relax_domain_level;
6270 if (request < sd->level) {
6271 /* turn off idle balance on this domain */
c88d5910 6272 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6273 } else {
6274 /* turn on idle balance on this domain */
c88d5910 6275 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6276 }
6277}
6278
54ab4ff4
PZ
6279static void __sdt_free(const struct cpumask *cpu_map);
6280static int __sdt_alloc(const struct cpumask *cpu_map);
6281
2109b99e
AH
6282static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6283 const struct cpumask *cpu_map)
6284{
6285 switch (what) {
2109b99e 6286 case sa_rootdomain:
822ff793
PZ
6287 if (!atomic_read(&d->rd->refcount))
6288 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6289 case sa_sd:
6290 free_percpu(d->sd); /* fall through */
dce840a0 6291 case sa_sd_storage:
54ab4ff4 6292 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6293 case sa_none:
6294 break;
6295 }
6296}
3404c8d9 6297
2109b99e
AH
6298static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6299 const struct cpumask *cpu_map)
6300{
dce840a0
PZ
6301 memset(d, 0, sizeof(*d));
6302
54ab4ff4
PZ
6303 if (__sdt_alloc(cpu_map))
6304 return sa_sd_storage;
dce840a0
PZ
6305 d->sd = alloc_percpu(struct sched_domain *);
6306 if (!d->sd)
6307 return sa_sd_storage;
2109b99e 6308 d->rd = alloc_rootdomain();
dce840a0 6309 if (!d->rd)
21d42ccf 6310 return sa_sd;
2109b99e
AH
6311 return sa_rootdomain;
6312}
57d885fe 6313
dce840a0
PZ
6314/*
6315 * NULL the sd_data elements we've used to build the sched_domain and
6316 * sched_group structure so that the subsequent __free_domain_allocs()
6317 * will not free the data we're using.
6318 */
6319static void claim_allocations(int cpu, struct sched_domain *sd)
6320{
6321 struct sd_data *sdd = sd->private;
dce840a0
PZ
6322
6323 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6324 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6325
e3589f6c 6326 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6327 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c 6328
63b2ca30
NP
6329 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6330 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
dce840a0
PZ
6331}
6332
cb83b629 6333#ifdef CONFIG_NUMA
cb83b629 6334static int sched_domains_numa_levels;
e3fe70b1 6335enum numa_topology_type sched_numa_topology_type;
cb83b629 6336static int *sched_domains_numa_distance;
9942f79b 6337int sched_max_numa_distance;
cb83b629
PZ
6338static struct cpumask ***sched_domains_numa_masks;
6339static int sched_domains_curr_level;
143e1e28 6340#endif
cb83b629 6341
143e1e28
VG
6342/*
6343 * SD_flags allowed in topology descriptions.
6344 *
5d4dfddd 6345 * SD_SHARE_CPUCAPACITY - describes SMT topologies
143e1e28
VG
6346 * SD_SHARE_PKG_RESOURCES - describes shared caches
6347 * SD_NUMA - describes NUMA topologies
d77b3ed5 6348 * SD_SHARE_POWERDOMAIN - describes shared power domain
143e1e28
VG
6349 *
6350 * Odd one out:
6351 * SD_ASYM_PACKING - describes SMT quirks
6352 */
6353#define TOPOLOGY_SD_FLAGS \
5d4dfddd 6354 (SD_SHARE_CPUCAPACITY | \
143e1e28
VG
6355 SD_SHARE_PKG_RESOURCES | \
6356 SD_NUMA | \
d77b3ed5
VG
6357 SD_ASYM_PACKING | \
6358 SD_SHARE_POWERDOMAIN)
cb83b629
PZ
6359
6360static struct sched_domain *
143e1e28 6361sd_init(struct sched_domain_topology_level *tl, int cpu)
cb83b629
PZ
6362{
6363 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
143e1e28
VG
6364 int sd_weight, sd_flags = 0;
6365
6366#ifdef CONFIG_NUMA
6367 /*
6368 * Ugly hack to pass state to sd_numa_mask()...
6369 */
6370 sched_domains_curr_level = tl->numa_level;
6371#endif
6372
6373 sd_weight = cpumask_weight(tl->mask(cpu));
6374
6375 if (tl->sd_flags)
6376 sd_flags = (*tl->sd_flags)();
6377 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6378 "wrong sd_flags in topology description\n"))
6379 sd_flags &= ~TOPOLOGY_SD_FLAGS;
cb83b629
PZ
6380
6381 *sd = (struct sched_domain){
6382 .min_interval = sd_weight,
6383 .max_interval = 2*sd_weight,
6384 .busy_factor = 32,
870a0bb5 6385 .imbalance_pct = 125,
143e1e28
VG
6386
6387 .cache_nice_tries = 0,
6388 .busy_idx = 0,
6389 .idle_idx = 0,
cb83b629
PZ
6390 .newidle_idx = 0,
6391 .wake_idx = 0,
6392 .forkexec_idx = 0,
6393
6394 .flags = 1*SD_LOAD_BALANCE
6395 | 1*SD_BALANCE_NEWIDLE
143e1e28
VG
6396 | 1*SD_BALANCE_EXEC
6397 | 1*SD_BALANCE_FORK
cb83b629 6398 | 0*SD_BALANCE_WAKE
143e1e28 6399 | 1*SD_WAKE_AFFINE
5d4dfddd 6400 | 0*SD_SHARE_CPUCAPACITY
cb83b629 6401 | 0*SD_SHARE_PKG_RESOURCES
143e1e28 6402 | 0*SD_SERIALIZE
cb83b629 6403 | 0*SD_PREFER_SIBLING
143e1e28
VG
6404 | 0*SD_NUMA
6405 | sd_flags
cb83b629 6406 ,
143e1e28 6407
cb83b629
PZ
6408 .last_balance = jiffies,
6409 .balance_interval = sd_weight,
143e1e28 6410 .smt_gain = 0,
2b4cfe64
JL
6411 .max_newidle_lb_cost = 0,
6412 .next_decay_max_lb_cost = jiffies,
143e1e28
VG
6413#ifdef CONFIG_SCHED_DEBUG
6414 .name = tl->name,
6415#endif
cb83b629 6416 };
cb83b629
PZ
6417
6418 /*
143e1e28 6419 * Convert topological properties into behaviour.
cb83b629 6420 */
143e1e28 6421
5d4dfddd 6422 if (sd->flags & SD_SHARE_CPUCAPACITY) {
caff37ef 6423 sd->flags |= SD_PREFER_SIBLING;
143e1e28
VG
6424 sd->imbalance_pct = 110;
6425 sd->smt_gain = 1178; /* ~15% */
143e1e28
VG
6426
6427 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6428 sd->imbalance_pct = 117;
6429 sd->cache_nice_tries = 1;
6430 sd->busy_idx = 2;
6431
6432#ifdef CONFIG_NUMA
6433 } else if (sd->flags & SD_NUMA) {
6434 sd->cache_nice_tries = 2;
6435 sd->busy_idx = 3;
6436 sd->idle_idx = 2;
6437
6438 sd->flags |= SD_SERIALIZE;
6439 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6440 sd->flags &= ~(SD_BALANCE_EXEC |
6441 SD_BALANCE_FORK |
6442 SD_WAKE_AFFINE);
6443 }
6444
6445#endif
6446 } else {
6447 sd->flags |= SD_PREFER_SIBLING;
6448 sd->cache_nice_tries = 1;
6449 sd->busy_idx = 2;
6450 sd->idle_idx = 1;
6451 }
6452
6453 sd->private = &tl->data;
cb83b629
PZ
6454
6455 return sd;
6456}
6457
143e1e28
VG
6458/*
6459 * Topology list, bottom-up.
6460 */
6461static struct sched_domain_topology_level default_topology[] = {
6462#ifdef CONFIG_SCHED_SMT
6463 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6464#endif
6465#ifdef CONFIG_SCHED_MC
6466 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
143e1e28
VG
6467#endif
6468 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6469 { NULL, },
6470};
6471
6472struct sched_domain_topology_level *sched_domain_topology = default_topology;
6473
6474#define for_each_sd_topology(tl) \
6475 for (tl = sched_domain_topology; tl->mask; tl++)
6476
6477void set_sched_topology(struct sched_domain_topology_level *tl)
6478{
6479 sched_domain_topology = tl;
6480}
6481
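/*
 * Illustrative sketch, not part of core.c: an architecture wanting an extra
 * level (say, clusters of cores sharing a cache) can describe it with its
 * own table and install it via set_sched_topology().  The example_* names
 * are hypothetical and example_cluster_mask() just reuses cpu_coregroup_mask()
 * as a stand-in; only the table layout mirrors default_topology[] above.
 */
static int example_cluster_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

static const struct cpumask *example_cluster_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);		/* placeholder span */
}

static struct sched_domain_topology_level example_topology[] = {
	{ example_cluster_mask, example_cluster_flags, SD_INIT_NAME(CLS) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init example_set_topology(void)
{
	set_sched_topology(example_topology);
}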
6482#ifdef CONFIG_NUMA
6483
cb83b629
PZ
6484static const struct cpumask *sd_numa_mask(int cpu)
6485{
6486 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6487}
6488
d039ac60
PZ
6489static void sched_numa_warn(const char *str)
6490{
6491 static int done = false;
6492 int i,j;
6493
6494 if (done)
6495 return;
6496
6497 done = true;
6498
6499 printk(KERN_WARNING "ERROR: %s\n\n", str);
6500
6501 for (i = 0; i < nr_node_ids; i++) {
6502 printk(KERN_WARNING " ");
6503 for (j = 0; j < nr_node_ids; j++)
6504 printk(KERN_CONT "%02d ", node_distance(i,j));
6505 printk(KERN_CONT "\n");
6506 }
6507 printk(KERN_WARNING "\n");
6508}
6509
9942f79b 6510bool find_numa_distance(int distance)
d039ac60
PZ
6511{
6512 int i;
6513
6514 if (distance == node_distance(0, 0))
6515 return true;
6516
6517 for (i = 0; i < sched_domains_numa_levels; i++) {
6518 if (sched_domains_numa_distance[i] == distance)
6519 return true;
6520 }
6521
6522 return false;
6523}
6524
e3fe70b1
RR
6525/*
6526 * A system can have three types of NUMA topology:
6527 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6528 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6529 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6530 *
6531 * The difference between a glueless mesh topology and a backplane
6532 * topology lies in whether communication between not directly
6533 * connected nodes goes through intermediary nodes (where programs
6534 * could run), or through backplane controllers. This affects
6535 * placement of programs.
6536 *
6537 * The type of topology can be discerned with the following tests:
6538 * - If the maximum distance between any nodes is 1 hop, the system
6539 * is directly connected.
6540 * - If for two nodes A and B, located N > 1 hops away from each other,
6541 * there is an intermediary node C, which is < N hops away from both
6542 * nodes A and B, the system is a glueless mesh.
6543 */
6544static void init_numa_topology_type(void)
6545{
6546 int a, b, c, n;
6547
6548 n = sched_max_numa_distance;
6549
e237882b 6550 if (sched_domains_numa_levels <= 1) {
e3fe70b1 6551 sched_numa_topology_type = NUMA_DIRECT;
e237882b
AG
6552 return;
6553 }
e3fe70b1
RR
6554
6555 for_each_online_node(a) {
6556 for_each_online_node(b) {
6557 /* Find two nodes furthest removed from each other. */
6558 if (node_distance(a, b) < n)
6559 continue;
6560
6561 /* Is there an intermediary node between a and b? */
6562 for_each_online_node(c) {
6563 if (node_distance(a, c) < n &&
6564 node_distance(b, c) < n) {
6565 sched_numa_topology_type =
6566 NUMA_GLUELESS_MESH;
6567 return;
6568 }
6569 }
6570
6571 sched_numa_topology_type = NUMA_BACKPLANE;
6572 return;
6573 }
6574 }
6575}
6576
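/*
 * Worked example (illustrative, not part of core.c): with a node_distance()
 * table of
 *
 *	      0   1   2   3
 *	  0  10  20  20  30
 *	  1  20  10  30  20
 *	  2  20  30  10  20
 *	  3  30  20  20  10
 *
 * there is more than one distance level (20 and 30), so the system is not
 * NUMA_DIRECT; and for the two most distant nodes, 0 and 3, node 1 (or 2)
 * is closer than 30 to both of them, so the classification above would be
 * NUMA_GLUELESS_MESH rather than NUMA_BACKPLANE.
 */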
cb83b629
PZ
6577static void sched_init_numa(void)
6578{
6579 int next_distance, curr_distance = node_distance(0, 0);
6580 struct sched_domain_topology_level *tl;
6581 int level = 0;
6582 int i, j, k;
6583
cb83b629
PZ
6584 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6585 if (!sched_domains_numa_distance)
6586 return;
6587
6588 /*
6589 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6590 * unique distances in the node_distance() table.
6591 *
6592 * Assumes node_distance(0,j) includes all distances in
6593 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6594 */
6595 next_distance = curr_distance;
6596 for (i = 0; i < nr_node_ids; i++) {
6597 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6598 for (k = 0; k < nr_node_ids; k++) {
6599 int distance = node_distance(i, k);
6600
6601 if (distance > curr_distance &&
6602 (distance < next_distance ||
6603 next_distance == curr_distance))
6604 next_distance = distance;
6605
6606 /*
6607 * While not a strong assumption, it would be nice to know
6608 * about cases where node A is connected to B but B is not
6609 * equally connected to A.
6610 */
6611 if (sched_debug() && node_distance(k, i) != distance)
6612 sched_numa_warn("Node-distance not symmetric");
6613
6614 if (sched_debug() && i && !find_numa_distance(distance))
6615 sched_numa_warn("Node-0 not representative");
6616 }
6617 if (next_distance != curr_distance) {
6618 sched_domains_numa_distance[level++] = next_distance;
6619 sched_domains_numa_levels = level;
6620 curr_distance = next_distance;
6621 } else break;
cb83b629 6622 }
d039ac60
PZ
6623
6624 /*
6625 * In case of sched_debug() we verify the above assumption.
6626 */
6627 if (!sched_debug())
6628 break;
cb83b629 6629 }
c123588b
AR
6630
6631 if (!level)
6632 return;
6633
cb83b629
PZ
6634 /*
6635 * 'level' contains the number of unique distances, excluding the
6636 * identity distance node_distance(i,i).
6637 *
28b4a521 6638 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
6639 * numbers.
6640 */
6641
5f7865f3
TC
6642 /*
6643 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6644 * If it fails to allocate memory for array sched_domains_numa_masks[][],
6645 * the array will contain fewer than 'level' members. This could be
6646 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6647 * in other functions.
6648 *
6649 * We reset it to 'level' at the end of this function.
6650 */
6651 sched_domains_numa_levels = 0;
6652
cb83b629
PZ
6653 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6654 if (!sched_domains_numa_masks)
6655 return;
6656
6657 /*
6658 * Now for each level, construct a mask per node which contains all
6659 * cpus of nodes that are that many hops away from us.
6660 */
6661 for (i = 0; i < level; i++) {
6662 sched_domains_numa_masks[i] =
6663 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6664 if (!sched_domains_numa_masks[i])
6665 return;
6666
6667 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6668 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6669 if (!mask)
6670 return;
6671
6672 sched_domains_numa_masks[i][j] = mask;
6673
6674 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 6675 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6676 continue;
6677
6678 cpumask_or(mask, mask, cpumask_of_node(k));
6679 }
6680 }
6681 }
6682
143e1e28
VG
6683 /* Compute default topology size */
6684 for (i = 0; sched_domain_topology[i].mask; i++);
6685
c515db8c 6686 tl = kzalloc((i + level + 1) *
cb83b629
PZ
6687 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6688 if (!tl)
6689 return;
6690
6691 /*
6692 * Copy the default topology bits..
6693 */
143e1e28
VG
6694 for (i = 0; sched_domain_topology[i].mask; i++)
6695 tl[i] = sched_domain_topology[i];
cb83b629
PZ
6696
6697 /*
6698 * .. and append 'j' levels of NUMA goodness.
6699 */
6700 for (j = 0; j < level; i++, j++) {
6701 tl[i] = (struct sched_domain_topology_level){
cb83b629 6702 .mask = sd_numa_mask,
143e1e28 6703 .sd_flags = cpu_numa_flags,
cb83b629
PZ
6704 .flags = SDTL_OVERLAP,
6705 .numa_level = j,
143e1e28 6706 SD_INIT_NAME(NUMA)
cb83b629
PZ
6707 };
6708 }
6709
6710 sched_domain_topology = tl;
5f7865f3
TC
6711
6712 sched_domains_numa_levels = level;
9942f79b 6713 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
e3fe70b1
RR
6714
6715 init_numa_topology_type();
cb83b629 6716}
301a5cba
TC
6717
6718static void sched_domains_numa_masks_set(int cpu)
6719{
6720 int i, j;
6721 int node = cpu_to_node(cpu);
6722
6723 for (i = 0; i < sched_domains_numa_levels; i++) {
6724 for (j = 0; j < nr_node_ids; j++) {
6725 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6726 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6727 }
6728 }
6729}
6730
6731static void sched_domains_numa_masks_clear(int cpu)
6732{
6733 int i, j;
6734 for (i = 0; i < sched_domains_numa_levels; i++) {
6735 for (j = 0; j < nr_node_ids; j++)
6736 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6737 }
6738}
6739
6740/*
6741 * Update sched_domains_numa_masks[level][node] array when new cpus
6742 * are onlined.
6743 */
6744static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6745 unsigned long action,
6746 void *hcpu)
6747{
6748 int cpu = (long)hcpu;
6749
6750 switch (action & ~CPU_TASKS_FROZEN) {
6751 case CPU_ONLINE:
6752 sched_domains_numa_masks_set(cpu);
6753 break;
6754
6755 case CPU_DEAD:
6756 sched_domains_numa_masks_clear(cpu);
6757 break;
6758
6759 default:
6760 return NOTIFY_DONE;
6761 }
6762
6763 return NOTIFY_OK;
cb83b629
PZ
6764}
6765#else
6766static inline void sched_init_numa(void)
6767{
6768}
301a5cba
TC
6769
6770static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6771 unsigned long action,
6772 void *hcpu)
6773{
6774 return 0;
6775}
cb83b629
PZ
6776#endif /* CONFIG_NUMA */
6777
54ab4ff4
PZ
6778static int __sdt_alloc(const struct cpumask *cpu_map)
6779{
6780 struct sched_domain_topology_level *tl;
6781 int j;
6782
27723a68 6783 for_each_sd_topology(tl) {
54ab4ff4
PZ
6784 struct sd_data *sdd = &tl->data;
6785
6786 sdd->sd = alloc_percpu(struct sched_domain *);
6787 if (!sdd->sd)
6788 return -ENOMEM;
6789
6790 sdd->sg = alloc_percpu(struct sched_group *);
6791 if (!sdd->sg)
6792 return -ENOMEM;
6793
63b2ca30
NP
6794 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6795 if (!sdd->sgc)
9c3f75cb
PZ
6796 return -ENOMEM;
6797
54ab4ff4
PZ
6798 for_each_cpu(j, cpu_map) {
6799 struct sched_domain *sd;
6800 struct sched_group *sg;
63b2ca30 6801 struct sched_group_capacity *sgc;
54ab4ff4 6802
5cc389bc 6803 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
54ab4ff4
PZ
6804 GFP_KERNEL, cpu_to_node(j));
6805 if (!sd)
6806 return -ENOMEM;
6807
6808 *per_cpu_ptr(sdd->sd, j) = sd;
6809
6810 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6811 GFP_KERNEL, cpu_to_node(j));
6812 if (!sg)
6813 return -ENOMEM;
6814
30b4e9eb
IM
6815 sg->next = sg;
6816
54ab4ff4 6817 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6818
63b2ca30 6819 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
9c3f75cb 6820 GFP_KERNEL, cpu_to_node(j));
63b2ca30 6821 if (!sgc)
9c3f75cb
PZ
6822 return -ENOMEM;
6823
63b2ca30 6824 *per_cpu_ptr(sdd->sgc, j) = sgc;
54ab4ff4
PZ
6825 }
6826 }
6827
6828 return 0;
6829}
6830
6831static void __sdt_free(const struct cpumask *cpu_map)
6832{
6833 struct sched_domain_topology_level *tl;
6834 int j;
6835
27723a68 6836 for_each_sd_topology(tl) {
54ab4ff4
PZ
6837 struct sd_data *sdd = &tl->data;
6838
6839 for_each_cpu(j, cpu_map) {
fb2cf2c6 6840 struct sched_domain *sd;
6841
6842 if (sdd->sd) {
6843 sd = *per_cpu_ptr(sdd->sd, j);
6844 if (sd && (sd->flags & SD_OVERLAP))
6845 free_sched_groups(sd->groups, 0);
6846 kfree(*per_cpu_ptr(sdd->sd, j));
6847 }
6848
6849 if (sdd->sg)
6850 kfree(*per_cpu_ptr(sdd->sg, j));
63b2ca30
NP
6851 if (sdd->sgc)
6852 kfree(*per_cpu_ptr(sdd->sgc, j));
54ab4ff4
PZ
6853 }
6854 free_percpu(sdd->sd);
fb2cf2c6 6855 sdd->sd = NULL;
54ab4ff4 6856 free_percpu(sdd->sg);
fb2cf2c6 6857 sdd->sg = NULL;
63b2ca30
NP
6858 free_percpu(sdd->sgc);
6859 sdd->sgc = NULL;
54ab4ff4
PZ
6860 }
6861}
6862
2c402dc3 6863struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
4a850cbe
VK
6864 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6865 struct sched_domain *child, int cpu)
2c402dc3 6866{
143e1e28 6867 struct sched_domain *sd = sd_init(tl, cpu);
2c402dc3 6868 if (!sd)
d069b916 6869 return child;
2c402dc3 6870
2c402dc3 6871 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6872 if (child) {
6873 sd->level = child->level + 1;
6874 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6875 child->parent = sd;
c75e0128 6876 sd->child = child;
6ae72dff
PZ
6877
6878 if (!cpumask_subset(sched_domain_span(child),
6879 sched_domain_span(sd))) {
6880 pr_err("BUG: arch topology borken\n");
6881#ifdef CONFIG_SCHED_DEBUG
6882 pr_err(" the %s domain not a subset of the %s domain\n",
6883 child->name, sd->name);
6884#endif
6885 /* Fixup, ensure @sd has at least @child cpus. */
6886 cpumask_or(sched_domain_span(sd),
6887 sched_domain_span(sd),
6888 sched_domain_span(child));
6889 }
6890
60495e77 6891 }
a841f8ce 6892 set_domain_attribute(sd, attr);
2c402dc3
PZ
6893
6894 return sd;
6895}
6896
2109b99e
AH
6897/*
6898 * Build sched domains for a given set of cpus and attach the sched domains
6899 * to the individual cpus
6900 */
dce840a0
PZ
6901static int build_sched_domains(const struct cpumask *cpu_map,
6902 struct sched_domain_attr *attr)
2109b99e 6903{
1c632169 6904 enum s_alloc alloc_state;
dce840a0 6905 struct sched_domain *sd;
2109b99e 6906 struct s_data d;
822ff793 6907 int i, ret = -ENOMEM;
9c1cfda2 6908
2109b99e
AH
6909 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6910 if (alloc_state != sa_rootdomain)
6911 goto error;
9c1cfda2 6912
dce840a0 6913 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6914 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6915 struct sched_domain_topology_level *tl;
6916
3bd65a80 6917 sd = NULL;
27723a68 6918 for_each_sd_topology(tl) {
4a850cbe 6919 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
22da9569
VK
6920 if (tl == sched_domain_topology)
6921 *per_cpu_ptr(d.sd, i) = sd;
e3589f6c
PZ
6922 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6923 sd->flags |= SD_OVERLAP;
d110235d
PZ
6924 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6925 break;
e3589f6c 6926 }
dce840a0
PZ
6927 }
6928
6929 /* Build the groups for the domains */
6930 for_each_cpu(i, cpu_map) {
6931 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6932 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6933 if (sd->flags & SD_OVERLAP) {
6934 if (build_overlap_sched_groups(sd, i))
6935 goto error;
6936 } else {
6937 if (build_sched_groups(sd, i))
6938 goto error;
6939 }
1cf51902 6940 }
a06dadbe 6941 }
9c1cfda2 6942
ced549fa 6943 /* Calculate CPU capacity for physical packages and nodes */
a9c9a9b6
PZ
6944 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6945 if (!cpumask_test_cpu(i, cpu_map))
6946 continue;
9c1cfda2 6947
dce840a0
PZ
6948 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6949 claim_allocations(i, sd);
63b2ca30 6950 init_sched_groups_capacity(i, sd);
dce840a0 6951 }
f712c0c7 6952 }
9c1cfda2 6953
1da177e4 6954 /* Attach the domains */
dce840a0 6955 rcu_read_lock();
abcd083a 6956 for_each_cpu(i, cpu_map) {
21d42ccf 6957 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6958 cpu_attach_domain(sd, d.rd, i);
1da177e4 6959 }
dce840a0 6960 rcu_read_unlock();
51888ca2 6961
822ff793 6962 ret = 0;
51888ca2 6963error:
2109b99e 6964 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6965 return ret;
1da177e4 6966}
029190c5 6967
acc3f5d7 6968static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6969static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6970static struct sched_domain_attr *dattr_cur;
6971 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6972
6973/*
6974 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
6975 * cpumask) fails, then fall back to a single sched domain,
6976 * as determined by the single cpumask fallback_doms.
029190c5 6977 */
4212823f 6978static cpumask_var_t fallback_doms;
029190c5 6979
ee79d1bd
HC
6980/*
6981 * arch_update_cpu_topology lets virtualized architectures update the
6982 * cpu core maps. It is supposed to return 1 if the topology changed
6983 * or 0 if it stayed the same.
6984 */
52f5684c 6985int __weak arch_update_cpu_topology(void)
22e52b07 6986{
ee79d1bd 6987 return 0;
22e52b07
HC
6988}
6989
acc3f5d7
RR
6990cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6991{
6992 int i;
6993 cpumask_var_t *doms;
6994
6995 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6996 if (!doms)
6997 return NULL;
6998 for (i = 0; i < ndoms; i++) {
6999 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7000 free_sched_domains(doms, i);
7001 return NULL;
7002 }
7003 }
7004 return doms;
7005}
7006
7007void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7008{
7009 unsigned int i;
7010 for (i = 0; i < ndoms; i++)
7011 free_cpumask_var(doms[i]);
7012 kfree(doms);
7013}
7014
1a20ff27 7015/*
41a2d6cf 7016 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7017 * For now this just excludes isolated cpus, but could be used to
7018 * exclude other special cases in the future.
1a20ff27 7019 */
c4a8849a 7020static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7021{
7378547f
MM
7022 int err;
7023
22e52b07 7024 arch_update_cpu_topology();
029190c5 7025 ndoms_cur = 1;
acc3f5d7 7026 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7027 if (!doms_cur)
acc3f5d7
RR
7028 doms_cur = &fallback_doms;
7029 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 7030 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 7031 register_sched_domain_sysctl();
7378547f
MM
7032
7033 return err;
1a20ff27
DG
7034}
7035
1a20ff27
DG
7036/*
7037 * Detach sched domains from a group of cpus specified in cpu_map
7038 * These cpus will now be attached to the NULL domain
7039 */
96f874e2 7040static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
7041{
7042 int i;
7043
dce840a0 7044 rcu_read_lock();
abcd083a 7045 for_each_cpu(i, cpu_map)
57d885fe 7046 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7047 rcu_read_unlock();
1a20ff27
DG
7048}
7049
1d3504fc
HS
7050/* handle null as "default" */
7051static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7052 struct sched_domain_attr *new, int idx_new)
7053{
7054 struct sched_domain_attr tmp;
7055
7056 /* fast path */
7057 if (!new && !cur)
7058 return 1;
7059
7060 tmp = SD_ATTR_INIT;
7061 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7062 new ? (new + idx_new) : &tmp,
7063 sizeof(struct sched_domain_attr));
7064}
7065
029190c5
PJ
7066/*
7067 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7068 * cpumasks in the array doms_new[]. This compares
029190c5
PJ
7069 * doms_new[] to the current sched domain partitioning, doms_cur[].
7070 * It destroys each deleted domain and builds each new domain.
7071 *
acc3f5d7 7072 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
7073 * The masks don't intersect (don't overlap). We should set up one
7074 * sched domain for each mask. CPUs not in any of the cpumasks will
7075 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7076 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7077 * it as it is.
7078 *
acc3f5d7
RR
7079 * The passed in 'doms_new' should be allocated using
7080 * alloc_sched_domains. This routine takes ownership of it and will
7081 * free_sched_domains it when done with it. If the caller failed the
7082 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7083 * and partition_sched_domains() will fall back to the single partition
7084 * 'fallback_doms'; it also forces the domains to be rebuilt.
029190c5 7085 *
96f874e2 7086 * If doms_new == NULL it will be replaced with cpu_active_mask (minus isolated CPUs).
700018e0
LZ
7087 * ndoms_new == 0 is a special case for destroying existing domains,
7088 * and it will not create the default domain.
dfb512ec 7089 *
029190c5
PJ
7090 * Call with hotplug lock held
7091 */
acc3f5d7 7092void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7093 struct sched_domain_attr *dattr_new)
029190c5 7094{
dfb512ec 7095 int i, j, n;
d65bd5ec 7096 int new_topology;
029190c5 7097
712555ee 7098 mutex_lock(&sched_domains_mutex);
a1835615 7099
7378547f
MM
7100 /* always unregister in case we don't destroy any domains */
7101 unregister_sched_domain_sysctl();
7102
d65bd5ec
HC
7103 /* Let architecture update cpu core mappings. */
7104 new_topology = arch_update_cpu_topology();
7105
dfb512ec 7106 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7107
7108 /* Destroy deleted domains */
7109 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7110 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7111 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7112 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7113 goto match1;
7114 }
7115 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7116 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7117match1:
7118 ;
7119 }
7120
c8d2d47a 7121 n = ndoms_cur;
e761b772 7122 if (doms_new == NULL) {
c8d2d47a 7123 n = 0;
acc3f5d7 7124 doms_new = &fallback_doms;
6ad4c188 7125 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7126 WARN_ON_ONCE(dattr_new);
e761b772
MK
7127 }
7128
029190c5
PJ
7129 /* Build new domains */
7130 for (i = 0; i < ndoms_new; i++) {
c8d2d47a 7131 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7132 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7133 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7134 goto match2;
7135 }
7136 /* no match - add a new doms_new */
dce840a0 7137 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7138match2:
7139 ;
7140 }
7141
7142 /* Remember the new sched domains */
acc3f5d7
RR
7143 if (doms_cur != &fallback_doms)
7144 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7145 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7146 doms_cur = doms_new;
1d3504fc 7147 dattr_cur = dattr_new;
029190c5 7148 ndoms_cur = ndoms_new;
7378547f
MM
7149
7150 register_sched_domain_sysctl();
a1835615 7151
712555ee 7152 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7153}
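
/*
 * Example (illustrative sketch, not part of this file): the two common ways
 * partition_sched_domains() is driven.  The suspend/resume notifier below
 * collapses everything into a single domain; cpusets later hand back a full
 * partitioning.  Must be called with the hotplug lock held; "my_ndoms" and
 * "my_doms" are hypothetical caller-owned values.
 */
static void example_repartition(int my_ndoms, cpumask_var_t my_doms[])
{
	/* Collapse to one domain spanning the active, non-isolated CPUs. */
	partition_sched_domains(1, NULL, NULL);

	/* Hand a caller-built partitioning (and its ownership) to the scheduler. */
	partition_sched_domains(my_ndoms, my_doms, NULL);
}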
7154
d35be8ba
SB
7155static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7156
1da177e4 7157/*
3a101d05
TH
7158 * Update cpusets according to cpu_active mask. If cpusets are
7159 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7160 * around partition_sched_domains().
d35be8ba
SB
7161 *
7162 * If we come here as part of a suspend/resume, don't touch cpusets because we
7163 * want to restore it back to its original state upon resume anyway.
1da177e4 7164 */
0b2e918a
TH
7165static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7166 void *hcpu)
e761b772 7167{
d35be8ba
SB
7168 switch (action) {
7169 case CPU_ONLINE_FROZEN:
7170 case CPU_DOWN_FAILED_FROZEN:
7171
7172 /*
7173 * num_cpus_frozen tracks how many CPUs are involved in the
7174 * suspend/resume sequence. As long as this is not the last online
7175 * operation in the resume sequence, just build a single sched
7176 * domain, ignoring cpusets.
7177 */
7178 num_cpus_frozen--;
7179 if (likely(num_cpus_frozen)) {
7180 partition_sched_domains(1, NULL, NULL);
7181 break;
7182 }
7183
7184 /*
7185 * This is the last CPU online operation. So fall through and
7186 * restore the original sched domains by considering the
7187 * cpuset configurations.
7188 */
7189
e761b772 7190 case CPU_ONLINE:
7ddf96b0 7191 cpuset_update_active_cpus(true);
d35be8ba 7192 break;
3a101d05
TH
7193 default:
7194 return NOTIFY_DONE;
7195 }
d35be8ba 7196 return NOTIFY_OK;
3a101d05 7197}
e761b772 7198
0b2e918a
TH
7199static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7200 void *hcpu)
3a101d05 7201{
3c18d447
JL
7202 unsigned long flags;
7203 long cpu = (long)hcpu;
7204 struct dl_bw *dl_b;
533445c6
OS
7205 bool overflow;
7206 int cpus;
3c18d447 7207
533445c6 7208 switch (action) {
3a101d05 7209 case CPU_DOWN_PREPARE:
533445c6
OS
7210 rcu_read_lock_sched();
7211 dl_b = dl_bw_of(cpu);
3c18d447 7212
533445c6
OS
7213 raw_spin_lock_irqsave(&dl_b->lock, flags);
7214 cpus = dl_bw_cpus(cpu);
7215 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7216 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3c18d447 7217
533445c6 7218 rcu_read_unlock_sched();
3c18d447 7219
533445c6
OS
7220 if (overflow)
7221 return notifier_from_errno(-EBUSY);
7ddf96b0 7222 cpuset_update_active_cpus(false);
d35be8ba
SB
7223 break;
7224 case CPU_DOWN_PREPARE_FROZEN:
7225 num_cpus_frozen++;
7226 partition_sched_domains(1, NULL, NULL);
7227 break;
e761b772
MK
7228 default:
7229 return NOTIFY_DONE;
7230 }
d35be8ba 7231 return NOTIFY_OK;
e761b772 7232}
e761b772 7233
1da177e4
LT
7234void __init sched_init_smp(void)
7235{
dcc30a35
RR
7236 cpumask_var_t non_isolated_cpus;
7237
7238 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7239 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7240
cb83b629
PZ
7241 sched_init_numa();
7242
6acce3ef
PZ
7243 /*
7244 * There's no userspace yet to cause hotplug operations; hence all the
7245 * cpu masks are stable and all blatant races in the below code cannot
7246 * happen.
7247 */
712555ee 7248 mutex_lock(&sched_domains_mutex);
c4a8849a 7249 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7250 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7251 if (cpumask_empty(non_isolated_cpus))
7252 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7253 mutex_unlock(&sched_domains_mutex);
e761b772 7254
301a5cba 7255 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
7256 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7257 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772 7258
b328ca18 7259 init_hrtick();
5c1e1767
NP
7260
7261 /* Move init over to a non-isolated CPU */
dcc30a35 7262 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7263 BUG();
19978ca6 7264 sched_init_granularity();
dcc30a35 7265 free_cpumask_var(non_isolated_cpus);
4212823f 7266
0e3900e6 7267 init_sched_rt_class();
1baca4ce 7268 init_sched_dl_class();
1da177e4
LT
7269}
7270#else
7271void __init sched_init_smp(void)
7272{
19978ca6 7273 sched_init_granularity();
1da177e4
LT
7274}
7275#endif /* CONFIG_SMP */
7276
7277int in_sched_functions(unsigned long addr)
7278{
1da177e4
LT
7279 return in_lock_functions(addr) ||
7280 (addr >= (unsigned long)__sched_text_start
7281 && addr < (unsigned long)__sched_text_end);
7282}
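
/*
 * Example (illustrative sketch, not part of this file): in_sched_functions()
 * is the filter arch get_wchan() implementations use so that a sleeping
 * task's "wait channel" points at the function it blocked in, rather than at
 * schedule() or the lock primitives it passed through on the way there.
 */
static bool example_interesting_frame(unsigned long return_address)
{
	/* Skip frames belonging to the scheduler or locking code. */
	return !in_sched_functions(return_address);
}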
7283
029632fb 7284#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7285/*
7286 * Default task group.
7287 * Every task in system belongs to this group at bootup.
7288 */
029632fb 7289struct task_group root_task_group;
35cf4e50 7290LIST_HEAD(task_groups);
052f1dc7 7291#endif
6f505b16 7292
e6252c3e 7293DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 7294
1da177e4
LT
7295void __init sched_init(void)
7296{
dd41f596 7297 int i, j;
434d53b0
MT
7298 unsigned long alloc_size = 0, ptr;
7299
7300#ifdef CONFIG_FAIR_GROUP_SCHED
7301 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7302#endif
7303#ifdef CONFIG_RT_GROUP_SCHED
7304 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7305#endif
434d53b0 7306 if (alloc_size) {
36b7b6d4 7307 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7308
7309#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7310 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7311 ptr += nr_cpu_ids * sizeof(void **);
7312
07e06b01 7313 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7314 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7315
6d6bc0ad 7316#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7317#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7318 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7319 ptr += nr_cpu_ids * sizeof(void **);
7320
07e06b01 7321 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7322 ptr += nr_cpu_ids * sizeof(void **);
7323
6d6bc0ad 7324#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 7325 }
df7c8e84 7326#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
7327 for_each_possible_cpu(i) {
7328 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7329 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 7330 }
b74e6278 7331#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 7332
332ac17e
DF
7333 init_rt_bandwidth(&def_rt_bandwidth,
7334 global_rt_period(), global_rt_runtime());
7335 init_dl_bandwidth(&def_dl_bandwidth,
1724813d 7336 global_rt_period(), global_rt_runtime());
332ac17e 7337
57d885fe
GH
7338#ifdef CONFIG_SMP
7339 init_defrootdomain();
7340#endif
7341
d0b27fa7 7342#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7343 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7344 global_rt_period(), global_rt_runtime());
6d6bc0ad 7345#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7346
7c941438 7347#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
7348 list_add(&root_task_group.list, &task_groups);
7349 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7350 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7351 autogroup_init(&init_task);
54c707e9 7352
7c941438 7353#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7354
0a945022 7355 for_each_possible_cpu(i) {
70b97a7f 7356 struct rq *rq;
1da177e4
LT
7357
7358 rq = cpu_rq(i);
05fa785c 7359 raw_spin_lock_init(&rq->lock);
7897986b 7360 rq->nr_running = 0;
dce48a84
TG
7361 rq->calc_load_active = 0;
7362 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 7363 init_cfs_rq(&rq->cfs);
07c54f7a
AV
7364 init_rt_rq(&rq->rt);
7365 init_dl_rq(&rq->dl);
dd41f596 7366#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 7367 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 7368 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7369 /*
07e06b01 7370 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7371 *
7372 * In the case of task-groups formed through the cgroup filesystem, it
7373 * gets 100% of the cpu resources in the system. This overall
7374 * system cpu resource is divided among the tasks of
07e06b01 7375 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7376 * based on each entity's (task or task-group's) weight
7377 * (se->load.weight).
7378 *
07e06b01 7379 * In other words, if root_task_group has 10 tasks (each of weight
354d60c2
DG
7380 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7381 * then A0's share of the cpu resource is:
7382 *
0d905bca 7383 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7384 *
07e06b01
YZ
7385 * We achieve this by letting root_task_group's tasks sit
7386 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7387 */
ab84d31e 7388 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 7389 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7390#endif /* CONFIG_FAIR_GROUP_SCHED */
7391
7392 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7393#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7394 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7395#endif
1da177e4 7396
dd41f596
IM
7397 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7398 rq->cpu_load[j] = 0;
fdf3e95d
VP
7399
7400 rq->last_load_update_tick = jiffies;
7401
1da177e4 7402#ifdef CONFIG_SMP
41c7ce9a 7403 rq->sd = NULL;
57d885fe 7404 rq->rd = NULL;
ca6d75e6 7405 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 7406 rq->balance_callback = NULL;
1da177e4 7407 rq->active_balance = 0;
dd41f596 7408 rq->next_balance = jiffies;
1da177e4 7409 rq->push_cpu = 0;
0a2966b4 7410 rq->cpu = i;
1f11eb6a 7411 rq->online = 0;
eae0c9df
MG
7412 rq->idle_stamp = 0;
7413 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 7414 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
7415
7416 INIT_LIST_HEAD(&rq->cfs_tasks);
7417
dc938520 7418 rq_attach_root(rq, &def_root_domain);
3451d024 7419#ifdef CONFIG_NO_HZ_COMMON
1c792db7 7420 rq->nohz_flags = 0;
83cd4fe2 7421#endif
265f22a9
FW
7422#ifdef CONFIG_NO_HZ_FULL
7423 rq->last_sched_tick = 0;
7424#endif
1da177e4 7425#endif
8f4d37ec 7426 init_rq_hrtick(rq);
1da177e4 7427 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7428 }
7429
2dd73a4f 7430 set_load_weight(&init_task);
b50f60ce 7431
e107be36
AK
7432#ifdef CONFIG_PREEMPT_NOTIFIERS
7433 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7434#endif
7435
1da177e4
LT
7436 /*
7437 * The boot idle thread does lazy MMU switching as well:
7438 */
7439 atomic_inc(&init_mm.mm_count);
7440 enter_lazy_tlb(&init_mm, current);
7441
1b537c7d
YD
7442 /*
7443 * During early bootup we pretend to be a normal task:
7444 */
7445 current->sched_class = &fair_sched_class;
7446
1da177e4
LT
7447 /*
7448 * Make us the idle thread. Technically, schedule() should not be
7449 * called from this thread, however somewhere below it might be,
7450 * but because we are the idle thread, we just pick up running again
7451 * when this runqueue becomes "idle".
7452 */
7453 init_idle(current, smp_processor_id());
dce48a84
TG
7454
7455 calc_load_update = jiffies + LOAD_FREQ;
7456
bf4d83f6 7457#ifdef CONFIG_SMP
4cb98839 7458 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
7459 /* May be allocated at isolcpus cmdline parse time */
7460 if (cpu_isolated_map == NULL)
7461 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 7462 idle_thread_set_boot_cpu();
a803f026 7463 set_cpu_rq_start_time();
029632fb
PZ
7464#endif
7465 init_sched_fair_class();
6a7b3dc3 7466
6892b75e 7467 scheduler_running = 1;
1da177e4
LT
7468}
7469
d902db1e 7470#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7471static inline int preempt_count_equals(int preempt_offset)
7472{
234da7bc 7473 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 7474
4ba8216c 7475 return (nested == preempt_offset);
e4aafea2
FW
7476}
7477
d894837f 7478void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7479{
8eb23b9f
PZ
7480 /*
7481 * Blocking primitives will set (and therefore destroy) current->state;
7482 * since we will exit with TASK_RUNNING, make sure we enter with it,
7483 * otherwise we will destroy that state.
7484 */
00845eb9 7485 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
7486 "do not call blocking ops when !TASK_RUNNING; "
7487 "state=%lx set at [<%p>] %pS\n",
7488 current->state,
7489 (void *)current->task_state_change,
00845eb9 7490 (void *)current->task_state_change);
8eb23b9f 7491
3427445a
PZ
7492 ___might_sleep(file, line, preempt_offset);
7493}
7494EXPORT_SYMBOL(__might_sleep);
7495
7496void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7497{
1da177e4
LT
7498 static unsigned long prev_jiffy; /* ratelimiting */
7499
b3fbab05 7500 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
db273be2
TG
7501 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7502 !is_idle_task(current)) ||
e4aafea2 7503 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
7504 return;
7505 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7506 return;
7507 prev_jiffy = jiffies;
7508
3df0fc5b
PZ
7509 printk(KERN_ERR
7510 "BUG: sleeping function called from invalid context at %s:%d\n",
7511 file, line);
7512 printk(KERN_ERR
7513 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7514 in_atomic(), irqs_disabled(),
7515 current->pid, current->comm);
aef745fc 7516
a8b686b3
ES
7517 if (task_stack_end_corrupted(current))
7518 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7519
aef745fc
IM
7520 debug_show_held_locks(current);
7521 if (irqs_disabled())
7522 print_irqtrace_events(current);
8f47b187
TG
7523#ifdef CONFIG_DEBUG_PREEMPT
7524 if (!preempt_count_equals(preempt_offset)) {
7525 pr_err("Preemption disabled at:");
7526 print_ip_sym(current->preempt_disable_ip);
7527 pr_cont("\n");
7528 }
7529#endif
aef745fc 7530 dump_stack();
1da177e4 7531}
3427445a 7532EXPORT_SYMBOL(___might_sleep);
1da177e4
LT
7533#endif
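
/*
 * Example (illustrative sketch, not part of this file): the kind of bug the
 * __might_sleep()/___might_sleep() machinery above catches.  kmalloc() with
 * GFP_KERNEL may sleep, so calling it with a spinlock held (and therefore
 * with preemption disabled) produces the "BUG: sleeping function called from
 * invalid context" report when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.
 */
static void example_sleep_in_atomic(spinlock_t *lock)
{
	void *p;

	spin_lock(lock);		/* preempt_count is now non-zero */
	p = kmalloc(64, GFP_KERNEL);	/* may sleep -> ___might_sleep() fires */
	kfree(p);
	spin_unlock(lock);
}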
7534
7535#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 7536void normalize_rt_tasks(void)
3a5e4dc1 7537{
dbc7f069 7538 struct task_struct *g, *p;
d50dde5a
DF
7539 struct sched_attr attr = {
7540 .sched_policy = SCHED_NORMAL,
7541 };
1da177e4 7542
3472eaa1 7543 read_lock(&tasklist_lock);
5d07f420 7544 for_each_process_thread(g, p) {
178be793
IM
7545 /*
7546 * Only normalize user tasks:
7547 */
3472eaa1 7548 if (p->flags & PF_KTHREAD)
178be793
IM
7549 continue;
7550
6cfb0d5d 7551 p->se.exec_start = 0;
6cfb0d5d 7552#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7553 p->se.statistics.wait_start = 0;
7554 p->se.statistics.sleep_start = 0;
7555 p->se.statistics.block_start = 0;
6cfb0d5d 7556#endif
dd41f596 7557
aab03e05 7558 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
7559 /*
7560 * Renice negative nice level userspace
7561 * tasks back to 0:
7562 */
3472eaa1 7563 if (task_nice(p) < 0)
dd41f596 7564 set_user_nice(p, 0);
1da177e4 7565 continue;
dd41f596 7566 }
1da177e4 7567
dbc7f069 7568 __sched_setscheduler(p, &attr, false, false);
5d07f420 7569 }
3472eaa1 7570 read_unlock(&tasklist_lock);
1da177e4
LT
7571}
7572
7573#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7574
67fc4e0c 7575#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7576/*
67fc4e0c 7577 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7578 *
7579 * They can only be called when the whole system has been
7580 * stopped - every CPU needs to be quiescent, and no scheduling
7581 * activity can take place. Using them for anything else would
7582 * be a serious bug, and as a result, they aren't even visible
7583 * under any other configuration.
7584 */
7585
7586/**
7587 * curr_task - return the current task for a given cpu.
7588 * @cpu: the processor in question.
7589 *
7590 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
7591 *
7592 * Return: The current task for @cpu.
1df5c10a 7593 */
36c8b586 7594struct task_struct *curr_task(int cpu)
1df5c10a
LT
7595{
7596 return cpu_curr(cpu);
7597}
7598
67fc4e0c
JW
7599#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7600
7601#ifdef CONFIG_IA64
1df5c10a
LT
7602/**
7603 * set_curr_task - set the current task for a given cpu.
7604 * @cpu: the processor in question.
7605 * @p: the task pointer to set.
7606 *
7607 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7608 * are serviced on a separate stack. It allows the architecture to switch the
7609 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7610 * must be called with all CPUs synchronized and interrupts disabled; the
7611 * caller must save the original value of the current task (see
7612 * curr_task() above) and restore that value before reenabling interrupts and
7613 * re-starting the system.
7614 *
7615 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7616 */
36c8b586 7617void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7618{
7619 cpu_curr(cpu) = p;
7620}
7621
7622#endif
29f59db3 7623
7c941438 7624#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7625/* task_group_lock serializes the addition/removal of task groups */
7626static DEFINE_SPINLOCK(task_group_lock);
7627
bccbe08a
PZ
7628static void free_sched_group(struct task_group *tg)
7629{
7630 free_fair_sched_group(tg);
7631 free_rt_sched_group(tg);
e9aa1dd1 7632 autogroup_free(tg);
bccbe08a
PZ
7633 kfree(tg);
7634}
7635
7636/* allocate runqueue etc for a new task group */
ec7dc8ac 7637struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7638{
7639 struct task_group *tg;
bccbe08a
PZ
7640
7641 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7642 if (!tg)
7643 return ERR_PTR(-ENOMEM);
7644
ec7dc8ac 7645 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7646 goto err;
7647
ec7dc8ac 7648 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7649 goto err;
7650
ace783b9
LZ
7651 return tg;
7652
7653err:
7654 free_sched_group(tg);
7655 return ERR_PTR(-ENOMEM);
7656}
7657
7658void sched_online_group(struct task_group *tg, struct task_group *parent)
7659{
7660 unsigned long flags;
7661
8ed36996 7662 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7663 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7664
7665 WARN_ON(!parent); /* root should already exist */
7666
7667 tg->parent = parent;
f473aa5e 7668 INIT_LIST_HEAD(&tg->children);
09f2724a 7669 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7670 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7671}
7672
9b5b7751 7673/* rcu callback to free various structures associated with a task group */
6f505b16 7674static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7675{
29f59db3 7676 /* now it should be safe to free those cfs_rqs */
6f505b16 7677 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7678}
7679
9b5b7751 7680/* Destroy runqueue etc associated with a task group */
4cf86d77 7681void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
7682{
7683 /* wait for possible concurrent references to cfs_rqs to complete */
7684 call_rcu(&tg->rcu, free_sched_group_rcu);
7685}
7686
7687void sched_offline_group(struct task_group *tg)
29f59db3 7688{
8ed36996 7689 unsigned long flags;
9b5b7751 7690 int i;
29f59db3 7691
3d4b47b4
PZ
7692 /* end participation in shares distribution */
7693 for_each_possible_cpu(i)
bccbe08a 7694 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7695
7696 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7697 list_del_rcu(&tg->list);
f473aa5e 7698 list_del_rcu(&tg->siblings);
8ed36996 7699 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7700}
7701
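/*
 * Example (illustrative sketch, not part of this file): the lifecycle the
 * cgroup callbacks at the bottom of this file drive.  A group is created
 * from a parent, made visible for scheduling, and later torn down; the
 * final free is RCU-deferred by sched_destroy_group().
 */
static struct task_group *example_group_lifecycle(struct task_group *parent)
{
	struct task_group *tg;

	tg = sched_create_group(parent);	/* allocate per-cpu runqueues etc. */
	if (IS_ERR(tg))
		return tg;

	sched_online_group(tg, parent);		/* link into the task_groups list */

	/* ... tasks may now be moved into tg with sched_move_task() ... */

	sched_offline_group(tg);		/* end participation in shares distribution */
	sched_destroy_group(tg);		/* freed after an RCU grace period */
	return NULL;
}
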
9b5b7751 7702/* Change a task's runqueue when it moves between groups.
3a252015
IM
7703 * The caller of this function should have put the task in its new group
7704 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7705 * reflect its new group.
9b5b7751
SV
7706 */
7707void sched_move_task(struct task_struct *tsk)
29f59db3 7708{
8323f26c 7709 struct task_group *tg;
da0c1e65 7710 int queued, running;
29f59db3
SV
7711 unsigned long flags;
7712 struct rq *rq;
7713
7714 rq = task_rq_lock(tsk, &flags);
7715
051a1d1a 7716 running = task_current(rq, tsk);
da0c1e65 7717 queued = task_on_rq_queued(tsk);
29f59db3 7718
da0c1e65 7719 if (queued)
29f59db3 7720 dequeue_task(rq, tsk, 0);
0e1f3483 7721 if (unlikely(running))
f3cd1c4e 7722 put_prev_task(rq, tsk);
29f59db3 7723
f7b8a47d
KT
7724 /*
7725 * All callers are synchronized by task_rq_lock(); we do not use RCU
7726 * which is pointless here. Thus, we pass "true" to task_css_check()
7727 * to prevent lockdep warnings.
7728 */
7729 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7730 struct task_group, css);
7731 tg = autogroup_task_group(tsk, tg);
7732 tsk->sched_task_group = tg;
7733
810b3817 7734#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 7735 if (tsk->sched_class->task_move_group)
da0c1e65 7736 tsk->sched_class->task_move_group(tsk, queued);
b2b5ce02 7737 else
810b3817 7738#endif
b2b5ce02 7739 set_task_rq(tsk, task_cpu(tsk));
810b3817 7740
0e1f3483
HS
7741 if (unlikely(running))
7742 tsk->sched_class->set_curr_task(rq);
da0c1e65 7743 if (queued)
371fd7e7 7744 enqueue_task(rq, tsk, 0);
29f59db3 7745
0122ec5b 7746 task_rq_unlock(rq, tsk, &flags);
29f59db3 7747}
7c941438 7748#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7749
a790de99
PT
7750#ifdef CONFIG_RT_GROUP_SCHED
7751/*
7752 * Ensure that the real time constraints are schedulable.
7753 */
7754static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7755
9a7e0b18
PZ
7756/* Must be called with tasklist_lock held */
7757static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7758{
9a7e0b18 7759 struct task_struct *g, *p;
b40b2e8e 7760
1fe89e1b
PZ
7761 /*
7762 * Autogroups do not have RT tasks; see autogroup_create().
7763 */
7764 if (task_group_is_autogroup(tg))
7765 return 0;
7766
5d07f420 7767 for_each_process_thread(g, p) {
8651c658 7768 if (rt_task(p) && task_group(p) == tg)
9a7e0b18 7769 return 1;
5d07f420 7770 }
b40b2e8e 7771
9a7e0b18
PZ
7772 return 0;
7773}
b40b2e8e 7774
9a7e0b18
PZ
7775struct rt_schedulable_data {
7776 struct task_group *tg;
7777 u64 rt_period;
7778 u64 rt_runtime;
7779};
b40b2e8e 7780
a790de99 7781static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7782{
7783 struct rt_schedulable_data *d = data;
7784 struct task_group *child;
7785 unsigned long total, sum = 0;
7786 u64 period, runtime;
b40b2e8e 7787
9a7e0b18
PZ
7788 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7789 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7790
9a7e0b18
PZ
7791 if (tg == d->tg) {
7792 period = d->rt_period;
7793 runtime = d->rt_runtime;
b40b2e8e 7794 }
b40b2e8e 7795
4653f803
PZ
7796 /*
7797 * Cannot have more runtime than the period.
7798 */
7799 if (runtime > period && runtime != RUNTIME_INF)
7800 return -EINVAL;
6f505b16 7801
4653f803
PZ
7802 /*
7803 * Ensure we don't starve existing RT tasks.
7804 */
9a7e0b18
PZ
7805 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7806 return -EBUSY;
6f505b16 7807
9a7e0b18 7808 total = to_ratio(period, runtime);
6f505b16 7809
4653f803
PZ
7810 /*
7811 * Nobody can have more than the global setting allows.
7812 */
7813 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7814 return -EINVAL;
6f505b16 7815
4653f803
PZ
7816 /*
7817 * The sum of our children's runtime should not exceed our own.
7818 */
9a7e0b18
PZ
7819 list_for_each_entry_rcu(child, &tg->children, siblings) {
7820 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7821 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7822
9a7e0b18
PZ
7823 if (child == d->tg) {
7824 period = d->rt_period;
7825 runtime = d->rt_runtime;
7826 }
6f505b16 7827
9a7e0b18 7828 sum += to_ratio(period, runtime);
9f0c1e56 7829 }
6f505b16 7830
9a7e0b18
PZ
7831 if (sum > total)
7832 return -EINVAL;
7833
7834 return 0;
6f505b16
PZ
7835}
7836
9a7e0b18 7837static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7838{
8277434e
PT
7839 int ret;
7840
9a7e0b18
PZ
7841 struct rt_schedulable_data data = {
7842 .tg = tg,
7843 .rt_period = period,
7844 .rt_runtime = runtime,
7845 };
7846
8277434e
PT
7847 rcu_read_lock();
7848 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7849 rcu_read_unlock();
7850
7851 return ret;
521f1a24
DG
7852}
7853
ab84d31e 7854static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7855 u64 rt_period, u64 rt_runtime)
6f505b16 7856{
ac086bc2 7857 int i, err = 0;
9f0c1e56 7858
2636ed5f
PZ
7859 /*
7860 * Disallowing the root group RT runtime is BAD; it would disallow the
7861 * kernel from creating (and/or operating) RT threads.
7862 */
7863 if (tg == &root_task_group && rt_runtime == 0)
7864 return -EINVAL;
7865
7866 /* A period of zero doesn't make any sense. */
7867 if (rt_period == 0)
7868 return -EINVAL;
7869
9f0c1e56 7870 mutex_lock(&rt_constraints_mutex);
521f1a24 7871 read_lock(&tasklist_lock);
9a7e0b18
PZ
7872 err = __rt_schedulable(tg, rt_period, rt_runtime);
7873 if (err)
9f0c1e56 7874 goto unlock;
ac086bc2 7875
0986b11b 7876 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7877 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7878 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7879
7880 for_each_possible_cpu(i) {
7881 struct rt_rq *rt_rq = tg->rt_rq[i];
7882
0986b11b 7883 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7884 rt_rq->rt_runtime = rt_runtime;
0986b11b 7885 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7886 }
0986b11b 7887 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7888unlock:
521f1a24 7889 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7890 mutex_unlock(&rt_constraints_mutex);
7891
7892 return err;
6f505b16
PZ
7893}
7894
25cc7da7 7895static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
7896{
7897 u64 rt_runtime, rt_period;
7898
7899 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7900 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7901 if (rt_runtime_us < 0)
7902 rt_runtime = RUNTIME_INF;
7903
ab84d31e 7904 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7905}
7906
25cc7da7 7907static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
7908{
7909 u64 rt_runtime_us;
7910
d0b27fa7 7911 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7912 return -1;
7913
d0b27fa7 7914 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7915 do_div(rt_runtime_us, NSEC_PER_USEC);
7916 return rt_runtime_us;
7917}
d0b27fa7 7918
ce2f5fe4 7919static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
d0b27fa7
PZ
7920{
7921 u64 rt_runtime, rt_period;
7922
ce2f5fe4 7923 rt_period = rt_period_us * NSEC_PER_USEC;
d0b27fa7
PZ
7924 rt_runtime = tg->rt_bandwidth.rt_runtime;
7925
ab84d31e 7926 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7927}
7928
25cc7da7 7929static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
7930{
7931 u64 rt_period_us;
7932
7933 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7934 do_div(rt_period_us, NSEC_PER_USEC);
7935 return rt_period_us;
7936}
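
/*
 * Example (illustrative userspace sketch, not kernel code): the setters above
 * back the cpu.rt_period_us / cpu.rt_runtime_us files declared in cpu_files[]
 * near the end of this file.  The mount point and group name are assumptions.
 * Granting a group 100ms of RT time per 1s period:
 */
#include <stdio.h>

static int example_set_group_rt_bandwidth(void)
{
	FILE *f;

	f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us", "w");
	if (!f)
		return -1;
	fprintf(f, "%d", 1000000);	/* period: 1s */
	fclose(f);

	f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us", "w");
	if (!f)
		return -1;
	fprintf(f, "%d", 100000);	/* runtime: 100ms, i.e. 10% of each period */
	return fclose(f);
}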
332ac17e 7937#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7938
332ac17e 7939#ifdef CONFIG_RT_GROUP_SCHED
d0b27fa7
PZ
7940static int sched_rt_global_constraints(void)
7941{
7942 int ret = 0;
7943
7944 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7945 read_lock(&tasklist_lock);
4653f803 7946 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7947 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7948 mutex_unlock(&rt_constraints_mutex);
7949
7950 return ret;
7951}
54e99124 7952
25cc7da7 7953static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
7954{
7955 /* Don't accept realtime tasks when there is no way for them to run */
7956 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7957 return 0;
7958
7959 return 1;
7960}
7961
6d6bc0ad 7962#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7963static int sched_rt_global_constraints(void)
7964{
ac086bc2 7965 unsigned long flags;
332ac17e 7966 int i, ret = 0;
ec5d4989 7967
0986b11b 7968 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7969 for_each_possible_cpu(i) {
7970 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7971
0986b11b 7972 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7973 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7974 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7975 }
0986b11b 7976 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7977
332ac17e 7978 return ret;
d0b27fa7 7979}
6d6bc0ad 7980#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7981
a1963b81 7982static int sched_dl_global_validate(void)
332ac17e 7983{
1724813d
PZ
7984 u64 runtime = global_rt_runtime();
7985 u64 period = global_rt_period();
332ac17e 7986 u64 new_bw = to_ratio(period, runtime);
f10e00f4 7987 struct dl_bw *dl_b;
1724813d 7988 int cpu, ret = 0;
49516342 7989 unsigned long flags;
332ac17e
DF
7990
7991 /*
7992 * Here we want to check the bandwidth not being set to some
7993 * value smaller than the currently allocated bandwidth in
7994 * any of the root_domains.
7995 *
7996 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
7997 * cycling on root_domains... Discussion on different/better
7998 * solutions is welcome!
7999 */
1724813d 8000 for_each_possible_cpu(cpu) {
f10e00f4
KT
8001 rcu_read_lock_sched();
8002 dl_b = dl_bw_of(cpu);
332ac17e 8003
49516342 8004 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d
PZ
8005 if (new_bw < dl_b->total_bw)
8006 ret = -EBUSY;
49516342 8007 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
1724813d 8008
f10e00f4
KT
8009 rcu_read_unlock_sched();
8010
1724813d
PZ
8011 if (ret)
8012 break;
332ac17e
DF
8013 }
8014
1724813d 8015 return ret;
332ac17e
DF
8016}
8017
1724813d 8018static void sched_dl_do_global(void)
ce0dbbbb 8019{
1724813d 8020 u64 new_bw = -1;
f10e00f4 8021 struct dl_bw *dl_b;
1724813d 8022 int cpu;
49516342 8023 unsigned long flags;
ce0dbbbb 8024
1724813d
PZ
8025 def_dl_bandwidth.dl_period = global_rt_period();
8026 def_dl_bandwidth.dl_runtime = global_rt_runtime();
8027
8028 if (global_rt_runtime() != RUNTIME_INF)
8029 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
8030
8031 /*
8032 * FIXME: As above...
8033 */
8034 for_each_possible_cpu(cpu) {
f10e00f4
KT
8035 rcu_read_lock_sched();
8036 dl_b = dl_bw_of(cpu);
1724813d 8037
49516342 8038 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d 8039 dl_b->bw = new_bw;
49516342 8040 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
f10e00f4
KT
8041
8042 rcu_read_unlock_sched();
ce0dbbbb 8043 }
1724813d
PZ
8044}
8045
8046static int sched_rt_global_validate(void)
8047{
8048 if (sysctl_sched_rt_period <= 0)
8049 return -EINVAL;
8050
e9e7cb38
JL
8051 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8052 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
1724813d
PZ
8053 return -EINVAL;
8054
8055 return 0;
8056}
8057
8058static void sched_rt_do_global(void)
8059{
8060 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8061 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
ce0dbbbb
CW
8062}
8063
d0b27fa7 8064int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8065 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8066 loff_t *ppos)
8067{
d0b27fa7
PZ
8068 int old_period, old_runtime;
8069 static DEFINE_MUTEX(mutex);
1724813d 8070 int ret;
d0b27fa7
PZ
8071
8072 mutex_lock(&mutex);
8073 old_period = sysctl_sched_rt_period;
8074 old_runtime = sysctl_sched_rt_runtime;
8075
8d65af78 8076 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8077
8078 if (!ret && write) {
1724813d
PZ
8079 ret = sched_rt_global_validate();
8080 if (ret)
8081 goto undo;
8082
a1963b81 8083 ret = sched_dl_global_validate();
1724813d
PZ
8084 if (ret)
8085 goto undo;
8086
a1963b81 8087 ret = sched_rt_global_constraints();
1724813d
PZ
8088 if (ret)
8089 goto undo;
8090
8091 sched_rt_do_global();
8092 sched_dl_do_global();
8093 }
8094 if (0) {
8095undo:
8096 sysctl_sched_rt_period = old_period;
8097 sysctl_sched_rt_runtime = old_runtime;
d0b27fa7
PZ
8098 }
8099 mutex_unlock(&mutex);
8100
8101 return ret;
8102}
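
/*
 * Example (illustrative userspace sketch, not kernel code): sched_rt_handler()
 * above validates writes to the global RT throttling knobs exposed via sysctl.
 * The default reserves 5% of each 1s period for non-RT tasks; a write that
 * would break existing RT/deadline reservations is rejected (e.g. -EBUSY).
 */
#include <stdio.h>

static int example_set_global_rt_throttling(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");

	if (!f)
		return -1;
	fprintf(f, "%d", 950000);	/* 950ms of RT time per 1s period */
	return fclose(f);
}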
68318b8e 8103
1724813d 8104int sched_rr_handler(struct ctl_table *table, int write,
332ac17e
DF
8105 void __user *buffer, size_t *lenp,
8106 loff_t *ppos)
8107{
8108 int ret;
332ac17e 8109 static DEFINE_MUTEX(mutex);
332ac17e
DF
8110
8111 mutex_lock(&mutex);
332ac17e 8112 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1724813d
PZ
8113 /* make sure that internally we keep the timeslice in jiffies */
8114 /* also, writing zero resets the timeslice to the default */
332ac17e 8115 if (!ret && write) {
1724813d
PZ
8116 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8117 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
332ac17e
DF
8118 }
8119 mutex_unlock(&mutex);
332ac17e
DF
8120 return ret;
8121}
8122
052f1dc7 8123#ifdef CONFIG_CGROUP_SCHED
68318b8e 8124
a7c6d554 8125static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 8126{
a7c6d554 8127 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
8128}
8129
eb95419b
TH
8130static struct cgroup_subsys_state *
8131cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 8132{
eb95419b
TH
8133 struct task_group *parent = css_tg(parent_css);
8134 struct task_group *tg;
68318b8e 8135
eb95419b 8136 if (!parent) {
68318b8e 8137 /* This is early initialization for the top cgroup */
07e06b01 8138 return &root_task_group.css;
68318b8e
SV
8139 }
8140
ec7dc8ac 8141 tg = sched_create_group(parent);
68318b8e
SV
8142 if (IS_ERR(tg))
8143 return ERR_PTR(-ENOMEM);
8144
68318b8e
SV
8145 return &tg->css;
8146}
8147
eb95419b 8148static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
ace783b9 8149{
eb95419b 8150 struct task_group *tg = css_tg(css);
5c9d535b 8151 struct task_group *parent = css_tg(css->parent);
ace783b9 8152
63876986
TH
8153 if (parent)
8154 sched_online_group(tg, parent);
ace783b9
LZ
8155 return 0;
8156}
8157
eb95419b 8158static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 8159{
eb95419b 8160 struct task_group *tg = css_tg(css);
68318b8e
SV
8161
8162 sched_destroy_group(tg);
8163}
8164
eb95419b 8165static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
ace783b9 8166{
eb95419b 8167 struct task_group *tg = css_tg(css);
ace783b9
LZ
8168
8169 sched_offline_group(tg);
8170}
8171
7e47682e 8172static void cpu_cgroup_fork(struct task_struct *task, void *private)
eeb61e53
KT
8173{
8174 sched_move_task(task);
8175}
8176
eb95419b 8177static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
bb9d97b6 8178 struct cgroup_taskset *tset)
68318b8e 8179{
bb9d97b6
TH
8180 struct task_struct *task;
8181
924f0d9a 8182 cgroup_taskset_for_each(task, tset) {
b68aa230 8183#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 8184 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 8185 return -EINVAL;
b68aa230 8186#else
bb9d97b6
TH
8187 /* We don't support RT-tasks being in separate groups */
8188 if (task->sched_class != &fair_sched_class)
8189 return -EINVAL;
b68aa230 8190#endif
bb9d97b6 8191 }
be367d09
BB
8192 return 0;
8193}
68318b8e 8194
eb95419b 8195static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
bb9d97b6 8196 struct cgroup_taskset *tset)
68318b8e 8197{
bb9d97b6
TH
8198 struct task_struct *task;
8199
924f0d9a 8200 cgroup_taskset_for_each(task, tset)
bb9d97b6 8201 sched_move_task(task);
68318b8e
SV
8202}
8203
eb95419b
TH
8204static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
8205 struct cgroup_subsys_state *old_css,
8206 struct task_struct *task)
068c5cc5
PZ
8207{
8208 /*
8209 * cgroup_exit() is called in the copy_process() failure path.
8210 * Ignore this case since the task hasn't ran yet, this avoids
8211 * trying to poke a half freed task state from generic code.
8212 */
8213 if (!(task->flags & PF_EXITING))
8214 return;
8215
8216 sched_move_task(task);
8217}
8218
052f1dc7 8219#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8220static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8221 struct cftype *cftype, u64 shareval)
68318b8e 8222{
182446d0 8223 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8224}
8225
182446d0
TH
8226static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8227 struct cftype *cft)
68318b8e 8228{
182446d0 8229 struct task_group *tg = css_tg(css);
68318b8e 8230
c8b28116 8231 return (u64) scale_load_down(tg->shares);
68318b8e 8232}
ab84d31e
PT
8233
8234#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8235static DEFINE_MUTEX(cfs_constraints_mutex);
8236
ab84d31e
PT
8237const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8238const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8239
a790de99
PT
8240static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8241
ab84d31e
PT
8242static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8243{
56f570e5 8244 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8245 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8246
8247 if (tg == &root_task_group)
8248 return -EINVAL;
8249
8250 /*
8251 * Ensure we have at some amount of bandwidth every period. This is
8252 * to prevent reaching a state of large arrears when throttled via
8253 * entity_tick() resulting in prolonged exit starvation.
8254 */
8255 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8256 return -EINVAL;
8257
8258 /*
8259 * Likewise, bound things on the other side by preventing insane quota
8260 * periods. This also allows us to normalize in computing quota
8261 * feasibility.
8262 */
8263 if (period > max_cfs_quota_period)
8264 return -EINVAL;
8265
0e59bdae
KT
8266 /*
8267 * Prevent race between setting of cfs_rq->runtime_enabled and
8268 * unthrottle_offline_cfs_rqs().
8269 */
8270 get_online_cpus();
a790de99
PT
8271 mutex_lock(&cfs_constraints_mutex);
8272 ret = __cfs_schedulable(tg, period, quota);
8273 if (ret)
8274 goto out_unlock;
8275
58088ad0 8276 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8277 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
8278 /*
8279 * If we need to toggle cfs_bandwidth_used, off->on must occur
8280 * before making related changes, and on->off must occur afterwards
8281 */
8282 if (runtime_enabled && !runtime_was_enabled)
8283 cfs_bandwidth_usage_inc();
ab84d31e
PT
8284 raw_spin_lock_irq(&cfs_b->lock);
8285 cfs_b->period = ns_to_ktime(period);
8286 cfs_b->quota = quota;
58088ad0 8287
a9cf55b2 8288 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 8289 /* restart the period timer (if active) to handle new period expiry */
77a4d1a1
PZ
8290 if (runtime_enabled)
8291 start_cfs_bandwidth(cfs_b);
ab84d31e
PT
8292 raw_spin_unlock_irq(&cfs_b->lock);
8293
0e59bdae 8294 for_each_online_cpu(i) {
ab84d31e 8295 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8296 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
8297
8298 raw_spin_lock_irq(&rq->lock);
58088ad0 8299 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8300 cfs_rq->runtime_remaining = 0;
671fd9da 8301
029632fb 8302 if (cfs_rq->throttled)
671fd9da 8303 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
8304 raw_spin_unlock_irq(&rq->lock);
8305 }
1ee14e6c
BS
8306 if (runtime_was_enabled && !runtime_enabled)
8307 cfs_bandwidth_usage_dec();
a790de99
PT
8308out_unlock:
8309 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 8310 put_online_cpus();
ab84d31e 8311
a790de99 8312 return ret;
ab84d31e
PT
8313}
8314
8315int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8316{
8317 u64 quota, period;
8318
029632fb 8319 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8320 if (cfs_quota_us < 0)
8321 quota = RUNTIME_INF;
8322 else
8323 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8324
8325 return tg_set_cfs_bandwidth(tg, period, quota);
8326}
8327
8328long tg_get_cfs_quota(struct task_group *tg)
8329{
8330 u64 quota_us;
8331
029632fb 8332 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8333 return -1;
8334
029632fb 8335 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8336 do_div(quota_us, NSEC_PER_USEC);
8337
8338 return quota_us;
8339}
8340
8341int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8342{
8343 u64 quota, period;
8344
8345 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8346 quota = tg->cfs_bandwidth.quota;
ab84d31e 8347
ab84d31e
PT
8348 return tg_set_cfs_bandwidth(tg, period, quota);
8349}
8350
8351long tg_get_cfs_period(struct task_group *tg)
8352{
8353 u64 cfs_period_us;
8354
029632fb 8355 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8356 do_div(cfs_period_us, NSEC_PER_USEC);
8357
8358 return cfs_period_us;
8359}
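
/*
 * Example (illustrative sketch, not part of this file): quota/period caps a
 * group at quota/period CPUs worth of runtime per period.  Capping a
 * hypothetical group "tg" to half a CPU, then removing the limit again:
 */
static int example_cap_group_to_half_a_cpu(struct task_group *tg)
{
	int ret;

	ret = tg_set_cfs_period(tg, 100000);	/* 100ms period */
	if (ret)
		return ret;

	ret = tg_set_cfs_quota(tg, 50000);	/* 50ms runtime per period = 0.5 CPU */
	if (ret)
		return ret;

	return tg_set_cfs_quota(tg, -1);	/* negative quota means RUNTIME_INF (no cap) */
}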
8360
182446d0
TH
8361static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8362 struct cftype *cft)
ab84d31e 8363{
182446d0 8364 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
8365}
8366
182446d0
TH
8367static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8368 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 8369{
182446d0 8370 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
8371}
8372
182446d0
TH
8373static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8374 struct cftype *cft)
ab84d31e 8375{
182446d0 8376 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
8377}
8378
182446d0
TH
8379static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8380 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 8381{
182446d0 8382 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
8383}
8384
a790de99
PT
8385struct cfs_schedulable_data {
8386 struct task_group *tg;
8387 u64 period, quota;
8388};
8389
8390/*
8391 * normalize group quota/period to be quota/max_period
8392 * note: units are usecs
8393 */
8394static u64 normalize_cfs_quota(struct task_group *tg,
8395 struct cfs_schedulable_data *d)
8396{
8397 u64 quota, period;
8398
8399 if (tg == d->tg) {
8400 period = d->period;
8401 quota = d->quota;
8402 } else {
8403 period = tg_get_cfs_period(tg);
8404 quota = tg_get_cfs_quota(tg);
8405 }
8406
8407 /* note: these should typically be equivalent */
8408 if (quota == RUNTIME_INF || quota == -1)
8409 return RUNTIME_INF;
8410
8411 return to_ratio(period, quota);
8412}
8413
8414static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8415{
8416 struct cfs_schedulable_data *d = data;
029632fb 8417 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8418 s64 quota = 0, parent_quota = -1;
8419
8420 if (!tg->parent) {
8421 quota = RUNTIME_INF;
8422 } else {
029632fb 8423 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8424
8425 quota = normalize_cfs_quota(tg, d);
9c58c79a 8426 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
8427
8428 /*
8429 * ensure max(child_quota) <= parent_quota, inherit when no
8430 * limit is set
8431 */
8432 if (quota == RUNTIME_INF)
8433 quota = parent_quota;
8434 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8435 return -EINVAL;
8436 }
9c58c79a 8437 cfs_b->hierarchical_quota = quota;
a790de99
PT
8438
8439 return 0;
8440}
8441
8442static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8443{
8277434e 8444 int ret;
a790de99
PT
8445 struct cfs_schedulable_data data = {
8446 .tg = tg,
8447 .period = period,
8448 .quota = quota,
8449 };
8450
8451 if (quota != RUNTIME_INF) {
8452 do_div(data.period, NSEC_PER_USEC);
8453 do_div(data.quota, NSEC_PER_USEC);
8454 }
8455
8277434e
PT
8456 rcu_read_lock();
8457 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8458 rcu_read_unlock();
8459
8460 return ret;
a790de99 8461}
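
/*
 * Example (illustrative sketch, not part of this file): the tree walk above
 * enforces that a child's normalized quota never exceeds its parent's.  A
 * parent allowed 100000us per 100000us period (1 CPU) can host a child asking
 * for 50000us/100000us (0.5 CPU), but not one asking for 200000us/100000us.
 * The helper below mirrors that check with plain ratios; to_ratio() does the
 * equivalent comparison in fixed-point arithmetic.
 */
static int example_child_quota_fits(u64 child_quota_us, u64 child_period_us,
				    u64 parent_quota_us, u64 parent_period_us)
{
	/* child_quota/child_period <= parent_quota/parent_period, cross-multiplied */
	return child_quota_us * parent_period_us <=
	       parent_quota_us * child_period_us;
}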
e8da1b18 8462
2da8ca82 8463static int cpu_stats_show(struct seq_file *sf, void *v)
e8da1b18 8464{
2da8ca82 8465 struct task_group *tg = css_tg(seq_css(sf));
029632fb 8466 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 8467
44ffc75b
TH
8468 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8469 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8470 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18
NR
8471
8472 return 0;
8473}
ab84d31e 8474#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8475#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8476
052f1dc7 8477#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
8478static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8479 struct cftype *cft, s64 val)
6f505b16 8480{
182446d0 8481 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
8482}
8483
182446d0
TH
8484static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8485 struct cftype *cft)
6f505b16 8486{
182446d0 8487 return sched_group_rt_runtime(css_tg(css));
6f505b16 8488}
d0b27fa7 8489
182446d0
TH
8490static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8491 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 8492{
182446d0 8493 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
8494}
8495
182446d0
TH
8496static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8497 struct cftype *cft)
d0b27fa7 8498{
182446d0 8499 return sched_group_rt_period(css_tg(css));
d0b27fa7 8500}
6d6bc0ad 8501#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8502
fe5c7cc2 8503static struct cftype cpu_files[] = {
052f1dc7 8504#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8505 {
8506 .name = "shares",
f4c753b7
PM
8507 .read_u64 = cpu_shares_read_u64,
8508 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8509 },
052f1dc7 8510#endif
ab84d31e
PT
8511#ifdef CONFIG_CFS_BANDWIDTH
8512 {
8513 .name = "cfs_quota_us",
8514 .read_s64 = cpu_cfs_quota_read_s64,
8515 .write_s64 = cpu_cfs_quota_write_s64,
8516 },
8517 {
8518 .name = "cfs_period_us",
8519 .read_u64 = cpu_cfs_period_read_u64,
8520 .write_u64 = cpu_cfs_period_write_u64,
8521 },
e8da1b18
NR
8522 {
8523 .name = "stat",
2da8ca82 8524 .seq_show = cpu_stats_show,
e8da1b18 8525 },
ab84d31e 8526#endif
052f1dc7 8527#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8528 {
9f0c1e56 8529 .name = "rt_runtime_us",
06ecb27c
PM
8530 .read_s64 = cpu_rt_runtime_read,
8531 .write_s64 = cpu_rt_runtime_write,
6f505b16 8532 },
d0b27fa7
PZ
8533 {
8534 .name = "rt_period_us",
f4c753b7
PM
8535 .read_u64 = cpu_rt_period_read_uint,
8536 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8537 },
052f1dc7 8538#endif
4baf6e33 8539 { } /* terminate */
68318b8e
SV
8540};
8541
073219e9 8542struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748
TH
8543 .css_alloc = cpu_cgroup_css_alloc,
8544 .css_free = cpu_cgroup_css_free,
ace783b9
LZ
8545 .css_online = cpu_cgroup_css_online,
8546 .css_offline = cpu_cgroup_css_offline,
eeb61e53 8547 .fork = cpu_cgroup_fork,
bb9d97b6
TH
8548 .can_attach = cpu_cgroup_can_attach,
8549 .attach = cpu_cgroup_attach,
068c5cc5 8550 .exit = cpu_cgroup_exit,
5577964e 8551 .legacy_cftypes = cpu_files,
68318b8e
SV
8552 .early_init = 1,
8553};
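
/*
 * Example (illustrative userspace sketch, not kernel code): the legacy_cftypes
 * table above is what shows up as per-group files once the "cpu" controller is
 * mounted.  Mount point, group name and pid below are assumptions.  Creating a
 * directory invokes cpu_cgroup_css_alloc(); cpu.shares defaults to 1024.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

static int example_weighted_group(void)
{
	FILE *f;

	if (mkdir("/sys/fs/cgroup/cpu/batch", 0755) && errno != EEXIST)
		return -1;

	/* Half the default weight: this group gets proportionally less CPU time. */
	f = fopen("/sys/fs/cgroup/cpu/batch/cpu.shares", "w");
	if (!f)
		return -1;
	fprintf(f, "%d", 512);
	fclose(f);

	/* Attach a (hypothetical) task; this ends up in cpu_cgroup_attach(). */
	f = fopen("/sys/fs/cgroup/cpu/batch/tasks", "w");
	if (!f)
		return -1;
	fprintf(f, "%d", 1234);
	return fclose(f);
}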
8554
052f1dc7 8555#endif /* CONFIG_CGROUP_SCHED */
d842de87 8556
b637a328
PM
8557void dump_cpu_task(int cpu)
8558{
8559 pr_info("Task dump for CPU %d:\n", cpu);
8560 sched_show_task(cpu_curr(cpu));
8561}