]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/sched/core.c
Merge branch 'sched/urgent' into locking/core, to pick up scheduler fix we rely on
[mirror_ubuntu-jammy-kernel.git] / kernel / sched / core.c
CommitLineData
1da177e4 1/*
391e43da 2 * kernel/sched/core.c
1da177e4
LT
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
c31f2e8a
IM
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
b9131769
IM
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
1da177e4
LT
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4 34#include <linux/highmem.h>
1da177e4
LT
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
c59ede7b 37#include <linux/capability.h>
1da177e4
LT
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
9a11b49a 40#include <linux/debug_locks.h>
cdd6c482 41#include <linux/perf_event.h>
1da177e4
LT
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
7dfb7103 45#include <linux/freezer.h>
198e2f18 46#include <linux/vmalloc.h>
1da177e4
LT
47#include <linux/blkdev.h>
48#include <linux/delay.h>
b488893a 49#include <linux/pid_namespace.h>
1da177e4
LT
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
b5aadf7f 57#include <linux/proc_fs.h>
1da177e4 58#include <linux/seq_file.h>
e692ab53 59#include <linux/sysctl.h>
1da177e4
LT
60#include <linux/syscalls.h>
61#include <linux/times.h>
8f0ab514 62#include <linux/tsacct_kern.h>
c6fd91f0 63#include <linux/kprobes.h>
0ff92245 64#include <linux/delayacct.h>
dff06c15 65#include <linux/unistd.h>
f5ff8422 66#include <linux/pagemap.h>
8f4d37ec 67#include <linux/hrtimer.h>
30914a58 68#include <linux/tick.h>
f00b45c1
PZ
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
6cd8a4bb 71#include <linux/ftrace.h>
5a0e3ad6 72#include <linux/slab.h>
f1c6f1a7 73#include <linux/init_task.h>
40401530 74#include <linux/binfmts.h>
91d1aa43 75#include <linux/context_tracking.h>
52f5684c 76#include <linux/compiler.h>
1da177e4 77
96f951ed 78#include <asm/switch_to.h>
5517d86b 79#include <asm/tlb.h>
838225b4 80#include <asm/irq_regs.h>
db7e527d 81#include <asm/mutex.h>
e6e6685a
GC
82#ifdef CONFIG_PARAVIRT
83#include <asm/paravirt.h>
84#endif
1da177e4 85
029632fb 86#include "sched.h"
ea138446 87#include "../workqueue_internal.h"
29d5e047 88#include "../smpboot.h"
6e0534f2 89
a8d154b0 90#define CREATE_TRACE_POINTS
ad8d75ff 91#include <trace/events/sched.h>
a8d154b0 92
029632fb
PZ
93DEFINE_MUTEX(sched_domains_mutex);
94DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
dc61b1d6 95
fe44d621 96static void update_rq_clock_task(struct rq *rq, s64 delta);
305e6835 97
029632fb 98void update_rq_clock(struct rq *rq)
3e51f33f 99{
fe44d621 100 s64 delta;
305e6835 101
9edfbfed
PZ
102 lockdep_assert_held(&rq->lock);
103
104 if (rq->clock_skip_update & RQCF_ACT_SKIP)
f26f9aff 105 return;
aa483808 106
fe44d621 107 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
4036ac15
MG
108 if (delta < 0)
109 return;
fe44d621
PZ
110 rq->clock += delta;
111 update_rq_clock_task(rq, delta);
3e51f33f
PZ
112}
113
bf5c91ba
IM
114/*
115 * Debugging: various feature bits
116 */
f00b45c1 117
f00b45c1
PZ
118#define SCHED_FEAT(name, enabled) \
119 (1UL << __SCHED_FEAT_##name) * enabled |
120
bf5c91ba 121const_debug unsigned int sysctl_sched_features =
391e43da 122#include "features.h"
f00b45c1
PZ
123 0;
124
125#undef SCHED_FEAT
126
127#ifdef CONFIG_SCHED_DEBUG
128#define SCHED_FEAT(name, enabled) \
129 #name ,
130
1292531f 131static const char * const sched_feat_names[] = {
391e43da 132#include "features.h"
f00b45c1
PZ
133};
134
135#undef SCHED_FEAT
136
34f3a814 137static int sched_feat_show(struct seq_file *m, void *v)
f00b45c1 138{
f00b45c1
PZ
139 int i;
140
f8b6d1cc 141 for (i = 0; i < __SCHED_FEAT_NR; i++) {
34f3a814
LZ
142 if (!(sysctl_sched_features & (1UL << i)))
143 seq_puts(m, "NO_");
144 seq_printf(m, "%s ", sched_feat_names[i]);
f00b45c1 145 }
34f3a814 146 seq_puts(m, "\n");
f00b45c1 147
34f3a814 148 return 0;
f00b45c1
PZ
149}
150
f8b6d1cc
PZ
151#ifdef HAVE_JUMP_LABEL
152
c5905afb
IM
153#define jump_label_key__true STATIC_KEY_INIT_TRUE
154#define jump_label_key__false STATIC_KEY_INIT_FALSE
f8b6d1cc
PZ
155
156#define SCHED_FEAT(name, enabled) \
157 jump_label_key__##enabled ,
158
c5905afb 159struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
f8b6d1cc
PZ
160#include "features.h"
161};
162
163#undef SCHED_FEAT
164
165static void sched_feat_disable(int i)
166{
e33886b3 167 static_key_disable(&sched_feat_keys[i]);
f8b6d1cc
PZ
168}
169
170static void sched_feat_enable(int i)
171{
e33886b3 172 static_key_enable(&sched_feat_keys[i]);
f8b6d1cc
PZ
173}
174#else
175static void sched_feat_disable(int i) { };
176static void sched_feat_enable(int i) { };
177#endif /* HAVE_JUMP_LABEL */
178
1a687c2e 179static int sched_feat_set(char *cmp)
f00b45c1 180{
f00b45c1 181 int i;
1a687c2e 182 int neg = 0;
f00b45c1 183
524429c3 184 if (strncmp(cmp, "NO_", 3) == 0) {
f00b45c1
PZ
185 neg = 1;
186 cmp += 3;
187 }
188
f8b6d1cc 189 for (i = 0; i < __SCHED_FEAT_NR; i++) {
7740191c 190 if (strcmp(cmp, sched_feat_names[i]) == 0) {
f8b6d1cc 191 if (neg) {
f00b45c1 192 sysctl_sched_features &= ~(1UL << i);
f8b6d1cc
PZ
193 sched_feat_disable(i);
194 } else {
f00b45c1 195 sysctl_sched_features |= (1UL << i);
f8b6d1cc
PZ
196 sched_feat_enable(i);
197 }
f00b45c1
PZ
198 break;
199 }
200 }
201
1a687c2e
MG
202 return i;
203}
204
205static ssize_t
206sched_feat_write(struct file *filp, const char __user *ubuf,
207 size_t cnt, loff_t *ppos)
208{
209 char buf[64];
210 char *cmp;
211 int i;
5cd08fbf 212 struct inode *inode;
1a687c2e
MG
213
214 if (cnt > 63)
215 cnt = 63;
216
217 if (copy_from_user(&buf, ubuf, cnt))
218 return -EFAULT;
219
220 buf[cnt] = 0;
221 cmp = strstrip(buf);
222
5cd08fbf
JB
223 /* Ensure the static_key remains in a consistent state */
224 inode = file_inode(filp);
225 mutex_lock(&inode->i_mutex);
1a687c2e 226 i = sched_feat_set(cmp);
5cd08fbf 227 mutex_unlock(&inode->i_mutex);
f8b6d1cc 228 if (i == __SCHED_FEAT_NR)
f00b45c1
PZ
229 return -EINVAL;
230
42994724 231 *ppos += cnt;
f00b45c1
PZ
232
233 return cnt;
234}
235
34f3a814
LZ
236static int sched_feat_open(struct inode *inode, struct file *filp)
237{
238 return single_open(filp, sched_feat_show, NULL);
239}
240
828c0950 241static const struct file_operations sched_feat_fops = {
34f3a814
LZ
242 .open = sched_feat_open,
243 .write = sched_feat_write,
244 .read = seq_read,
245 .llseek = seq_lseek,
246 .release = single_release,
f00b45c1
PZ
247};
248
249static __init int sched_init_debug(void)
250{
f00b45c1
PZ
251 debugfs_create_file("sched_features", 0644, NULL, NULL,
252 &sched_feat_fops);
253
254 return 0;
255}
256late_initcall(sched_init_debug);
f8b6d1cc 257#endif /* CONFIG_SCHED_DEBUG */
bf5c91ba 258
b82d9fdd
PZ
259/*
260 * Number of tasks to iterate in a single balance run.
261 * Limited because this is done with IRQs disabled.
262 */
263const_debug unsigned int sysctl_sched_nr_migrate = 32;
264
e9e9250b
PZ
265/*
266 * period over which we average the RT time consumption, measured
267 * in ms.
268 *
269 * default: 1s
270 */
271const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
272
fa85ae24 273/*
9f0c1e56 274 * period over which we measure -rt task cpu usage in us.
fa85ae24
PZ
275 * default: 1s
276 */
9f0c1e56 277unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 278
029632fb 279__read_mostly int scheduler_running;
6892b75e 280
9f0c1e56
PZ
281/*
282 * part of the period that we allow rt tasks to run in us.
283 * default: 0.95s
284 */
285int sysctl_sched_rt_runtime = 950000;
fa85ae24 286
3fa0818b
RR
287/* cpus with isolated domains */
288cpumask_var_t cpu_isolated_map;
289
1da177e4 290/*
cc2a73b5 291 * this_rq_lock - lock this runqueue and disable interrupts.
1da177e4 292 */
a9957449 293static struct rq *this_rq_lock(void)
1da177e4
LT
294 __acquires(rq->lock)
295{
70b97a7f 296 struct rq *rq;
1da177e4
LT
297
298 local_irq_disable();
299 rq = this_rq();
05fa785c 300 raw_spin_lock(&rq->lock);
1da177e4
LT
301
302 return rq;
303}
304
8f4d37ec
PZ
305#ifdef CONFIG_SCHED_HRTICK
306/*
307 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 308 */
8f4d37ec 309
8f4d37ec
PZ
310static void hrtick_clear(struct rq *rq)
311{
312 if (hrtimer_active(&rq->hrtick_timer))
313 hrtimer_cancel(&rq->hrtick_timer);
314}
315
8f4d37ec
PZ
316/*
317 * High-resolution timer tick.
318 * Runs from hardirq context with interrupts disabled.
319 */
320static enum hrtimer_restart hrtick(struct hrtimer *timer)
321{
322 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
323
324 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
325
05fa785c 326 raw_spin_lock(&rq->lock);
3e51f33f 327 update_rq_clock(rq);
8f4d37ec 328 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
05fa785c 329 raw_spin_unlock(&rq->lock);
8f4d37ec
PZ
330
331 return HRTIMER_NORESTART;
332}
333
95e904c7 334#ifdef CONFIG_SMP
971ee28c 335
4961b6e1 336static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
337{
338 struct hrtimer *timer = &rq->hrtick_timer;
971ee28c 339
4961b6e1 340 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
971ee28c
PZ
341}
342
31656519
PZ
343/*
344 * called from hardirq (IPI) context
345 */
346static void __hrtick_start(void *arg)
b328ca18 347{
31656519 348 struct rq *rq = arg;
b328ca18 349
05fa785c 350 raw_spin_lock(&rq->lock);
971ee28c 351 __hrtick_restart(rq);
31656519 352 rq->hrtick_csd_pending = 0;
05fa785c 353 raw_spin_unlock(&rq->lock);
b328ca18
PZ
354}
355
31656519
PZ
356/*
357 * Called to set the hrtick timer state.
358 *
359 * called with rq->lock held and irqs disabled
360 */
029632fb 361void hrtick_start(struct rq *rq, u64 delay)
b328ca18 362{
31656519 363 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 364 ktime_t time;
365 s64 delta;
366
367 /*
368 * Don't schedule slices shorter than 10000ns, that just
369 * doesn't make sense and can cause timer DoS.
370 */
371 delta = max_t(s64, delay, 10000LL);
372 time = ktime_add_ns(timer->base->get_time(), delta);
b328ca18 373
cc584b21 374 hrtimer_set_expires(timer, time);
31656519
PZ
375
376 if (rq == this_rq()) {
971ee28c 377 __hrtick_restart(rq);
31656519 378 } else if (!rq->hrtick_csd_pending) {
c46fff2a 379 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
31656519
PZ
380 rq->hrtick_csd_pending = 1;
381 }
b328ca18
PZ
382}
383
384static int
385hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
386{
387 int cpu = (int)(long)hcpu;
388
389 switch (action) {
390 case CPU_UP_CANCELED:
391 case CPU_UP_CANCELED_FROZEN:
392 case CPU_DOWN_PREPARE:
393 case CPU_DOWN_PREPARE_FROZEN:
394 case CPU_DEAD:
395 case CPU_DEAD_FROZEN:
31656519 396 hrtick_clear(cpu_rq(cpu));
b328ca18
PZ
397 return NOTIFY_OK;
398 }
399
400 return NOTIFY_DONE;
401}
402
fa748203 403static __init void init_hrtick(void)
b328ca18
PZ
404{
405 hotcpu_notifier(hotplug_hrtick, 0);
406}
31656519
PZ
407#else
408/*
409 * Called to set the hrtick timer state.
410 *
411 * called with rq->lock held and irqs disabled
412 */
029632fb 413void hrtick_start(struct rq *rq, u64 delay)
31656519 414{
86893335
WL
415 /*
416 * Don't schedule slices shorter than 10000ns, that just
417 * doesn't make sense. Rely on vruntime for fairness.
418 */
419 delay = max_t(u64, delay, 10000LL);
4961b6e1
TG
420 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
421 HRTIMER_MODE_REL_PINNED);
31656519 422}
b328ca18 423
006c75f1 424static inline void init_hrtick(void)
8f4d37ec 425{
8f4d37ec 426}
31656519 427#endif /* CONFIG_SMP */
8f4d37ec 428
31656519 429static void init_rq_hrtick(struct rq *rq)
8f4d37ec 430{
31656519
PZ
431#ifdef CONFIG_SMP
432 rq->hrtick_csd_pending = 0;
8f4d37ec 433
31656519
PZ
434 rq->hrtick_csd.flags = 0;
435 rq->hrtick_csd.func = __hrtick_start;
436 rq->hrtick_csd.info = rq;
437#endif
8f4d37ec 438
31656519
PZ
439 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
440 rq->hrtick_timer.function = hrtick;
8f4d37ec 441}
006c75f1 442#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
443static inline void hrtick_clear(struct rq *rq)
444{
445}
446
8f4d37ec
PZ
447static inline void init_rq_hrtick(struct rq *rq)
448{
449}
450
b328ca18
PZ
451static inline void init_hrtick(void)
452{
453}
006c75f1 454#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 455
fd99f91a
PZ
456/*
457 * cmpxchg based fetch_or, macro so it works for different integer types
458 */
459#define fetch_or(ptr, val) \
460({ typeof(*(ptr)) __old, __val = *(ptr); \
461 for (;;) { \
462 __old = cmpxchg((ptr), __val, __val | (val)); \
463 if (__old == __val) \
464 break; \
465 __val = __old; \
466 } \
467 __old; \
468})
469
e3baac47 470#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
fd99f91a
PZ
471/*
472 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
473 * this avoids any races wrt polling state changes and thereby avoids
474 * spurious IPIs.
475 */
476static bool set_nr_and_not_polling(struct task_struct *p)
477{
478 struct thread_info *ti = task_thread_info(p);
479 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
480}
e3baac47
PZ
481
482/*
483 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
484 *
485 * If this returns true, then the idle task promises to call
486 * sched_ttwu_pending() and reschedule soon.
487 */
488static bool set_nr_if_polling(struct task_struct *p)
489{
490 struct thread_info *ti = task_thread_info(p);
316c1608 491 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
e3baac47
PZ
492
493 for (;;) {
494 if (!(val & _TIF_POLLING_NRFLAG))
495 return false;
496 if (val & _TIF_NEED_RESCHED)
497 return true;
498 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
499 if (old == val)
500 break;
501 val = old;
502 }
503 return true;
504}
505
fd99f91a
PZ
506#else
507static bool set_nr_and_not_polling(struct task_struct *p)
508{
509 set_tsk_need_resched(p);
510 return true;
511}
e3baac47
PZ
512
513#ifdef CONFIG_SMP
514static bool set_nr_if_polling(struct task_struct *p)
515{
516 return false;
517}
518#endif
fd99f91a
PZ
519#endif
520
76751049
PZ
521void wake_q_add(struct wake_q_head *head, struct task_struct *task)
522{
523 struct wake_q_node *node = &task->wake_q;
524
525 /*
526 * Atomically grab the task, if ->wake_q is !nil already it means
527 * its already queued (either by us or someone else) and will get the
528 * wakeup due to that.
529 *
530 * This cmpxchg() implies a full barrier, which pairs with the write
531 * barrier implied by the wakeup in wake_up_list().
532 */
533 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
534 return;
535
536 get_task_struct(task);
537
538 /*
539 * The head is context local, there can be no concurrency.
540 */
541 *head->lastp = node;
542 head->lastp = &node->next;
543}
544
545void wake_up_q(struct wake_q_head *head)
546{
547 struct wake_q_node *node = head->first;
548
549 while (node != WAKE_Q_TAIL) {
550 struct task_struct *task;
551
552 task = container_of(node, struct task_struct, wake_q);
553 BUG_ON(!task);
554 /* task can safely be re-inserted now */
555 node = node->next;
556 task->wake_q.next = NULL;
557
558 /*
559 * wake_up_process() implies a wmb() to pair with the queueing
560 * in wake_q_add() so as not to miss wakeups.
561 */
562 wake_up_process(task);
563 put_task_struct(task);
564 }
565}
566
c24d20db 567/*
8875125e 568 * resched_curr - mark rq's current task 'to be rescheduled now'.
c24d20db
IM
569 *
570 * On UP this means the setting of the need_resched flag, on SMP it
571 * might also involve a cross-CPU call to trigger the scheduler on
572 * the target CPU.
573 */
8875125e 574void resched_curr(struct rq *rq)
c24d20db 575{
8875125e 576 struct task_struct *curr = rq->curr;
c24d20db
IM
577 int cpu;
578
8875125e 579 lockdep_assert_held(&rq->lock);
c24d20db 580
8875125e 581 if (test_tsk_need_resched(curr))
c24d20db
IM
582 return;
583
8875125e 584 cpu = cpu_of(rq);
fd99f91a 585
f27dde8d 586 if (cpu == smp_processor_id()) {
8875125e 587 set_tsk_need_resched(curr);
f27dde8d 588 set_preempt_need_resched();
c24d20db 589 return;
f27dde8d 590 }
c24d20db 591
8875125e 592 if (set_nr_and_not_polling(curr))
c24d20db 593 smp_send_reschedule(cpu);
dfc68f29
AL
594 else
595 trace_sched_wake_idle_without_ipi(cpu);
c24d20db
IM
596}
597
029632fb 598void resched_cpu(int cpu)
c24d20db
IM
599{
600 struct rq *rq = cpu_rq(cpu);
601 unsigned long flags;
602
05fa785c 603 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
c24d20db 604 return;
8875125e 605 resched_curr(rq);
05fa785c 606 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 607}
06d8308c 608
b021fe3e 609#ifdef CONFIG_SMP
3451d024 610#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
611/*
612 * In the semi idle case, use the nearest busy cpu for migrating timers
613 * from an idle cpu. This is good for power-savings.
614 *
615 * We don't do similar optimization for completely idle system, as
616 * selecting an idle cpu will add more delays to the timers than intended
617 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
618 */
bc7a34b8 619int get_nohz_timer_target(void)
83cd4fe2 620{
bc7a34b8 621 int i, cpu = smp_processor_id();
83cd4fe2
VP
622 struct sched_domain *sd;
623
9642d18e 624 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
6201b4d6
VK
625 return cpu;
626
057f3fad 627 rcu_read_lock();
83cd4fe2 628 for_each_domain(cpu, sd) {
057f3fad 629 for_each_cpu(i, sched_domain_span(sd)) {
9642d18e 630 if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
057f3fad
PZ
631 cpu = i;
632 goto unlock;
633 }
634 }
83cd4fe2 635 }
9642d18e
VH
636
637 if (!is_housekeeping_cpu(cpu))
638 cpu = housekeeping_any_cpu();
057f3fad
PZ
639unlock:
640 rcu_read_unlock();
83cd4fe2
VP
641 return cpu;
642}
06d8308c
TG
643/*
644 * When add_timer_on() enqueues a timer into the timer wheel of an
645 * idle CPU then this timer might expire before the next timer event
646 * which is scheduled to wake up that CPU. In case of a completely
647 * idle system the next event might even be infinite time into the
648 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
649 * leaves the inner idle loop so the newly added timer is taken into
650 * account when the CPU goes back to idle and evaluates the timer
651 * wheel for the next timer event.
652 */
1c20091e 653static void wake_up_idle_cpu(int cpu)
06d8308c
TG
654{
655 struct rq *rq = cpu_rq(cpu);
656
657 if (cpu == smp_processor_id())
658 return;
659
67b9ca70 660 if (set_nr_and_not_polling(rq->idle))
06d8308c 661 smp_send_reschedule(cpu);
dfc68f29
AL
662 else
663 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
664}
665
c5bfece2 666static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 667{
53c5fa16
FW
668 /*
669 * We just need the target to call irq_exit() and re-evaluate
670 * the next tick. The nohz full kick at least implies that.
671 * If needed we can still optimize that later with an
672 * empty IRQ.
673 */
c5bfece2 674 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
675 if (cpu != smp_processor_id() ||
676 tick_nohz_tick_stopped())
53c5fa16 677 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
678 return true;
679 }
680
681 return false;
682}
683
684void wake_up_nohz_cpu(int cpu)
685{
c5bfece2 686 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
687 wake_up_idle_cpu(cpu);
688}
689
ca38062e 690static inline bool got_nohz_idle_kick(void)
45bf76df 691{
1c792db7 692 int cpu = smp_processor_id();
873b4c65
VG
693
694 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
695 return false;
696
697 if (idle_cpu(cpu) && !need_resched())
698 return true;
699
700 /*
701 * We can't run Idle Load Balance on this CPU for this time so we
702 * cancel it and clear NOHZ_BALANCE_KICK
703 */
704 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
705 return false;
45bf76df
IM
706}
707
3451d024 708#else /* CONFIG_NO_HZ_COMMON */
45bf76df 709
ca38062e 710static inline bool got_nohz_idle_kick(void)
2069dd75 711{
ca38062e 712 return false;
2069dd75
PZ
713}
714
3451d024 715#endif /* CONFIG_NO_HZ_COMMON */
d842de87 716
ce831b38
FW
717#ifdef CONFIG_NO_HZ_FULL
718bool sched_can_stop_tick(void)
719{
1e78cdbd
RR
720 /*
721 * FIFO realtime policy runs the highest priority task. Other runnable
722 * tasks are of a lower priority. The scheduler tick does nothing.
723 */
724 if (current->policy == SCHED_FIFO)
725 return true;
726
727 /*
728 * Round-robin realtime tasks time slice with other tasks at the same
729 * realtime priority. Is this task the only one at this priority?
730 */
731 if (current->policy == SCHED_RR) {
732 struct sched_rt_entity *rt_se = &current->rt;
733
734 return rt_se->run_list.prev == rt_se->run_list.next;
735 }
736
3882ec64
FW
737 /*
738 * More than one running task need preemption.
739 * nr_running update is assumed to be visible
740 * after IPI is sent from wakers.
741 */
541b8264
VK
742 if (this_rq()->nr_running > 1)
743 return false;
ce831b38 744
541b8264 745 return true;
ce831b38
FW
746}
747#endif /* CONFIG_NO_HZ_FULL */
d842de87 748
029632fb 749void sched_avg_update(struct rq *rq)
18d95a28 750{
e9e9250b
PZ
751 s64 period = sched_avg_period();
752
78becc27 753 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
0d98bb26
WD
754 /*
755 * Inline assembly required to prevent the compiler
756 * optimising this loop into a divmod call.
757 * See __iter_div_u64_rem() for another example of this.
758 */
759 asm("" : "+rm" (rq->age_stamp));
e9e9250b
PZ
760 rq->age_stamp += period;
761 rq->rt_avg /= 2;
762 }
18d95a28
PZ
763}
764
6d6bc0ad 765#endif /* CONFIG_SMP */
18d95a28 766
a790de99
PT
767#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
768 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 769/*
8277434e
PT
770 * Iterate task_group tree rooted at *from, calling @down when first entering a
771 * node and @up when leaving it for the final time.
772 *
773 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 774 */
029632fb 775int walk_tg_tree_from(struct task_group *from,
8277434e 776 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
777{
778 struct task_group *parent, *child;
eb755805 779 int ret;
c09595f6 780
8277434e
PT
781 parent = from;
782
c09595f6 783down:
eb755805
PZ
784 ret = (*down)(parent, data);
785 if (ret)
8277434e 786 goto out;
c09595f6
PZ
787 list_for_each_entry_rcu(child, &parent->children, siblings) {
788 parent = child;
789 goto down;
790
791up:
792 continue;
793 }
eb755805 794 ret = (*up)(parent, data);
8277434e
PT
795 if (ret || parent == from)
796 goto out;
c09595f6
PZ
797
798 child = parent;
799 parent = parent->parent;
800 if (parent)
801 goto up;
8277434e 802out:
eb755805 803 return ret;
c09595f6
PZ
804}
805
029632fb 806int tg_nop(struct task_group *tg, void *data)
eb755805 807{
e2b245f8 808 return 0;
eb755805 809}
18d95a28
PZ
810#endif
811
45bf76df
IM
812static void set_load_weight(struct task_struct *p)
813{
f05998d4
NR
814 int prio = p->static_prio - MAX_RT_PRIO;
815 struct load_weight *load = &p->se.load;
816
dd41f596
IM
817 /*
818 * SCHED_IDLE tasks get minimal weight:
819 */
20f9cd2a 820 if (idle_policy(p->policy)) {
c8b28116 821 load->weight = scale_load(WEIGHT_IDLEPRIO);
f05998d4 822 load->inv_weight = WMULT_IDLEPRIO;
dd41f596
IM
823 return;
824 }
71f8bd46 825
c8b28116 826 load->weight = scale_load(prio_to_weight[prio]);
f05998d4 827 load->inv_weight = prio_to_wmult[prio];
71f8bd46
IM
828}
829
1de64443 830static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 831{
a64692a3 832 update_rq_clock(rq);
1de64443
PZ
833 if (!(flags & ENQUEUE_RESTORE))
834 sched_info_queued(rq, p);
371fd7e7 835 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
836}
837
1de64443 838static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 839{
a64692a3 840 update_rq_clock(rq);
1de64443
PZ
841 if (!(flags & DEQUEUE_SAVE))
842 sched_info_dequeued(rq, p);
371fd7e7 843 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
844}
845
029632fb 846void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
847{
848 if (task_contributes_to_load(p))
849 rq->nr_uninterruptible--;
850
371fd7e7 851 enqueue_task(rq, p, flags);
1e3c88bd
PZ
852}
853
029632fb 854void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
855{
856 if (task_contributes_to_load(p))
857 rq->nr_uninterruptible++;
858
371fd7e7 859 dequeue_task(rq, p, flags);
1e3c88bd
PZ
860}
861
fe44d621 862static void update_rq_clock_task(struct rq *rq, s64 delta)
aa483808 863{
095c0aa8
GC
864/*
865 * In theory, the compile should just see 0 here, and optimize out the call
866 * to sched_rt_avg_update. But I don't trust it...
867 */
868#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
869 s64 steal = 0, irq_delta = 0;
870#endif
871#ifdef CONFIG_IRQ_TIME_ACCOUNTING
8e92c201 872 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
fe44d621
PZ
873
874 /*
875 * Since irq_time is only updated on {soft,}irq_exit, we might run into
876 * this case when a previous update_rq_clock() happened inside a
877 * {soft,}irq region.
878 *
879 * When this happens, we stop ->clock_task and only update the
880 * prev_irq_time stamp to account for the part that fit, so that a next
881 * update will consume the rest. This ensures ->clock_task is
882 * monotonic.
883 *
884 * It does however cause some slight miss-attribution of {soft,}irq
885 * time, a more accurate solution would be to update the irq_time using
886 * the current rq->clock timestamp, except that would require using
887 * atomic ops.
888 */
889 if (irq_delta > delta)
890 irq_delta = delta;
891
892 rq->prev_irq_time += irq_delta;
893 delta -= irq_delta;
095c0aa8
GC
894#endif
895#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
c5905afb 896 if (static_key_false((&paravirt_steal_rq_enabled))) {
095c0aa8
GC
897 steal = paravirt_steal_clock(cpu_of(rq));
898 steal -= rq->prev_steal_time_rq;
899
900 if (unlikely(steal > delta))
901 steal = delta;
902
095c0aa8 903 rq->prev_steal_time_rq += steal;
095c0aa8
GC
904 delta -= steal;
905 }
906#endif
907
fe44d621
PZ
908 rq->clock_task += delta;
909
095c0aa8 910#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
5d4dfddd 911 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
095c0aa8
GC
912 sched_rt_avg_update(rq, irq_delta + steal);
913#endif
aa483808
VP
914}
915
34f971f6
PZ
916void sched_set_stop_task(int cpu, struct task_struct *stop)
917{
918 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
919 struct task_struct *old_stop = cpu_rq(cpu)->stop;
920
921 if (stop) {
922 /*
923 * Make it appear like a SCHED_FIFO task, its something
924 * userspace knows about and won't get confused about.
925 *
926 * Also, it will make PI more or less work without too
927 * much confusion -- but then, stop work should not
928 * rely on PI working anyway.
929 */
930 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
931
932 stop->sched_class = &stop_sched_class;
933 }
934
935 cpu_rq(cpu)->stop = stop;
936
937 if (old_stop) {
938 /*
939 * Reset it back to a normal scheduling class so that
940 * it can die in pieces.
941 */
942 old_stop->sched_class = &rt_sched_class;
943 }
944}
945
14531189 946/*
dd41f596 947 * __normal_prio - return the priority that is based on the static prio
14531189 948 */
14531189
IM
949static inline int __normal_prio(struct task_struct *p)
950{
dd41f596 951 return p->static_prio;
14531189
IM
952}
953
b29739f9
IM
954/*
955 * Calculate the expected normal priority: i.e. priority
956 * without taking RT-inheritance into account. Might be
957 * boosted by interactivity modifiers. Changes upon fork,
958 * setprio syscalls, and whenever the interactivity
959 * estimator recalculates.
960 */
36c8b586 961static inline int normal_prio(struct task_struct *p)
b29739f9
IM
962{
963 int prio;
964
aab03e05
DF
965 if (task_has_dl_policy(p))
966 prio = MAX_DL_PRIO-1;
967 else if (task_has_rt_policy(p))
b29739f9
IM
968 prio = MAX_RT_PRIO-1 - p->rt_priority;
969 else
970 prio = __normal_prio(p);
971 return prio;
972}
973
974/*
975 * Calculate the current priority, i.e. the priority
976 * taken into account by the scheduler. This value might
977 * be boosted by RT tasks, or might be boosted by
978 * interactivity modifiers. Will be RT if the task got
979 * RT-boosted. If not then it returns p->normal_prio.
980 */
36c8b586 981static int effective_prio(struct task_struct *p)
b29739f9
IM
982{
983 p->normal_prio = normal_prio(p);
984 /*
985 * If we are RT tasks or we were boosted to RT priority,
986 * keep the priority unchanged. Otherwise, update priority
987 * to the normal priority:
988 */
989 if (!rt_prio(p->prio))
990 return p->normal_prio;
991 return p->prio;
992}
993
1da177e4
LT
994/**
995 * task_curr - is this task currently executing on a CPU?
996 * @p: the task in question.
e69f6186
YB
997 *
998 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 999 */
36c8b586 1000inline int task_curr(const struct task_struct *p)
1da177e4
LT
1001{
1002 return cpu_curr(task_cpu(p)) == p;
1003}
1004
67dfa1b7 1005/*
4c9a4bc8
PZ
1006 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1007 * use the balance_callback list if you want balancing.
1008 *
1009 * this means any call to check_class_changed() must be followed by a call to
1010 * balance_callback().
67dfa1b7 1011 */
cb469845
SR
1012static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1013 const struct sched_class *prev_class,
da7a735e 1014 int oldprio)
cb469845
SR
1015{
1016 if (prev_class != p->sched_class) {
1017 if (prev_class->switched_from)
da7a735e 1018 prev_class->switched_from(rq, p);
4c9a4bc8 1019
da7a735e 1020 p->sched_class->switched_to(rq, p);
2d3d891d 1021 } else if (oldprio != p->prio || dl_task(p))
da7a735e 1022 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
1023}
1024
029632fb 1025void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405
PZ
1026{
1027 const struct sched_class *class;
1028
1029 if (p->sched_class == rq->curr->sched_class) {
1030 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1031 } else {
1032 for_each_class(class) {
1033 if (class == rq->curr->sched_class)
1034 break;
1035 if (class == p->sched_class) {
8875125e 1036 resched_curr(rq);
1e5a7405
PZ
1037 break;
1038 }
1039 }
1040 }
1041
1042 /*
1043 * A queue event has occurred, and we're going to schedule. In
1044 * this case, we can save a useless back to back clock update.
1045 */
da0c1e65 1046 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
9edfbfed 1047 rq_clock_skip_update(rq, true);
1e5a7405
PZ
1048}
1049
1da177e4 1050#ifdef CONFIG_SMP
5cc389bc
PZ
1051/*
1052 * This is how migration works:
1053 *
1054 * 1) we invoke migration_cpu_stop() on the target CPU using
1055 * stop_one_cpu().
1056 * 2) stopper starts to run (implicitly forcing the migrated thread
1057 * off the CPU)
1058 * 3) it checks whether the migrated task is still in the wrong runqueue.
1059 * 4) if it's in the wrong runqueue then the migration thread removes
1060 * it and puts it into the right queue.
1061 * 5) stopper completes and stop_one_cpu() returns and the migration
1062 * is done.
1063 */
1064
1065/*
1066 * move_queued_task - move a queued task to new rq.
1067 *
1068 * Returns (locked) new rq. Old rq's lock is released.
1069 */
5e16bbc2 1070static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
5cc389bc 1071{
5cc389bc
PZ
1072 lockdep_assert_held(&rq->lock);
1073
1074 dequeue_task(rq, p, 0);
1075 p->on_rq = TASK_ON_RQ_MIGRATING;
1076 set_task_cpu(p, new_cpu);
1077 raw_spin_unlock(&rq->lock);
1078
1079 rq = cpu_rq(new_cpu);
1080
1081 raw_spin_lock(&rq->lock);
1082 BUG_ON(task_cpu(p) != new_cpu);
1083 p->on_rq = TASK_ON_RQ_QUEUED;
1084 enqueue_task(rq, p, 0);
1085 check_preempt_curr(rq, p, 0);
1086
1087 return rq;
1088}
1089
1090struct migration_arg {
1091 struct task_struct *task;
1092 int dest_cpu;
1093};
1094
1095/*
1096 * Move (not current) task off this cpu, onto dest cpu. We're doing
1097 * this because either it can't run here any more (set_cpus_allowed()
1098 * away from this CPU, or CPU going down), or because we're
1099 * attempting to rebalance this task on exec (sched_exec).
1100 *
1101 * So we race with normal scheduler movements, but that's OK, as long
1102 * as the task is no longer on this CPU.
5cc389bc 1103 */
5e16bbc2 1104static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
5cc389bc 1105{
5cc389bc 1106 if (unlikely(!cpu_active(dest_cpu)))
5e16bbc2 1107 return rq;
5cc389bc
PZ
1108
1109 /* Affinity changed (again). */
1110 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
5e16bbc2 1111 return rq;
5cc389bc 1112
5e16bbc2
PZ
1113 rq = move_queued_task(rq, p, dest_cpu);
1114
1115 return rq;
5cc389bc
PZ
1116}
1117
1118/*
1119 * migration_cpu_stop - this will be executed by a highprio stopper thread
1120 * and performs thread migration by bumping thread off CPU then
1121 * 'pushing' onto another runqueue.
1122 */
1123static int migration_cpu_stop(void *data)
1124{
1125 struct migration_arg *arg = data;
5e16bbc2
PZ
1126 struct task_struct *p = arg->task;
1127 struct rq *rq = this_rq();
5cc389bc
PZ
1128
1129 /*
1130 * The original target cpu might have gone down and we might
1131 * be on another cpu but it doesn't matter.
1132 */
1133 local_irq_disable();
1134 /*
1135 * We need to explicitly wake pending tasks before running
1136 * __migrate_task() such that we will not miss enforcing cpus_allowed
1137 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1138 */
1139 sched_ttwu_pending();
5e16bbc2
PZ
1140
1141 raw_spin_lock(&p->pi_lock);
1142 raw_spin_lock(&rq->lock);
1143 /*
1144 * If task_rq(p) != rq, it cannot be migrated here, because we're
1145 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1146 * we're holding p->pi_lock.
1147 */
1148 if (task_rq(p) == rq && task_on_rq_queued(p))
1149 rq = __migrate_task(rq, p, arg->dest_cpu);
1150 raw_spin_unlock(&rq->lock);
1151 raw_spin_unlock(&p->pi_lock);
1152
5cc389bc
PZ
1153 local_irq_enable();
1154 return 0;
1155}
1156
c5b28038
PZ
1157/*
1158 * sched_class::set_cpus_allowed must do the below, but is not required to
1159 * actually call this function.
1160 */
1161void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
5cc389bc 1162{
5cc389bc
PZ
1163 cpumask_copy(&p->cpus_allowed, new_mask);
1164 p->nr_cpus_allowed = cpumask_weight(new_mask);
1165}
1166
c5b28038
PZ
1167void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1168{
6c37067e
PZ
1169 struct rq *rq = task_rq(p);
1170 bool queued, running;
1171
c5b28038 1172 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
1173
1174 queued = task_on_rq_queued(p);
1175 running = task_current(rq, p);
1176
1177 if (queued) {
1178 /*
1179 * Because __kthread_bind() calls this on blocked tasks without
1180 * holding rq->lock.
1181 */
1182 lockdep_assert_held(&rq->lock);
1de64443 1183 dequeue_task(rq, p, DEQUEUE_SAVE);
6c37067e
PZ
1184 }
1185 if (running)
1186 put_prev_task(rq, p);
1187
c5b28038 1188 p->sched_class->set_cpus_allowed(p, new_mask);
6c37067e
PZ
1189
1190 if (running)
1191 p->sched_class->set_curr_task(rq);
1192 if (queued)
1de64443 1193 enqueue_task(rq, p, ENQUEUE_RESTORE);
c5b28038
PZ
1194}
1195
5cc389bc
PZ
1196/*
1197 * Change a given task's CPU affinity. Migrate the thread to a
1198 * proper CPU and schedule it away if the CPU it's executing on
1199 * is removed from the allowed bitmask.
1200 *
1201 * NOTE: the caller must have a valid reference to the task, the
1202 * task must not exit() & deallocate itself prematurely. The
1203 * call is not atomic; no spinlocks may be held.
1204 */
25834c73
PZ
1205static int __set_cpus_allowed_ptr(struct task_struct *p,
1206 const struct cpumask *new_mask, bool check)
5cc389bc
PZ
1207{
1208 unsigned long flags;
1209 struct rq *rq;
1210 unsigned int dest_cpu;
1211 int ret = 0;
1212
1213 rq = task_rq_lock(p, &flags);
1214
25834c73
PZ
1215 /*
1216 * Must re-check here, to close a race against __kthread_bind(),
1217 * sched_setaffinity() is not guaranteed to observe the flag.
1218 */
1219 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1220 ret = -EINVAL;
1221 goto out;
1222 }
1223
5cc389bc
PZ
1224 if (cpumask_equal(&p->cpus_allowed, new_mask))
1225 goto out;
1226
1227 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1228 ret = -EINVAL;
1229 goto out;
1230 }
1231
1232 do_set_cpus_allowed(p, new_mask);
1233
1234 /* Can the task run on the task's current CPU? If so, we're done */
1235 if (cpumask_test_cpu(task_cpu(p), new_mask))
1236 goto out;
1237
1238 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
1239 if (task_running(rq, p) || p->state == TASK_WAKING) {
1240 struct migration_arg arg = { p, dest_cpu };
1241 /* Need help from migration thread: drop lock and wait. */
1242 task_rq_unlock(rq, p, &flags);
1243 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1244 tlb_migrate_finish(p->mm);
1245 return 0;
cbce1a68
PZ
1246 } else if (task_on_rq_queued(p)) {
1247 /*
1248 * OK, since we're going to drop the lock immediately
1249 * afterwards anyway.
1250 */
1251 lockdep_unpin_lock(&rq->lock);
5e16bbc2 1252 rq = move_queued_task(rq, p, dest_cpu);
cbce1a68
PZ
1253 lockdep_pin_lock(&rq->lock);
1254 }
5cc389bc
PZ
1255out:
1256 task_rq_unlock(rq, p, &flags);
1257
1258 return ret;
1259}
25834c73
PZ
1260
1261int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1262{
1263 return __set_cpus_allowed_ptr(p, new_mask, false);
1264}
5cc389bc
PZ
1265EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1266
dd41f596 1267void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 1268{
e2912009
PZ
1269#ifdef CONFIG_SCHED_DEBUG
1270 /*
1271 * We should never call set_task_cpu() on a blocked task,
1272 * ttwu() will sort out the placement.
1273 */
077614ee 1274 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
e2336f6e 1275 !p->on_rq);
0122ec5b
PZ
1276
1277#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
1278 /*
1279 * The caller should hold either p->pi_lock or rq->lock, when changing
1280 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1281 *
1282 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 1283 * see task_group().
6c6c54e1
PZ
1284 *
1285 * Furthermore, all task_rq users should acquire both locks, see
1286 * task_rq_lock().
1287 */
0122ec5b
PZ
1288 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1289 lockdep_is_held(&task_rq(p)->lock)));
1290#endif
e2912009
PZ
1291#endif
1292
de1d7286 1293 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 1294
0c69774e 1295 if (task_cpu(p) != new_cpu) {
0a74bef8 1296 if (p->sched_class->migrate_task_rq)
5a4fd036 1297 p->sched_class->migrate_task_rq(p);
0c69774e 1298 p->se.nr_migrations++;
ff303e66 1299 perf_event_task_migrate(p);
0c69774e 1300 }
dd41f596
IM
1301
1302 __set_task_cpu(p, new_cpu);
c65cc870
IM
1303}
1304
ac66f547
PZ
1305static void __migrate_swap_task(struct task_struct *p, int cpu)
1306{
da0c1e65 1307 if (task_on_rq_queued(p)) {
ac66f547
PZ
1308 struct rq *src_rq, *dst_rq;
1309
1310 src_rq = task_rq(p);
1311 dst_rq = cpu_rq(cpu);
1312
1313 deactivate_task(src_rq, p, 0);
1314 set_task_cpu(p, cpu);
1315 activate_task(dst_rq, p, 0);
1316 check_preempt_curr(dst_rq, p, 0);
1317 } else {
1318 /*
1319 * Task isn't running anymore; make it appear like we migrated
1320 * it before it went to sleep. This means on wakeup we make the
1321 * previous cpu our targer instead of where it really is.
1322 */
1323 p->wake_cpu = cpu;
1324 }
1325}
1326
1327struct migration_swap_arg {
1328 struct task_struct *src_task, *dst_task;
1329 int src_cpu, dst_cpu;
1330};
1331
1332static int migrate_swap_stop(void *data)
1333{
1334 struct migration_swap_arg *arg = data;
1335 struct rq *src_rq, *dst_rq;
1336 int ret = -EAGAIN;
1337
62694cd5
PZ
1338 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1339 return -EAGAIN;
1340
ac66f547
PZ
1341 src_rq = cpu_rq(arg->src_cpu);
1342 dst_rq = cpu_rq(arg->dst_cpu);
1343
74602315
PZ
1344 double_raw_lock(&arg->src_task->pi_lock,
1345 &arg->dst_task->pi_lock);
ac66f547 1346 double_rq_lock(src_rq, dst_rq);
62694cd5 1347
ac66f547
PZ
1348 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1349 goto unlock;
1350
1351 if (task_cpu(arg->src_task) != arg->src_cpu)
1352 goto unlock;
1353
1354 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1355 goto unlock;
1356
1357 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1358 goto unlock;
1359
1360 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1361 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1362
1363 ret = 0;
1364
1365unlock:
1366 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
1367 raw_spin_unlock(&arg->dst_task->pi_lock);
1368 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
1369
1370 return ret;
1371}
1372
1373/*
1374 * Cross migrate two tasks
1375 */
1376int migrate_swap(struct task_struct *cur, struct task_struct *p)
1377{
1378 struct migration_swap_arg arg;
1379 int ret = -EINVAL;
1380
ac66f547
PZ
1381 arg = (struct migration_swap_arg){
1382 .src_task = cur,
1383 .src_cpu = task_cpu(cur),
1384 .dst_task = p,
1385 .dst_cpu = task_cpu(p),
1386 };
1387
1388 if (arg.src_cpu == arg.dst_cpu)
1389 goto out;
1390
6acce3ef
PZ
1391 /*
1392 * These three tests are all lockless; this is OK since all of them
1393 * will be re-checked with proper locks held further down the line.
1394 */
ac66f547
PZ
1395 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1396 goto out;
1397
1398 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1399 goto out;
1400
1401 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1402 goto out;
1403
286549dc 1404 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
1405 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1406
1407out:
ac66f547
PZ
1408 return ret;
1409}
1410
1da177e4
LT
1411/*
1412 * wait_task_inactive - wait for a thread to unschedule.
1413 *
85ba2d86
RM
1414 * If @match_state is nonzero, it's the @p->state value just checked and
1415 * not expected to change. If it changes, i.e. @p might have woken up,
1416 * then return zero. When we succeed in waiting for @p to be off its CPU,
1417 * we return a positive number (its total switch count). If a second call
1418 * a short while later returns the same number, the caller can be sure that
1419 * @p has remained unscheduled the whole time.
1420 *
1da177e4
LT
1421 * The caller must ensure that the task *will* unschedule sometime soon,
1422 * else this function might spin for a *long* time. This function can't
1423 * be called with interrupts off, or it may introduce deadlock with
1424 * smp_call_function() if an IPI is sent by the same process we are
1425 * waiting to become inactive.
1426 */
85ba2d86 1427unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
1428{
1429 unsigned long flags;
da0c1e65 1430 int running, queued;
85ba2d86 1431 unsigned long ncsw;
70b97a7f 1432 struct rq *rq;
1da177e4 1433
3a5c359a
AK
1434 for (;;) {
1435 /*
1436 * We do the initial early heuristics without holding
1437 * any task-queue locks at all. We'll only try to get
1438 * the runqueue lock when things look like they will
1439 * work out!
1440 */
1441 rq = task_rq(p);
fa490cfd 1442
3a5c359a
AK
1443 /*
1444 * If the task is actively running on another CPU
1445 * still, just relax and busy-wait without holding
1446 * any locks.
1447 *
1448 * NOTE! Since we don't hold any locks, it's not
1449 * even sure that "rq" stays as the right runqueue!
1450 * But we don't care, since "task_running()" will
1451 * return false if the runqueue has changed and p
1452 * is actually now running somewhere else!
1453 */
85ba2d86
RM
1454 while (task_running(rq, p)) {
1455 if (match_state && unlikely(p->state != match_state))
1456 return 0;
3a5c359a 1457 cpu_relax();
85ba2d86 1458 }
fa490cfd 1459
3a5c359a
AK
1460 /*
1461 * Ok, time to look more closely! We need the rq
1462 * lock now, to be *sure*. If we're wrong, we'll
1463 * just go back and repeat.
1464 */
1465 rq = task_rq_lock(p, &flags);
27a9da65 1466 trace_sched_wait_task(p);
3a5c359a 1467 running = task_running(rq, p);
da0c1e65 1468 queued = task_on_rq_queued(p);
85ba2d86 1469 ncsw = 0;
f31e11d8 1470 if (!match_state || p->state == match_state)
93dcf55f 1471 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
0122ec5b 1472 task_rq_unlock(rq, p, &flags);
fa490cfd 1473
85ba2d86
RM
1474 /*
1475 * If it changed from the expected state, bail out now.
1476 */
1477 if (unlikely(!ncsw))
1478 break;
1479
3a5c359a
AK
1480 /*
1481 * Was it really running after all now that we
1482 * checked with the proper locks actually held?
1483 *
1484 * Oops. Go back and try again..
1485 */
1486 if (unlikely(running)) {
1487 cpu_relax();
1488 continue;
1489 }
fa490cfd 1490
3a5c359a
AK
1491 /*
1492 * It's not enough that it's not actively running,
1493 * it must be off the runqueue _entirely_, and not
1494 * preempted!
1495 *
80dd99b3 1496 * So if it was still runnable (but just not actively
3a5c359a
AK
1497 * running right now), it's preempted, and we should
1498 * yield - it could be a while.
1499 */
da0c1e65 1500 if (unlikely(queued)) {
8eb90c30
TG
1501 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1502
1503 set_current_state(TASK_UNINTERRUPTIBLE);
1504 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
1505 continue;
1506 }
fa490cfd 1507
3a5c359a
AK
1508 /*
1509 * Ahh, all good. It wasn't running, and it wasn't
1510 * runnable, which means that it will never become
1511 * running in the future either. We're all done!
1512 */
1513 break;
1514 }
85ba2d86
RM
1515
1516 return ncsw;
1da177e4
LT
1517}
1518
1519/***
1520 * kick_process - kick a running thread to enter/exit the kernel
1521 * @p: the to-be-kicked thread
1522 *
1523 * Cause a process which is running on another CPU to enter
1524 * kernel-mode, without any delay. (to get signals handled.)
1525 *
25985edc 1526 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
1527 * because all it wants to ensure is that the remote task enters
1528 * the kernel. If the IPI races and the task has been migrated
1529 * to another CPU then no harm is done and the purpose has been
1530 * achieved as well.
1531 */
36c8b586 1532void kick_process(struct task_struct *p)
1da177e4
LT
1533{
1534 int cpu;
1535
1536 preempt_disable();
1537 cpu = task_cpu(p);
1538 if ((cpu != smp_processor_id()) && task_curr(p))
1539 smp_send_reschedule(cpu);
1540 preempt_enable();
1541}
b43e3521 1542EXPORT_SYMBOL_GPL(kick_process);
1da177e4 1543
30da688e 1544/*
013fdb80 1545 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
30da688e 1546 */
5da9a0fb
PZ
1547static int select_fallback_rq(int cpu, struct task_struct *p)
1548{
aa00d89c
TC
1549 int nid = cpu_to_node(cpu);
1550 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
1551 enum { cpuset, possible, fail } state = cpuset;
1552 int dest_cpu;
5da9a0fb 1553
aa00d89c
TC
1554 /*
1555 * If the node that the cpu is on has been offlined, cpu_to_node()
1556 * will return -1. There is no cpu on the node, and we should
1557 * select the cpu on the other node.
1558 */
1559 if (nid != -1) {
1560 nodemask = cpumask_of_node(nid);
1561
1562 /* Look for allowed, online CPU in same node. */
1563 for_each_cpu(dest_cpu, nodemask) {
1564 if (!cpu_online(dest_cpu))
1565 continue;
1566 if (!cpu_active(dest_cpu))
1567 continue;
1568 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1569 return dest_cpu;
1570 }
2baab4e9 1571 }
5da9a0fb 1572
2baab4e9
PZ
1573 for (;;) {
1574 /* Any allowed, online CPU? */
e3831edd 1575 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
2baab4e9
PZ
1576 if (!cpu_online(dest_cpu))
1577 continue;
1578 if (!cpu_active(dest_cpu))
1579 continue;
1580 goto out;
1581 }
5da9a0fb 1582
e73e85f0 1583 /* No more Mr. Nice Guy. */
2baab4e9
PZ
1584 switch (state) {
1585 case cpuset:
e73e85f0
ON
1586 if (IS_ENABLED(CONFIG_CPUSETS)) {
1587 cpuset_cpus_allowed_fallback(p);
1588 state = possible;
1589 break;
1590 }
1591 /* fall-through */
2baab4e9
PZ
1592 case possible:
1593 do_set_cpus_allowed(p, cpu_possible_mask);
1594 state = fail;
1595 break;
1596
1597 case fail:
1598 BUG();
1599 break;
1600 }
1601 }
1602
1603out:
1604 if (state != cpuset) {
1605 /*
1606 * Don't tell them about moving exiting tasks or
1607 * kernel threads (both mm NULL), since they never
1608 * leave kernel.
1609 */
1610 if (p->mm && printk_ratelimit()) {
aac74dc4 1611 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
1612 task_pid_nr(p), p->comm, cpu);
1613 }
5da9a0fb
PZ
1614 }
1615
1616 return dest_cpu;
1617}
1618
e2912009 1619/*
013fdb80 1620 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
e2912009 1621 */
970b13ba 1622static inline
ac66f547 1623int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
970b13ba 1624{
cbce1a68
PZ
1625 lockdep_assert_held(&p->pi_lock);
1626
6c1d9410
WL
1627 if (p->nr_cpus_allowed > 1)
1628 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
e2912009
PZ
1629
1630 /*
1631 * In order not to call set_task_cpu() on a blocking task we need
1632 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1633 * cpu.
1634 *
1635 * Since this is common to all placement strategies, this lives here.
1636 *
1637 * [ this allows ->select_task() to simply return task_cpu(p) and
1638 * not worry about this generic constraint ]
1639 */
fa17b507 1640 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
70f11205 1641 !cpu_online(cpu)))
5da9a0fb 1642 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
1643
1644 return cpu;
970b13ba 1645}
09a40af5
MG
1646
1647static void update_avg(u64 *avg, u64 sample)
1648{
1649 s64 diff = sample - *avg;
1650 *avg += diff >> 3;
1651}
25834c73
PZ
1652
1653#else
1654
1655static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1656 const struct cpumask *new_mask, bool check)
1657{
1658 return set_cpus_allowed_ptr(p, new_mask);
1659}
1660
5cc389bc 1661#endif /* CONFIG_SMP */
970b13ba 1662
d7c01d27 1663static void
b84cb5df 1664ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 1665{
d7c01d27 1666#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
1667 struct rq *rq = this_rq();
1668
d7c01d27
PZ
1669#ifdef CONFIG_SMP
1670 int this_cpu = smp_processor_id();
1671
1672 if (cpu == this_cpu) {
1673 schedstat_inc(rq, ttwu_local);
1674 schedstat_inc(p, se.statistics.nr_wakeups_local);
1675 } else {
1676 struct sched_domain *sd;
1677
1678 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 1679 rcu_read_lock();
d7c01d27
PZ
1680 for_each_domain(this_cpu, sd) {
1681 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1682 schedstat_inc(sd, ttwu_wake_remote);
1683 break;
1684 }
1685 }
057f3fad 1686 rcu_read_unlock();
d7c01d27 1687 }
f339b9dc
PZ
1688
1689 if (wake_flags & WF_MIGRATED)
1690 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1691
d7c01d27
PZ
1692#endif /* CONFIG_SMP */
1693
1694 schedstat_inc(rq, ttwu_count);
9ed3811a 1695 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
1696
1697 if (wake_flags & WF_SYNC)
9ed3811a 1698 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27 1699
d7c01d27
PZ
1700#endif /* CONFIG_SCHEDSTATS */
1701}
1702
1de64443 1703static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
d7c01d27 1704{
9ed3811a 1705 activate_task(rq, p, en_flags);
da0c1e65 1706 p->on_rq = TASK_ON_RQ_QUEUED;
c2f7115e
PZ
1707
1708 /* if a worker is waking up, notify workqueue */
1709 if (p->flags & PF_WQ_WORKER)
1710 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
1711}
1712
23f41eeb
PZ
1713/*
1714 * Mark the task runnable and perform wakeup-preemption.
1715 */
89363381 1716static void
23f41eeb 1717ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
9ed3811a 1718{
9ed3811a 1719 check_preempt_curr(rq, p, wake_flags);
9ed3811a 1720 p->state = TASK_RUNNING;
fbd705a0
PZ
1721 trace_sched_wakeup(p);
1722
9ed3811a 1723#ifdef CONFIG_SMP
4c9a4bc8
PZ
1724 if (p->sched_class->task_woken) {
1725 /*
cbce1a68
PZ
1726 * Our task @p is fully woken up and running; so its safe to
1727 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 1728 */
cbce1a68 1729 lockdep_unpin_lock(&rq->lock);
9ed3811a 1730 p->sched_class->task_woken(rq, p);
cbce1a68 1731 lockdep_pin_lock(&rq->lock);
4c9a4bc8 1732 }
9ed3811a 1733
e69c6341 1734 if (rq->idle_stamp) {
78becc27 1735 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 1736 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 1737
abfafa54
JL
1738 update_avg(&rq->avg_idle, delta);
1739
1740 if (rq->avg_idle > max)
9ed3811a 1741 rq->avg_idle = max;
abfafa54 1742
9ed3811a
TH
1743 rq->idle_stamp = 0;
1744 }
1745#endif
1746}
1747
c05fbafb
PZ
1748static void
1749ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1750{
cbce1a68
PZ
1751 lockdep_assert_held(&rq->lock);
1752
c05fbafb
PZ
1753#ifdef CONFIG_SMP
1754 if (p->sched_contributes_to_load)
1755 rq->nr_uninterruptible--;
1756#endif
1757
1758 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1759 ttwu_do_wakeup(rq, p, wake_flags);
1760}
1761
1762/*
1763 * Called in case the task @p isn't fully descheduled from its runqueue,
1764 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1765 * since all we need to do is flip p->state to TASK_RUNNING, since
1766 * the task is still ->on_rq.
1767 */
1768static int ttwu_remote(struct task_struct *p, int wake_flags)
1769{
1770 struct rq *rq;
1771 int ret = 0;
1772
1773 rq = __task_rq_lock(p);
da0c1e65 1774 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
1775 /* check_preempt_curr() may use rq clock */
1776 update_rq_clock(rq);
c05fbafb
PZ
1777 ttwu_do_wakeup(rq, p, wake_flags);
1778 ret = 1;
1779 }
1780 __task_rq_unlock(rq);
1781
1782 return ret;
1783}
1784
317f3941 1785#ifdef CONFIG_SMP
e3baac47 1786void sched_ttwu_pending(void)
317f3941
PZ
1787{
1788 struct rq *rq = this_rq();
fa14ff4a
PZ
1789 struct llist_node *llist = llist_del_all(&rq->wake_list);
1790 struct task_struct *p;
e3baac47 1791 unsigned long flags;
317f3941 1792
e3baac47
PZ
1793 if (!llist)
1794 return;
1795
1796 raw_spin_lock_irqsave(&rq->lock, flags);
cbce1a68 1797 lockdep_pin_lock(&rq->lock);
317f3941 1798
fa14ff4a
PZ
1799 while (llist) {
1800 p = llist_entry(llist, struct task_struct, wake_entry);
1801 llist = llist_next(llist);
317f3941
PZ
1802 ttwu_do_activate(rq, p, 0);
1803 }
1804
cbce1a68 1805 lockdep_unpin_lock(&rq->lock);
e3baac47 1806 raw_spin_unlock_irqrestore(&rq->lock, flags);
317f3941
PZ
1807}
1808
1809void scheduler_ipi(void)
1810{
f27dde8d
PZ
1811 /*
1812 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1813 * TIF_NEED_RESCHED remotely (for the first time) will also send
1814 * this IPI.
1815 */
8cb75e0c 1816 preempt_fold_need_resched();
f27dde8d 1817
fd2ac4f4 1818 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
1819 return;
1820
1821 /*
1822 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1823 * traditionally all their work was done from the interrupt return
1824 * path. Now that we actually do some work, we need to make sure
1825 * we do call them.
1826 *
1827 * Some archs already do call them, luckily irq_enter/exit nest
1828 * properly.
1829 *
1830 * Arguably we should visit all archs and update all handlers,
1831 * however a fair share of IPIs are still resched only so this would
1832 * somewhat pessimize the simple resched case.
1833 */
1834 irq_enter();
fa14ff4a 1835 sched_ttwu_pending();
ca38062e
SS
1836
1837 /*
1838 * Check if someone kicked us for doing the nohz idle load balance.
1839 */
873b4c65 1840 if (unlikely(got_nohz_idle_kick())) {
6eb57e0d 1841 this_rq()->idle_balance = 1;
ca38062e 1842 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 1843 }
c5d753a5 1844 irq_exit();
317f3941
PZ
1845}
1846
1847static void ttwu_queue_remote(struct task_struct *p, int cpu)
1848{
e3baac47
PZ
1849 struct rq *rq = cpu_rq(cpu);
1850
1851 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1852 if (!set_nr_if_polling(rq->idle))
1853 smp_send_reschedule(cpu);
1854 else
1855 trace_sched_wake_idle_without_ipi(cpu);
1856 }
317f3941 1857}
d6aa8f85 1858
f6be8af1
CL
1859void wake_up_if_idle(int cpu)
1860{
1861 struct rq *rq = cpu_rq(cpu);
1862 unsigned long flags;
1863
fd7de1e8
AL
1864 rcu_read_lock();
1865
1866 if (!is_idle_task(rcu_dereference(rq->curr)))
1867 goto out;
f6be8af1
CL
1868
1869 if (set_nr_if_polling(rq->idle)) {
1870 trace_sched_wake_idle_without_ipi(cpu);
1871 } else {
1872 raw_spin_lock_irqsave(&rq->lock, flags);
1873 if (is_idle_task(rq->curr))
1874 smp_send_reschedule(cpu);
1875 /* Else cpu is not in idle, do nothing here */
1876 raw_spin_unlock_irqrestore(&rq->lock, flags);
1877 }
fd7de1e8
AL
1878
1879out:
1880 rcu_read_unlock();
f6be8af1
CL
1881}
1882
39be3501 1883bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
1884{
1885 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1886}
d6aa8f85 1887#endif /* CONFIG_SMP */
317f3941 1888
c05fbafb
PZ
1889static void ttwu_queue(struct task_struct *p, int cpu)
1890{
1891 struct rq *rq = cpu_rq(cpu);
1892
17d9f311 1893#if defined(CONFIG_SMP)
39be3501 1894 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
f01114cb 1895 sched_clock_cpu(cpu); /* sync clocks x-cpu */
317f3941
PZ
1896 ttwu_queue_remote(p, cpu);
1897 return;
1898 }
1899#endif
1900
c05fbafb 1901 raw_spin_lock(&rq->lock);
cbce1a68 1902 lockdep_pin_lock(&rq->lock);
c05fbafb 1903 ttwu_do_activate(rq, p, 0);
cbce1a68 1904 lockdep_unpin_lock(&rq->lock);
c05fbafb 1905 raw_spin_unlock(&rq->lock);
9ed3811a
TH
1906}
1907
1908/**
1da177e4 1909 * try_to_wake_up - wake up a thread
9ed3811a 1910 * @p: the thread to be awakened
1da177e4 1911 * @state: the mask of task states that can be woken
9ed3811a 1912 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
1913 *
1914 * Put it on the run-queue if it's not already there. The "current"
1915 * thread is always on the run-queue (except when the actual
1916 * re-schedule is in progress), and as such you're allowed to do
1917 * the simpler "current->state = TASK_RUNNING" to mark yourself
1918 * runnable without the overhead of this.
1919 *
e69f6186 1920 * Return: %true if @p was woken up, %false if it was already running.
9ed3811a 1921 * or @state didn't match @p's state.
1da177e4 1922 */
e4a52bcb
PZ
1923static int
1924try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 1925{
1da177e4 1926 unsigned long flags;
c05fbafb 1927 int cpu, success = 0;
2398f2c6 1928
e0acd0a6
ON
1929 /*
1930 * If we are going to wake up a thread waiting for CONDITION we
1931 * need to ensure that CONDITION=1 done by the caller can not be
1932 * reordered with p->state check below. This pairs with mb() in
1933 * set_current_state() the waiting thread does.
1934 */
1935 smp_mb__before_spinlock();
013fdb80 1936 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 1937 if (!(p->state & state))
1da177e4
LT
1938 goto out;
1939
fbd705a0
PZ
1940 trace_sched_waking(p);
1941
c05fbafb 1942 success = 1; /* we're going to change ->state */
1da177e4 1943 cpu = task_cpu(p);
1da177e4 1944
c05fbafb
PZ
1945 if (p->on_rq && ttwu_remote(p, wake_flags))
1946 goto stat;
1da177e4 1947
1da177e4 1948#ifdef CONFIG_SMP
ecf7d01c
PZ
1949 /*
1950 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1951 * possible to, falsely, observe p->on_cpu == 0.
1952 *
1953 * One must be running (->on_cpu == 1) in order to remove oneself
1954 * from the runqueue.
1955 *
1956 * [S] ->on_cpu = 1; [L] ->on_rq
1957 * UNLOCK rq->lock
1958 * RMB
1959 * LOCK rq->lock
1960 * [S] ->on_rq = 0; [L] ->on_cpu
1961 *
1962 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1963 * from the consecutive calls to schedule(); the first switching to our
1964 * task, the second putting it to sleep.
1965 */
1966 smp_rmb();
1967
e9c84311 1968 /*
c05fbafb
PZ
1969 * If the owning (remote) cpu is still in the middle of schedule() with
1970 * this task as prev, wait until its done referencing the task.
e9c84311 1971 */
f3e94786 1972 while (p->on_cpu)
e4a52bcb 1973 cpu_relax();
0970d299 1974 /*
b75a2253
PZ
1975 * Combined with the control dependency above, we have an effective
1976 * smp_load_acquire() without the need for full barriers.
1977 *
1978 * Pairs with the smp_store_release() in finish_lock_switch().
1979 *
1980 * This ensures that tasks getting woken will be fully ordered against
1981 * their previous state and preserve Program Order.
0970d299 1982 */
e4a52bcb 1983 smp_rmb();
1da177e4 1984
a8e4f2ea 1985 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 1986 p->state = TASK_WAKING;
e7693a36 1987
e4a52bcb 1988 if (p->sched_class->task_waking)
74f8e4b2 1989 p->sched_class->task_waking(p);
efbbd05a 1990
ac66f547 1991 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
1992 if (task_cpu(p) != cpu) {
1993 wake_flags |= WF_MIGRATED;
e4a52bcb 1994 set_task_cpu(p, cpu);
f339b9dc 1995 }
1da177e4 1996#endif /* CONFIG_SMP */
1da177e4 1997
c05fbafb
PZ
1998 ttwu_queue(p, cpu);
1999stat:
b84cb5df 2000 ttwu_stat(p, cpu, wake_flags);
1da177e4 2001out:
013fdb80 2002 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
2003
2004 return success;
2005}
2006
21aa9af0
TH
2007/**
2008 * try_to_wake_up_local - try to wake up a local task with rq lock held
2009 * @p: the thread to be awakened
2010 *
2acca55e 2011 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 2012 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 2013 * the current task.
21aa9af0
TH
2014 */
2015static void try_to_wake_up_local(struct task_struct *p)
2016{
2017 struct rq *rq = task_rq(p);
21aa9af0 2018
383efcd0
TH
2019 if (WARN_ON_ONCE(rq != this_rq()) ||
2020 WARN_ON_ONCE(p == current))
2021 return;
2022
21aa9af0
TH
2023 lockdep_assert_held(&rq->lock);
2024
2acca55e 2025 if (!raw_spin_trylock(&p->pi_lock)) {
cbce1a68
PZ
2026 /*
2027 * This is OK, because current is on_cpu, which avoids it being
2028 * picked for load-balance and preemption/IRQs are still
2029 * disabled avoiding further scheduler activity on it and we've
2030 * not yet picked a replacement task.
2031 */
2032 lockdep_unpin_lock(&rq->lock);
2acca55e
PZ
2033 raw_spin_unlock(&rq->lock);
2034 raw_spin_lock(&p->pi_lock);
2035 raw_spin_lock(&rq->lock);
cbce1a68 2036 lockdep_pin_lock(&rq->lock);
2acca55e
PZ
2037 }
2038
21aa9af0 2039 if (!(p->state & TASK_NORMAL))
2acca55e 2040 goto out;
21aa9af0 2041
fbd705a0
PZ
2042 trace_sched_waking(p);
2043
da0c1e65 2044 if (!task_on_rq_queued(p))
d7c01d27
PZ
2045 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2046
23f41eeb 2047 ttwu_do_wakeup(rq, p, 0);
b84cb5df 2048 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
2049out:
2050 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
2051}
2052
50fa610a
DH
2053/**
2054 * wake_up_process - Wake up a specific process
2055 * @p: The process to be woken up.
2056 *
2057 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
2058 * processes.
2059 *
2060 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a
DH
2061 *
2062 * It may be assumed that this function implies a write memory barrier before
2063 * changing the task state if and only if any tasks are woken up.
2064 */
7ad5b3a5 2065int wake_up_process(struct task_struct *p)
1da177e4 2066{
9067ac85 2067 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 2068}
1da177e4
LT
2069EXPORT_SYMBOL(wake_up_process);
2070
7ad5b3a5 2071int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2072{
2073 return try_to_wake_up(p, state, 0);
2074}
2075
a5e7be3b
JL
2076/*
2077 * This function clears the sched_dl_entity static params.
2078 */
2079void __dl_clear_params(struct task_struct *p)
2080{
2081 struct sched_dl_entity *dl_se = &p->dl;
2082
2083 dl_se->dl_runtime = 0;
2084 dl_se->dl_deadline = 0;
2085 dl_se->dl_period = 0;
2086 dl_se->flags = 0;
2087 dl_se->dl_bw = 0;
40767b0d
PZ
2088
2089 dl_se->dl_throttled = 0;
2090 dl_se->dl_new = 1;
2091 dl_se->dl_yielded = 0;
a5e7be3b
JL
2092}
2093
1da177e4
LT
2094/*
2095 * Perform scheduler related setup for a newly forked process p.
2096 * p is forked by current.
dd41f596
IM
2097 *
2098 * __sched_fork() is basic setup used by init_idle() too:
2099 */
5e1576ed 2100static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2101{
fd2f4419
PZ
2102 p->on_rq = 0;
2103
2104 p->se.on_rq = 0;
dd41f596
IM
2105 p->se.exec_start = 0;
2106 p->se.sum_exec_runtime = 0;
f6cf891c 2107 p->se.prev_sum_exec_runtime = 0;
6c594c21 2108 p->se.nr_migrations = 0;
da7a735e 2109 p->se.vruntime = 0;
fd2f4419 2110 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d
IM
2111
2112#ifdef CONFIG_SCHEDSTATS
41acab88 2113 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2114#endif
476d139c 2115
aab03e05 2116 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 2117 init_dl_task_timer(&p->dl);
a5e7be3b 2118 __dl_clear_params(p);
aab03e05 2119
fa717060 2120 INIT_LIST_HEAD(&p->rt.run_list);
476d139c 2121
e107be36
AK
2122#ifdef CONFIG_PREEMPT_NOTIFIERS
2123 INIT_HLIST_HEAD(&p->preempt_notifiers);
2124#endif
cbee9f88
PZ
2125
2126#ifdef CONFIG_NUMA_BALANCING
2127 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
7e8d16b6 2128 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
cbee9f88
PZ
2129 p->mm->numa_scan_seq = 0;
2130 }
2131
5e1576ed
RR
2132 if (clone_flags & CLONE_VM)
2133 p->numa_preferred_nid = current->numa_preferred_nid;
2134 else
2135 p->numa_preferred_nid = -1;
2136
cbee9f88
PZ
2137 p->node_stamp = 0ULL;
2138 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
4b96a29b 2139 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
cbee9f88 2140 p->numa_work.next = &p->numa_work;
44dba3d5 2141 p->numa_faults = NULL;
7e2703e6
RR
2142 p->last_task_numa_placement = 0;
2143 p->last_sum_exec_runtime = 0;
8c8a743c 2144
8c8a743c 2145 p->numa_group = NULL;
cbee9f88 2146#endif /* CONFIG_NUMA_BALANCING */
dd41f596
IM
2147}
2148
2a595721
SD
2149DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2150
1a687c2e 2151#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 2152
1a687c2e
MG
2153void set_numabalancing_state(bool enabled)
2154{
2155 if (enabled)
2a595721 2156 static_branch_enable(&sched_numa_balancing);
1a687c2e 2157 else
2a595721 2158 static_branch_disable(&sched_numa_balancing);
1a687c2e 2159}
54a43d54
AK
2160
2161#ifdef CONFIG_PROC_SYSCTL
2162int sysctl_numa_balancing(struct ctl_table *table, int write,
2163 void __user *buffer, size_t *lenp, loff_t *ppos)
2164{
2165 struct ctl_table t;
2166 int err;
2a595721 2167 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
2168
2169 if (write && !capable(CAP_SYS_ADMIN))
2170 return -EPERM;
2171
2172 t = *table;
2173 t.data = &state;
2174 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2175 if (err < 0)
2176 return err;
2177 if (write)
2178 set_numabalancing_state(state);
2179 return err;
2180}
2181#endif
2182#endif
dd41f596
IM
2183
2184/*
2185 * fork()/clone()-time setup:
2186 */
aab03e05 2187int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2188{
0122ec5b 2189 unsigned long flags;
dd41f596
IM
2190 int cpu = get_cpu();
2191
5e1576ed 2192 __sched_fork(clone_flags, p);
06b83b5f 2193 /*
0017d735 2194 * We mark the process as running here. This guarantees that
06b83b5f
PZ
2195 * nobody will actually run it, and a signal or other external
2196 * event cannot wake it up and insert it on the runqueue either.
2197 */
0017d735 2198 p->state = TASK_RUNNING;
dd41f596 2199
c350a04e
MG
2200 /*
2201 * Make sure we do not leak PI boosting priority to the child.
2202 */
2203 p->prio = current->normal_prio;
2204
b9dc29e7
MG
2205 /*
2206 * Revert to default priority/policy on fork if requested.
2207 */
2208 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 2209 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 2210 p->policy = SCHED_NORMAL;
6c697bdf 2211 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
2212 p->rt_priority = 0;
2213 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2214 p->static_prio = NICE_TO_PRIO(0);
2215
2216 p->prio = p->normal_prio = __normal_prio(p);
2217 set_load_weight(p);
6c697bdf 2218
b9dc29e7
MG
2219 /*
2220 * We don't need the reset flag anymore after the fork. It has
2221 * fulfilled its duty:
2222 */
2223 p->sched_reset_on_fork = 0;
2224 }
ca94c442 2225
aab03e05
DF
2226 if (dl_prio(p->prio)) {
2227 put_cpu();
2228 return -EAGAIN;
2229 } else if (rt_prio(p->prio)) {
2230 p->sched_class = &rt_sched_class;
2231 } else {
2ddbf952 2232 p->sched_class = &fair_sched_class;
aab03e05 2233 }
b29739f9 2234
cd29fe6f
PZ
2235 if (p->sched_class->task_fork)
2236 p->sched_class->task_fork(p);
2237
86951599
PZ
2238 /*
2239 * The child is not yet in the pid-hash so no cgroup attach races,
2240 * and the cgroup is pinned to this child due to cgroup_fork()
2241 * is ran before sched_fork().
2242 *
2243 * Silence PROVE_RCU.
2244 */
0122ec5b 2245 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 2246 set_task_cpu(p, cpu);
0122ec5b 2247 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2248
f6db8347 2249#ifdef CONFIG_SCHED_INFO
dd41f596 2250 if (likely(sched_info_on()))
52f17b6c 2251 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2252#endif
3ca7a440
PZ
2253#if defined(CONFIG_SMP)
2254 p->on_cpu = 0;
4866cde0 2255#endif
01028747 2256 init_task_preempt_count(p);
806c09a7 2257#ifdef CONFIG_SMP
917b627d 2258 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 2259 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 2260#endif
917b627d 2261
476d139c 2262 put_cpu();
aab03e05 2263 return 0;
1da177e4
LT
2264}
2265
332ac17e
DF
2266unsigned long to_ratio(u64 period, u64 runtime)
2267{
2268 if (runtime == RUNTIME_INF)
2269 return 1ULL << 20;
2270
2271 /*
2272 * Doing this here saves a lot of checks in all
2273 * the calling paths, and returning zero seems
2274 * safe for them anyway.
2275 */
2276 if (period == 0)
2277 return 0;
2278
2279 return div64_u64(runtime << 20, period);
2280}
2281
2282#ifdef CONFIG_SMP
2283inline struct dl_bw *dl_bw_of(int i)
2284{
f78f5b90
PM
2285 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2286 "sched RCU must be held");
332ac17e
DF
2287 return &cpu_rq(i)->rd->dl_bw;
2288}
2289
de212f18 2290static inline int dl_bw_cpus(int i)
332ac17e 2291{
de212f18
PZ
2292 struct root_domain *rd = cpu_rq(i)->rd;
2293 int cpus = 0;
2294
f78f5b90
PM
2295 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2296 "sched RCU must be held");
de212f18
PZ
2297 for_each_cpu_and(i, rd->span, cpu_active_mask)
2298 cpus++;
2299
2300 return cpus;
332ac17e
DF
2301}
2302#else
2303inline struct dl_bw *dl_bw_of(int i)
2304{
2305 return &cpu_rq(i)->dl.dl_bw;
2306}
2307
de212f18 2308static inline int dl_bw_cpus(int i)
332ac17e
DF
2309{
2310 return 1;
2311}
2312#endif
2313
332ac17e
DF
2314/*
2315 * We must be sure that accepting a new task (or allowing changing the
2316 * parameters of an existing one) is consistent with the bandwidth
2317 * constraints. If yes, this function also accordingly updates the currently
2318 * allocated bandwidth to reflect the new situation.
2319 *
2320 * This function is called while holding p's rq->lock.
40767b0d
PZ
2321 *
2322 * XXX we should delay bw change until the task's 0-lag point, see
2323 * __setparam_dl().
332ac17e
DF
2324 */
2325static int dl_overflow(struct task_struct *p, int policy,
2326 const struct sched_attr *attr)
2327{
2328
2329 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
4df1638c 2330 u64 period = attr->sched_period ?: attr->sched_deadline;
332ac17e
DF
2331 u64 runtime = attr->sched_runtime;
2332 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
de212f18 2333 int cpus, err = -1;
332ac17e
DF
2334
2335 if (new_bw == p->dl.dl_bw)
2336 return 0;
2337
2338 /*
2339 * Either if a task, enters, leave, or stays -deadline but changes
2340 * its parameters, we may need to update accordingly the total
2341 * allocated bandwidth of the container.
2342 */
2343 raw_spin_lock(&dl_b->lock);
de212f18 2344 cpus = dl_bw_cpus(task_cpu(p));
332ac17e
DF
2345 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2346 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2347 __dl_add(dl_b, new_bw);
2348 err = 0;
2349 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2350 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2351 __dl_clear(dl_b, p->dl.dl_bw);
2352 __dl_add(dl_b, new_bw);
2353 err = 0;
2354 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2355 __dl_clear(dl_b, p->dl.dl_bw);
2356 err = 0;
2357 }
2358 raw_spin_unlock(&dl_b->lock);
2359
2360 return err;
2361}
2362
2363extern void init_dl_bw(struct dl_bw *dl_b);
2364
1da177e4
LT
2365/*
2366 * wake_up_new_task - wake up a newly created task for the first time.
2367 *
2368 * This function will do some initial scheduler statistics housekeeping
2369 * that must be done for every newly created context, then puts the task
2370 * on the runqueue and wakes it.
2371 */
3e51e3ed 2372void wake_up_new_task(struct task_struct *p)
1da177e4
LT
2373{
2374 unsigned long flags;
dd41f596 2375 struct rq *rq;
fabf318e 2376
ab2515c4 2377 raw_spin_lock_irqsave(&p->pi_lock, flags);
98d8fd81
MR
2378 /* Initialize new task's runnable average */
2379 init_entity_runnable_average(&p->se);
fabf318e
PZ
2380#ifdef CONFIG_SMP
2381 /*
2382 * Fork balancing, do it here and not earlier because:
2383 * - cpus_allowed can change in the fork path
2384 * - any previously selected cpu might disappear through hotplug
fabf318e 2385 */
ac66f547 2386 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735
PZ
2387#endif
2388
ab2515c4 2389 rq = __task_rq_lock(p);
cd29fe6f 2390 activate_task(rq, p, 0);
da0c1e65 2391 p->on_rq = TASK_ON_RQ_QUEUED;
fbd705a0 2392 trace_sched_wakeup_new(p);
a7558e01 2393 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2394#ifdef CONFIG_SMP
0aaafaab
PZ
2395 if (p->sched_class->task_woken) {
2396 /*
2397 * Nothing relies on rq->lock after this, so its fine to
2398 * drop it.
2399 */
2400 lockdep_unpin_lock(&rq->lock);
efbbd05a 2401 p->sched_class->task_woken(rq, p);
0aaafaab
PZ
2402 lockdep_pin_lock(&rq->lock);
2403 }
9a897c5a 2404#endif
0122ec5b 2405 task_rq_unlock(rq, p, &flags);
1da177e4
LT
2406}
2407
e107be36
AK
2408#ifdef CONFIG_PREEMPT_NOTIFIERS
2409
1cde2930
PZ
2410static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2411
2ecd9d29
PZ
2412void preempt_notifier_inc(void)
2413{
2414 static_key_slow_inc(&preempt_notifier_key);
2415}
2416EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2417
2418void preempt_notifier_dec(void)
2419{
2420 static_key_slow_dec(&preempt_notifier_key);
2421}
2422EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2423
e107be36 2424/**
80dd99b3 2425 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2426 * @notifier: notifier struct to register
e107be36
AK
2427 */
2428void preempt_notifier_register(struct preempt_notifier *notifier)
2429{
2ecd9d29
PZ
2430 if (!static_key_false(&preempt_notifier_key))
2431 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2432
e107be36
AK
2433 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2434}
2435EXPORT_SYMBOL_GPL(preempt_notifier_register);
2436
2437/**
2438 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2439 * @notifier: notifier struct to unregister
e107be36 2440 *
d84525a8 2441 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
2442 */
2443void preempt_notifier_unregister(struct preempt_notifier *notifier)
2444{
2445 hlist_del(&notifier->link);
2446}
2447EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2448
1cde2930 2449static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2450{
2451 struct preempt_notifier *notifier;
e107be36 2452
b67bfe0d 2453 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2454 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2455}
2456
1cde2930
PZ
2457static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2458{
2459 if (static_key_false(&preempt_notifier_key))
2460 __fire_sched_in_preempt_notifiers(curr);
2461}
2462
e107be36 2463static void
1cde2930
PZ
2464__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2465 struct task_struct *next)
e107be36
AK
2466{
2467 struct preempt_notifier *notifier;
e107be36 2468
b67bfe0d 2469 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
2470 notifier->ops->sched_out(notifier, next);
2471}
2472
1cde2930
PZ
2473static __always_inline void
2474fire_sched_out_preempt_notifiers(struct task_struct *curr,
2475 struct task_struct *next)
2476{
2477 if (static_key_false(&preempt_notifier_key))
2478 __fire_sched_out_preempt_notifiers(curr, next);
2479}
2480
6d6bc0ad 2481#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 2482
1cde2930 2483static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
2484{
2485}
2486
1cde2930 2487static inline void
e107be36
AK
2488fire_sched_out_preempt_notifiers(struct task_struct *curr,
2489 struct task_struct *next)
2490{
2491}
2492
6d6bc0ad 2493#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2494
4866cde0
NP
2495/**
2496 * prepare_task_switch - prepare to switch tasks
2497 * @rq: the runqueue preparing to switch
421cee29 2498 * @prev: the current task that is being switched out
4866cde0
NP
2499 * @next: the task we are going to switch to.
2500 *
2501 * This is called with the rq lock held and interrupts off. It must
2502 * be paired with a subsequent finish_task_switch after the context
2503 * switch.
2504 *
2505 * prepare_task_switch sets up locking and calls architecture specific
2506 * hooks.
2507 */
e107be36
AK
2508static inline void
2509prepare_task_switch(struct rq *rq, struct task_struct *prev,
2510 struct task_struct *next)
4866cde0 2511{
43148951 2512 sched_info_switch(rq, prev, next);
fe4b04fa 2513 perf_event_task_sched_out(prev, next);
e107be36 2514 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2515 prepare_lock_switch(rq, next);
2516 prepare_arch_switch(next);
2517}
2518
1da177e4
LT
2519/**
2520 * finish_task_switch - clean up after a task-switch
2521 * @prev: the thread we just switched away from.
2522 *
4866cde0
NP
2523 * finish_task_switch must be called after the context switch, paired
2524 * with a prepare_task_switch call before the context switch.
2525 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2526 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2527 *
2528 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2529 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2530 * with the lock held can cause deadlocks; see schedule() for
2531 * details.)
dfa50b60
ON
2532 *
2533 * The context switch have flipped the stack from under us and restored the
2534 * local variables which were saved when this task called schedule() in the
2535 * past. prev == current is still correct but we need to recalculate this_rq
2536 * because prev may have moved to another CPU.
1da177e4 2537 */
dfa50b60 2538static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
2539 __releases(rq->lock)
2540{
dfa50b60 2541 struct rq *rq = this_rq();
1da177e4 2542 struct mm_struct *mm = rq->prev_mm;
55a101f8 2543 long prev_state;
1da177e4 2544
609ca066
PZ
2545 /*
2546 * The previous task will have left us with a preempt_count of 2
2547 * because it left us after:
2548 *
2549 * schedule()
2550 * preempt_disable(); // 1
2551 * __schedule()
2552 * raw_spin_lock_irq(&rq->lock) // 2
2553 *
2554 * Also, see FORK_PREEMPT_COUNT.
2555 */
e2bf1c4b
PZ
2556 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2557 "corrupted preempt_count: %s/%d/0x%x\n",
2558 current->comm, current->pid, preempt_count()))
2559 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 2560
1da177e4
LT
2561 rq->prev_mm = NULL;
2562
2563 /*
2564 * A task struct has one reference for the use as "current".
c394cc9f 2565 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2566 * schedule one last time. The schedule call will never return, and
2567 * the scheduled task must drop that reference.
95913d97
PZ
2568 *
2569 * We must observe prev->state before clearing prev->on_cpu (in
2570 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2571 * running on another CPU and we could rave with its RUNNING -> DEAD
2572 * transition, resulting in a double drop.
1da177e4 2573 */
55a101f8 2574 prev_state = prev->state;
bf9fae9f 2575 vtime_task_switch(prev);
a8d757ef 2576 perf_event_task_sched_in(prev, current);
4866cde0 2577 finish_lock_switch(rq, prev);
01f23e16 2578 finish_arch_post_lock_switch();
e8fa1362 2579
e107be36 2580 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2581 if (mm)
2582 mmdrop(mm);
c394cc9f 2583 if (unlikely(prev_state == TASK_DEAD)) {
e6c390f2
DF
2584 if (prev->sched_class->task_dead)
2585 prev->sched_class->task_dead(prev);
2586
c6fd91f0 2587 /*
2588 * Remove function-return probe instances associated with this
2589 * task and put them back on the free list.
9761eea8 2590 */
c6fd91f0 2591 kprobe_flush_task(prev);
1da177e4 2592 put_task_struct(prev);
c6fd91f0 2593 }
99e5ada9 2594
de734f89 2595 tick_nohz_task_switch();
dfa50b60 2596 return rq;
1da177e4
LT
2597}
2598
3f029d3c
GH
2599#ifdef CONFIG_SMP
2600
3f029d3c 2601/* rq->lock is NOT held, but preemption is disabled */
e3fca9e7 2602static void __balance_callback(struct rq *rq)
3f029d3c 2603{
e3fca9e7
PZ
2604 struct callback_head *head, *next;
2605 void (*func)(struct rq *rq);
2606 unsigned long flags;
3f029d3c 2607
e3fca9e7
PZ
2608 raw_spin_lock_irqsave(&rq->lock, flags);
2609 head = rq->balance_callback;
2610 rq->balance_callback = NULL;
2611 while (head) {
2612 func = (void (*)(struct rq *))head->func;
2613 next = head->next;
2614 head->next = NULL;
2615 head = next;
3f029d3c 2616
e3fca9e7 2617 func(rq);
3f029d3c 2618 }
e3fca9e7
PZ
2619 raw_spin_unlock_irqrestore(&rq->lock, flags);
2620}
2621
2622static inline void balance_callback(struct rq *rq)
2623{
2624 if (unlikely(rq->balance_callback))
2625 __balance_callback(rq);
3f029d3c
GH
2626}
2627
2628#else
da19ab51 2629
e3fca9e7 2630static inline void balance_callback(struct rq *rq)
3f029d3c 2631{
1da177e4
LT
2632}
2633
3f029d3c
GH
2634#endif
2635
1da177e4
LT
2636/**
2637 * schedule_tail - first thing a freshly forked thread must call.
2638 * @prev: the thread we just switched away from.
2639 */
722a9f92 2640asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
2641 __releases(rq->lock)
2642{
1a43a14a 2643 struct rq *rq;
da19ab51 2644
609ca066
PZ
2645 /*
2646 * New tasks start with FORK_PREEMPT_COUNT, see there and
2647 * finish_task_switch() for details.
2648 *
2649 * finish_task_switch() will drop rq->lock() and lower preempt_count
2650 * and the preempt_enable() will end up enabling preemption (on
2651 * PREEMPT_COUNT kernels).
2652 */
2653
dfa50b60 2654 rq = finish_task_switch(prev);
e3fca9e7 2655 balance_callback(rq);
1a43a14a 2656 preempt_enable();
70b97a7f 2657
1da177e4 2658 if (current->set_child_tid)
b488893a 2659 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2660}
2661
2662/*
dfa50b60 2663 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 2664 */
dfa50b60 2665static inline struct rq *
70b97a7f 2666context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2667 struct task_struct *next)
1da177e4 2668{
dd41f596 2669 struct mm_struct *mm, *oldmm;
1da177e4 2670
e107be36 2671 prepare_task_switch(rq, prev, next);
fe4b04fa 2672
dd41f596
IM
2673 mm = next->mm;
2674 oldmm = prev->active_mm;
9226d125
ZA
2675 /*
2676 * For paravirt, this is coupled with an exit in switch_to to
2677 * combine the page table reload and the switch backend into
2678 * one hypercall.
2679 */
224101ed 2680 arch_start_context_switch(prev);
9226d125 2681
31915ab4 2682 if (!mm) {
1da177e4
LT
2683 next->active_mm = oldmm;
2684 atomic_inc(&oldmm->mm_count);
2685 enter_lazy_tlb(oldmm, next);
2686 } else
2687 switch_mm(oldmm, mm, next);
2688
31915ab4 2689 if (!prev->mm) {
1da177e4 2690 prev->active_mm = NULL;
1da177e4
LT
2691 rq->prev_mm = oldmm;
2692 }
3a5f5e48
IM
2693 /*
2694 * Since the runqueue lock will be released by the next
2695 * task (which is an invalid locking op but in the case
2696 * of the scheduler it's an obvious special-case), so we
2697 * do an early lockdep release here:
2698 */
cbce1a68 2699 lockdep_unpin_lock(&rq->lock);
8a25d5de 2700 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1da177e4
LT
2701
2702 /* Here we just switch the register state and the stack. */
2703 switch_to(prev, next, prev);
dd41f596 2704 barrier();
dfa50b60
ON
2705
2706 return finish_task_switch(prev);
1da177e4
LT
2707}
2708
2709/*
1c3e8264 2710 * nr_running and nr_context_switches:
1da177e4
LT
2711 *
2712 * externally visible scheduler statistics: current number of runnable
1c3e8264 2713 * threads, total number of context switches performed since bootup.
1da177e4
LT
2714 */
2715unsigned long nr_running(void)
2716{
2717 unsigned long i, sum = 0;
2718
2719 for_each_online_cpu(i)
2720 sum += cpu_rq(i)->nr_running;
2721
2722 return sum;
f711f609 2723}
1da177e4 2724
2ee507c4
TC
2725/*
2726 * Check if only the current task is running on the cpu.
00cc1633
DD
2727 *
2728 * Caution: this function does not check that the caller has disabled
2729 * preemption, thus the result might have a time-of-check-to-time-of-use
2730 * race. The caller is responsible to use it correctly, for example:
2731 *
2732 * - from a non-preemptable section (of course)
2733 *
2734 * - from a thread that is bound to a single CPU
2735 *
2736 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
2737 */
2738bool single_task_running(void)
2739{
00cc1633 2740 return raw_rq()->nr_running == 1;
2ee507c4
TC
2741}
2742EXPORT_SYMBOL(single_task_running);
2743
1da177e4 2744unsigned long long nr_context_switches(void)
46cb4b7c 2745{
cc94abfc
SR
2746 int i;
2747 unsigned long long sum = 0;
46cb4b7c 2748
0a945022 2749 for_each_possible_cpu(i)
1da177e4 2750 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2751
1da177e4
LT
2752 return sum;
2753}
483b4ee6 2754
1da177e4
LT
2755unsigned long nr_iowait(void)
2756{
2757 unsigned long i, sum = 0;
483b4ee6 2758
0a945022 2759 for_each_possible_cpu(i)
1da177e4 2760 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2761
1da177e4
LT
2762 return sum;
2763}
483b4ee6 2764
8c215bd3 2765unsigned long nr_iowait_cpu(int cpu)
69d25870 2766{
8c215bd3 2767 struct rq *this = cpu_rq(cpu);
69d25870
AV
2768 return atomic_read(&this->nr_iowait);
2769}
46cb4b7c 2770
372ba8cb
MG
2771void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2772{
3289bdb4
PZ
2773 struct rq *rq = this_rq();
2774 *nr_waiters = atomic_read(&rq->nr_iowait);
2775 *load = rq->load.weight;
372ba8cb
MG
2776}
2777
dd41f596 2778#ifdef CONFIG_SMP
8a0be9ef 2779
46cb4b7c 2780/*
38022906
PZ
2781 * sched_exec - execve() is a valuable balancing opportunity, because at
2782 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2783 */
38022906 2784void sched_exec(void)
46cb4b7c 2785{
38022906 2786 struct task_struct *p = current;
1da177e4 2787 unsigned long flags;
0017d735 2788 int dest_cpu;
46cb4b7c 2789
8f42ced9 2790 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 2791 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
2792 if (dest_cpu == smp_processor_id())
2793 goto unlock;
38022906 2794
8f42ced9 2795 if (likely(cpu_active(dest_cpu))) {
969c7921 2796 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2797
8f42ced9
PZ
2798 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2799 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2800 return;
2801 }
0017d735 2802unlock:
8f42ced9 2803 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2804}
dd41f596 2805
1da177e4
LT
2806#endif
2807
1da177e4 2808DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2809DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2810
2811EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2812EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 2813
c5f8d995
HS
2814/*
2815 * Return accounted runtime for the task.
2816 * In case the task is currently running, return the runtime plus current's
2817 * pending runtime that have not been accounted yet.
2818 */
2819unsigned long long task_sched_runtime(struct task_struct *p)
2820{
2821 unsigned long flags;
2822 struct rq *rq;
6e998916 2823 u64 ns;
c5f8d995 2824
911b2898
PZ
2825#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2826 /*
2827 * 64-bit doesn't need locks to atomically read a 64bit value.
2828 * So we have a optimization chance when the task's delta_exec is 0.
2829 * Reading ->on_cpu is racy, but this is ok.
2830 *
2831 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2832 * If we race with it entering cpu, unaccounted time is 0. This is
2833 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
2834 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2835 * been accounted, so we're correct here as well.
911b2898 2836 */
da0c1e65 2837 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
2838 return p->se.sum_exec_runtime;
2839#endif
2840
c5f8d995 2841 rq = task_rq_lock(p, &flags);
6e998916
SG
2842 /*
2843 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2844 * project cycles that may never be accounted to this
2845 * thread, breaking clock_gettime().
2846 */
2847 if (task_current(rq, p) && task_on_rq_queued(p)) {
2848 update_rq_clock(rq);
2849 p->sched_class->update_curr(rq);
2850 }
2851 ns = p->se.sum_exec_runtime;
0122ec5b 2852 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2853
2854 return ns;
2855}
48f24c4d 2856
7835b98b
CL
2857/*
2858 * This function gets called by the timer code, with HZ frequency.
2859 * We call it with interrupts disabled.
7835b98b
CL
2860 */
2861void scheduler_tick(void)
2862{
7835b98b
CL
2863 int cpu = smp_processor_id();
2864 struct rq *rq = cpu_rq(cpu);
dd41f596 2865 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2866
2867 sched_clock_tick();
dd41f596 2868
05fa785c 2869 raw_spin_lock(&rq->lock);
3e51f33f 2870 update_rq_clock(rq);
fa85ae24 2871 curr->sched_class->task_tick(rq, curr, 0);
83dfd523 2872 update_cpu_load_active(rq);
3289bdb4 2873 calc_global_load_tick(rq);
05fa785c 2874 raw_spin_unlock(&rq->lock);
7835b98b 2875
e9d2b064 2876 perf_event_task_tick();
e220d2dc 2877
e418e1c2 2878#ifdef CONFIG_SMP
6eb57e0d 2879 rq->idle_balance = idle_cpu(cpu);
7caff66f 2880 trigger_load_balance(rq);
e418e1c2 2881#endif
265f22a9 2882 rq_last_tick_reset(rq);
1da177e4
LT
2883}
2884
265f22a9
FW
2885#ifdef CONFIG_NO_HZ_FULL
2886/**
2887 * scheduler_tick_max_deferment
2888 *
2889 * Keep at least one tick per second when a single
2890 * active task is running because the scheduler doesn't
2891 * yet completely support full dynticks environment.
2892 *
2893 * This makes sure that uptime, CFS vruntime, load
2894 * balancing, etc... continue to move forward, even
2895 * with a very low granularity.
e69f6186
YB
2896 *
2897 * Return: Maximum deferment in nanoseconds.
265f22a9
FW
2898 */
2899u64 scheduler_tick_max_deferment(void)
2900{
2901 struct rq *rq = this_rq();
316c1608 2902 unsigned long next, now = READ_ONCE(jiffies);
265f22a9
FW
2903
2904 next = rq->last_sched_tick + HZ;
2905
2906 if (time_before_eq(next, now))
2907 return 0;
2908
8fe8ff09 2909 return jiffies_to_nsecs(next - now);
1da177e4 2910}
265f22a9 2911#endif
1da177e4 2912
132380a0 2913notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2914{
2915 if (in_lock_functions(addr)) {
2916 addr = CALLER_ADDR2;
2917 if (in_lock_functions(addr))
2918 addr = CALLER_ADDR3;
2919 }
2920 return addr;
2921}
1da177e4 2922
7e49fcce
SR
2923#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2924 defined(CONFIG_PREEMPT_TRACER))
2925
edafe3a5 2926void preempt_count_add(int val)
1da177e4 2927{
6cd8a4bb 2928#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2929 /*
2930 * Underflow?
2931 */
9a11b49a
IM
2932 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2933 return;
6cd8a4bb 2934#endif
bdb43806 2935 __preempt_count_add(val);
6cd8a4bb 2936#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2937 /*
2938 * Spinlock count overflowing soon?
2939 */
33859f7f
MOS
2940 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2941 PREEMPT_MASK - 10);
6cd8a4bb 2942#endif
8f47b187
TG
2943 if (preempt_count() == val) {
2944 unsigned long ip = get_parent_ip(CALLER_ADDR1);
2945#ifdef CONFIG_DEBUG_PREEMPT
2946 current->preempt_disable_ip = ip;
2947#endif
2948 trace_preempt_off(CALLER_ADDR0, ip);
2949 }
1da177e4 2950}
bdb43806 2951EXPORT_SYMBOL(preempt_count_add);
edafe3a5 2952NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 2953
edafe3a5 2954void preempt_count_sub(int val)
1da177e4 2955{
6cd8a4bb 2956#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2957 /*
2958 * Underflow?
2959 */
01e3eb82 2960 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 2961 return;
1da177e4
LT
2962 /*
2963 * Is the spinlock portion underflowing?
2964 */
9a11b49a
IM
2965 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2966 !(preempt_count() & PREEMPT_MASK)))
2967 return;
6cd8a4bb 2968#endif
9a11b49a 2969
6cd8a4bb
SR
2970 if (preempt_count() == val)
2971 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
bdb43806 2972 __preempt_count_sub(val);
1da177e4 2973}
bdb43806 2974EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 2975NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4
LT
2976
2977#endif
2978
2979/*
dd41f596 2980 * Print scheduling while atomic bug:
1da177e4 2981 */
dd41f596 2982static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 2983{
664dfa65
DJ
2984 if (oops_in_progress)
2985 return;
2986
3df0fc5b
PZ
2987 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2988 prev->comm, prev->pid, preempt_count());
838225b4 2989
dd41f596 2990 debug_show_held_locks(prev);
e21f5b15 2991 print_modules();
dd41f596
IM
2992 if (irqs_disabled())
2993 print_irqtrace_events(prev);
8f47b187
TG
2994#ifdef CONFIG_DEBUG_PREEMPT
2995 if (in_atomic_preempt_off()) {
2996 pr_err("Preemption disabled at:");
2997 print_ip_sym(current->preempt_disable_ip);
2998 pr_cont("\n");
2999 }
3000#endif
6135fc1e 3001 dump_stack();
373d4d09 3002 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 3003}
1da177e4 3004
dd41f596
IM
3005/*
3006 * Various schedule()-time debugging checks and statistics:
3007 */
3008static inline void schedule_debug(struct task_struct *prev)
3009{
0d9e2632 3010#ifdef CONFIG_SCHED_STACK_END_CHECK
ce03e413 3011 BUG_ON(task_stack_end_corrupted(prev));
0d9e2632 3012#endif
b99def8b 3013
1dc0fffc 3014 if (unlikely(in_atomic_preempt_off())) {
dd41f596 3015 __schedule_bug(prev);
1dc0fffc
PZ
3016 preempt_count_set(PREEMPT_DISABLED);
3017 }
b3fbab05 3018 rcu_sleep_check();
dd41f596 3019
1da177e4
LT
3020 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3021
2d72376b 3022 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
3023}
3024
3025/*
3026 * Pick up the highest-prio task:
3027 */
3028static inline struct task_struct *
606dba2e 3029pick_next_task(struct rq *rq, struct task_struct *prev)
dd41f596 3030{
37e117c0 3031 const struct sched_class *class = &fair_sched_class;
dd41f596 3032 struct task_struct *p;
1da177e4
LT
3033
3034 /*
dd41f596
IM
3035 * Optimization: we know that if all tasks are in
3036 * the fair class we can call that function directly:
1da177e4 3037 */
37e117c0 3038 if (likely(prev->sched_class == class &&
38033c37 3039 rq->nr_running == rq->cfs.h_nr_running)) {
606dba2e 3040 p = fair_sched_class.pick_next_task(rq, prev);
6ccdc84b
PZ
3041 if (unlikely(p == RETRY_TASK))
3042 goto again;
3043
3044 /* assumes fair_sched_class->next == idle_sched_class */
3045 if (unlikely(!p))
3046 p = idle_sched_class.pick_next_task(rq, prev);
3047
3048 return p;
1da177e4
LT
3049 }
3050
37e117c0 3051again:
34f971f6 3052 for_each_class(class) {
606dba2e 3053 p = class->pick_next_task(rq, prev);
37e117c0
PZ
3054 if (p) {
3055 if (unlikely(p == RETRY_TASK))
3056 goto again;
dd41f596 3057 return p;
37e117c0 3058 }
dd41f596 3059 }
34f971f6
PZ
3060
3061 BUG(); /* the idle class will always have a runnable task */
dd41f596 3062}
1da177e4 3063
dd41f596 3064/*
c259e01a 3065 * __schedule() is the main scheduler function.
edde96ea
PE
3066 *
3067 * The main means of driving the scheduler and thus entering this function are:
3068 *
3069 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3070 *
3071 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3072 * paths. For example, see arch/x86/entry_64.S.
3073 *
3074 * To drive preemption between tasks, the scheduler sets the flag in timer
3075 * interrupt handler scheduler_tick().
3076 *
3077 * 3. Wakeups don't really cause entry into schedule(). They add a
3078 * task to the run-queue and that's it.
3079 *
3080 * Now, if the new task added to the run-queue preempts the current
3081 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3082 * called on the nearest possible occasion:
3083 *
3084 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3085 *
3086 * - in syscall or exception context, at the next outmost
3087 * preempt_enable(). (this might be as soon as the wake_up()'s
3088 * spin_unlock()!)
3089 *
3090 * - in IRQ context, return from interrupt-handler to
3091 * preemptible context
3092 *
3093 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3094 * then at the next:
3095 *
3096 * - cond_resched() call
3097 * - explicit schedule() call
3098 * - return from syscall or exception to user-space
3099 * - return from interrupt-handler to user-space
bfd9b2b5 3100 *
b30f0e3f 3101 * WARNING: must be called with preemption disabled!
dd41f596 3102 */
499d7955 3103static void __sched notrace __schedule(bool preempt)
dd41f596
IM
3104{
3105 struct task_struct *prev, *next;
67ca7bde 3106 unsigned long *switch_count;
dd41f596 3107 struct rq *rq;
31656519 3108 int cpu;
dd41f596 3109
dd41f596
IM
3110 cpu = smp_processor_id();
3111 rq = cpu_rq(cpu);
38200cf2 3112 rcu_note_context_switch();
dd41f596 3113 prev = rq->curr;
dd41f596 3114
b99def8b
PZ
3115 /*
3116 * do_exit() calls schedule() with preemption disabled as an exception;
3117 * however we must fix that up, otherwise the next task will see an
3118 * inconsistent (higher) preempt count.
3119 *
3120 * It also avoids the below schedule_debug() test from complaining
3121 * about this.
3122 */
3123 if (unlikely(prev->state == TASK_DEAD))
3124 preempt_enable_no_resched_notrace();
3125
dd41f596 3126 schedule_debug(prev);
1da177e4 3127
31656519 3128 if (sched_feat(HRTICK))
f333fdc9 3129 hrtick_clear(rq);
8f4d37ec 3130
e0acd0a6
ON
3131 /*
3132 * Make sure that signal_pending_state()->signal_pending() below
3133 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3134 * done by the caller to avoid the race with signal_wake_up().
3135 */
3136 smp_mb__before_spinlock();
05fa785c 3137 raw_spin_lock_irq(&rq->lock);
cbce1a68 3138 lockdep_pin_lock(&rq->lock);
1da177e4 3139
9edfbfed
PZ
3140 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3141
246d86b5 3142 switch_count = &prev->nivcsw;
fc13aeba 3143 if (!preempt && prev->state) {
21aa9af0 3144 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3145 prev->state = TASK_RUNNING;
21aa9af0 3146 } else {
2acca55e
PZ
3147 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3148 prev->on_rq = 0;
3149
21aa9af0 3150 /*
2acca55e
PZ
3151 * If a worker went to sleep, notify and ask workqueue
3152 * whether it wants to wake up a task to maintain
3153 * concurrency.
21aa9af0
TH
3154 */
3155 if (prev->flags & PF_WQ_WORKER) {
3156 struct task_struct *to_wakeup;
3157
3158 to_wakeup = wq_worker_sleeping(prev, cpu);
3159 if (to_wakeup)
3160 try_to_wake_up_local(to_wakeup);
3161 }
21aa9af0 3162 }
dd41f596 3163 switch_count = &prev->nvcsw;
1da177e4
LT
3164 }
3165
9edfbfed 3166 if (task_on_rq_queued(prev))
606dba2e
PZ
3167 update_rq_clock(rq);
3168
3169 next = pick_next_task(rq, prev);
f26f9aff 3170 clear_tsk_need_resched(prev);
f27dde8d 3171 clear_preempt_need_resched();
9edfbfed 3172 rq->clock_skip_update = 0;
1da177e4 3173
1da177e4 3174 if (likely(prev != next)) {
1da177e4
LT
3175 rq->nr_switches++;
3176 rq->curr = next;
3177 ++*switch_count;
3178
c73464b1 3179 trace_sched_switch(preempt, prev, next);
dfa50b60
ON
3180 rq = context_switch(rq, prev, next); /* unlocks the rq */
3181 cpu = cpu_of(rq);
cbce1a68
PZ
3182 } else {
3183 lockdep_unpin_lock(&rq->lock);
05fa785c 3184 raw_spin_unlock_irq(&rq->lock);
cbce1a68 3185 }
1da177e4 3186
e3fca9e7 3187 balance_callback(rq);
1da177e4 3188}
c259e01a 3189
9c40cef2
TG
3190static inline void sched_submit_work(struct task_struct *tsk)
3191{
3c7d5184 3192 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
3193 return;
3194 /*
3195 * If we are going to sleep and we have plugged IO queued,
3196 * make sure to submit it to avoid deadlocks.
3197 */
3198 if (blk_needs_flush_plug(tsk))
3199 blk_schedule_flush_plug(tsk);
3200}
3201
722a9f92 3202asmlinkage __visible void __sched schedule(void)
c259e01a 3203{
9c40cef2
TG
3204 struct task_struct *tsk = current;
3205
3206 sched_submit_work(tsk);
bfd9b2b5 3207 do {
b30f0e3f 3208 preempt_disable();
fc13aeba 3209 __schedule(false);
b30f0e3f 3210 sched_preempt_enable_no_resched();
bfd9b2b5 3211 } while (need_resched());
c259e01a 3212}
1da177e4
LT
3213EXPORT_SYMBOL(schedule);
3214
91d1aa43 3215#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 3216asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
3217{
3218 /*
3219 * If we come here after a random call to set_need_resched(),
3220 * or we have been woken up remotely but the IPI has not yet arrived,
3221 * we haven't yet exited the RCU idle mode. Do it here manually until
3222 * we find a better solution.
7cc78f8f
AL
3223 *
3224 * NB: There are buggy callers of this function. Ideally we
c467ea76 3225 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 3226 * too frequently to make sense yet.
20ab65e3 3227 */
7cc78f8f 3228 enum ctx_state prev_state = exception_enter();
20ab65e3 3229 schedule();
7cc78f8f 3230 exception_exit(prev_state);
20ab65e3
FW
3231}
3232#endif
3233
c5491ea7
TG
3234/**
3235 * schedule_preempt_disabled - called with preemption disabled
3236 *
3237 * Returns with preemption disabled. Note: preempt_count must be 1
3238 */
3239void __sched schedule_preempt_disabled(void)
3240{
ba74c144 3241 sched_preempt_enable_no_resched();
c5491ea7
TG
3242 schedule();
3243 preempt_disable();
3244}
3245
06b1f808 3246static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
3247{
3248 do {
499d7955 3249 preempt_disable_notrace();
fc13aeba 3250 __schedule(true);
499d7955 3251 preempt_enable_no_resched_notrace();
a18b5d01
FW
3252
3253 /*
3254 * Check again in case we missed a preemption opportunity
3255 * between schedule and now.
3256 */
a18b5d01
FW
3257 } while (need_resched());
3258}
3259
1da177e4
LT
3260#ifdef CONFIG_PREEMPT
3261/*
2ed6e34f 3262 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3263 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3264 * occur there and call schedule directly.
3265 */
722a9f92 3266asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 3267{
1da177e4
LT
3268 /*
3269 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3270 * we do not want to preempt the current task. Just return..
1da177e4 3271 */
fbb00b56 3272 if (likely(!preemptible()))
1da177e4
LT
3273 return;
3274
a18b5d01 3275 preempt_schedule_common();
1da177e4 3276}
376e2424 3277NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 3278EXPORT_SYMBOL(preempt_schedule);
009f60e2 3279
009f60e2 3280/**
4eaca0a8 3281 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
3282 *
3283 * The tracing infrastructure uses preempt_enable_notrace to prevent
3284 * recursion and tracing preempt enabling caused by the tracing
3285 * infrastructure itself. But as tracing can happen in areas coming
3286 * from userspace or just about to enter userspace, a preempt enable
3287 * can occur before user_exit() is called. This will cause the scheduler
3288 * to be called when the system is still in usermode.
3289 *
3290 * To prevent this, the preempt_enable_notrace will use this function
3291 * instead of preempt_schedule() to exit user context if needed before
3292 * calling the scheduler.
3293 */
4eaca0a8 3294asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
3295{
3296 enum ctx_state prev_ctx;
3297
3298 if (likely(!preemptible()))
3299 return;
3300
3301 do {
3d8f74dd 3302 preempt_disable_notrace();
009f60e2
ON
3303 /*
3304 * Needs preempt disabled in case user_exit() is traced
3305 * and the tracer calls preempt_enable_notrace() causing
3306 * an infinite recursion.
3307 */
3308 prev_ctx = exception_enter();
fc13aeba 3309 __schedule(true);
009f60e2
ON
3310 exception_exit(prev_ctx);
3311
3d8f74dd 3312 preempt_enable_no_resched_notrace();
009f60e2
ON
3313 } while (need_resched());
3314}
4eaca0a8 3315EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 3316
32e475d7 3317#endif /* CONFIG_PREEMPT */
1da177e4
LT
3318
3319/*
2ed6e34f 3320 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3321 * off of irq context.
3322 * Note, that this is called and return with irqs disabled. This will
3323 * protect us against recursive calling from irq.
3324 */
722a9f92 3325asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 3326{
b22366cd 3327 enum ctx_state prev_state;
6478d880 3328
2ed6e34f 3329 /* Catch callers which need to be fixed */
f27dde8d 3330 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 3331
b22366cd
FW
3332 prev_state = exception_enter();
3333
3a5c359a 3334 do {
3d8f74dd 3335 preempt_disable();
3a5c359a 3336 local_irq_enable();
fc13aeba 3337 __schedule(true);
3a5c359a 3338 local_irq_disable();
3d8f74dd 3339 sched_preempt_enable_no_resched();
5ed0cec0 3340 } while (need_resched());
b22366cd
FW
3341
3342 exception_exit(prev_state);
1da177e4
LT
3343}
3344
63859d4f 3345int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3346 void *key)
1da177e4 3347{
63859d4f 3348 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3349}
1da177e4
LT
3350EXPORT_SYMBOL(default_wake_function);
3351
b29739f9
IM
3352#ifdef CONFIG_RT_MUTEXES
3353
3354/*
3355 * rt_mutex_setprio - set the current priority of a task
3356 * @p: task
3357 * @prio: prio value (kernel-internal form)
3358 *
3359 * This function changes the 'effective' priority of a task. It does
3360 * not touch ->normal_prio like __setscheduler().
3361 *
c365c292
TG
3362 * Used by the rt_mutex code to implement priority inheritance
3363 * logic. Call site only calls if the priority of the task changed.
b29739f9 3364 */
36c8b586 3365void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3366{
1de64443 3367 int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE;
70b97a7f 3368 struct rq *rq;
83ab0aa0 3369 const struct sched_class *prev_class;
b29739f9 3370
aab03e05 3371 BUG_ON(prio > MAX_PRIO);
b29739f9 3372
0122ec5b 3373 rq = __task_rq_lock(p);
b29739f9 3374
1c4dd99b
TG
3375 /*
3376 * Idle task boosting is a nono in general. There is one
3377 * exception, when PREEMPT_RT and NOHZ is active:
3378 *
3379 * The idle task calls get_next_timer_interrupt() and holds
3380 * the timer wheel base->lock on the CPU and another CPU wants
3381 * to access the timer (probably to cancel it). We can safely
3382 * ignore the boosting request, as the idle CPU runs this code
3383 * with interrupts disabled and will complete the lock
3384 * protected section without being interrupted. So there is no
3385 * real need to boost.
3386 */
3387 if (unlikely(p == rq->idle)) {
3388 WARN_ON(p != rq->curr);
3389 WARN_ON(p->pi_blocked_on);
3390 goto out_unlock;
3391 }
3392
a8027073 3393 trace_sched_pi_setprio(p, prio);
d5f9f942 3394 oldprio = p->prio;
83ab0aa0 3395 prev_class = p->sched_class;
da0c1e65 3396 queued = task_on_rq_queued(p);
051a1d1a 3397 running = task_current(rq, p);
da0c1e65 3398 if (queued)
1de64443 3399 dequeue_task(rq, p, DEQUEUE_SAVE);
0e1f3483 3400 if (running)
f3cd1c4e 3401 put_prev_task(rq, p);
dd41f596 3402
2d3d891d
DF
3403 /*
3404 * Boosting condition are:
3405 * 1. -rt task is running and holds mutex A
3406 * --> -dl task blocks on mutex A
3407 *
3408 * 2. -dl task is running and holds mutex A
3409 * --> -dl task blocks on mutex A and could preempt the
3410 * running task
3411 */
3412 if (dl_prio(prio)) {
466af29b
ON
3413 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3414 if (!dl_prio(p->normal_prio) ||
3415 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 3416 p->dl.dl_boosted = 1;
1de64443 3417 enqueue_flag |= ENQUEUE_REPLENISH;
2d3d891d
DF
3418 } else
3419 p->dl.dl_boosted = 0;
aab03e05 3420 p->sched_class = &dl_sched_class;
2d3d891d
DF
3421 } else if (rt_prio(prio)) {
3422 if (dl_prio(oldprio))
3423 p->dl.dl_boosted = 0;
3424 if (oldprio < prio)
1de64443 3425 enqueue_flag |= ENQUEUE_HEAD;
dd41f596 3426 p->sched_class = &rt_sched_class;
2d3d891d
DF
3427 } else {
3428 if (dl_prio(oldprio))
3429 p->dl.dl_boosted = 0;
746db944
BS
3430 if (rt_prio(oldprio))
3431 p->rt.timeout = 0;
dd41f596 3432 p->sched_class = &fair_sched_class;
2d3d891d 3433 }
dd41f596 3434
b29739f9
IM
3435 p->prio = prio;
3436
0e1f3483
HS
3437 if (running)
3438 p->sched_class->set_curr_task(rq);
da0c1e65 3439 if (queued)
2d3d891d 3440 enqueue_task(rq, p, enqueue_flag);
cb469845 3441
da7a735e 3442 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3443out_unlock:
4c9a4bc8 3444 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 3445 __task_rq_unlock(rq);
4c9a4bc8
PZ
3446
3447 balance_callback(rq);
3448 preempt_enable();
b29739f9 3449}
b29739f9 3450#endif
d50dde5a 3451
36c8b586 3452void set_user_nice(struct task_struct *p, long nice)
1da177e4 3453{
da0c1e65 3454 int old_prio, delta, queued;
1da177e4 3455 unsigned long flags;
70b97a7f 3456 struct rq *rq;
1da177e4 3457
75e45d51 3458 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
3459 return;
3460 /*
3461 * We have to be careful, if called from sys_setpriority(),
3462 * the task might be in the middle of scheduling on another CPU.
3463 */
3464 rq = task_rq_lock(p, &flags);
3465 /*
3466 * The RT priorities are set via sched_setscheduler(), but we still
3467 * allow the 'normal' nice value to be set - but as expected
3468 * it wont have any effect on scheduling until the task is
aab03e05 3469 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 3470 */
aab03e05 3471 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
3472 p->static_prio = NICE_TO_PRIO(nice);
3473 goto out_unlock;
3474 }
da0c1e65
KT
3475 queued = task_on_rq_queued(p);
3476 if (queued)
1de64443 3477 dequeue_task(rq, p, DEQUEUE_SAVE);
1da177e4 3478
1da177e4 3479 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3480 set_load_weight(p);
b29739f9
IM
3481 old_prio = p->prio;
3482 p->prio = effective_prio(p);
3483 delta = p->prio - old_prio;
1da177e4 3484
da0c1e65 3485 if (queued) {
1de64443 3486 enqueue_task(rq, p, ENQUEUE_RESTORE);
1da177e4 3487 /*
d5f9f942
AM
3488 * If the task increased its priority or is running and
3489 * lowered its priority, then reschedule its CPU:
1da177e4 3490 */
d5f9f942 3491 if (delta < 0 || (delta > 0 && task_running(rq, p)))
8875125e 3492 resched_curr(rq);
1da177e4
LT
3493 }
3494out_unlock:
0122ec5b 3495 task_rq_unlock(rq, p, &flags);
1da177e4 3496}
1da177e4
LT
3497EXPORT_SYMBOL(set_user_nice);
3498
e43379f1
MM
3499/*
3500 * can_nice - check if a task can reduce its nice value
3501 * @p: task
3502 * @nice: nice value
3503 */
36c8b586 3504int can_nice(const struct task_struct *p, const int nice)
e43379f1 3505{
024f4747 3506 /* convert nice value [19,-20] to rlimit style value [1,40] */
7aa2c016 3507 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 3508
78d7d407 3509 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3510 capable(CAP_SYS_NICE));
3511}
3512
1da177e4
LT
3513#ifdef __ARCH_WANT_SYS_NICE
3514
3515/*
3516 * sys_nice - change the priority of the current process.
3517 * @increment: priority increment
3518 *
3519 * sys_setpriority is a more generic, but much slower function that
3520 * does similar things.
3521 */
5add95d4 3522SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3523{
48f24c4d 3524 long nice, retval;
1da177e4
LT
3525
3526 /*
3527 * Setpriority might change our priority at the same moment.
3528 * We don't have to worry. Conceptually one call occurs first
3529 * and we have a single winner.
3530 */
a9467fa3 3531 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 3532 nice = task_nice(current) + increment;
1da177e4 3533
a9467fa3 3534 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
3535 if (increment < 0 && !can_nice(current, nice))
3536 return -EPERM;
3537
1da177e4
LT
3538 retval = security_task_setnice(current, nice);
3539 if (retval)
3540 return retval;
3541
3542 set_user_nice(current, nice);
3543 return 0;
3544}
3545
3546#endif
3547
3548/**
3549 * task_prio - return the priority value of a given task.
3550 * @p: the task in question.
3551 *
e69f6186 3552 * Return: The priority value as seen by users in /proc.
1da177e4
LT
3553 * RT tasks are offset by -200. Normal tasks are centered
3554 * around 0, value goes from -16 to +15.
3555 */
36c8b586 3556int task_prio(const struct task_struct *p)
1da177e4
LT
3557{
3558 return p->prio - MAX_RT_PRIO;
3559}
3560
1da177e4
LT
3561/**
3562 * idle_cpu - is a given cpu idle currently?
3563 * @cpu: the processor in question.
e69f6186
YB
3564 *
3565 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
3566 */
3567int idle_cpu(int cpu)
3568{
908a3283
TG
3569 struct rq *rq = cpu_rq(cpu);
3570
3571 if (rq->curr != rq->idle)
3572 return 0;
3573
3574 if (rq->nr_running)
3575 return 0;
3576
3577#ifdef CONFIG_SMP
3578 if (!llist_empty(&rq->wake_list))
3579 return 0;
3580#endif
3581
3582 return 1;
1da177e4
LT
3583}
3584
1da177e4
LT
3585/**
3586 * idle_task - return the idle task for a given cpu.
3587 * @cpu: the processor in question.
e69f6186
YB
3588 *
3589 * Return: The idle task for the cpu @cpu.
1da177e4 3590 */
36c8b586 3591struct task_struct *idle_task(int cpu)
1da177e4
LT
3592{
3593 return cpu_rq(cpu)->idle;
3594}
3595
3596/**
3597 * find_process_by_pid - find a process with a matching PID value.
3598 * @pid: the pid in question.
e69f6186
YB
3599 *
3600 * The task of @pid, if found. %NULL otherwise.
1da177e4 3601 */
a9957449 3602static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3603{
228ebcbe 3604 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3605}
3606
aab03e05
DF
3607/*
3608 * This function initializes the sched_dl_entity of a newly becoming
3609 * SCHED_DEADLINE task.
3610 *
3611 * Only the static values are considered here, the actual runtime and the
3612 * absolute deadline will be properly calculated when the task is enqueued
3613 * for the first time with its new policy.
3614 */
3615static void
3616__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3617{
3618 struct sched_dl_entity *dl_se = &p->dl;
3619
aab03e05
DF
3620 dl_se->dl_runtime = attr->sched_runtime;
3621 dl_se->dl_deadline = attr->sched_deadline;
755378a4 3622 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
aab03e05 3623 dl_se->flags = attr->sched_flags;
332ac17e 3624 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
40767b0d
PZ
3625
3626 /*
3627 * Changing the parameters of a task is 'tricky' and we're not doing
3628 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3629 *
3630 * What we SHOULD do is delay the bandwidth release until the 0-lag
3631 * point. This would include retaining the task_struct until that time
3632 * and change dl_overflow() to not immediately decrement the current
3633 * amount.
3634 *
3635 * Instead we retain the current runtime/deadline and let the new
3636 * parameters take effect after the current reservation period lapses.
3637 * This is safe (albeit pessimistic) because the 0-lag point is always
3638 * before the current scheduling deadline.
3639 *
3640 * We can still have temporary overloads because we do not delay the
3641 * change in bandwidth until that time; so admission control is
3642 * not on the safe side. It does however guarantee tasks will never
3643 * consume more than promised.
3644 */
aab03e05
DF
3645}
3646
c13db6b1
SR
3647/*
3648 * sched_setparam() passes in -1 for its policy, to let the functions
3649 * it calls know not to change it.
3650 */
3651#define SETPARAM_POLICY -1
3652
c365c292
TG
3653static void __setscheduler_params(struct task_struct *p,
3654 const struct sched_attr *attr)
1da177e4 3655{
d50dde5a
DF
3656 int policy = attr->sched_policy;
3657
c13db6b1 3658 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
3659 policy = p->policy;
3660
1da177e4 3661 p->policy = policy;
d50dde5a 3662
aab03e05
DF
3663 if (dl_policy(policy))
3664 __setparam_dl(p, attr);
39fd8fd2 3665 else if (fair_policy(policy))
d50dde5a
DF
3666 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3667
39fd8fd2
PZ
3668 /*
3669 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3670 * !rt_policy. Always setting this ensures that things like
3671 * getparam()/getattr() don't report silly values for !rt tasks.
3672 */
3673 p->rt_priority = attr->sched_priority;
383afd09 3674 p->normal_prio = normal_prio(p);
c365c292
TG
3675 set_load_weight(p);
3676}
39fd8fd2 3677
c365c292
TG
3678/* Actually do priority change: must hold pi & rq lock. */
3679static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 3680 const struct sched_attr *attr, bool keep_boost)
c365c292
TG
3681{
3682 __setscheduler_params(p, attr);
d50dde5a 3683
383afd09 3684 /*
0782e63b
TG
3685 * Keep a potential priority boosting if called from
3686 * sched_setscheduler().
383afd09 3687 */
0782e63b
TG
3688 if (keep_boost)
3689 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3690 else
3691 p->prio = normal_prio(p);
383afd09 3692
aab03e05
DF
3693 if (dl_prio(p->prio))
3694 p->sched_class = &dl_sched_class;
3695 else if (rt_prio(p->prio))
ffd44db5
PZ
3696 p->sched_class = &rt_sched_class;
3697 else
3698 p->sched_class = &fair_sched_class;
1da177e4 3699}
aab03e05
DF
3700
3701static void
3702__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3703{
3704 struct sched_dl_entity *dl_se = &p->dl;
3705
3706 attr->sched_priority = p->rt_priority;
3707 attr->sched_runtime = dl_se->dl_runtime;
3708 attr->sched_deadline = dl_se->dl_deadline;
755378a4 3709 attr->sched_period = dl_se->dl_period;
aab03e05
DF
3710 attr->sched_flags = dl_se->flags;
3711}
3712
3713/*
3714 * This function validates the new parameters of a -deadline task.
3715 * We ask for the deadline not being zero, and greater or equal
755378a4 3716 * than the runtime, as well as the period of being zero or
332ac17e 3717 * greater than deadline. Furthermore, we have to be sure that
b0827819
JL
3718 * user parameters are above the internal resolution of 1us (we
3719 * check sched_runtime only since it is always the smaller one) and
3720 * below 2^63 ns (we have to check both sched_deadline and
3721 * sched_period, as the latter can be zero).
aab03e05
DF
3722 */
3723static bool
3724__checkparam_dl(const struct sched_attr *attr)
3725{
b0827819
JL
3726 /* deadline != 0 */
3727 if (attr->sched_deadline == 0)
3728 return false;
3729
3730 /*
3731 * Since we truncate DL_SCALE bits, make sure we're at least
3732 * that big.
3733 */
3734 if (attr->sched_runtime < (1ULL << DL_SCALE))
3735 return false;
3736
3737 /*
3738 * Since we use the MSB for wrap-around and sign issues, make
3739 * sure it's not set (mind that period can be equal to zero).
3740 */
3741 if (attr->sched_deadline & (1ULL << 63) ||
3742 attr->sched_period & (1ULL << 63))
3743 return false;
3744
3745 /* runtime <= deadline <= period (if period != 0) */
3746 if ((attr->sched_period != 0 &&
3747 attr->sched_period < attr->sched_deadline) ||
3748 attr->sched_deadline < attr->sched_runtime)
3749 return false;
3750
3751 return true;
aab03e05
DF
3752}
3753
c69e8d9c
DH
3754/*
3755 * check the target process has a UID that matches the current process's
3756 */
3757static bool check_same_owner(struct task_struct *p)
3758{
3759 const struct cred *cred = current_cred(), *pcred;
3760 bool match;
3761
3762 rcu_read_lock();
3763 pcred = __task_cred(p);
9c806aa0
EB
3764 match = (uid_eq(cred->euid, pcred->euid) ||
3765 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
3766 rcu_read_unlock();
3767 return match;
3768}
3769
75381608
WL
3770static bool dl_param_changed(struct task_struct *p,
3771 const struct sched_attr *attr)
3772{
3773 struct sched_dl_entity *dl_se = &p->dl;
3774
3775 if (dl_se->dl_runtime != attr->sched_runtime ||
3776 dl_se->dl_deadline != attr->sched_deadline ||
3777 dl_se->dl_period != attr->sched_period ||
3778 dl_se->flags != attr->sched_flags)
3779 return true;
3780
3781 return false;
3782}
3783
d50dde5a
DF
3784static int __sched_setscheduler(struct task_struct *p,
3785 const struct sched_attr *attr,
dbc7f069 3786 bool user, bool pi)
1da177e4 3787{
383afd09
SR
3788 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3789 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 3790 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 3791 int new_effective_prio, policy = attr->sched_policy;
1da177e4 3792 unsigned long flags;
83ab0aa0 3793 const struct sched_class *prev_class;
70b97a7f 3794 struct rq *rq;
ca94c442 3795 int reset_on_fork;
1da177e4 3796
66e5393a
SR
3797 /* may grab non-irq protected spin_locks */
3798 BUG_ON(in_interrupt());
1da177e4
LT
3799recheck:
3800 /* double check policy once rq lock held */
ca94c442
LP
3801 if (policy < 0) {
3802 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3803 policy = oldpolicy = p->policy;
ca94c442 3804 } else {
7479f3c9 3805 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 3806
20f9cd2a 3807 if (!valid_policy(policy))
ca94c442
LP
3808 return -EINVAL;
3809 }
3810
7479f3c9
PZ
3811 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3812 return -EINVAL;
3813
1da177e4
LT
3814 /*
3815 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3816 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3817 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 3818 */
0bb040a4 3819 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 3820 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 3821 return -EINVAL;
aab03e05
DF
3822 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3823 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
3824 return -EINVAL;
3825
37e4ab3f
OC
3826 /*
3827 * Allow unprivileged RT tasks to decrease priority:
3828 */
961ccddd 3829 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 3830 if (fair_policy(policy)) {
d0ea0268 3831 if (attr->sched_nice < task_nice(p) &&
eaad4513 3832 !can_nice(p, attr->sched_nice))
d50dde5a
DF
3833 return -EPERM;
3834 }
3835
e05606d3 3836 if (rt_policy(policy)) {
a44702e8
ON
3837 unsigned long rlim_rtprio =
3838 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
3839
3840 /* can't set/change the rt policy */
3841 if (policy != p->policy && !rlim_rtprio)
3842 return -EPERM;
3843
3844 /* can't increase priority */
d50dde5a
DF
3845 if (attr->sched_priority > p->rt_priority &&
3846 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
3847 return -EPERM;
3848 }
c02aa73b 3849
d44753b8
JL
3850 /*
3851 * Can't set/change SCHED_DEADLINE policy at all for now
3852 * (safest behavior); in the future we would like to allow
3853 * unprivileged DL tasks to increase their relative deadline
3854 * or reduce their runtime (both ways reducing utilization)
3855 */
3856 if (dl_policy(policy))
3857 return -EPERM;
3858
dd41f596 3859 /*
c02aa73b
DH
3860 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3861 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 3862 */
20f9cd2a 3863 if (idle_policy(p->policy) && !idle_policy(policy)) {
d0ea0268 3864 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
3865 return -EPERM;
3866 }
5fe1d75f 3867
37e4ab3f 3868 /* can't change other user's priorities */
c69e8d9c 3869 if (!check_same_owner(p))
37e4ab3f 3870 return -EPERM;
ca94c442
LP
3871
3872 /* Normal users shall not reset the sched_reset_on_fork flag */
3873 if (p->sched_reset_on_fork && !reset_on_fork)
3874 return -EPERM;
37e4ab3f 3875 }
1da177e4 3876
725aad24 3877 if (user) {
b0ae1981 3878 retval = security_task_setscheduler(p);
725aad24
JF
3879 if (retval)
3880 return retval;
3881 }
3882
b29739f9
IM
3883 /*
3884 * make sure no PI-waiters arrive (or leave) while we are
3885 * changing the priority of the task:
0122ec5b 3886 *
25985edc 3887 * To be able to change p->policy safely, the appropriate
1da177e4
LT
3888 * runqueue lock must be held.
3889 */
0122ec5b 3890 rq = task_rq_lock(p, &flags);
dc61b1d6 3891
34f971f6
PZ
3892 /*
3893 * Changing the policy of the stop threads its a very bad idea
3894 */
3895 if (p == rq->stop) {
0122ec5b 3896 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
3897 return -EINVAL;
3898 }
3899
a51e9198 3900 /*
d6b1e911
TG
3901 * If not changing anything there's no need to proceed further,
3902 * but store a possible modification of reset_on_fork.
a51e9198 3903 */
d50dde5a 3904 if (unlikely(policy == p->policy)) {
d0ea0268 3905 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
3906 goto change;
3907 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3908 goto change;
75381608 3909 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 3910 goto change;
d50dde5a 3911
d6b1e911 3912 p->sched_reset_on_fork = reset_on_fork;
45afb173 3913 task_rq_unlock(rq, p, &flags);
a51e9198
DF
3914 return 0;
3915 }
d50dde5a 3916change:
a51e9198 3917
dc61b1d6 3918 if (user) {
332ac17e 3919#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
3920 /*
3921 * Do not allow realtime tasks into groups that have no runtime
3922 * assigned.
3923 */
3924 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
3925 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3926 !task_group_is_autogroup(task_group(p))) {
0122ec5b 3927 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
3928 return -EPERM;
3929 }
dc61b1d6 3930#endif
332ac17e
DF
3931#ifdef CONFIG_SMP
3932 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3933 cpumask_t *span = rq->rd->span;
332ac17e
DF
3934
3935 /*
3936 * Don't allow tasks with an affinity mask smaller than
3937 * the entire root_domain to become SCHED_DEADLINE. We
3938 * will also fail if there's no bandwidth available.
3939 */
e4099a5e
PZ
3940 if (!cpumask_subset(span, &p->cpus_allowed) ||
3941 rq->rd->dl_bw.bw == 0) {
332ac17e
DF
3942 task_rq_unlock(rq, p, &flags);
3943 return -EPERM;
3944 }
3945 }
3946#endif
3947 }
dc61b1d6 3948
1da177e4
LT
3949 /* recheck policy now with rq lock held */
3950 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3951 policy = oldpolicy = -1;
0122ec5b 3952 task_rq_unlock(rq, p, &flags);
1da177e4
LT
3953 goto recheck;
3954 }
332ac17e
DF
3955
3956 /*
3957 * If setscheduling to SCHED_DEADLINE (or changing the parameters
3958 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
3959 * is available.
3960 */
e4099a5e 3961 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
332ac17e
DF
3962 task_rq_unlock(rq, p, &flags);
3963 return -EBUSY;
3964 }
3965
c365c292
TG
3966 p->sched_reset_on_fork = reset_on_fork;
3967 oldprio = p->prio;
3968
dbc7f069
PZ
3969 if (pi) {
3970 /*
3971 * Take priority boosted tasks into account. If the new
3972 * effective priority is unchanged, we just store the new
3973 * normal parameters and do not touch the scheduler class and
3974 * the runqueue. This will be done when the task deboost
3975 * itself.
3976 */
3977 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
3978 if (new_effective_prio == oldprio) {
3979 __setscheduler_params(p, attr);
3980 task_rq_unlock(rq, p, &flags);
3981 return 0;
3982 }
c365c292
TG
3983 }
3984
da0c1e65 3985 queued = task_on_rq_queued(p);
051a1d1a 3986 running = task_current(rq, p);
da0c1e65 3987 if (queued)
1de64443 3988 dequeue_task(rq, p, DEQUEUE_SAVE);
0e1f3483 3989 if (running)
f3cd1c4e 3990 put_prev_task(rq, p);
f6b53205 3991
83ab0aa0 3992 prev_class = p->sched_class;
dbc7f069 3993 __setscheduler(rq, p, attr, pi);
f6b53205 3994
0e1f3483
HS
3995 if (running)
3996 p->sched_class->set_curr_task(rq);
da0c1e65 3997 if (queued) {
1de64443 3998 int enqueue_flags = ENQUEUE_RESTORE;
81a44c54
TG
3999 /*
4000 * We enqueue to tail when the priority of a task is
4001 * increased (user space view).
4002 */
1de64443
PZ
4003 if (oldprio <= p->prio)
4004 enqueue_flags |= ENQUEUE_HEAD;
4005
4006 enqueue_task(rq, p, enqueue_flags);
81a44c54 4007 }
cb469845 4008
da7a735e 4009 check_class_changed(rq, p, prev_class, oldprio);
4c9a4bc8 4010 preempt_disable(); /* avoid rq from going away on us */
0122ec5b 4011 task_rq_unlock(rq, p, &flags);
b29739f9 4012
dbc7f069
PZ
4013 if (pi)
4014 rt_mutex_adjust_pi(p);
95e02ca9 4015
4c9a4bc8
PZ
4016 /*
4017 * Run balance callbacks after we've adjusted the PI chain.
4018 */
4019 balance_callback(rq);
4020 preempt_enable();
95e02ca9 4021
1da177e4
LT
4022 return 0;
4023}
961ccddd 4024
7479f3c9
PZ
4025static int _sched_setscheduler(struct task_struct *p, int policy,
4026 const struct sched_param *param, bool check)
4027{
4028 struct sched_attr attr = {
4029 .sched_policy = policy,
4030 .sched_priority = param->sched_priority,
4031 .sched_nice = PRIO_TO_NICE(p->static_prio),
4032 };
4033
c13db6b1
SR
4034 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4035 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
4036 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4037 policy &= ~SCHED_RESET_ON_FORK;
4038 attr.sched_policy = policy;
4039 }
4040
dbc7f069 4041 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 4042}
961ccddd
RR
4043/**
4044 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4045 * @p: the task in question.
4046 * @policy: new policy.
4047 * @param: structure containing the new RT priority.
4048 *
e69f6186
YB
4049 * Return: 0 on success. An error code otherwise.
4050 *
961ccddd
RR
4051 * NOTE that the task may be already dead.
4052 */
4053int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4054 const struct sched_param *param)
961ccddd 4055{
7479f3c9 4056 return _sched_setscheduler(p, policy, param, true);
961ccddd 4057}
1da177e4
LT
4058EXPORT_SYMBOL_GPL(sched_setscheduler);
4059
d50dde5a
DF
4060int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4061{
dbc7f069 4062 return __sched_setscheduler(p, attr, true, true);
d50dde5a
DF
4063}
4064EXPORT_SYMBOL_GPL(sched_setattr);
4065
961ccddd
RR
4066/**
4067 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4068 * @p: the task in question.
4069 * @policy: new policy.
4070 * @param: structure containing the new RT priority.
4071 *
4072 * Just like sched_setscheduler, only don't bother checking if the
4073 * current context has permission. For example, this is needed in
4074 * stop_machine(): we create temporary high priority worker threads,
4075 * but our caller might not have that capability.
e69f6186
YB
4076 *
4077 * Return: 0 on success. An error code otherwise.
961ccddd
RR
4078 */
4079int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4080 const struct sched_param *param)
961ccddd 4081{
7479f3c9 4082 return _sched_setscheduler(p, policy, param, false);
961ccddd 4083}
84778472 4084EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
961ccddd 4085
95cdf3b7
IM
4086static int
4087do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4088{
1da177e4
LT
4089 struct sched_param lparam;
4090 struct task_struct *p;
36c8b586 4091 int retval;
1da177e4
LT
4092
4093 if (!param || pid < 0)
4094 return -EINVAL;
4095 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4096 return -EFAULT;
5fe1d75f
ON
4097
4098 rcu_read_lock();
4099 retval = -ESRCH;
1da177e4 4100 p = find_process_by_pid(pid);
5fe1d75f
ON
4101 if (p != NULL)
4102 retval = sched_setscheduler(p, policy, &lparam);
4103 rcu_read_unlock();
36c8b586 4104
1da177e4
LT
4105 return retval;
4106}
4107
d50dde5a
DF
4108/*
4109 * Mimics kernel/events/core.c perf_copy_attr().
4110 */
4111static int sched_copy_attr(struct sched_attr __user *uattr,
4112 struct sched_attr *attr)
4113{
4114 u32 size;
4115 int ret;
4116
4117 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4118 return -EFAULT;
4119
4120 /*
4121 * zero the full structure, so that a short copy will be nice.
4122 */
4123 memset(attr, 0, sizeof(*attr));
4124
4125 ret = get_user(size, &uattr->size);
4126 if (ret)
4127 return ret;
4128
4129 if (size > PAGE_SIZE) /* silly large */
4130 goto err_size;
4131
4132 if (!size) /* abi compat */
4133 size = SCHED_ATTR_SIZE_VER0;
4134
4135 if (size < SCHED_ATTR_SIZE_VER0)
4136 goto err_size;
4137
4138 /*
4139 * If we're handed a bigger struct than we know of,
4140 * ensure all the unknown bits are 0 - i.e. new
4141 * user-space does not rely on any kernel feature
4142 * extensions we dont know about yet.
4143 */
4144 if (size > sizeof(*attr)) {
4145 unsigned char __user *addr;
4146 unsigned char __user *end;
4147 unsigned char val;
4148
4149 addr = (void __user *)uattr + sizeof(*attr);
4150 end = (void __user *)uattr + size;
4151
4152 for (; addr < end; addr++) {
4153 ret = get_user(val, addr);
4154 if (ret)
4155 return ret;
4156 if (val)
4157 goto err_size;
4158 }
4159 size = sizeof(*attr);
4160 }
4161
4162 ret = copy_from_user(attr, uattr, size);
4163 if (ret)
4164 return -EFAULT;
4165
4166 /*
4167 * XXX: do we want to be lenient like existing syscalls; or do we want
4168 * to be strict and return an error on out-of-bounds values?
4169 */
75e45d51 4170 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 4171
e78c7bca 4172 return 0;
d50dde5a
DF
4173
4174err_size:
4175 put_user(sizeof(*attr), &uattr->size);
e78c7bca 4176 return -E2BIG;
d50dde5a
DF
4177}
4178
1da177e4
LT
4179/**
4180 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4181 * @pid: the pid in question.
4182 * @policy: new policy.
4183 * @param: structure containing the new RT priority.
e69f6186
YB
4184 *
4185 * Return: 0 on success. An error code otherwise.
1da177e4 4186 */
5add95d4
HC
4187SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4188 struct sched_param __user *, param)
1da177e4 4189{
c21761f1
JB
4190 /* negative values for policy are not valid */
4191 if (policy < 0)
4192 return -EINVAL;
4193
1da177e4
LT
4194 return do_sched_setscheduler(pid, policy, param);
4195}
4196
4197/**
4198 * sys_sched_setparam - set/change the RT priority of a thread
4199 * @pid: the pid in question.
4200 * @param: structure containing the new RT priority.
e69f6186
YB
4201 *
4202 * Return: 0 on success. An error code otherwise.
1da177e4 4203 */
5add95d4 4204SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4205{
c13db6b1 4206 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
4207}
4208
d50dde5a
DF
4209/**
4210 * sys_sched_setattr - same as above, but with extended sched_attr
4211 * @pid: the pid in question.
5778fccf 4212 * @uattr: structure containing the extended parameters.
db66d756 4213 * @flags: for future extension.
d50dde5a 4214 */
6d35ab48
PZ
4215SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4216 unsigned int, flags)
d50dde5a
DF
4217{
4218 struct sched_attr attr;
4219 struct task_struct *p;
4220 int retval;
4221
6d35ab48 4222 if (!uattr || pid < 0 || flags)
d50dde5a
DF
4223 return -EINVAL;
4224
143cf23d
MK
4225 retval = sched_copy_attr(uattr, &attr);
4226 if (retval)
4227 return retval;
d50dde5a 4228
b14ed2c2 4229 if ((int)attr.sched_policy < 0)
dbdb2275 4230 return -EINVAL;
d50dde5a
DF
4231
4232 rcu_read_lock();
4233 retval = -ESRCH;
4234 p = find_process_by_pid(pid);
4235 if (p != NULL)
4236 retval = sched_setattr(p, &attr);
4237 rcu_read_unlock();
4238
4239 return retval;
4240}
4241
1da177e4
LT
4242/**
4243 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4244 * @pid: the pid in question.
e69f6186
YB
4245 *
4246 * Return: On success, the policy of the thread. Otherwise, a negative error
4247 * code.
1da177e4 4248 */
5add95d4 4249SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4250{
36c8b586 4251 struct task_struct *p;
3a5c359a 4252 int retval;
1da177e4
LT
4253
4254 if (pid < 0)
3a5c359a 4255 return -EINVAL;
1da177e4
LT
4256
4257 retval = -ESRCH;
5fe85be0 4258 rcu_read_lock();
1da177e4
LT
4259 p = find_process_by_pid(pid);
4260 if (p) {
4261 retval = security_task_getscheduler(p);
4262 if (!retval)
ca94c442
LP
4263 retval = p->policy
4264 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4265 }
5fe85be0 4266 rcu_read_unlock();
1da177e4
LT
4267 return retval;
4268}
4269
4270/**
ca94c442 4271 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4272 * @pid: the pid in question.
4273 * @param: structure containing the RT priority.
e69f6186
YB
4274 *
4275 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4276 * code.
1da177e4 4277 */
5add95d4 4278SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 4279{
ce5f7f82 4280 struct sched_param lp = { .sched_priority = 0 };
36c8b586 4281 struct task_struct *p;
3a5c359a 4282 int retval;
1da177e4
LT
4283
4284 if (!param || pid < 0)
3a5c359a 4285 return -EINVAL;
1da177e4 4286
5fe85be0 4287 rcu_read_lock();
1da177e4
LT
4288 p = find_process_by_pid(pid);
4289 retval = -ESRCH;
4290 if (!p)
4291 goto out_unlock;
4292
4293 retval = security_task_getscheduler(p);
4294 if (retval)
4295 goto out_unlock;
4296
ce5f7f82
PZ
4297 if (task_has_rt_policy(p))
4298 lp.sched_priority = p->rt_priority;
5fe85be0 4299 rcu_read_unlock();
1da177e4
LT
4300
4301 /*
4302 * This one might sleep, we cannot do it with a spinlock held ...
4303 */
4304 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4305
1da177e4
LT
4306 return retval;
4307
4308out_unlock:
5fe85be0 4309 rcu_read_unlock();
1da177e4
LT
4310 return retval;
4311}
4312
d50dde5a
DF
4313static int sched_read_attr(struct sched_attr __user *uattr,
4314 struct sched_attr *attr,
4315 unsigned int usize)
4316{
4317 int ret;
4318
4319 if (!access_ok(VERIFY_WRITE, uattr, usize))
4320 return -EFAULT;
4321
4322 /*
4323 * If we're handed a smaller struct than we know of,
4324 * ensure all the unknown bits are 0 - i.e. old
4325 * user-space does not get uncomplete information.
4326 */
4327 if (usize < sizeof(*attr)) {
4328 unsigned char *addr;
4329 unsigned char *end;
4330
4331 addr = (void *)attr + usize;
4332 end = (void *)attr + sizeof(*attr);
4333
4334 for (; addr < end; addr++) {
4335 if (*addr)
22400674 4336 return -EFBIG;
d50dde5a
DF
4337 }
4338
4339 attr->size = usize;
4340 }
4341
4efbc454 4342 ret = copy_to_user(uattr, attr, attr->size);
d50dde5a
DF
4343 if (ret)
4344 return -EFAULT;
4345
22400674 4346 return 0;
d50dde5a
DF
4347}
4348
4349/**
aab03e05 4350 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 4351 * @pid: the pid in question.
5778fccf 4352 * @uattr: structure containing the extended parameters.
d50dde5a 4353 * @size: sizeof(attr) for fwd/bwd comp.
db66d756 4354 * @flags: for future extension.
d50dde5a 4355 */
6d35ab48
PZ
4356SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4357 unsigned int, size, unsigned int, flags)
d50dde5a
DF
4358{
4359 struct sched_attr attr = {
4360 .size = sizeof(struct sched_attr),
4361 };
4362 struct task_struct *p;
4363 int retval;
4364
4365 if (!uattr || pid < 0 || size > PAGE_SIZE ||
6d35ab48 4366 size < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
4367 return -EINVAL;
4368
4369 rcu_read_lock();
4370 p = find_process_by_pid(pid);
4371 retval = -ESRCH;
4372 if (!p)
4373 goto out_unlock;
4374
4375 retval = security_task_getscheduler(p);
4376 if (retval)
4377 goto out_unlock;
4378
4379 attr.sched_policy = p->policy;
7479f3c9
PZ
4380 if (p->sched_reset_on_fork)
4381 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05
DF
4382 if (task_has_dl_policy(p))
4383 __getparam_dl(p, &attr);
4384 else if (task_has_rt_policy(p))
d50dde5a
DF
4385 attr.sched_priority = p->rt_priority;
4386 else
d0ea0268 4387 attr.sched_nice = task_nice(p);
d50dde5a
DF
4388
4389 rcu_read_unlock();
4390
4391 retval = sched_read_attr(uattr, &attr, size);
4392 return retval;
4393
4394out_unlock:
4395 rcu_read_unlock();
4396 return retval;
4397}
4398
96f874e2 4399long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4400{
5a16f3d3 4401 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4402 struct task_struct *p;
4403 int retval;
1da177e4 4404
23f5d142 4405 rcu_read_lock();
1da177e4
LT
4406
4407 p = find_process_by_pid(pid);
4408 if (!p) {
23f5d142 4409 rcu_read_unlock();
1da177e4
LT
4410 return -ESRCH;
4411 }
4412
23f5d142 4413 /* Prevent p going away */
1da177e4 4414 get_task_struct(p);
23f5d142 4415 rcu_read_unlock();
1da177e4 4416
14a40ffc
TH
4417 if (p->flags & PF_NO_SETAFFINITY) {
4418 retval = -EINVAL;
4419 goto out_put_task;
4420 }
5a16f3d3
RR
4421 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4422 retval = -ENOMEM;
4423 goto out_put_task;
4424 }
4425 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4426 retval = -ENOMEM;
4427 goto out_free_cpus_allowed;
4428 }
1da177e4 4429 retval = -EPERM;
4c44aaaf
EB
4430 if (!check_same_owner(p)) {
4431 rcu_read_lock();
4432 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4433 rcu_read_unlock();
16303ab2 4434 goto out_free_new_mask;
4c44aaaf
EB
4435 }
4436 rcu_read_unlock();
4437 }
1da177e4 4438
b0ae1981 4439 retval = security_task_setscheduler(p);
e7834f8f 4440 if (retval)
16303ab2 4441 goto out_free_new_mask;
e7834f8f 4442
e4099a5e
PZ
4443
4444 cpuset_cpus_allowed(p, cpus_allowed);
4445 cpumask_and(new_mask, in_mask, cpus_allowed);
4446
332ac17e
DF
4447 /*
4448 * Since bandwidth control happens on root_domain basis,
4449 * if admission test is enabled, we only admit -deadline
4450 * tasks allowed to run on all the CPUs in the task's
4451 * root_domain.
4452 */
4453#ifdef CONFIG_SMP
f1e3a093
KT
4454 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4455 rcu_read_lock();
4456 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 4457 retval = -EBUSY;
f1e3a093 4458 rcu_read_unlock();
16303ab2 4459 goto out_free_new_mask;
332ac17e 4460 }
f1e3a093 4461 rcu_read_unlock();
332ac17e
DF
4462 }
4463#endif
49246274 4464again:
25834c73 4465 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 4466
8707d8b8 4467 if (!retval) {
5a16f3d3
RR
4468 cpuset_cpus_allowed(p, cpus_allowed);
4469 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4470 /*
4471 * We must have raced with a concurrent cpuset
4472 * update. Just reset the cpus_allowed to the
4473 * cpuset's cpus_allowed
4474 */
5a16f3d3 4475 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4476 goto again;
4477 }
4478 }
16303ab2 4479out_free_new_mask:
5a16f3d3
RR
4480 free_cpumask_var(new_mask);
4481out_free_cpus_allowed:
4482 free_cpumask_var(cpus_allowed);
4483out_put_task:
1da177e4 4484 put_task_struct(p);
1da177e4
LT
4485 return retval;
4486}
4487
4488static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4489 struct cpumask *new_mask)
1da177e4 4490{
96f874e2
RR
4491 if (len < cpumask_size())
4492 cpumask_clear(new_mask);
4493 else if (len > cpumask_size())
4494 len = cpumask_size();
4495
1da177e4
LT
4496 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4497}
4498
4499/**
4500 * sys_sched_setaffinity - set the cpu affinity of a process
4501 * @pid: pid of the process
4502 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4503 * @user_mask_ptr: user-space pointer to the new cpu mask
e69f6186
YB
4504 *
4505 * Return: 0 on success. An error code otherwise.
1da177e4 4506 */
5add95d4
HC
4507SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4508 unsigned long __user *, user_mask_ptr)
1da177e4 4509{
5a16f3d3 4510 cpumask_var_t new_mask;
1da177e4
LT
4511 int retval;
4512
5a16f3d3
RR
4513 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4514 return -ENOMEM;
1da177e4 4515
5a16f3d3
RR
4516 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4517 if (retval == 0)
4518 retval = sched_setaffinity(pid, new_mask);
4519 free_cpumask_var(new_mask);
4520 return retval;
1da177e4
LT
4521}
4522
96f874e2 4523long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4524{
36c8b586 4525 struct task_struct *p;
31605683 4526 unsigned long flags;
1da177e4 4527 int retval;
1da177e4 4528
23f5d142 4529 rcu_read_lock();
1da177e4
LT
4530
4531 retval = -ESRCH;
4532 p = find_process_by_pid(pid);
4533 if (!p)
4534 goto out_unlock;
4535
e7834f8f
DQ
4536 retval = security_task_getscheduler(p);
4537 if (retval)
4538 goto out_unlock;
4539
013fdb80 4540 raw_spin_lock_irqsave(&p->pi_lock, flags);
6acce3ef 4541 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
013fdb80 4542 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4543
4544out_unlock:
23f5d142 4545 rcu_read_unlock();
1da177e4 4546
9531b62f 4547 return retval;
1da177e4
LT
4548}
4549
4550/**
4551 * sys_sched_getaffinity - get the cpu affinity of a process
4552 * @pid: pid of the process
4553 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4554 * @user_mask_ptr: user-space pointer to hold the current cpu mask
e69f6186
YB
4555 *
4556 * Return: 0 on success. An error code otherwise.
1da177e4 4557 */
5add95d4
HC
4558SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4559 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4560{
4561 int ret;
f17c8607 4562 cpumask_var_t mask;
1da177e4 4563
84fba5ec 4564 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4565 return -EINVAL;
4566 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4567 return -EINVAL;
4568
f17c8607
RR
4569 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4570 return -ENOMEM;
1da177e4 4571
f17c8607
RR
4572 ret = sched_getaffinity(pid, mask);
4573 if (ret == 0) {
8bc037fb 4574 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4575
4576 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4577 ret = -EFAULT;
4578 else
cd3d8031 4579 ret = retlen;
f17c8607
RR
4580 }
4581 free_cpumask_var(mask);
1da177e4 4582
f17c8607 4583 return ret;
1da177e4
LT
4584}
4585
4586/**
4587 * sys_sched_yield - yield the current processor to other threads.
4588 *
dd41f596
IM
4589 * This function yields the current CPU to other tasks. If there are no
4590 * other threads running on this CPU then this function will return.
e69f6186
YB
4591 *
4592 * Return: 0.
1da177e4 4593 */
5add95d4 4594SYSCALL_DEFINE0(sched_yield)
1da177e4 4595{
70b97a7f 4596 struct rq *rq = this_rq_lock();
1da177e4 4597
2d72376b 4598 schedstat_inc(rq, yld_count);
4530d7ab 4599 current->sched_class->yield_task(rq);
1da177e4
LT
4600
4601 /*
4602 * Since we are going to call schedule() anyway, there's
4603 * no need to preempt or enable interrupts:
4604 */
4605 __release(rq->lock);
8a25d5de 4606 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4607 do_raw_spin_unlock(&rq->lock);
ba74c144 4608 sched_preempt_enable_no_resched();
1da177e4
LT
4609
4610 schedule();
4611
4612 return 0;
4613}
4614
02b67cc3 4615int __sched _cond_resched(void)
1da177e4 4616{
fe32d3cd 4617 if (should_resched(0)) {
a18b5d01 4618 preempt_schedule_common();
1da177e4
LT
4619 return 1;
4620 }
4621 return 0;
4622}
02b67cc3 4623EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
4624
4625/*
613afbf8 4626 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4627 * call schedule, and on return reacquire the lock.
4628 *
41a2d6cf 4629 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4630 * operations here to prevent schedule() from being called twice (once via
4631 * spin_unlock(), once by hand).
4632 */
613afbf8 4633int __cond_resched_lock(spinlock_t *lock)
1da177e4 4634{
fe32d3cd 4635 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
4636 int ret = 0;
4637
f607c668
PZ
4638 lockdep_assert_held(lock);
4639
4a81e832 4640 if (spin_needbreak(lock) || resched) {
1da177e4 4641 spin_unlock(lock);
d86ee480 4642 if (resched)
a18b5d01 4643 preempt_schedule_common();
95c354fe
NP
4644 else
4645 cpu_relax();
6df3cecb 4646 ret = 1;
1da177e4 4647 spin_lock(lock);
1da177e4 4648 }
6df3cecb 4649 return ret;
1da177e4 4650}
613afbf8 4651EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4652
613afbf8 4653int __sched __cond_resched_softirq(void)
1da177e4
LT
4654{
4655 BUG_ON(!in_softirq());
4656
fe32d3cd 4657 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
98d82567 4658 local_bh_enable();
a18b5d01 4659 preempt_schedule_common();
1da177e4
LT
4660 local_bh_disable();
4661 return 1;
4662 }
4663 return 0;
4664}
613afbf8 4665EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4666
1da177e4
LT
4667/**
4668 * yield - yield the current processor to other threads.
4669 *
8e3fabfd
PZ
4670 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4671 *
4672 * The scheduler is at all times free to pick the calling task as the most
4673 * eligible task to run, if removing the yield() call from your code breaks
4674 * it, its already broken.
4675 *
4676 * Typical broken usage is:
4677 *
4678 * while (!event)
4679 * yield();
4680 *
4681 * where one assumes that yield() will let 'the other' process run that will
4682 * make event true. If the current task is a SCHED_FIFO task that will never
4683 * happen. Never use yield() as a progress guarantee!!
4684 *
4685 * If you want to use yield() to wait for something, use wait_event().
4686 * If you want to use yield() to be 'nice' for others, use cond_resched().
4687 * If you still want to use yield(), do not!
1da177e4
LT
4688 */
4689void __sched yield(void)
4690{
4691 set_current_state(TASK_RUNNING);
4692 sys_sched_yield();
4693}
1da177e4
LT
4694EXPORT_SYMBOL(yield);
4695
d95f4122
MG
4696/**
4697 * yield_to - yield the current processor to another thread in
4698 * your thread group, or accelerate that thread toward the
4699 * processor it's on.
16addf95
RD
4700 * @p: target task
4701 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4702 *
4703 * It's the caller's job to ensure that the target task struct
4704 * can't go away on us before we can do any checks.
4705 *
e69f6186 4706 * Return:
7b270f60
PZ
4707 * true (>0) if we indeed boosted the target task.
4708 * false (0) if we failed to boost the target.
4709 * -ESRCH if there's no task to yield to.
d95f4122 4710 */
fa93384f 4711int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
4712{
4713 struct task_struct *curr = current;
4714 struct rq *rq, *p_rq;
4715 unsigned long flags;
c3c18640 4716 int yielded = 0;
d95f4122
MG
4717
4718 local_irq_save(flags);
4719 rq = this_rq();
4720
4721again:
4722 p_rq = task_rq(p);
7b270f60
PZ
4723 /*
4724 * If we're the only runnable task on the rq and target rq also
4725 * has only one task, there's absolutely no point in yielding.
4726 */
4727 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4728 yielded = -ESRCH;
4729 goto out_irq;
4730 }
4731
d95f4122 4732 double_rq_lock(rq, p_rq);
39e24d8f 4733 if (task_rq(p) != p_rq) {
d95f4122
MG
4734 double_rq_unlock(rq, p_rq);
4735 goto again;
4736 }
4737
4738 if (!curr->sched_class->yield_to_task)
7b270f60 4739 goto out_unlock;
d95f4122
MG
4740
4741 if (curr->sched_class != p->sched_class)
7b270f60 4742 goto out_unlock;
d95f4122
MG
4743
4744 if (task_running(p_rq, p) || p->state)
7b270f60 4745 goto out_unlock;
d95f4122
MG
4746
4747 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4748 if (yielded) {
d95f4122 4749 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4750 /*
4751 * Make p's CPU reschedule; pick_next_entity takes care of
4752 * fairness.
4753 */
4754 if (preempt && rq != p_rq)
8875125e 4755 resched_curr(p_rq);
6d1cafd8 4756 }
d95f4122 4757
7b270f60 4758out_unlock:
d95f4122 4759 double_rq_unlock(rq, p_rq);
7b270f60 4760out_irq:
d95f4122
MG
4761 local_irq_restore(flags);
4762
7b270f60 4763 if (yielded > 0)
d95f4122
MG
4764 schedule();
4765
4766 return yielded;
4767}
4768EXPORT_SYMBOL_GPL(yield_to);
4769
1da177e4 4770/*
41a2d6cf 4771 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4772 * that process accounting knows that this is a task in IO wait state.
1da177e4 4773 */
1da177e4
LT
4774long __sched io_schedule_timeout(long timeout)
4775{
9cff8ade
N
4776 int old_iowait = current->in_iowait;
4777 struct rq *rq;
1da177e4
LT
4778 long ret;
4779
9cff8ade 4780 current->in_iowait = 1;
10d784ea 4781 blk_schedule_flush_plug(current);
9cff8ade 4782
0ff92245 4783 delayacct_blkio_start();
9cff8ade 4784 rq = raw_rq();
1da177e4
LT
4785 atomic_inc(&rq->nr_iowait);
4786 ret = schedule_timeout(timeout);
9cff8ade 4787 current->in_iowait = old_iowait;
1da177e4 4788 atomic_dec(&rq->nr_iowait);
0ff92245 4789 delayacct_blkio_end();
9cff8ade 4790
1da177e4
LT
4791 return ret;
4792}
9cff8ade 4793EXPORT_SYMBOL(io_schedule_timeout);
1da177e4
LT
4794
4795/**
4796 * sys_sched_get_priority_max - return maximum RT priority.
4797 * @policy: scheduling class.
4798 *
e69f6186
YB
4799 * Return: On success, this syscall returns the maximum
4800 * rt_priority that can be used by a given scheduling class.
4801 * On failure, a negative error code is returned.
1da177e4 4802 */
5add95d4 4803SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4804{
4805 int ret = -EINVAL;
4806
4807 switch (policy) {
4808 case SCHED_FIFO:
4809 case SCHED_RR:
4810 ret = MAX_USER_RT_PRIO-1;
4811 break;
aab03e05 4812 case SCHED_DEADLINE:
1da177e4 4813 case SCHED_NORMAL:
b0a9499c 4814 case SCHED_BATCH:
dd41f596 4815 case SCHED_IDLE:
1da177e4
LT
4816 ret = 0;
4817 break;
4818 }
4819 return ret;
4820}
4821
4822/**
4823 * sys_sched_get_priority_min - return minimum RT priority.
4824 * @policy: scheduling class.
4825 *
e69f6186
YB
4826 * Return: On success, this syscall returns the minimum
4827 * rt_priority that can be used by a given scheduling class.
4828 * On failure, a negative error code is returned.
1da177e4 4829 */
5add95d4 4830SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4831{
4832 int ret = -EINVAL;
4833
4834 switch (policy) {
4835 case SCHED_FIFO:
4836 case SCHED_RR:
4837 ret = 1;
4838 break;
aab03e05 4839 case SCHED_DEADLINE:
1da177e4 4840 case SCHED_NORMAL:
b0a9499c 4841 case SCHED_BATCH:
dd41f596 4842 case SCHED_IDLE:
1da177e4
LT
4843 ret = 0;
4844 }
4845 return ret;
4846}
4847
4848/**
4849 * sys_sched_rr_get_interval - return the default timeslice of a process.
4850 * @pid: pid of the process.
4851 * @interval: userspace pointer to the timeslice value.
4852 *
4853 * this syscall writes the default timeslice value of a given process
4854 * into the user-space timespec buffer. A value of '0' means infinity.
e69f6186
YB
4855 *
4856 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4857 * an error code.
1da177e4 4858 */
17da2bd9 4859SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4860 struct timespec __user *, interval)
1da177e4 4861{
36c8b586 4862 struct task_struct *p;
a4ec24b4 4863 unsigned int time_slice;
dba091b9
TG
4864 unsigned long flags;
4865 struct rq *rq;
3a5c359a 4866 int retval;
1da177e4 4867 struct timespec t;
1da177e4
LT
4868
4869 if (pid < 0)
3a5c359a 4870 return -EINVAL;
1da177e4
LT
4871
4872 retval = -ESRCH;
1a551ae7 4873 rcu_read_lock();
1da177e4
LT
4874 p = find_process_by_pid(pid);
4875 if (!p)
4876 goto out_unlock;
4877
4878 retval = security_task_getscheduler(p);
4879 if (retval)
4880 goto out_unlock;
4881
dba091b9 4882 rq = task_rq_lock(p, &flags);
a57beec5
PZ
4883 time_slice = 0;
4884 if (p->sched_class->get_rr_interval)
4885 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4886 task_rq_unlock(rq, p, &flags);
a4ec24b4 4887
1a551ae7 4888 rcu_read_unlock();
a4ec24b4 4889 jiffies_to_timespec(time_slice, &t);
1da177e4 4890 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4891 return retval;
3a5c359a 4892
1da177e4 4893out_unlock:
1a551ae7 4894 rcu_read_unlock();
1da177e4
LT
4895 return retval;
4896}
4897
7c731e0a 4898static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4899
82a1fcb9 4900void sched_show_task(struct task_struct *p)
1da177e4 4901{
1da177e4 4902 unsigned long free = 0;
4e79752c 4903 int ppid;
1f8a7633 4904 unsigned long state = p->state;
1da177e4 4905
1f8a7633
TH
4906 if (state)
4907 state = __ffs(state) + 1;
28d0686c 4908 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4909 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4910#if BITS_PER_LONG == 32
1da177e4 4911 if (state == TASK_RUNNING)
3df0fc5b 4912 printk(KERN_CONT " running ");
1da177e4 4913 else
3df0fc5b 4914 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4915#else
4916 if (state == TASK_RUNNING)
3df0fc5b 4917 printk(KERN_CONT " running task ");
1da177e4 4918 else
3df0fc5b 4919 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4920#endif
4921#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4922 free = stack_not_used(p);
1da177e4 4923#endif
a90e984c 4924 ppid = 0;
4e79752c 4925 rcu_read_lock();
a90e984c
ON
4926 if (pid_alive(p))
4927 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 4928 rcu_read_unlock();
3df0fc5b 4929 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 4930 task_pid_nr(p), ppid,
aa47b7e0 4931 (unsigned long)task_thread_info(p)->flags);
1da177e4 4932
3d1cb205 4933 print_worker_info(KERN_INFO, p);
5fb5e6de 4934 show_stack(p, NULL);
1da177e4
LT
4935}
4936
e59e2ae2 4937void show_state_filter(unsigned long state_filter)
1da177e4 4938{
36c8b586 4939 struct task_struct *g, *p;
1da177e4 4940
4bd77321 4941#if BITS_PER_LONG == 32
3df0fc5b
PZ
4942 printk(KERN_INFO
4943 " task PC stack pid father\n");
1da177e4 4944#else
3df0fc5b
PZ
4945 printk(KERN_INFO
4946 " task PC stack pid father\n");
1da177e4 4947#endif
510f5acc 4948 rcu_read_lock();
5d07f420 4949 for_each_process_thread(g, p) {
1da177e4
LT
4950 /*
4951 * reset the NMI-timeout, listing all files on a slow
25985edc 4952 * console might take a lot of time:
1da177e4
LT
4953 */
4954 touch_nmi_watchdog();
39bc89fd 4955 if (!state_filter || (p->state & state_filter))
82a1fcb9 4956 sched_show_task(p);
5d07f420 4957 }
1da177e4 4958
04c9167f
JF
4959 touch_all_softlockup_watchdogs();
4960
dd41f596
IM
4961#ifdef CONFIG_SCHED_DEBUG
4962 sysrq_sched_debug_show();
4963#endif
510f5acc 4964 rcu_read_unlock();
e59e2ae2
IM
4965 /*
4966 * Only show locks if all tasks are dumped:
4967 */
93335a21 4968 if (!state_filter)
e59e2ae2 4969 debug_show_all_locks();
1da177e4
LT
4970}
4971
0db0628d 4972void init_idle_bootup_task(struct task_struct *idle)
1df21055 4973{
dd41f596 4974 idle->sched_class = &idle_sched_class;
1df21055
IM
4975}
4976
f340c0d1
IM
4977/**
4978 * init_idle - set up an idle thread for a given CPU
4979 * @idle: task in question
4980 * @cpu: cpu the idle task belongs to
4981 *
4982 * NOTE: this function does not set the idle thread's NEED_RESCHED
4983 * flag, to make booting more robust.
4984 */
0db0628d 4985void init_idle(struct task_struct *idle, int cpu)
1da177e4 4986{
70b97a7f 4987 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4988 unsigned long flags;
4989
25834c73
PZ
4990 raw_spin_lock_irqsave(&idle->pi_lock, flags);
4991 raw_spin_lock(&rq->lock);
5cbd54ef 4992
5e1576ed 4993 __sched_fork(0, idle);
06b83b5f 4994 idle->state = TASK_RUNNING;
dd41f596
IM
4995 idle->se.exec_start = sched_clock();
4996
de9b8f5d
PZ
4997#ifdef CONFIG_SMP
4998 /*
4999 * Its possible that init_idle() gets called multiple times on a task,
5000 * in that case do_set_cpus_allowed() will not do the right thing.
5001 *
5002 * And since this is boot we can forgo the serialization.
5003 */
5004 set_cpus_allowed_common(idle, cpumask_of(cpu));
5005#endif
6506cf6c
PZ
5006 /*
5007 * We're having a chicken and egg problem, even though we are
5008 * holding rq->lock, the cpu isn't yet set to this cpu so the
5009 * lockdep check in task_group() will fail.
5010 *
5011 * Similar case to sched_fork(). / Alternatively we could
5012 * use task_rq_lock() here and obtain the other rq->lock.
5013 *
5014 * Silence PROVE_RCU
5015 */
5016 rcu_read_lock();
dd41f596 5017 __set_task_cpu(idle, cpu);
6506cf6c 5018 rcu_read_unlock();
1da177e4 5019
1da177e4 5020 rq->curr = rq->idle = idle;
da0c1e65 5021 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 5022#ifdef CONFIG_SMP
3ca7a440 5023 idle->on_cpu = 1;
4866cde0 5024#endif
25834c73
PZ
5025 raw_spin_unlock(&rq->lock);
5026 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
5027
5028 /* Set the preempt count _outside_ the spinlocks! */
01028747 5029 init_idle_preempt_count(idle, cpu);
55cd5340 5030
dd41f596
IM
5031 /*
5032 * The idle tasks have their own, simple scheduling class:
5033 */
5034 idle->sched_class = &idle_sched_class;
868baf07 5035 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 5036 vtime_init_idle(idle, cpu);
de9b8f5d 5037#ifdef CONFIG_SMP
f1c6f1a7
CE
5038 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5039#endif
19978ca6
IM
5040}
5041
f82f8042
JL
5042int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5043 const struct cpumask *trial)
5044{
5045 int ret = 1, trial_cpus;
5046 struct dl_bw *cur_dl_b;
5047 unsigned long flags;
5048
bb2bc55a
MG
5049 if (!cpumask_weight(cur))
5050 return ret;
5051
75e23e49 5052 rcu_read_lock_sched();
f82f8042
JL
5053 cur_dl_b = dl_bw_of(cpumask_any(cur));
5054 trial_cpus = cpumask_weight(trial);
5055
5056 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5057 if (cur_dl_b->bw != -1 &&
5058 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5059 ret = 0;
5060 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
75e23e49 5061 rcu_read_unlock_sched();
f82f8042
JL
5062
5063 return ret;
5064}
5065
7f51412a
JL
5066int task_can_attach(struct task_struct *p,
5067 const struct cpumask *cs_cpus_allowed)
5068{
5069 int ret = 0;
5070
5071 /*
5072 * Kthreads which disallow setaffinity shouldn't be moved
5073 * to a new cpuset; we don't want to change their cpu
5074 * affinity and isolating such threads by their set of
5075 * allowed nodes is unnecessary. Thus, cpusets are not
5076 * applicable for such threads. This prevents checking for
5077 * success of set_cpus_allowed_ptr() on all attached tasks
5078 * before cpus_allowed may be changed.
5079 */
5080 if (p->flags & PF_NO_SETAFFINITY) {
5081 ret = -EINVAL;
5082 goto out;
5083 }
5084
5085#ifdef CONFIG_SMP
5086 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5087 cs_cpus_allowed)) {
5088 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5089 cs_cpus_allowed);
75e23e49 5090 struct dl_bw *dl_b;
7f51412a
JL
5091 bool overflow;
5092 int cpus;
5093 unsigned long flags;
5094
75e23e49
JL
5095 rcu_read_lock_sched();
5096 dl_b = dl_bw_of(dest_cpu);
7f51412a
JL
5097 raw_spin_lock_irqsave(&dl_b->lock, flags);
5098 cpus = dl_bw_cpus(dest_cpu);
5099 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5100 if (overflow)
5101 ret = -EBUSY;
5102 else {
5103 /*
5104 * We reserve space for this task in the destination
5105 * root_domain, as we can't fail after this point.
5106 * We will free resources in the source root_domain
5107 * later on (see set_cpus_allowed_dl()).
5108 */
5109 __dl_add(dl_b, p->dl.dl_bw);
5110 }
5111 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
75e23e49 5112 rcu_read_unlock_sched();
7f51412a
JL
5113
5114 }
5115#endif
5116out:
5117 return ret;
5118}
5119
1da177e4 5120#ifdef CONFIG_SMP
1da177e4 5121
e6628d5b
MG
5122#ifdef CONFIG_NUMA_BALANCING
5123/* Migrate current task p to target_cpu */
5124int migrate_task_to(struct task_struct *p, int target_cpu)
5125{
5126 struct migration_arg arg = { p, target_cpu };
5127 int curr_cpu = task_cpu(p);
5128
5129 if (curr_cpu == target_cpu)
5130 return 0;
5131
5132 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5133 return -EINVAL;
5134
5135 /* TODO: This is not properly updating schedstats */
5136
286549dc 5137 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
5138 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5139}
0ec8aa00
PZ
5140
5141/*
5142 * Requeue a task on a given node and accurately track the number of NUMA
5143 * tasks on the runqueues
5144 */
5145void sched_setnuma(struct task_struct *p, int nid)
5146{
5147 struct rq *rq;
5148 unsigned long flags;
da0c1e65 5149 bool queued, running;
0ec8aa00
PZ
5150
5151 rq = task_rq_lock(p, &flags);
da0c1e65 5152 queued = task_on_rq_queued(p);
0ec8aa00
PZ
5153 running = task_current(rq, p);
5154
da0c1e65 5155 if (queued)
1de64443 5156 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 5157 if (running)
f3cd1c4e 5158 put_prev_task(rq, p);
0ec8aa00
PZ
5159
5160 p->numa_preferred_nid = nid;
0ec8aa00
PZ
5161
5162 if (running)
5163 p->sched_class->set_curr_task(rq);
da0c1e65 5164 if (queued)
1de64443 5165 enqueue_task(rq, p, ENQUEUE_RESTORE);
0ec8aa00
PZ
5166 task_rq_unlock(rq, p, &flags);
5167}
5cc389bc 5168#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 5169
1da177e4 5170#ifdef CONFIG_HOTPLUG_CPU
054b9108 5171/*
48c5ccae
PZ
5172 * Ensures that the idle task is using init_mm right before its cpu goes
5173 * offline.
054b9108 5174 */
48c5ccae 5175void idle_task_exit(void)
1da177e4 5176{
48c5ccae 5177 struct mm_struct *mm = current->active_mm;
e76bd8d9 5178
48c5ccae 5179 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 5180
a53efe5f 5181 if (mm != &init_mm) {
48c5ccae 5182 switch_mm(mm, &init_mm, current);
a53efe5f
MS
5183 finish_arch_post_lock_switch();
5184 }
48c5ccae 5185 mmdrop(mm);
1da177e4
LT
5186}
5187
5188/*
5d180232
PZ
5189 * Since this CPU is going 'away' for a while, fold any nr_active delta
5190 * we might have. Assumes we're called after migrate_tasks() so that the
5191 * nr_active count is stable.
5192 *
5193 * Also see the comment "Global load-average calculations".
1da177e4 5194 */
5d180232 5195static void calc_load_migrate(struct rq *rq)
1da177e4 5196{
5d180232
PZ
5197 long delta = calc_load_fold_active(rq);
5198 if (delta)
5199 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
5200}
5201
3f1d2a31
PZ
5202static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5203{
5204}
5205
5206static const struct sched_class fake_sched_class = {
5207 .put_prev_task = put_prev_task_fake,
5208};
5209
5210static struct task_struct fake_task = {
5211 /*
5212 * Avoid pull_{rt,dl}_task()
5213 */
5214 .prio = MAX_PRIO + 1,
5215 .sched_class = &fake_sched_class,
5216};
5217
48f24c4d 5218/*
48c5ccae
PZ
5219 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5220 * try_to_wake_up()->select_task_rq().
5221 *
5222 * Called with rq->lock held even though we'er in stop_machine() and
5223 * there's no concurrency possible, we hold the required locks anyway
5224 * because of lock validation efforts.
1da177e4 5225 */
5e16bbc2 5226static void migrate_tasks(struct rq *dead_rq)
1da177e4 5227{
5e16bbc2 5228 struct rq *rq = dead_rq;
48c5ccae
PZ
5229 struct task_struct *next, *stop = rq->stop;
5230 int dest_cpu;
1da177e4
LT
5231
5232 /*
48c5ccae
PZ
5233 * Fudge the rq selection such that the below task selection loop
5234 * doesn't get stuck on the currently eligible stop task.
5235 *
5236 * We're currently inside stop_machine() and the rq is either stuck
5237 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5238 * either way we should never end up calling schedule() until we're
5239 * done here.
1da177e4 5240 */
48c5ccae 5241 rq->stop = NULL;
48f24c4d 5242
77bd3970
FW
5243 /*
5244 * put_prev_task() and pick_next_task() sched
5245 * class method both need to have an up-to-date
5246 * value of rq->clock[_task]
5247 */
5248 update_rq_clock(rq);
5249
5e16bbc2 5250 for (;;) {
48c5ccae
PZ
5251 /*
5252 * There's this thread running, bail when that's the only
5253 * remaining thread.
5254 */
5255 if (rq->nr_running == 1)
dd41f596 5256 break;
48c5ccae 5257
cbce1a68 5258 /*
5473e0cc 5259 * pick_next_task assumes pinned rq->lock.
cbce1a68
PZ
5260 */
5261 lockdep_pin_lock(&rq->lock);
3f1d2a31 5262 next = pick_next_task(rq, &fake_task);
48c5ccae 5263 BUG_ON(!next);
79c53799 5264 next->sched_class->put_prev_task(rq, next);
e692ab53 5265
5473e0cc
WL
5266 /*
5267 * Rules for changing task_struct::cpus_allowed are holding
5268 * both pi_lock and rq->lock, such that holding either
5269 * stabilizes the mask.
5270 *
5271 * Drop rq->lock is not quite as disastrous as it usually is
5272 * because !cpu_active at this point, which means load-balance
5273 * will not interfere. Also, stop-machine.
5274 */
5275 lockdep_unpin_lock(&rq->lock);
5276 raw_spin_unlock(&rq->lock);
5277 raw_spin_lock(&next->pi_lock);
5278 raw_spin_lock(&rq->lock);
5279
5280 /*
5281 * Since we're inside stop-machine, _nothing_ should have
5282 * changed the task, WARN if weird stuff happened, because in
5283 * that case the above rq->lock drop is a fail too.
5284 */
5285 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5286 raw_spin_unlock(&next->pi_lock);
5287 continue;
5288 }
5289
48c5ccae 5290 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 5291 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
48c5ccae 5292
5e16bbc2
PZ
5293 rq = __migrate_task(rq, next, dest_cpu);
5294 if (rq != dead_rq) {
5295 raw_spin_unlock(&rq->lock);
5296 rq = dead_rq;
5297 raw_spin_lock(&rq->lock);
5298 }
5473e0cc 5299 raw_spin_unlock(&next->pi_lock);
1da177e4 5300 }
dce48a84 5301
48c5ccae 5302 rq->stop = stop;
dce48a84 5303}
1da177e4
LT
5304#endif /* CONFIG_HOTPLUG_CPU */
5305
e692ab53
NP
5306#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5307
5308static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5309 {
5310 .procname = "sched_domain",
c57baf1e 5311 .mode = 0555,
e0361851 5312 },
56992309 5313 {}
e692ab53
NP
5314};
5315
5316static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5317 {
5318 .procname = "kernel",
c57baf1e 5319 .mode = 0555,
e0361851
AD
5320 .child = sd_ctl_dir,
5321 },
56992309 5322 {}
e692ab53
NP
5323};
5324
5325static struct ctl_table *sd_alloc_ctl_entry(int n)
5326{
5327 struct ctl_table *entry =
5cf9f062 5328 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5329
e692ab53
NP
5330 return entry;
5331}
5332
6382bc90
MM
5333static void sd_free_ctl_entry(struct ctl_table **tablep)
5334{
cd790076 5335 struct ctl_table *entry;
6382bc90 5336
cd790076
MM
5337 /*
5338 * In the intermediate directories, both the child directory and
5339 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5340 * will always be set. In the lowest directory the names are
cd790076
MM
5341 * static strings and all have proc handlers.
5342 */
5343 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5344 if (entry->child)
5345 sd_free_ctl_entry(&entry->child);
cd790076
MM
5346 if (entry->proc_handler == NULL)
5347 kfree(entry->procname);
5348 }
6382bc90
MM
5349
5350 kfree(*tablep);
5351 *tablep = NULL;
5352}
5353
201c373e 5354static int min_load_idx = 0;
fd9b86d3 5355static int max_load_idx = CPU_LOAD_IDX_MAX-1;
201c373e 5356
e692ab53 5357static void
e0361851 5358set_table_entry(struct ctl_table *entry,
e692ab53 5359 const char *procname, void *data, int maxlen,
201c373e
NK
5360 umode_t mode, proc_handler *proc_handler,
5361 bool load_idx)
e692ab53 5362{
e692ab53
NP
5363 entry->procname = procname;
5364 entry->data = data;
5365 entry->maxlen = maxlen;
5366 entry->mode = mode;
5367 entry->proc_handler = proc_handler;
201c373e
NK
5368
5369 if (load_idx) {
5370 entry->extra1 = &min_load_idx;
5371 entry->extra2 = &max_load_idx;
5372 }
e692ab53
NP
5373}
5374
5375static struct ctl_table *
5376sd_alloc_ctl_domain_table(struct sched_domain *sd)
5377{
37e6bae8 5378 struct ctl_table *table = sd_alloc_ctl_entry(14);
e692ab53 5379
ad1cdc1d
MM
5380 if (table == NULL)
5381 return NULL;
5382
e0361851 5383 set_table_entry(&table[0], "min_interval", &sd->min_interval,
201c373e 5384 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5385 set_table_entry(&table[1], "max_interval", &sd->max_interval,
201c373e 5386 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5387 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
201c373e 5388 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5389 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
201c373e 5390 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5391 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
201c373e 5392 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5393 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
201c373e 5394 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5395 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
201c373e 5396 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5397 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
201c373e 5398 sizeof(int), 0644, proc_dointvec_minmax, false);
e0361851 5399 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
201c373e 5400 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5401 set_table_entry(&table[9], "cache_nice_tries",
e692ab53 5402 &sd->cache_nice_tries,
201c373e 5403 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5404 set_table_entry(&table[10], "flags", &sd->flags,
201c373e 5405 sizeof(int), 0644, proc_dointvec_minmax, false);
37e6bae8
AS
5406 set_table_entry(&table[11], "max_newidle_lb_cost",
5407 &sd->max_newidle_lb_cost,
5408 sizeof(long), 0644, proc_doulongvec_minmax, false);
5409 set_table_entry(&table[12], "name", sd->name,
201c373e 5410 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
37e6bae8 5411 /* &table[13] is terminator */
e692ab53
NP
5412
5413 return table;
5414}
5415
be7002e6 5416static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5417{
5418 struct ctl_table *entry, *table;
5419 struct sched_domain *sd;
5420 int domain_num = 0, i;
5421 char buf[32];
5422
5423 for_each_domain(cpu, sd)
5424 domain_num++;
5425 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5426 if (table == NULL)
5427 return NULL;
e692ab53
NP
5428
5429 i = 0;
5430 for_each_domain(cpu, sd) {
5431 snprintf(buf, 32, "domain%d", i);
e692ab53 5432 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5433 entry->mode = 0555;
e692ab53
NP
5434 entry->child = sd_alloc_ctl_domain_table(sd);
5435 entry++;
5436 i++;
5437 }
5438 return table;
5439}
5440
5441static struct ctl_table_header *sd_sysctl_header;
6382bc90 5442static void register_sched_domain_sysctl(void)
e692ab53 5443{
6ad4c188 5444 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5445 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5446 char buf[32];
5447
7378547f
MM
5448 WARN_ON(sd_ctl_dir[0].child);
5449 sd_ctl_dir[0].child = entry;
5450
ad1cdc1d
MM
5451 if (entry == NULL)
5452 return;
5453
6ad4c188 5454 for_each_possible_cpu(i) {
e692ab53 5455 snprintf(buf, 32, "cpu%d", i);
e692ab53 5456 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5457 entry->mode = 0555;
e692ab53 5458 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5459 entry++;
e692ab53 5460 }
7378547f
MM
5461
5462 WARN_ON(sd_sysctl_header);
e692ab53
NP
5463 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5464}
6382bc90 5465
7378547f 5466/* may be called multiple times per register */
6382bc90
MM
5467static void unregister_sched_domain_sysctl(void)
5468{
781b0203 5469 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5470 sd_sysctl_header = NULL;
7378547f
MM
5471 if (sd_ctl_dir[0].child)
5472 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5473}
e692ab53 5474#else
6382bc90
MM
5475static void register_sched_domain_sysctl(void)
5476{
5477}
5478static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5479{
5480}
5cc389bc 5481#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
e692ab53 5482
1f11eb6a
GH
5483static void set_rq_online(struct rq *rq)
5484{
5485 if (!rq->online) {
5486 const struct sched_class *class;
5487
c6c4927b 5488 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5489 rq->online = 1;
5490
5491 for_each_class(class) {
5492 if (class->rq_online)
5493 class->rq_online(rq);
5494 }
5495 }
5496}
5497
5498static void set_rq_offline(struct rq *rq)
5499{
5500 if (rq->online) {
5501 const struct sched_class *class;
5502
5503 for_each_class(class) {
5504 if (class->rq_offline)
5505 class->rq_offline(rq);
5506 }
5507
c6c4927b 5508 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5509 rq->online = 0;
5510 }
5511}
5512
1da177e4
LT
5513/*
5514 * migration_call - callback that gets triggered when a CPU is added.
5515 * Here we can start up the necessary migration thread for the new CPU.
5516 */
0db0628d 5517static int
48f24c4d 5518migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5519{
48f24c4d 5520 int cpu = (long)hcpu;
1da177e4 5521 unsigned long flags;
969c7921 5522 struct rq *rq = cpu_rq(cpu);
1da177e4 5523
48c5ccae 5524 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5525
1da177e4 5526 case CPU_UP_PREPARE:
a468d389 5527 rq->calc_load_update = calc_load_update;
1da177e4 5528 break;
48f24c4d 5529
1da177e4 5530 case CPU_ONLINE:
1f94ef59 5531 /* Update our root-domain */
05fa785c 5532 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5533 if (rq->rd) {
c6c4927b 5534 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5535
5536 set_rq_online(rq);
1f94ef59 5537 }
05fa785c 5538 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5539 break;
48f24c4d 5540
1da177e4 5541#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5542 case CPU_DYING:
317f3941 5543 sched_ttwu_pending();
57d885fe 5544 /* Update our root-domain */
05fa785c 5545 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5546 if (rq->rd) {
c6c4927b 5547 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5548 set_rq_offline(rq);
57d885fe 5549 }
5e16bbc2 5550 migrate_tasks(rq);
48c5ccae 5551 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5552 raw_spin_unlock_irqrestore(&rq->lock, flags);
5d180232 5553 break;
48c5ccae 5554
5d180232 5555 case CPU_DEAD:
f319da0c 5556 calc_load_migrate(rq);
57d885fe 5557 break;
1da177e4
LT
5558#endif
5559 }
49c022e6
PZ
5560
5561 update_max_interval();
5562
1da177e4
LT
5563 return NOTIFY_OK;
5564}
5565
f38b0820
PM
5566/*
5567 * Register at high priority so that task migration (migrate_all_tasks)
5568 * happens before everything else. This has to be lower priority than
cdd6c482 5569 * the notifier in the perf_event subsystem, though.
1da177e4 5570 */
0db0628d 5571static struct notifier_block migration_notifier = {
1da177e4 5572 .notifier_call = migration_call,
50a323b7 5573 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5574};
5575
6a82b60d 5576static void set_cpu_rq_start_time(void)
a803f026
CM
5577{
5578 int cpu = smp_processor_id();
5579 struct rq *rq = cpu_rq(cpu);
5580 rq->age_stamp = sched_clock_cpu(cpu);
5581}
5582
0db0628d 5583static int sched_cpu_active(struct notifier_block *nfb,
3a101d05
TH
5584 unsigned long action, void *hcpu)
5585{
07f06cb3
PZ
5586 int cpu = (long)hcpu;
5587
3a101d05 5588 switch (action & ~CPU_TASKS_FROZEN) {
a803f026
CM
5589 case CPU_STARTING:
5590 set_cpu_rq_start_time();
5591 return NOTIFY_OK;
07f06cb3 5592
dd9d3843
JS
5593 case CPU_ONLINE:
5594 /*
5595 * At this point a starting CPU has marked itself as online via
5596 * set_cpu_online(). But it might not yet have marked itself
5597 * as active, which is essential from here on.
dd9d3843 5598 */
07f06cb3
PZ
5599 set_cpu_active(cpu, true);
5600 stop_machine_unpark(cpu);
5601 return NOTIFY_OK;
5602
3a101d05 5603 case CPU_DOWN_FAILED:
07f06cb3 5604 set_cpu_active(cpu, true);
3a101d05 5605 return NOTIFY_OK;
07f06cb3 5606
3a101d05
TH
5607 default:
5608 return NOTIFY_DONE;
5609 }
5610}
5611
0db0628d 5612static int sched_cpu_inactive(struct notifier_block *nfb,
3a101d05
TH
5613 unsigned long action, void *hcpu)
5614{
5615 switch (action & ~CPU_TASKS_FROZEN) {
5616 case CPU_DOWN_PREPARE:
3c18d447 5617 set_cpu_active((long)hcpu, false);
3a101d05 5618 return NOTIFY_OK;
3c18d447
JL
5619 default:
5620 return NOTIFY_DONE;
3a101d05
TH
5621 }
5622}
5623
7babe8db 5624static int __init migration_init(void)
1da177e4
LT
5625{
5626 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5627 int err;
48f24c4d 5628
3a101d05 5629 /* Initialize migration for the boot CPU */
07dccf33
AM
5630 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5631 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5632 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5633 register_cpu_notifier(&migration_notifier);
7babe8db 5634
3a101d05
TH
5635 /* Register cpu active notifiers */
5636 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5637 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5638
a004cd42 5639 return 0;
1da177e4 5640}
7babe8db 5641early_initcall(migration_init);
476f3534 5642
4cb98839
PZ
5643static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5644
3e9830dc 5645#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5646
d039ac60 5647static __read_mostly int sched_debug_enabled;
f6630114 5648
d039ac60 5649static int __init sched_debug_setup(char *str)
f6630114 5650{
d039ac60 5651 sched_debug_enabled = 1;
f6630114
MT
5652
5653 return 0;
5654}
d039ac60
PZ
5655early_param("sched_debug", sched_debug_setup);
5656
5657static inline bool sched_debug(void)
5658{
5659 return sched_debug_enabled;
5660}
f6630114 5661
7c16ec58 5662static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5663 struct cpumask *groupmask)
1da177e4 5664{
4dcf6aff 5665 struct sched_group *group = sd->groups;
1da177e4 5666
96f874e2 5667 cpumask_clear(groupmask);
4dcf6aff
IM
5668
5669 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5670
5671 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5672 printk("does not load-balance\n");
4dcf6aff 5673 if (sd->parent)
3df0fc5b
PZ
5674 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5675 " has parent");
4dcf6aff 5676 return -1;
41c7ce9a
NP
5677 }
5678
333470ee
TH
5679 printk(KERN_CONT "span %*pbl level %s\n",
5680 cpumask_pr_args(sched_domain_span(sd)), sd->name);
4dcf6aff 5681
758b2cdc 5682 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5683 printk(KERN_ERR "ERROR: domain->span does not contain "
5684 "CPU%d\n", cpu);
4dcf6aff 5685 }
758b2cdc 5686 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5687 printk(KERN_ERR "ERROR: domain->groups does not contain"
5688 " CPU%d\n", cpu);
4dcf6aff 5689 }
1da177e4 5690
4dcf6aff 5691 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5692 do {
4dcf6aff 5693 if (!group) {
3df0fc5b
PZ
5694 printk("\n");
5695 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5696 break;
5697 }
5698
758b2cdc 5699 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5700 printk(KERN_CONT "\n");
5701 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5702 break;
5703 }
1da177e4 5704
cb83b629
PZ
5705 if (!(sd->flags & SD_OVERLAP) &&
5706 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5707 printk(KERN_CONT "\n");
5708 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5709 break;
5710 }
1da177e4 5711
758b2cdc 5712 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5713
333470ee
TH
5714 printk(KERN_CONT " %*pbl",
5715 cpumask_pr_args(sched_group_cpus(group)));
ca8ce3d0 5716 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
63b2ca30
NP
5717 printk(KERN_CONT " (cpu_capacity = %d)",
5718 group->sgc->capacity);
381512cf 5719 }
1da177e4 5720
4dcf6aff
IM
5721 group = group->next;
5722 } while (group != sd->groups);
3df0fc5b 5723 printk(KERN_CONT "\n");
1da177e4 5724
758b2cdc 5725 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5726 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5727
758b2cdc
RR
5728 if (sd->parent &&
5729 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5730 printk(KERN_ERR "ERROR: parent span is not a superset "
5731 "of domain->span\n");
4dcf6aff
IM
5732 return 0;
5733}
1da177e4 5734
4dcf6aff
IM
5735static void sched_domain_debug(struct sched_domain *sd, int cpu)
5736{
5737 int level = 0;
1da177e4 5738
d039ac60 5739 if (!sched_debug_enabled)
f6630114
MT
5740 return;
5741
4dcf6aff
IM
5742 if (!sd) {
5743 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5744 return;
5745 }
1da177e4 5746
4dcf6aff
IM
5747 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5748
5749 for (;;) {
4cb98839 5750 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5751 break;
1da177e4
LT
5752 level++;
5753 sd = sd->parent;
33859f7f 5754 if (!sd)
4dcf6aff
IM
5755 break;
5756 }
1da177e4 5757}
6d6bc0ad 5758#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5759# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5760static inline bool sched_debug(void)
5761{
5762 return false;
5763}
6d6bc0ad 5764#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5765
1a20ff27 5766static int sd_degenerate(struct sched_domain *sd)
245af2c7 5767{
758b2cdc 5768 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5769 return 1;
5770
5771 /* Following flags need at least 2 groups */
5772 if (sd->flags & (SD_LOAD_BALANCE |
5773 SD_BALANCE_NEWIDLE |
5774 SD_BALANCE_FORK |
89c4710e 5775 SD_BALANCE_EXEC |
5d4dfddd 5776 SD_SHARE_CPUCAPACITY |
d77b3ed5
VG
5777 SD_SHARE_PKG_RESOURCES |
5778 SD_SHARE_POWERDOMAIN)) {
245af2c7
SS
5779 if (sd->groups != sd->groups->next)
5780 return 0;
5781 }
5782
5783 /* Following flags don't use groups */
c88d5910 5784 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5785 return 0;
5786
5787 return 1;
5788}
5789
48f24c4d
IM
5790static int
5791sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5792{
5793 unsigned long cflags = sd->flags, pflags = parent->flags;
5794
5795 if (sd_degenerate(parent))
5796 return 1;
5797
758b2cdc 5798 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5799 return 0;
5800
245af2c7
SS
5801 /* Flags needing groups don't count if only 1 group in parent */
5802 if (parent->groups == parent->groups->next) {
5803 pflags &= ~(SD_LOAD_BALANCE |
5804 SD_BALANCE_NEWIDLE |
5805 SD_BALANCE_FORK |
89c4710e 5806 SD_BALANCE_EXEC |
5d4dfddd 5807 SD_SHARE_CPUCAPACITY |
10866e62 5808 SD_SHARE_PKG_RESOURCES |
d77b3ed5
VG
5809 SD_PREFER_SIBLING |
5810 SD_SHARE_POWERDOMAIN);
5436499e
KC
5811 if (nr_node_ids == 1)
5812 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5813 }
5814 if (~cflags & pflags)
5815 return 0;
5816
5817 return 1;
5818}
5819
dce840a0 5820static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5821{
dce840a0 5822 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5823
68e74568 5824 cpupri_cleanup(&rd->cpupri);
6bfd6d72 5825 cpudl_cleanup(&rd->cpudl);
1baca4ce 5826 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5827 free_cpumask_var(rd->rto_mask);
5828 free_cpumask_var(rd->online);
5829 free_cpumask_var(rd->span);
5830 kfree(rd);
5831}
5832
57d885fe
GH
5833static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5834{
a0490fa3 5835 struct root_domain *old_rd = NULL;
57d885fe 5836 unsigned long flags;
57d885fe 5837
05fa785c 5838 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5839
5840 if (rq->rd) {
a0490fa3 5841 old_rd = rq->rd;
57d885fe 5842
c6c4927b 5843 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5844 set_rq_offline(rq);
57d885fe 5845
c6c4927b 5846 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5847
a0490fa3 5848 /*
0515973f 5849 * If we dont want to free the old_rd yet then
a0490fa3
IM
5850 * set old_rd to NULL to skip the freeing later
5851 * in this function:
5852 */
5853 if (!atomic_dec_and_test(&old_rd->refcount))
5854 old_rd = NULL;
57d885fe
GH
5855 }
5856
5857 atomic_inc(&rd->refcount);
5858 rq->rd = rd;
5859
c6c4927b 5860 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5861 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5862 set_rq_online(rq);
57d885fe 5863
05fa785c 5864 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5865
5866 if (old_rd)
dce840a0 5867 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5868}
5869
68c38fc3 5870static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5871{
5872 memset(rd, 0, sizeof(*rd));
5873
8295c699 5874 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5875 goto out;
8295c699 5876 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5877 goto free_span;
8295c699 5878 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
c6c4927b 5879 goto free_online;
8295c699 5880 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
1baca4ce 5881 goto free_dlo_mask;
6e0534f2 5882
332ac17e 5883 init_dl_bw(&rd->dl_bw);
6bfd6d72
JL
5884 if (cpudl_init(&rd->cpudl) != 0)
5885 goto free_dlo_mask;
332ac17e 5886
68c38fc3 5887 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5888 goto free_rto_mask;
c6c4927b 5889 return 0;
6e0534f2 5890
68e74568
RR
5891free_rto_mask:
5892 free_cpumask_var(rd->rto_mask);
1baca4ce
JL
5893free_dlo_mask:
5894 free_cpumask_var(rd->dlo_mask);
c6c4927b
RR
5895free_online:
5896 free_cpumask_var(rd->online);
5897free_span:
5898 free_cpumask_var(rd->span);
0c910d28 5899out:
c6c4927b 5900 return -ENOMEM;
57d885fe
GH
5901}
5902
029632fb
PZ
5903/*
5904 * By default the system creates a single root-domain with all cpus as
5905 * members (mimicking the global state we have today).
5906 */
5907struct root_domain def_root_domain;
5908
57d885fe
GH
5909static void init_defrootdomain(void)
5910{
68c38fc3 5911 init_rootdomain(&def_root_domain);
c6c4927b 5912
57d885fe
GH
5913 atomic_set(&def_root_domain.refcount, 1);
5914}
5915
dc938520 5916static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5917{
5918 struct root_domain *rd;
5919
5920 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5921 if (!rd)
5922 return NULL;
5923
68c38fc3 5924 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5925 kfree(rd);
5926 return NULL;
5927 }
57d885fe
GH
5928
5929 return rd;
5930}
5931
63b2ca30 5932static void free_sched_groups(struct sched_group *sg, int free_sgc)
e3589f6c
PZ
5933{
5934 struct sched_group *tmp, *first;
5935
5936 if (!sg)
5937 return;
5938
5939 first = sg;
5940 do {
5941 tmp = sg->next;
5942
63b2ca30
NP
5943 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5944 kfree(sg->sgc);
e3589f6c
PZ
5945
5946 kfree(sg);
5947 sg = tmp;
5948 } while (sg != first);
5949}
5950
dce840a0
PZ
5951static void free_sched_domain(struct rcu_head *rcu)
5952{
5953 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5954
5955 /*
5956 * If its an overlapping domain it has private groups, iterate and
5957 * nuke them all.
5958 */
5959 if (sd->flags & SD_OVERLAP) {
5960 free_sched_groups(sd->groups, 1);
5961 } else if (atomic_dec_and_test(&sd->groups->ref)) {
63b2ca30 5962 kfree(sd->groups->sgc);
dce840a0 5963 kfree(sd->groups);
9c3f75cb 5964 }
dce840a0
PZ
5965 kfree(sd);
5966}
5967
5968static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5969{
5970 call_rcu(&sd->rcu, free_sched_domain);
5971}
5972
5973static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5974{
5975 for (; sd; sd = sd->parent)
5976 destroy_sched_domain(sd, cpu);
5977}
5978
518cd623
PZ
5979/*
5980 * Keep a special pointer to the highest sched_domain that has
5981 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
5982 * allows us to avoid some pointer chasing select_idle_sibling().
5983 *
5984 * Also keep a unique ID per domain (we use the first cpu number in
5985 * the cpumask of the domain), this allows us to quickly tell if
39be3501 5986 * two cpus are in the same cache domain, see cpus_share_cache().
518cd623
PZ
5987 */
5988DEFINE_PER_CPU(struct sched_domain *, sd_llc);
7d9ffa89 5989DEFINE_PER_CPU(int, sd_llc_size);
518cd623 5990DEFINE_PER_CPU(int, sd_llc_id);
fb13c7ee 5991DEFINE_PER_CPU(struct sched_domain *, sd_numa);
37dc6b50
PM
5992DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5993DEFINE_PER_CPU(struct sched_domain *, sd_asym);
518cd623
PZ
5994
5995static void update_top_cache_domain(int cpu)
5996{
5997 struct sched_domain *sd;
5d4cf996 5998 struct sched_domain *busy_sd = NULL;
518cd623 5999 int id = cpu;
7d9ffa89 6000 int size = 1;
518cd623
PZ
6001
6002 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
7d9ffa89 6003 if (sd) {
518cd623 6004 id = cpumask_first(sched_domain_span(sd));
7d9ffa89 6005 size = cpumask_weight(sched_domain_span(sd));
5d4cf996 6006 busy_sd = sd->parent; /* sd_busy */
7d9ffa89 6007 }
5d4cf996 6008 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
518cd623
PZ
6009
6010 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
7d9ffa89 6011 per_cpu(sd_llc_size, cpu) = size;
518cd623 6012 per_cpu(sd_llc_id, cpu) = id;
fb13c7ee
MG
6013
6014 sd = lowest_flag_domain(cpu, SD_NUMA);
6015 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
37dc6b50
PM
6016
6017 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
6018 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
518cd623
PZ
6019}
6020
1da177e4 6021/*
0eab9146 6022 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
6023 * hold the hotplug lock.
6024 */
0eab9146
IM
6025static void
6026cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 6027{
70b97a7f 6028 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
6029 struct sched_domain *tmp;
6030
6031 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 6032 for (tmp = sd; tmp; ) {
245af2c7
SS
6033 struct sched_domain *parent = tmp->parent;
6034 if (!parent)
6035 break;
f29c9b1c 6036
1a848870 6037 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 6038 tmp->parent = parent->parent;
1a848870
SS
6039 if (parent->parent)
6040 parent->parent->child = tmp;
10866e62
PZ
6041 /*
6042 * Transfer SD_PREFER_SIBLING down in case of a
6043 * degenerate parent; the spans match for this
6044 * so the property transfers.
6045 */
6046 if (parent->flags & SD_PREFER_SIBLING)
6047 tmp->flags |= SD_PREFER_SIBLING;
dce840a0 6048 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
6049 } else
6050 tmp = tmp->parent;
245af2c7
SS
6051 }
6052
1a848870 6053 if (sd && sd_degenerate(sd)) {
dce840a0 6054 tmp = sd;
245af2c7 6055 sd = sd->parent;
dce840a0 6056 destroy_sched_domain(tmp, cpu);
1a848870
SS
6057 if (sd)
6058 sd->child = NULL;
6059 }
1da177e4 6060
4cb98839 6061 sched_domain_debug(sd, cpu);
1da177e4 6062
57d885fe 6063 rq_attach_root(rq, rd);
dce840a0 6064 tmp = rq->sd;
674311d5 6065 rcu_assign_pointer(rq->sd, sd);
dce840a0 6066 destroy_sched_domains(tmp, cpu);
518cd623
PZ
6067
6068 update_top_cache_domain(cpu);
1da177e4
LT
6069}
6070
1da177e4
LT
6071/* Setup the mask of cpus configured for isolated domains */
6072static int __init isolated_cpu_setup(char *str)
6073{
bdddd296 6074 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6075 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6076 return 1;
6077}
6078
8927f494 6079__setup("isolcpus=", isolated_cpu_setup);
1da177e4 6080
49a02c51 6081struct s_data {
21d42ccf 6082 struct sched_domain ** __percpu sd;
49a02c51
AH
6083 struct root_domain *rd;
6084};
6085
2109b99e 6086enum s_alloc {
2109b99e 6087 sa_rootdomain,
21d42ccf 6088 sa_sd,
dce840a0 6089 sa_sd_storage,
2109b99e
AH
6090 sa_none,
6091};
6092
c1174876
PZ
6093/*
6094 * Build an iteration mask that can exclude certain CPUs from the upwards
6095 * domain traversal.
6096 *
6097 * Asymmetric node setups can result in situations where the domain tree is of
6098 * unequal depth, make sure to skip domains that already cover the entire
6099 * range.
6100 *
6101 * In that case build_sched_domains() will have terminated the iteration early
6102 * and our sibling sd spans will be empty. Domains should always include the
6103 * cpu they're built on, so check that.
6104 *
6105 */
6106static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6107{
6108 const struct cpumask *span = sched_domain_span(sd);
6109 struct sd_data *sdd = sd->private;
6110 struct sched_domain *sibling;
6111 int i;
6112
6113 for_each_cpu(i, span) {
6114 sibling = *per_cpu_ptr(sdd->sd, i);
6115 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6116 continue;
6117
6118 cpumask_set_cpu(i, sched_group_mask(sg));
6119 }
6120}
6121
6122/*
6123 * Return the canonical balance cpu for this group, this is the first cpu
6124 * of this group that's also in the iteration mask.
6125 */
6126int group_balance_cpu(struct sched_group *sg)
6127{
6128 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6129}
6130
e3589f6c
PZ
6131static int
6132build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6133{
6134 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6135 const struct cpumask *span = sched_domain_span(sd);
6136 struct cpumask *covered = sched_domains_tmpmask;
6137 struct sd_data *sdd = sd->private;
aaecac4a 6138 struct sched_domain *sibling;
e3589f6c
PZ
6139 int i;
6140
6141 cpumask_clear(covered);
6142
6143 for_each_cpu(i, span) {
6144 struct cpumask *sg_span;
6145
6146 if (cpumask_test_cpu(i, covered))
6147 continue;
6148
aaecac4a 6149 sibling = *per_cpu_ptr(sdd->sd, i);
c1174876
PZ
6150
6151 /* See the comment near build_group_mask(). */
aaecac4a 6152 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
c1174876
PZ
6153 continue;
6154
e3589f6c 6155 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 6156 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
6157
6158 if (!sg)
6159 goto fail;
6160
6161 sg_span = sched_group_cpus(sg);
aaecac4a
ZZ
6162 if (sibling->child)
6163 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6164 else
e3589f6c
PZ
6165 cpumask_set_cpu(i, sg_span);
6166
6167 cpumask_or(covered, covered, sg_span);
6168
63b2ca30
NP
6169 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6170 if (atomic_inc_return(&sg->sgc->ref) == 1)
c1174876
PZ
6171 build_group_mask(sd, sg);
6172
c3decf0d 6173 /*
63b2ca30 6174 * Initialize sgc->capacity such that even if we mess up the
c3decf0d
PZ
6175 * domains and no possible iteration will get us here, we won't
6176 * die on a /0 trap.
6177 */
ca8ce3d0 6178 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
e3589f6c 6179
c1174876
PZ
6180 /*
6181 * Make sure the first group of this domain contains the
6182 * canonical balance cpu. Otherwise the sched_domain iteration
6183 * breaks. See update_sg_lb_stats().
6184 */
74a5ce20 6185 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 6186 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
6187 groups = sg;
6188
6189 if (!first)
6190 first = sg;
6191 if (last)
6192 last->next = sg;
6193 last = sg;
6194 last->next = first;
6195 }
6196 sd->groups = groups;
6197
6198 return 0;
6199
6200fail:
6201 free_sched_groups(first, 0);
6202
6203 return -ENOMEM;
6204}
6205
dce840a0 6206static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6207{
dce840a0
PZ
6208 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6209 struct sched_domain *child = sd->child;
1da177e4 6210
dce840a0
PZ
6211 if (child)
6212 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6213
9c3f75cb 6214 if (sg) {
dce840a0 6215 *sg = *per_cpu_ptr(sdd->sg, cpu);
63b2ca30
NP
6216 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6217 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
9c3f75cb 6218 }
dce840a0
PZ
6219
6220 return cpu;
1e9f28fa 6221}
1e9f28fa 6222
01a08546 6223/*
dce840a0
PZ
6224 * build_sched_groups will build a circular linked list of the groups
6225 * covered by the given span, and will set each group's ->cpumask correctly,
ced549fa 6226 * and ->cpu_capacity to 0.
e3589f6c
PZ
6227 *
6228 * Assumes the sched_domain tree is fully constructed
01a08546 6229 */
e3589f6c
PZ
6230static int
6231build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 6232{
dce840a0
PZ
6233 struct sched_group *first = NULL, *last = NULL;
6234 struct sd_data *sdd = sd->private;
6235 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6236 struct cpumask *covered;
dce840a0 6237 int i;
9c1cfda2 6238
e3589f6c
PZ
6239 get_group(cpu, sdd, &sd->groups);
6240 atomic_inc(&sd->groups->ref);
6241
0936629f 6242 if (cpu != cpumask_first(span))
e3589f6c
PZ
6243 return 0;
6244
f96225fd
PZ
6245 lockdep_assert_held(&sched_domains_mutex);
6246 covered = sched_domains_tmpmask;
6247
dce840a0 6248 cpumask_clear(covered);
6711cab4 6249
dce840a0
PZ
6250 for_each_cpu(i, span) {
6251 struct sched_group *sg;
cd08e923 6252 int group, j;
6711cab4 6253
dce840a0
PZ
6254 if (cpumask_test_cpu(i, covered))
6255 continue;
6711cab4 6256
cd08e923 6257 group = get_group(i, sdd, &sg);
c1174876 6258 cpumask_setall(sched_group_mask(sg));
0601a88d 6259
dce840a0
PZ
6260 for_each_cpu(j, span) {
6261 if (get_group(j, sdd, NULL) != group)
6262 continue;
0601a88d 6263
dce840a0
PZ
6264 cpumask_set_cpu(j, covered);
6265 cpumask_set_cpu(j, sched_group_cpus(sg));
6266 }
0601a88d 6267
dce840a0
PZ
6268 if (!first)
6269 first = sg;
6270 if (last)
6271 last->next = sg;
6272 last = sg;
6273 }
6274 last->next = first;
e3589f6c
PZ
6275
6276 return 0;
0601a88d 6277}
51888ca2 6278
89c4710e 6279/*
63b2ca30 6280 * Initialize sched groups cpu_capacity.
89c4710e 6281 *
63b2ca30 6282 * cpu_capacity indicates the capacity of sched group, which is used while
89c4710e 6283 * distributing the load between different sched groups in a sched domain.
63b2ca30
NP
6284 * Typically cpu_capacity for all the groups in a sched domain will be same
6285 * unless there are asymmetries in the topology. If there are asymmetries,
6286 * group having more cpu_capacity will pickup more load compared to the
6287 * group having less cpu_capacity.
89c4710e 6288 */
63b2ca30 6289static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
89c4710e 6290{
e3589f6c 6291 struct sched_group *sg = sd->groups;
89c4710e 6292
94c95ba6 6293 WARN_ON(!sg);
e3589f6c
PZ
6294
6295 do {
6296 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6297 sg = sg->next;
6298 } while (sg != sd->groups);
89c4710e 6299
c1174876 6300 if (cpu != group_balance_cpu(sg))
e3589f6c 6301 return;
aae6d3dd 6302
63b2ca30
NP
6303 update_group_capacity(sd, cpu);
6304 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6305}
6306
7c16ec58
MT
6307/*
6308 * Initializers for schedule domains
6309 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6310 */
6311
1d3504fc 6312static int default_relax_domain_level = -1;
60495e77 6313int sched_domain_level_max;
1d3504fc
HS
6314
6315static int __init setup_relax_domain_level(char *str)
6316{
a841f8ce
DS
6317 if (kstrtoint(str, 0, &default_relax_domain_level))
6318 pr_warn("Unable to set relax_domain_level\n");
30e0e178 6319
1d3504fc
HS
6320 return 1;
6321}
6322__setup("relax_domain_level=", setup_relax_domain_level);
6323
6324static void set_domain_attribute(struct sched_domain *sd,
6325 struct sched_domain_attr *attr)
6326{
6327 int request;
6328
6329 if (!attr || attr->relax_domain_level < 0) {
6330 if (default_relax_domain_level < 0)
6331 return;
6332 else
6333 request = default_relax_domain_level;
6334 } else
6335 request = attr->relax_domain_level;
6336 if (request < sd->level) {
6337 /* turn off idle balance on this domain */
c88d5910 6338 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6339 } else {
6340 /* turn on idle balance on this domain */
c88d5910 6341 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6342 }
6343}
6344
54ab4ff4
PZ
6345static void __sdt_free(const struct cpumask *cpu_map);
6346static int __sdt_alloc(const struct cpumask *cpu_map);
6347
2109b99e
AH
6348static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6349 const struct cpumask *cpu_map)
6350{
6351 switch (what) {
2109b99e 6352 case sa_rootdomain:
822ff793
PZ
6353 if (!atomic_read(&d->rd->refcount))
6354 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6355 case sa_sd:
6356 free_percpu(d->sd); /* fall through */
dce840a0 6357 case sa_sd_storage:
54ab4ff4 6358 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6359 case sa_none:
6360 break;
6361 }
6362}
3404c8d9 6363
2109b99e
AH
6364static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6365 const struct cpumask *cpu_map)
6366{
dce840a0
PZ
6367 memset(d, 0, sizeof(*d));
6368
54ab4ff4
PZ
6369 if (__sdt_alloc(cpu_map))
6370 return sa_sd_storage;
dce840a0
PZ
6371 d->sd = alloc_percpu(struct sched_domain *);
6372 if (!d->sd)
6373 return sa_sd_storage;
2109b99e 6374 d->rd = alloc_rootdomain();
dce840a0 6375 if (!d->rd)
21d42ccf 6376 return sa_sd;
2109b99e
AH
6377 return sa_rootdomain;
6378}
57d885fe 6379
dce840a0
PZ
6380/*
6381 * NULL the sd_data elements we've used to build the sched_domain and
6382 * sched_group structure so that the subsequent __free_domain_allocs()
6383 * will not free the data we're using.
6384 */
6385static void claim_allocations(int cpu, struct sched_domain *sd)
6386{
6387 struct sd_data *sdd = sd->private;
dce840a0
PZ
6388
6389 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6390 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6391
e3589f6c 6392 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6393 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c 6394
63b2ca30
NP
6395 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6396 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
dce840a0
PZ
6397}
6398
cb83b629 6399#ifdef CONFIG_NUMA
cb83b629 6400static int sched_domains_numa_levels;
e3fe70b1 6401enum numa_topology_type sched_numa_topology_type;
cb83b629 6402static int *sched_domains_numa_distance;
9942f79b 6403int sched_max_numa_distance;
cb83b629
PZ
6404static struct cpumask ***sched_domains_numa_masks;
6405static int sched_domains_curr_level;
143e1e28 6406#endif
cb83b629 6407
143e1e28
VG
6408/*
6409 * SD_flags allowed in topology descriptions.
6410 *
5d4dfddd 6411 * SD_SHARE_CPUCAPACITY - describes SMT topologies
143e1e28
VG
6412 * SD_SHARE_PKG_RESOURCES - describes shared caches
6413 * SD_NUMA - describes NUMA topologies
d77b3ed5 6414 * SD_SHARE_POWERDOMAIN - describes shared power domain
143e1e28
VG
6415 *
6416 * Odd one out:
6417 * SD_ASYM_PACKING - describes SMT quirks
6418 */
6419#define TOPOLOGY_SD_FLAGS \
5d4dfddd 6420 (SD_SHARE_CPUCAPACITY | \
143e1e28
VG
6421 SD_SHARE_PKG_RESOURCES | \
6422 SD_NUMA | \
d77b3ed5
VG
6423 SD_ASYM_PACKING | \
6424 SD_SHARE_POWERDOMAIN)
cb83b629
PZ
6425
6426static struct sched_domain *
143e1e28 6427sd_init(struct sched_domain_topology_level *tl, int cpu)
cb83b629
PZ
6428{
6429 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
143e1e28
VG
6430 int sd_weight, sd_flags = 0;
6431
6432#ifdef CONFIG_NUMA
6433 /*
6434 * Ugly hack to pass state to sd_numa_mask()...
6435 */
6436 sched_domains_curr_level = tl->numa_level;
6437#endif
6438
6439 sd_weight = cpumask_weight(tl->mask(cpu));
6440
6441 if (tl->sd_flags)
6442 sd_flags = (*tl->sd_flags)();
6443 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6444 "wrong sd_flags in topology description\n"))
6445 sd_flags &= ~TOPOLOGY_SD_FLAGS;
cb83b629
PZ
6446
6447 *sd = (struct sched_domain){
6448 .min_interval = sd_weight,
6449 .max_interval = 2*sd_weight,
6450 .busy_factor = 32,
870a0bb5 6451 .imbalance_pct = 125,
143e1e28
VG
6452
6453 .cache_nice_tries = 0,
6454 .busy_idx = 0,
6455 .idle_idx = 0,
cb83b629
PZ
6456 .newidle_idx = 0,
6457 .wake_idx = 0,
6458 .forkexec_idx = 0,
6459
6460 .flags = 1*SD_LOAD_BALANCE
6461 | 1*SD_BALANCE_NEWIDLE
143e1e28
VG
6462 | 1*SD_BALANCE_EXEC
6463 | 1*SD_BALANCE_FORK
cb83b629 6464 | 0*SD_BALANCE_WAKE
143e1e28 6465 | 1*SD_WAKE_AFFINE
5d4dfddd 6466 | 0*SD_SHARE_CPUCAPACITY
cb83b629 6467 | 0*SD_SHARE_PKG_RESOURCES
143e1e28 6468 | 0*SD_SERIALIZE
cb83b629 6469 | 0*SD_PREFER_SIBLING
143e1e28
VG
6470 | 0*SD_NUMA
6471 | sd_flags
cb83b629 6472 ,
143e1e28 6473
cb83b629
PZ
6474 .last_balance = jiffies,
6475 .balance_interval = sd_weight,
143e1e28 6476 .smt_gain = 0,
2b4cfe64
JL
6477 .max_newidle_lb_cost = 0,
6478 .next_decay_max_lb_cost = jiffies,
143e1e28
VG
6479#ifdef CONFIG_SCHED_DEBUG
6480 .name = tl->name,
6481#endif
cb83b629 6482 };
cb83b629
PZ
6483
6484 /*
143e1e28 6485 * Convert topological properties into behaviour.
cb83b629 6486 */
143e1e28 6487
5d4dfddd 6488 if (sd->flags & SD_SHARE_CPUCAPACITY) {
caff37ef 6489 sd->flags |= SD_PREFER_SIBLING;
143e1e28
VG
6490 sd->imbalance_pct = 110;
6491 sd->smt_gain = 1178; /* ~15% */
143e1e28
VG
6492
6493 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6494 sd->imbalance_pct = 117;
6495 sd->cache_nice_tries = 1;
6496 sd->busy_idx = 2;
6497
6498#ifdef CONFIG_NUMA
6499 } else if (sd->flags & SD_NUMA) {
6500 sd->cache_nice_tries = 2;
6501 sd->busy_idx = 3;
6502 sd->idle_idx = 2;
6503
6504 sd->flags |= SD_SERIALIZE;
6505 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6506 sd->flags &= ~(SD_BALANCE_EXEC |
6507 SD_BALANCE_FORK |
6508 SD_WAKE_AFFINE);
6509 }
6510
6511#endif
6512 } else {
6513 sd->flags |= SD_PREFER_SIBLING;
6514 sd->cache_nice_tries = 1;
6515 sd->busy_idx = 2;
6516 sd->idle_idx = 1;
6517 }
6518
6519 sd->private = &tl->data;
cb83b629
PZ
6520
6521 return sd;
6522}
6523
143e1e28
VG
6524/*
6525 * Topology list, bottom-up.
6526 */
6527static struct sched_domain_topology_level default_topology[] = {
6528#ifdef CONFIG_SCHED_SMT
6529 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6530#endif
6531#ifdef CONFIG_SCHED_MC
6532 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
143e1e28
VG
6533#endif
6534 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6535 { NULL, },
6536};
6537
c6e1e7b5
JG
6538static struct sched_domain_topology_level *sched_domain_topology =
6539 default_topology;
143e1e28
VG
6540
6541#define for_each_sd_topology(tl) \
6542 for (tl = sched_domain_topology; tl->mask; tl++)
6543
6544void set_sched_topology(struct sched_domain_topology_level *tl)
6545{
6546 sched_domain_topology = tl;
6547}
6548
6549#ifdef CONFIG_NUMA
6550
cb83b629
PZ
6551static const struct cpumask *sd_numa_mask(int cpu)
6552{
6553 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6554}
6555
d039ac60
PZ
6556static void sched_numa_warn(const char *str)
6557{
6558 static int done = false;
6559 int i,j;
6560
6561 if (done)
6562 return;
6563
6564 done = true;
6565
6566 printk(KERN_WARNING "ERROR: %s\n\n", str);
6567
6568 for (i = 0; i < nr_node_ids; i++) {
6569 printk(KERN_WARNING " ");
6570 for (j = 0; j < nr_node_ids; j++)
6571 printk(KERN_CONT "%02d ", node_distance(i,j));
6572 printk(KERN_CONT "\n");
6573 }
6574 printk(KERN_WARNING "\n");
6575}
6576
9942f79b 6577bool find_numa_distance(int distance)
d039ac60
PZ
6578{
6579 int i;
6580
6581 if (distance == node_distance(0, 0))
6582 return true;
6583
6584 for (i = 0; i < sched_domains_numa_levels; i++) {
6585 if (sched_domains_numa_distance[i] == distance)
6586 return true;
6587 }
6588
6589 return false;
6590}
6591
e3fe70b1
RR
6592/*
6593 * A system can have three types of NUMA topology:
6594 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6595 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6596 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6597 *
6598 * The difference between a glueless mesh topology and a backplane
6599 * topology lies in whether communication between not directly
6600 * connected nodes goes through intermediary nodes (where programs
6601 * could run), or through backplane controllers. This affects
6602 * placement of programs.
6603 *
6604 * The type of topology can be discerned with the following tests:
6605 * - If the maximum distance between any nodes is 1 hop, the system
6606 * is directly connected.
6607 * - If for two nodes A and B, located N > 1 hops away from each other,
6608 * there is an intermediary node C, which is < N hops away from both
6609 * nodes A and B, the system is a glueless mesh.
6610 */
6611static void init_numa_topology_type(void)
6612{
6613 int a, b, c, n;
6614
6615 n = sched_max_numa_distance;
6616
e237882b 6617 if (sched_domains_numa_levels <= 1) {
e3fe70b1 6618 sched_numa_topology_type = NUMA_DIRECT;
e237882b
AG
6619 return;
6620 }
e3fe70b1
RR
6621
6622 for_each_online_node(a) {
6623 for_each_online_node(b) {
6624 /* Find two nodes furthest removed from each other. */
6625 if (node_distance(a, b) < n)
6626 continue;
6627
6628 /* Is there an intermediary node between a and b? */
6629 for_each_online_node(c) {
6630 if (node_distance(a, c) < n &&
6631 node_distance(b, c) < n) {
6632 sched_numa_topology_type =
6633 NUMA_GLUELESS_MESH;
6634 return;
6635 }
6636 }
6637
6638 sched_numa_topology_type = NUMA_BACKPLANE;
6639 return;
6640 }
6641 }
6642}
6643
cb83b629
PZ
6644static void sched_init_numa(void)
6645{
6646 int next_distance, curr_distance = node_distance(0, 0);
6647 struct sched_domain_topology_level *tl;
6648 int level = 0;
6649 int i, j, k;
6650
cb83b629
PZ
6651 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6652 if (!sched_domains_numa_distance)
6653 return;
6654
6655 /*
6656 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6657 * unique distances in the node_distance() table.
6658 *
6659 * Assumes node_distance(0,j) includes all distances in
6660 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6661 */
6662 next_distance = curr_distance;
6663 for (i = 0; i < nr_node_ids; i++) {
6664 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6665 for (k = 0; k < nr_node_ids; k++) {
6666 int distance = node_distance(i, k);
6667
6668 if (distance > curr_distance &&
6669 (distance < next_distance ||
6670 next_distance == curr_distance))
6671 next_distance = distance;
6672
6673 /*
6674 * While not a strong assumption it would be nice to know
6675 * about cases where if node A is connected to B, B is not
6676 * equally connected to A.
6677 */
6678 if (sched_debug() && node_distance(k, i) != distance)
6679 sched_numa_warn("Node-distance not symmetric");
6680
6681 if (sched_debug() && i && !find_numa_distance(distance))
6682 sched_numa_warn("Node-0 not representative");
6683 }
6684 if (next_distance != curr_distance) {
6685 sched_domains_numa_distance[level++] = next_distance;
6686 sched_domains_numa_levels = level;
6687 curr_distance = next_distance;
6688 } else break;
cb83b629 6689 }
d039ac60
PZ
6690
6691 /*
6692 * In case of sched_debug() we verify the above assumption.
6693 */
6694 if (!sched_debug())
6695 break;
cb83b629 6696 }
c123588b
AR
6697
6698 if (!level)
6699 return;
6700
cb83b629
PZ
6701 /*
6702 * 'level' contains the number of unique distances, excluding the
6703 * identity distance node_distance(i,i).
6704 *
28b4a521 6705 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
6706 * numbers.
6707 */
6708
5f7865f3
TC
6709 /*
6710 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6711 * If it fails to allocate memory for array sched_domains_numa_masks[][],
6712 * the array will contain less then 'level' members. This could be
6713 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6714 * in other functions.
6715 *
6716 * We reset it to 'level' at the end of this function.
6717 */
6718 sched_domains_numa_levels = 0;
6719
cb83b629
PZ
6720 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6721 if (!sched_domains_numa_masks)
6722 return;
6723
6724 /*
6725 * Now for each level, construct a mask per node which contains all
6726 * cpus of nodes that are that many hops away from us.
6727 */
6728 for (i = 0; i < level; i++) {
6729 sched_domains_numa_masks[i] =
6730 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6731 if (!sched_domains_numa_masks[i])
6732 return;
6733
6734 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6735 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6736 if (!mask)
6737 return;
6738
6739 sched_domains_numa_masks[i][j] = mask;
6740
6741 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 6742 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6743 continue;
6744
6745 cpumask_or(mask, mask, cpumask_of_node(k));
6746 }
6747 }
6748 }
6749
143e1e28
VG
6750 /* Compute default topology size */
6751 for (i = 0; sched_domain_topology[i].mask; i++);
6752
c515db8c 6753 tl = kzalloc((i + level + 1) *
cb83b629
PZ
6754 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6755 if (!tl)
6756 return;
6757
6758 /*
6759 * Copy the default topology bits..
6760 */
143e1e28
VG
6761 for (i = 0; sched_domain_topology[i].mask; i++)
6762 tl[i] = sched_domain_topology[i];
cb83b629
PZ
6763
6764 /*
6765 * .. and append 'j' levels of NUMA goodness.
6766 */
6767 for (j = 0; j < level; i++, j++) {
6768 tl[i] = (struct sched_domain_topology_level){
cb83b629 6769 .mask = sd_numa_mask,
143e1e28 6770 .sd_flags = cpu_numa_flags,
cb83b629
PZ
6771 .flags = SDTL_OVERLAP,
6772 .numa_level = j,
143e1e28 6773 SD_INIT_NAME(NUMA)
cb83b629
PZ
6774 };
6775 }
6776
6777 sched_domain_topology = tl;
5f7865f3
TC
6778
6779 sched_domains_numa_levels = level;
9942f79b 6780 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
e3fe70b1
RR
6781
6782 init_numa_topology_type();
cb83b629 6783}
301a5cba
TC
6784
6785static void sched_domains_numa_masks_set(int cpu)
6786{
6787 int i, j;
6788 int node = cpu_to_node(cpu);
6789
6790 for (i = 0; i < sched_domains_numa_levels; i++) {
6791 for (j = 0; j < nr_node_ids; j++) {
6792 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6793 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6794 }
6795 }
6796}
6797
6798static void sched_domains_numa_masks_clear(int cpu)
6799{
6800 int i, j;
6801 for (i = 0; i < sched_domains_numa_levels; i++) {
6802 for (j = 0; j < nr_node_ids; j++)
6803 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6804 }
6805}
6806
6807/*
6808 * Update sched_domains_numa_masks[level][node] array when new cpus
6809 * are onlined.
6810 */
6811static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6812 unsigned long action,
6813 void *hcpu)
6814{
6815 int cpu = (long)hcpu;
6816
6817 switch (action & ~CPU_TASKS_FROZEN) {
6818 case CPU_ONLINE:
6819 sched_domains_numa_masks_set(cpu);
6820 break;
6821
6822 case CPU_DEAD:
6823 sched_domains_numa_masks_clear(cpu);
6824 break;
6825
6826 default:
6827 return NOTIFY_DONE;
6828 }
6829
6830 return NOTIFY_OK;
cb83b629
PZ
6831}
6832#else
6833static inline void sched_init_numa(void)
6834{
6835}
301a5cba
TC
6836
6837static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6838 unsigned long action,
6839 void *hcpu)
6840{
6841 return 0;
6842}
cb83b629
PZ
6843#endif /* CONFIG_NUMA */
6844
54ab4ff4
PZ
6845static int __sdt_alloc(const struct cpumask *cpu_map)
6846{
6847 struct sched_domain_topology_level *tl;
6848 int j;
6849
27723a68 6850 for_each_sd_topology(tl) {
54ab4ff4
PZ
6851 struct sd_data *sdd = &tl->data;
6852
6853 sdd->sd = alloc_percpu(struct sched_domain *);
6854 if (!sdd->sd)
6855 return -ENOMEM;
6856
6857 sdd->sg = alloc_percpu(struct sched_group *);
6858 if (!sdd->sg)
6859 return -ENOMEM;
6860
63b2ca30
NP
6861 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6862 if (!sdd->sgc)
9c3f75cb
PZ
6863 return -ENOMEM;
6864
54ab4ff4
PZ
6865 for_each_cpu(j, cpu_map) {
6866 struct sched_domain *sd;
6867 struct sched_group *sg;
63b2ca30 6868 struct sched_group_capacity *sgc;
54ab4ff4 6869
5cc389bc 6870 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
54ab4ff4
PZ
6871 GFP_KERNEL, cpu_to_node(j));
6872 if (!sd)
6873 return -ENOMEM;
6874
6875 *per_cpu_ptr(sdd->sd, j) = sd;
6876
6877 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6878 GFP_KERNEL, cpu_to_node(j));
6879 if (!sg)
6880 return -ENOMEM;
6881
30b4e9eb
IM
6882 sg->next = sg;
6883
54ab4ff4 6884 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6885
63b2ca30 6886 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
9c3f75cb 6887 GFP_KERNEL, cpu_to_node(j));
63b2ca30 6888 if (!sgc)
9c3f75cb
PZ
6889 return -ENOMEM;
6890
63b2ca30 6891 *per_cpu_ptr(sdd->sgc, j) = sgc;
54ab4ff4
PZ
6892 }
6893 }
6894
6895 return 0;
6896}
6897
6898static void __sdt_free(const struct cpumask *cpu_map)
6899{
6900 struct sched_domain_topology_level *tl;
6901 int j;
6902
27723a68 6903 for_each_sd_topology(tl) {
54ab4ff4
PZ
6904 struct sd_data *sdd = &tl->data;
6905
6906 for_each_cpu(j, cpu_map) {
fb2cf2c6 6907 struct sched_domain *sd;
6908
6909 if (sdd->sd) {
6910 sd = *per_cpu_ptr(sdd->sd, j);
6911 if (sd && (sd->flags & SD_OVERLAP))
6912 free_sched_groups(sd->groups, 0);
6913 kfree(*per_cpu_ptr(sdd->sd, j));
6914 }
6915
6916 if (sdd->sg)
6917 kfree(*per_cpu_ptr(sdd->sg, j));
63b2ca30
NP
6918 if (sdd->sgc)
6919 kfree(*per_cpu_ptr(sdd->sgc, j));
54ab4ff4
PZ
6920 }
6921 free_percpu(sdd->sd);
fb2cf2c6 6922 sdd->sd = NULL;
54ab4ff4 6923 free_percpu(sdd->sg);
fb2cf2c6 6924 sdd->sg = NULL;
63b2ca30
NP
6925 free_percpu(sdd->sgc);
6926 sdd->sgc = NULL;
54ab4ff4
PZ
6927 }
6928}
6929
2c402dc3 6930struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
4a850cbe
VK
6931 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6932 struct sched_domain *child, int cpu)
2c402dc3 6933{
143e1e28 6934 struct sched_domain *sd = sd_init(tl, cpu);
2c402dc3 6935 if (!sd)
d069b916 6936 return child;
2c402dc3 6937
2c402dc3 6938 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6939 if (child) {
6940 sd->level = child->level + 1;
6941 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6942 child->parent = sd;
c75e0128 6943 sd->child = child;
6ae72dff
PZ
6944
6945 if (!cpumask_subset(sched_domain_span(child),
6946 sched_domain_span(sd))) {
6947 pr_err("BUG: arch topology borken\n");
6948#ifdef CONFIG_SCHED_DEBUG
6949 pr_err(" the %s domain not a subset of the %s domain\n",
6950 child->name, sd->name);
6951#endif
6952 /* Fixup, ensure @sd has at least @child cpus. */
6953 cpumask_or(sched_domain_span(sd),
6954 sched_domain_span(sd),
6955 sched_domain_span(child));
6956 }
6957
60495e77 6958 }
a841f8ce 6959 set_domain_attribute(sd, attr);
2c402dc3
PZ
6960
6961 return sd;
6962}
6963
2109b99e
AH
6964/*
6965 * Build sched domains for a given set of cpus and attach the sched domains
6966 * to the individual cpus
6967 */
dce840a0
PZ
6968static int build_sched_domains(const struct cpumask *cpu_map,
6969 struct sched_domain_attr *attr)
2109b99e 6970{
1c632169 6971 enum s_alloc alloc_state;
dce840a0 6972 struct sched_domain *sd;
2109b99e 6973 struct s_data d;
822ff793 6974 int i, ret = -ENOMEM;
9c1cfda2 6975
2109b99e
AH
6976 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6977 if (alloc_state != sa_rootdomain)
6978 goto error;
9c1cfda2 6979
dce840a0 6980 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6981 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6982 struct sched_domain_topology_level *tl;
6983
3bd65a80 6984 sd = NULL;
27723a68 6985 for_each_sd_topology(tl) {
4a850cbe 6986 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
22da9569
VK
6987 if (tl == sched_domain_topology)
6988 *per_cpu_ptr(d.sd, i) = sd;
e3589f6c
PZ
6989 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6990 sd->flags |= SD_OVERLAP;
d110235d
PZ
6991 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6992 break;
e3589f6c 6993 }
dce840a0
PZ
6994 }
6995
6996 /* Build the groups for the domains */
6997 for_each_cpu(i, cpu_map) {
6998 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6999 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
7000 if (sd->flags & SD_OVERLAP) {
7001 if (build_overlap_sched_groups(sd, i))
7002 goto error;
7003 } else {
7004 if (build_sched_groups(sd, i))
7005 goto error;
7006 }
1cf51902 7007 }
a06dadbe 7008 }
9c1cfda2 7009
ced549fa 7010 /* Calculate CPU capacity for physical packages and nodes */
a9c9a9b6
PZ
7011 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7012 if (!cpumask_test_cpu(i, cpu_map))
7013 continue;
9c1cfda2 7014
dce840a0
PZ
7015 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7016 claim_allocations(i, sd);
63b2ca30 7017 init_sched_groups_capacity(i, sd);
dce840a0 7018 }
f712c0c7 7019 }
9c1cfda2 7020
1da177e4 7021 /* Attach the domains */
dce840a0 7022 rcu_read_lock();
abcd083a 7023 for_each_cpu(i, cpu_map) {
21d42ccf 7024 sd = *per_cpu_ptr(d.sd, i);
49a02c51 7025 cpu_attach_domain(sd, d.rd, i);
1da177e4 7026 }
dce840a0 7027 rcu_read_unlock();
51888ca2 7028
822ff793 7029 ret = 0;
51888ca2 7030error:
2109b99e 7031 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 7032 return ret;
1da177e4 7033}
029190c5 7034
acc3f5d7 7035static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 7036static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
7037static struct sched_domain_attr *dattr_cur;
7038 /* attribues of custom domains in 'doms_cur' */
029190c5
PJ
7039
7040/*
7041 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
7042 * cpumask) fails, then fallback to a single sched domain,
7043 * as determined by the single cpumask fallback_doms.
029190c5 7044 */
4212823f 7045static cpumask_var_t fallback_doms;
029190c5 7046
ee79d1bd
HC
7047/*
7048 * arch_update_cpu_topology lets virtualized architectures update the
7049 * cpu core maps. It is supposed to return 1 if the topology changed
7050 * or 0 if it stayed the same.
7051 */
52f5684c 7052int __weak arch_update_cpu_topology(void)
22e52b07 7053{
ee79d1bd 7054 return 0;
22e52b07
HC
7055}
7056
acc3f5d7
RR
7057cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7058{
7059 int i;
7060 cpumask_var_t *doms;
7061
7062 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7063 if (!doms)
7064 return NULL;
7065 for (i = 0; i < ndoms; i++) {
7066 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7067 free_sched_domains(doms, i);
7068 return NULL;
7069 }
7070 }
7071 return doms;
7072}
7073
7074void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7075{
7076 unsigned int i;
7077 for (i = 0; i < ndoms; i++)
7078 free_cpumask_var(doms[i]);
7079 kfree(doms);
7080}
7081
1a20ff27 7082/*
41a2d6cf 7083 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7084 * For now this just excludes isolated cpus, but could be used to
7085 * exclude other special cases in the future.
1a20ff27 7086 */
c4a8849a 7087static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7088{
7378547f
MM
7089 int err;
7090
22e52b07 7091 arch_update_cpu_topology();
029190c5 7092 ndoms_cur = 1;
acc3f5d7 7093 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7094 if (!doms_cur)
acc3f5d7
RR
7095 doms_cur = &fallback_doms;
7096 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 7097 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 7098 register_sched_domain_sysctl();
7378547f
MM
7099
7100 return err;
1a20ff27
DG
7101}
7102
1a20ff27
DG
7103/*
7104 * Detach sched domains from a group of cpus specified in cpu_map
7105 * These cpus will now be attached to the NULL domain
7106 */
96f874e2 7107static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
7108{
7109 int i;
7110
dce840a0 7111 rcu_read_lock();
abcd083a 7112 for_each_cpu(i, cpu_map)
57d885fe 7113 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7114 rcu_read_unlock();
1a20ff27
DG
7115}
7116
1d3504fc
HS
7117/* handle null as "default" */
7118static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7119 struct sched_domain_attr *new, int idx_new)
7120{
7121 struct sched_domain_attr tmp;
7122
7123 /* fast path */
7124 if (!new && !cur)
7125 return 1;
7126
7127 tmp = SD_ATTR_INIT;
7128 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7129 new ? (new + idx_new) : &tmp,
7130 sizeof(struct sched_domain_attr));
7131}
7132
029190c5
PJ
7133/*
7134 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7135 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
7136 * doms_new[] to the current sched domain partitioning, doms_cur[].
7137 * It destroys each deleted domain and builds each new domain.
7138 *
acc3f5d7 7139 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
7140 * The masks don't intersect (don't overlap.) We should setup one
7141 * sched domain for each mask. CPUs not in any of the cpumasks will
7142 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7143 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7144 * it as it is.
7145 *
acc3f5d7
RR
7146 * The passed in 'doms_new' should be allocated using
7147 * alloc_sched_domains. This routine takes ownership of it and will
7148 * free_sched_domains it when done with it. If the caller failed the
7149 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7150 * and partition_sched_domains() will fallback to the single partition
7151 * 'fallback_doms', it also forces the domains to be rebuilt.
029190c5 7152 *
96f874e2 7153 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7154 * ndoms_new == 0 is a special case for destroying existing domains,
7155 * and it will not create the default domain.
dfb512ec 7156 *
029190c5
PJ
7157 * Call with hotplug lock held
7158 */
acc3f5d7 7159void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7160 struct sched_domain_attr *dattr_new)
029190c5 7161{
dfb512ec 7162 int i, j, n;
d65bd5ec 7163 int new_topology;
029190c5 7164
712555ee 7165 mutex_lock(&sched_domains_mutex);
a1835615 7166
7378547f
MM
7167 /* always unregister in case we don't destroy any domains */
7168 unregister_sched_domain_sysctl();
7169
d65bd5ec
HC
7170 /* Let architecture update cpu core mappings. */
7171 new_topology = arch_update_cpu_topology();
7172
dfb512ec 7173 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7174
7175 /* Destroy deleted domains */
7176 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7177 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7178 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7179 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7180 goto match1;
7181 }
7182 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7183 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7184match1:
7185 ;
7186 }
7187
c8d2d47a 7188 n = ndoms_cur;
e761b772 7189 if (doms_new == NULL) {
c8d2d47a 7190 n = 0;
acc3f5d7 7191 doms_new = &fallback_doms;
6ad4c188 7192 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7193 WARN_ON_ONCE(dattr_new);
e761b772
MK
7194 }
7195
029190c5
PJ
7196 /* Build new domains */
7197 for (i = 0; i < ndoms_new; i++) {
c8d2d47a 7198 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7199 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7200 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7201 goto match2;
7202 }
7203 /* no match - add a new doms_new */
dce840a0 7204 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7205match2:
7206 ;
7207 }
7208
7209 /* Remember the new sched domains */
acc3f5d7
RR
7210 if (doms_cur != &fallback_doms)
7211 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7212 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7213 doms_cur = doms_new;
1d3504fc 7214 dattr_cur = dattr_new;
029190c5 7215 ndoms_cur = ndoms_new;
7378547f
MM
7216
7217 register_sched_domain_sysctl();
a1835615 7218
712555ee 7219 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7220}
7221
d35be8ba
SB
7222static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7223
1da177e4 7224/*
3a101d05
TH
7225 * Update cpusets according to cpu_active mask. If cpusets are
7226 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7227 * around partition_sched_domains().
d35be8ba
SB
7228 *
7229 * If we come here as part of a suspend/resume, don't touch cpusets because we
7230 * want to restore it back to its original state upon resume anyway.
1da177e4 7231 */
0b2e918a
TH
7232static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7233 void *hcpu)
e761b772 7234{
d35be8ba
SB
7235 switch (action) {
7236 case CPU_ONLINE_FROZEN:
7237 case CPU_DOWN_FAILED_FROZEN:
7238
7239 /*
7240 * num_cpus_frozen tracks how many CPUs are involved in suspend
7241 * resume sequence. As long as this is not the last online
7242 * operation in the resume sequence, just build a single sched
7243 * domain, ignoring cpusets.
7244 */
7245 num_cpus_frozen--;
7246 if (likely(num_cpus_frozen)) {
7247 partition_sched_domains(1, NULL, NULL);
7248 break;
7249 }
7250
7251 /*
7252 * This is the last CPU online operation. So fall through and
7253 * restore the original sched domains by considering the
7254 * cpuset configurations.
7255 */
7256
e761b772 7257 case CPU_ONLINE:
7ddf96b0 7258 cpuset_update_active_cpus(true);
d35be8ba 7259 break;
3a101d05
TH
7260 default:
7261 return NOTIFY_DONE;
7262 }
d35be8ba 7263 return NOTIFY_OK;
3a101d05 7264}
e761b772 7265
0b2e918a
TH
7266static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7267 void *hcpu)
3a101d05 7268{
3c18d447
JL
7269 unsigned long flags;
7270 long cpu = (long)hcpu;
7271 struct dl_bw *dl_b;
533445c6
OS
7272 bool overflow;
7273 int cpus;
3c18d447 7274
533445c6 7275 switch (action) {
3a101d05 7276 case CPU_DOWN_PREPARE:
533445c6
OS
7277 rcu_read_lock_sched();
7278 dl_b = dl_bw_of(cpu);
3c18d447 7279
533445c6
OS
7280 raw_spin_lock_irqsave(&dl_b->lock, flags);
7281 cpus = dl_bw_cpus(cpu);
7282 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7283 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3c18d447 7284
533445c6 7285 rcu_read_unlock_sched();
3c18d447 7286
533445c6
OS
7287 if (overflow)
7288 return notifier_from_errno(-EBUSY);
7ddf96b0 7289 cpuset_update_active_cpus(false);
d35be8ba
SB
7290 break;
7291 case CPU_DOWN_PREPARE_FROZEN:
7292 num_cpus_frozen++;
7293 partition_sched_domains(1, NULL, NULL);
7294 break;
e761b772
MK
7295 default:
7296 return NOTIFY_DONE;
7297 }
d35be8ba 7298 return NOTIFY_OK;
e761b772 7299}
e761b772 7300
1da177e4
LT
7301void __init sched_init_smp(void)
7302{
dcc30a35
RR
7303 cpumask_var_t non_isolated_cpus;
7304
7305 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7306 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7307
cb83b629
PZ
7308 sched_init_numa();
7309
6acce3ef
PZ
7310 /*
7311 * There's no userspace yet to cause hotplug operations; hence all the
7312 * cpu masks are stable and all blatant races in the below code cannot
7313 * happen.
7314 */
712555ee 7315 mutex_lock(&sched_domains_mutex);
c4a8849a 7316 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7317 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7318 if (cpumask_empty(non_isolated_cpus))
7319 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7320 mutex_unlock(&sched_domains_mutex);
e761b772 7321
301a5cba 7322 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
7323 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7324 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772 7325
b328ca18 7326 init_hrtick();
5c1e1767
NP
7327
7328 /* Move init over to a non-isolated CPU */
dcc30a35 7329 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7330 BUG();
19978ca6 7331 sched_init_granularity();
dcc30a35 7332 free_cpumask_var(non_isolated_cpus);
4212823f 7333
0e3900e6 7334 init_sched_rt_class();
1baca4ce 7335 init_sched_dl_class();
1da177e4
LT
7336}
7337#else
7338void __init sched_init_smp(void)
7339{
19978ca6 7340 sched_init_granularity();
1da177e4
LT
7341}
7342#endif /* CONFIG_SMP */
7343
7344int in_sched_functions(unsigned long addr)
7345{
1da177e4
LT
7346 return in_lock_functions(addr) ||
7347 (addr >= (unsigned long)__sched_text_start
7348 && addr < (unsigned long)__sched_text_end);
7349}
7350
029632fb 7351#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7352/*
7353 * Default task group.
7354 * Every task in system belongs to this group at bootup.
7355 */
029632fb 7356struct task_group root_task_group;
35cf4e50 7357LIST_HEAD(task_groups);
052f1dc7 7358#endif
6f505b16 7359
e6252c3e 7360DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 7361
1da177e4
LT
7362void __init sched_init(void)
7363{
dd41f596 7364 int i, j;
434d53b0
MT
7365 unsigned long alloc_size = 0, ptr;
7366
7367#ifdef CONFIG_FAIR_GROUP_SCHED
7368 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7369#endif
7370#ifdef CONFIG_RT_GROUP_SCHED
7371 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7372#endif
434d53b0 7373 if (alloc_size) {
36b7b6d4 7374 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7375
7376#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7377 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7378 ptr += nr_cpu_ids * sizeof(void **);
7379
07e06b01 7380 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7381 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7382
6d6bc0ad 7383#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7384#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7385 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7386 ptr += nr_cpu_ids * sizeof(void **);
7387
07e06b01 7388 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7389 ptr += nr_cpu_ids * sizeof(void **);
7390
6d6bc0ad 7391#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 7392 }
df7c8e84 7393#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
7394 for_each_possible_cpu(i) {
7395 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7396 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 7397 }
b74e6278 7398#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 7399
332ac17e
DF
7400 init_rt_bandwidth(&def_rt_bandwidth,
7401 global_rt_period(), global_rt_runtime());
7402 init_dl_bandwidth(&def_dl_bandwidth,
1724813d 7403 global_rt_period(), global_rt_runtime());
332ac17e 7404
57d885fe
GH
7405#ifdef CONFIG_SMP
7406 init_defrootdomain();
7407#endif
7408
d0b27fa7 7409#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7410 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7411 global_rt_period(), global_rt_runtime());
6d6bc0ad 7412#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7413
7c941438 7414#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
7415 list_add(&root_task_group.list, &task_groups);
7416 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7417 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7418 autogroup_init(&init_task);
54c707e9 7419
7c941438 7420#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7421
0a945022 7422 for_each_possible_cpu(i) {
70b97a7f 7423 struct rq *rq;
1da177e4
LT
7424
7425 rq = cpu_rq(i);
05fa785c 7426 raw_spin_lock_init(&rq->lock);
7897986b 7427 rq->nr_running = 0;
dce48a84
TG
7428 rq->calc_load_active = 0;
7429 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 7430 init_cfs_rq(&rq->cfs);
07c54f7a
AV
7431 init_rt_rq(&rq->rt);
7432 init_dl_rq(&rq->dl);
dd41f596 7433#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 7434 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 7435 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7436 /*
07e06b01 7437 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7438 *
7439 * In case of task-groups formed thr' the cgroup filesystem, it
7440 * gets 100% of the cpu resources in the system. This overall
7441 * system cpu resource is divided among the tasks of
07e06b01 7442 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7443 * based on each entity's (task or task-group's) weight
7444 * (se->load.weight).
7445 *
07e06b01 7446 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
7447 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7448 * then A0's share of the cpu resource is:
7449 *
0d905bca 7450 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7451 *
07e06b01
YZ
7452 * We achieve this by letting root_task_group's tasks sit
7453 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7454 */
ab84d31e 7455 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 7456 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7457#endif /* CONFIG_FAIR_GROUP_SCHED */
7458
7459 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7460#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7461 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7462#endif
1da177e4 7463
dd41f596
IM
7464 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7465 rq->cpu_load[j] = 0;
fdf3e95d
VP
7466
7467 rq->last_load_update_tick = jiffies;
7468
1da177e4 7469#ifdef CONFIG_SMP
41c7ce9a 7470 rq->sd = NULL;
57d885fe 7471 rq->rd = NULL;
ca6d75e6 7472 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 7473 rq->balance_callback = NULL;
1da177e4 7474 rq->active_balance = 0;
dd41f596 7475 rq->next_balance = jiffies;
1da177e4 7476 rq->push_cpu = 0;
0a2966b4 7477 rq->cpu = i;
1f11eb6a 7478 rq->online = 0;
eae0c9df
MG
7479 rq->idle_stamp = 0;
7480 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 7481 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
7482
7483 INIT_LIST_HEAD(&rq->cfs_tasks);
7484
dc938520 7485 rq_attach_root(rq, &def_root_domain);
3451d024 7486#ifdef CONFIG_NO_HZ_COMMON
1c792db7 7487 rq->nohz_flags = 0;
83cd4fe2 7488#endif
265f22a9
FW
7489#ifdef CONFIG_NO_HZ_FULL
7490 rq->last_sched_tick = 0;
7491#endif
1da177e4 7492#endif
8f4d37ec 7493 init_rq_hrtick(rq);
1da177e4 7494 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7495 }
7496
2dd73a4f 7497 set_load_weight(&init_task);
b50f60ce 7498
e107be36
AK
7499#ifdef CONFIG_PREEMPT_NOTIFIERS
7500 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7501#endif
7502
1da177e4
LT
7503 /*
7504 * The boot idle thread does lazy MMU switching as well:
7505 */
7506 atomic_inc(&init_mm.mm_count);
7507 enter_lazy_tlb(&init_mm, current);
7508
1b537c7d
YD
7509 /*
7510 * During early bootup we pretend to be a normal task:
7511 */
7512 current->sched_class = &fair_sched_class;
7513
1da177e4
LT
7514 /*
7515 * Make us the idle thread. Technically, schedule() should not be
7516 * called from this thread, however somewhere below it might be,
7517 * but because we are the idle thread, we just pick up running again
7518 * when this runqueue becomes "idle".
7519 */
7520 init_idle(current, smp_processor_id());
dce48a84
TG
7521
7522 calc_load_update = jiffies + LOAD_FREQ;
7523
bf4d83f6 7524#ifdef CONFIG_SMP
4cb98839 7525 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
7526 /* May be allocated at isolcpus cmdline parse time */
7527 if (cpu_isolated_map == NULL)
7528 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 7529 idle_thread_set_boot_cpu();
a803f026 7530 set_cpu_rq_start_time();
029632fb
PZ
7531#endif
7532 init_sched_fair_class();
6a7b3dc3 7533
6892b75e 7534 scheduler_running = 1;
1da177e4
LT
7535}
7536
d902db1e 7537#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7538static inline int preempt_count_equals(int preempt_offset)
7539{
da7142e2 7540 int nested = preempt_count() + rcu_preempt_depth();
e4aafea2 7541
4ba8216c 7542 return (nested == preempt_offset);
e4aafea2
FW
7543}
7544
d894837f 7545void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7546{
8eb23b9f
PZ
7547 /*
7548 * Blocking primitives will set (and therefore destroy) current->state,
7549 * since we will exit with TASK_RUNNING make sure we enter with it,
7550 * otherwise we will destroy state.
7551 */
00845eb9 7552 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
7553 "do not call blocking ops when !TASK_RUNNING; "
7554 "state=%lx set at [<%p>] %pS\n",
7555 current->state,
7556 (void *)current->task_state_change,
00845eb9 7557 (void *)current->task_state_change);
8eb23b9f 7558
3427445a
PZ
7559 ___might_sleep(file, line, preempt_offset);
7560}
7561EXPORT_SYMBOL(__might_sleep);
7562
7563void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7564{
1da177e4
LT
7565 static unsigned long prev_jiffy; /* ratelimiting */
7566
b3fbab05 7567 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
db273be2
TG
7568 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7569 !is_idle_task(current)) ||
e4aafea2 7570 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
7571 return;
7572 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7573 return;
7574 prev_jiffy = jiffies;
7575
3df0fc5b
PZ
7576 printk(KERN_ERR
7577 "BUG: sleeping function called from invalid context at %s:%d\n",
7578 file, line);
7579 printk(KERN_ERR
7580 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7581 in_atomic(), irqs_disabled(),
7582 current->pid, current->comm);
aef745fc 7583
a8b686b3
ES
7584 if (task_stack_end_corrupted(current))
7585 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7586
aef745fc
IM
7587 debug_show_held_locks(current);
7588 if (irqs_disabled())
7589 print_irqtrace_events(current);
8f47b187
TG
7590#ifdef CONFIG_DEBUG_PREEMPT
7591 if (!preempt_count_equals(preempt_offset)) {
7592 pr_err("Preemption disabled at:");
7593 print_ip_sym(current->preempt_disable_ip);
7594 pr_cont("\n");
7595 }
7596#endif
aef745fc 7597 dump_stack();
1da177e4 7598}
3427445a 7599EXPORT_SYMBOL(___might_sleep);
1da177e4
LT
7600#endif
7601
7602#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 7603void normalize_rt_tasks(void)
3a5e4dc1 7604{
dbc7f069 7605 struct task_struct *g, *p;
d50dde5a
DF
7606 struct sched_attr attr = {
7607 .sched_policy = SCHED_NORMAL,
7608 };
1da177e4 7609
3472eaa1 7610 read_lock(&tasklist_lock);
5d07f420 7611 for_each_process_thread(g, p) {
178be793
IM
7612 /*
7613 * Only normalize user tasks:
7614 */
3472eaa1 7615 if (p->flags & PF_KTHREAD)
178be793
IM
7616 continue;
7617
6cfb0d5d 7618 p->se.exec_start = 0;
6cfb0d5d 7619#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7620 p->se.statistics.wait_start = 0;
7621 p->se.statistics.sleep_start = 0;
7622 p->se.statistics.block_start = 0;
6cfb0d5d 7623#endif
dd41f596 7624
aab03e05 7625 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
7626 /*
7627 * Renice negative nice level userspace
7628 * tasks back to 0:
7629 */
3472eaa1 7630 if (task_nice(p) < 0)
dd41f596 7631 set_user_nice(p, 0);
1da177e4 7632 continue;
dd41f596 7633 }
1da177e4 7634
dbc7f069 7635 __sched_setscheduler(p, &attr, false, false);
5d07f420 7636 }
3472eaa1 7637 read_unlock(&tasklist_lock);
1da177e4
LT
7638}
7639
7640#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7641
67fc4e0c 7642#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7643/*
67fc4e0c 7644 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7645 *
7646 * They can only be called when the whole system has been
7647 * stopped - every CPU needs to be quiescent, and no scheduling
7648 * activity can take place. Using them for anything else would
7649 * be a serious bug, and as a result, they aren't even visible
7650 * under any other configuration.
7651 */
7652
7653/**
7654 * curr_task - return the current task for a given cpu.
7655 * @cpu: the processor in question.
7656 *
7657 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
7658 *
7659 * Return: The current task for @cpu.
1df5c10a 7660 */
36c8b586 7661struct task_struct *curr_task(int cpu)
1df5c10a
LT
7662{
7663 return cpu_curr(cpu);
7664}
7665
67fc4e0c
JW
7666#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7667
7668#ifdef CONFIG_IA64
1df5c10a
LT
7669/**
7670 * set_curr_task - set the current task for a given cpu.
7671 * @cpu: the processor in question.
7672 * @p: the task pointer to set.
7673 *
7674 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7675 * are serviced on a separate stack. It allows the architecture to switch the
7676 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7677 * must be called with all CPU's synchronized, and interrupts disabled, the
7678 * and caller must save the original value of the current task (see
7679 * curr_task() above) and restore that value before reenabling interrupts and
7680 * re-starting the system.
7681 *
7682 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7683 */
36c8b586 7684void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7685{
7686 cpu_curr(cpu) = p;
7687}
7688
7689#endif
29f59db3 7690
7c941438 7691#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7692/* task_group_lock serializes the addition/removal of task groups */
7693static DEFINE_SPINLOCK(task_group_lock);
7694
bccbe08a
PZ
7695static void free_sched_group(struct task_group *tg)
7696{
7697 free_fair_sched_group(tg);
7698 free_rt_sched_group(tg);
e9aa1dd1 7699 autogroup_free(tg);
bccbe08a
PZ
7700 kfree(tg);
7701}
7702
7703/* allocate runqueue etc for a new task group */
ec7dc8ac 7704struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7705{
7706 struct task_group *tg;
bccbe08a
PZ
7707
7708 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7709 if (!tg)
7710 return ERR_PTR(-ENOMEM);
7711
ec7dc8ac 7712 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7713 goto err;
7714
ec7dc8ac 7715 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7716 goto err;
7717
ace783b9
LZ
7718 return tg;
7719
7720err:
7721 free_sched_group(tg);
7722 return ERR_PTR(-ENOMEM);
7723}
7724
7725void sched_online_group(struct task_group *tg, struct task_group *parent)
7726{
7727 unsigned long flags;
7728
8ed36996 7729 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7730 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7731
7732 WARN_ON(!parent); /* root should already exist */
7733
7734 tg->parent = parent;
f473aa5e 7735 INIT_LIST_HEAD(&tg->children);
09f2724a 7736 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7737 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7738}
7739
9b5b7751 7740/* rcu callback to free various structures associated with a task group */
6f505b16 7741static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7742{
29f59db3 7743 /* now it should be safe to free those cfs_rqs */
6f505b16 7744 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7745}
7746
9b5b7751 7747/* Destroy runqueue etc associated with a task group */
4cf86d77 7748void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
7749{
7750 /* wait for possible concurrent references to cfs_rqs complete */
7751 call_rcu(&tg->rcu, free_sched_group_rcu);
7752}
7753
7754void sched_offline_group(struct task_group *tg)
29f59db3 7755{
8ed36996 7756 unsigned long flags;
9b5b7751 7757 int i;
29f59db3 7758
3d4b47b4
PZ
7759 /* end participation in shares distribution */
7760 for_each_possible_cpu(i)
bccbe08a 7761 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7762
7763 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7764 list_del_rcu(&tg->list);
f473aa5e 7765 list_del_rcu(&tg->siblings);
8ed36996 7766 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7767}
7768
9b5b7751 7769/* change task's runqueue when it moves between groups.
3a252015
IM
7770 * The caller of this function should have put the task in its new group
7771 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7772 * reflect its new group.
9b5b7751
SV
7773 */
7774void sched_move_task(struct task_struct *tsk)
29f59db3 7775{
8323f26c 7776 struct task_group *tg;
da0c1e65 7777 int queued, running;
29f59db3
SV
7778 unsigned long flags;
7779 struct rq *rq;
7780
7781 rq = task_rq_lock(tsk, &flags);
7782
051a1d1a 7783 running = task_current(rq, tsk);
da0c1e65 7784 queued = task_on_rq_queued(tsk);
29f59db3 7785
da0c1e65 7786 if (queued)
1de64443 7787 dequeue_task(rq, tsk, DEQUEUE_SAVE);
0e1f3483 7788 if (unlikely(running))
f3cd1c4e 7789 put_prev_task(rq, tsk);
29f59db3 7790
f7b8a47d
KT
7791 /*
7792 * All callers are synchronized by task_rq_lock(); we do not use RCU
7793 * which is pointless here. Thus, we pass "true" to task_css_check()
7794 * to prevent lockdep warnings.
7795 */
7796 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7797 struct task_group, css);
7798 tg = autogroup_task_group(tsk, tg);
7799 tsk->sched_task_group = tg;
7800
810b3817 7801#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 7802 if (tsk->sched_class->task_move_group)
bc54da21 7803 tsk->sched_class->task_move_group(tsk);
b2b5ce02 7804 else
810b3817 7805#endif
b2b5ce02 7806 set_task_rq(tsk, task_cpu(tsk));
810b3817 7807
0e1f3483
HS
7808 if (unlikely(running))
7809 tsk->sched_class->set_curr_task(rq);
da0c1e65 7810 if (queued)
1de64443 7811 enqueue_task(rq, tsk, ENQUEUE_RESTORE);
29f59db3 7812
0122ec5b 7813 task_rq_unlock(rq, tsk, &flags);
29f59db3 7814}
7c941438 7815#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7816
a790de99
PT
7817#ifdef CONFIG_RT_GROUP_SCHED
7818/*
7819 * Ensure that the real time constraints are schedulable.
7820 */
7821static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7822
9a7e0b18
PZ
7823/* Must be called with tasklist_lock held */
7824static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7825{
9a7e0b18 7826 struct task_struct *g, *p;
b40b2e8e 7827
1fe89e1b
PZ
7828 /*
7829 * Autogroups do not have RT tasks; see autogroup_create().
7830 */
7831 if (task_group_is_autogroup(tg))
7832 return 0;
7833
5d07f420 7834 for_each_process_thread(g, p) {
8651c658 7835 if (rt_task(p) && task_group(p) == tg)
9a7e0b18 7836 return 1;
5d07f420 7837 }
b40b2e8e 7838
9a7e0b18
PZ
7839 return 0;
7840}
b40b2e8e 7841
9a7e0b18
PZ
7842struct rt_schedulable_data {
7843 struct task_group *tg;
7844 u64 rt_period;
7845 u64 rt_runtime;
7846};
b40b2e8e 7847
a790de99 7848static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7849{
7850 struct rt_schedulable_data *d = data;
7851 struct task_group *child;
7852 unsigned long total, sum = 0;
7853 u64 period, runtime;
b40b2e8e 7854
9a7e0b18
PZ
7855 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7856 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7857
9a7e0b18
PZ
7858 if (tg == d->tg) {
7859 period = d->rt_period;
7860 runtime = d->rt_runtime;
b40b2e8e 7861 }
b40b2e8e 7862
4653f803
PZ
7863 /*
7864 * Cannot have more runtime than the period.
7865 */
7866 if (runtime > period && runtime != RUNTIME_INF)
7867 return -EINVAL;
6f505b16 7868
4653f803
PZ
7869 /*
7870 * Ensure we don't starve existing RT tasks.
7871 */
9a7e0b18
PZ
7872 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7873 return -EBUSY;
6f505b16 7874
9a7e0b18 7875 total = to_ratio(period, runtime);
6f505b16 7876
4653f803
PZ
7877 /*
7878 * Nobody can have more than the global setting allows.
7879 */
7880 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7881 return -EINVAL;
6f505b16 7882
4653f803
PZ
7883 /*
7884 * The sum of our children's runtime should not exceed our own.
7885 */
9a7e0b18
PZ
7886 list_for_each_entry_rcu(child, &tg->children, siblings) {
7887 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7888 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7889
9a7e0b18
PZ
7890 if (child == d->tg) {
7891 period = d->rt_period;
7892 runtime = d->rt_runtime;
7893 }
6f505b16 7894
9a7e0b18 7895 sum += to_ratio(period, runtime);
9f0c1e56 7896 }
6f505b16 7897
9a7e0b18
PZ
7898 if (sum > total)
7899 return -EINVAL;
7900
7901 return 0;
6f505b16
PZ
7902}
7903
9a7e0b18 7904static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7905{
8277434e
PT
7906 int ret;
7907
9a7e0b18
PZ
7908 struct rt_schedulable_data data = {
7909 .tg = tg,
7910 .rt_period = period,
7911 .rt_runtime = runtime,
7912 };
7913
8277434e
PT
7914 rcu_read_lock();
7915 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7916 rcu_read_unlock();
7917
7918 return ret;
521f1a24
DG
7919}
7920
ab84d31e 7921static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7922 u64 rt_period, u64 rt_runtime)
6f505b16 7923{
ac086bc2 7924 int i, err = 0;
9f0c1e56 7925
2636ed5f
PZ
7926 /*
7927 * Disallowing the root group RT runtime is BAD, it would disallow the
7928 * kernel creating (and or operating) RT threads.
7929 */
7930 if (tg == &root_task_group && rt_runtime == 0)
7931 return -EINVAL;
7932
7933 /* No period doesn't make any sense. */
7934 if (rt_period == 0)
7935 return -EINVAL;
7936
9f0c1e56 7937 mutex_lock(&rt_constraints_mutex);
521f1a24 7938 read_lock(&tasklist_lock);
9a7e0b18
PZ
7939 err = __rt_schedulable(tg, rt_period, rt_runtime);
7940 if (err)
9f0c1e56 7941 goto unlock;
ac086bc2 7942
0986b11b 7943 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7944 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7945 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7946
7947 for_each_possible_cpu(i) {
7948 struct rt_rq *rt_rq = tg->rt_rq[i];
7949
0986b11b 7950 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7951 rt_rq->rt_runtime = rt_runtime;
0986b11b 7952 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7953 }
0986b11b 7954 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7955unlock:
521f1a24 7956 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7957 mutex_unlock(&rt_constraints_mutex);
7958
7959 return err;
6f505b16
PZ
7960}
7961
25cc7da7 7962static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
7963{
7964 u64 rt_runtime, rt_period;
7965
7966 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7967 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7968 if (rt_runtime_us < 0)
7969 rt_runtime = RUNTIME_INF;
7970
ab84d31e 7971 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7972}
7973
25cc7da7 7974static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
7975{
7976 u64 rt_runtime_us;
7977
d0b27fa7 7978 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7979 return -1;
7980
d0b27fa7 7981 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7982 do_div(rt_runtime_us, NSEC_PER_USEC);
7983 return rt_runtime_us;
7984}
d0b27fa7 7985
ce2f5fe4 7986static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
d0b27fa7
PZ
7987{
7988 u64 rt_runtime, rt_period;
7989
ce2f5fe4 7990 rt_period = rt_period_us * NSEC_PER_USEC;
d0b27fa7
PZ
7991 rt_runtime = tg->rt_bandwidth.rt_runtime;
7992
ab84d31e 7993 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7994}
7995
25cc7da7 7996static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
7997{
7998 u64 rt_period_us;
7999
8000 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8001 do_div(rt_period_us, NSEC_PER_USEC);
8002 return rt_period_us;
8003}
332ac17e 8004#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8005
332ac17e 8006#ifdef CONFIG_RT_GROUP_SCHED
d0b27fa7
PZ
8007static int sched_rt_global_constraints(void)
8008{
8009 int ret = 0;
8010
8011 mutex_lock(&rt_constraints_mutex);
9a7e0b18 8012 read_lock(&tasklist_lock);
4653f803 8013 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 8014 read_unlock(&tasklist_lock);
d0b27fa7
PZ
8015 mutex_unlock(&rt_constraints_mutex);
8016
8017 return ret;
8018}
54e99124 8019
25cc7da7 8020static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
8021{
8022 /* Don't accept realtime tasks when there is no way for them to run */
8023 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8024 return 0;
8025
8026 return 1;
8027}
8028
6d6bc0ad 8029#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8030static int sched_rt_global_constraints(void)
8031{
ac086bc2 8032 unsigned long flags;
332ac17e 8033 int i, ret = 0;
ec5d4989 8034
0986b11b 8035 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
8036 for_each_possible_cpu(i) {
8037 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8038
0986b11b 8039 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8040 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 8041 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8042 }
0986b11b 8043 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 8044
332ac17e 8045 return ret;
d0b27fa7 8046}
6d6bc0ad 8047#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8048
a1963b81 8049static int sched_dl_global_validate(void)
332ac17e 8050{
1724813d
PZ
8051 u64 runtime = global_rt_runtime();
8052 u64 period = global_rt_period();
332ac17e 8053 u64 new_bw = to_ratio(period, runtime);
f10e00f4 8054 struct dl_bw *dl_b;
1724813d 8055 int cpu, ret = 0;
49516342 8056 unsigned long flags;
332ac17e
DF
8057
8058 /*
8059 * Here we want to check the bandwidth not being set to some
8060 * value smaller than the currently allocated bandwidth in
8061 * any of the root_domains.
8062 *
8063 * FIXME: Cycling on all the CPUs is overdoing, but simpler than
8064 * cycling on root_domains... Discussion on different/better
8065 * solutions is welcome!
8066 */
1724813d 8067 for_each_possible_cpu(cpu) {
f10e00f4
KT
8068 rcu_read_lock_sched();
8069 dl_b = dl_bw_of(cpu);
332ac17e 8070
49516342 8071 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d
PZ
8072 if (new_bw < dl_b->total_bw)
8073 ret = -EBUSY;
49516342 8074 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
1724813d 8075
f10e00f4
KT
8076 rcu_read_unlock_sched();
8077
1724813d
PZ
8078 if (ret)
8079 break;
332ac17e
DF
8080 }
8081
1724813d 8082 return ret;
332ac17e
DF
8083}
8084
1724813d 8085static void sched_dl_do_global(void)
ce0dbbbb 8086{
1724813d 8087 u64 new_bw = -1;
f10e00f4 8088 struct dl_bw *dl_b;
1724813d 8089 int cpu;
49516342 8090 unsigned long flags;
ce0dbbbb 8091
1724813d
PZ
8092 def_dl_bandwidth.dl_period = global_rt_period();
8093 def_dl_bandwidth.dl_runtime = global_rt_runtime();
8094
8095 if (global_rt_runtime() != RUNTIME_INF)
8096 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
8097
8098 /*
8099 * FIXME: As above...
8100 */
8101 for_each_possible_cpu(cpu) {
f10e00f4
KT
8102 rcu_read_lock_sched();
8103 dl_b = dl_bw_of(cpu);
1724813d 8104
49516342 8105 raw_spin_lock_irqsave(&dl_b->lock, flags);
1724813d 8106 dl_b->bw = new_bw;
49516342 8107 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
f10e00f4
KT
8108
8109 rcu_read_unlock_sched();
ce0dbbbb 8110 }
1724813d
PZ
8111}
8112
8113static int sched_rt_global_validate(void)
8114{
8115 if (sysctl_sched_rt_period <= 0)
8116 return -EINVAL;
8117
e9e7cb38
JL
8118 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8119 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
1724813d
PZ
8120 return -EINVAL;
8121
8122 return 0;
8123}
8124
8125static void sched_rt_do_global(void)
8126{
8127 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8128 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
ce0dbbbb
CW
8129}
8130
d0b27fa7 8131int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8132 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8133 loff_t *ppos)
8134{
d0b27fa7
PZ
8135 int old_period, old_runtime;
8136 static DEFINE_MUTEX(mutex);
1724813d 8137 int ret;
d0b27fa7
PZ
8138
8139 mutex_lock(&mutex);
8140 old_period = sysctl_sched_rt_period;
8141 old_runtime = sysctl_sched_rt_runtime;
8142
8d65af78 8143 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8144
8145 if (!ret && write) {
1724813d
PZ
8146 ret = sched_rt_global_validate();
8147 if (ret)
8148 goto undo;
8149
a1963b81 8150 ret = sched_dl_global_validate();
1724813d
PZ
8151 if (ret)
8152 goto undo;
8153
a1963b81 8154 ret = sched_rt_global_constraints();
1724813d
PZ
8155 if (ret)
8156 goto undo;
8157
8158 sched_rt_do_global();
8159 sched_dl_do_global();
8160 }
8161 if (0) {
8162undo:
8163 sysctl_sched_rt_period = old_period;
8164 sysctl_sched_rt_runtime = old_runtime;
d0b27fa7
PZ
8165 }
8166 mutex_unlock(&mutex);
8167
8168 return ret;
8169}
68318b8e 8170
1724813d 8171int sched_rr_handler(struct ctl_table *table, int write,
332ac17e
DF
8172 void __user *buffer, size_t *lenp,
8173 loff_t *ppos)
8174{
8175 int ret;
332ac17e 8176 static DEFINE_MUTEX(mutex);
332ac17e
DF
8177
8178 mutex_lock(&mutex);
332ac17e 8179 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1724813d
PZ
8180 /* make sure that internally we keep jiffies */
8181 /* also, writing zero resets timeslice to default */
332ac17e 8182 if (!ret && write) {
1724813d
PZ
8183 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8184 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
332ac17e
DF
8185 }
8186 mutex_unlock(&mutex);
332ac17e
DF
8187 return ret;
8188}
8189
052f1dc7 8190#ifdef CONFIG_CGROUP_SCHED
68318b8e 8191
a7c6d554 8192static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 8193{
a7c6d554 8194 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
8195}
8196
eb95419b
TH
8197static struct cgroup_subsys_state *
8198cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 8199{
eb95419b
TH
8200 struct task_group *parent = css_tg(parent_css);
8201 struct task_group *tg;
68318b8e 8202
eb95419b 8203 if (!parent) {
68318b8e 8204 /* This is early initialization for the top cgroup */
07e06b01 8205 return &root_task_group.css;
68318b8e
SV
8206 }
8207
ec7dc8ac 8208 tg = sched_create_group(parent);
68318b8e
SV
8209 if (IS_ERR(tg))
8210 return ERR_PTR(-ENOMEM);
8211
68318b8e
SV
8212 return &tg->css;
8213}
8214
eb95419b 8215static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
ace783b9 8216{
eb95419b 8217 struct task_group *tg = css_tg(css);
5c9d535b 8218 struct task_group *parent = css_tg(css->parent);
ace783b9 8219
63876986
TH
8220 if (parent)
8221 sched_online_group(tg, parent);
ace783b9
LZ
8222 return 0;
8223}
8224
eb95419b 8225static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 8226{
eb95419b 8227 struct task_group *tg = css_tg(css);
68318b8e
SV
8228
8229 sched_destroy_group(tg);
8230}
8231
eb95419b 8232static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
ace783b9 8233{
eb95419b 8234 struct task_group *tg = css_tg(css);
ace783b9
LZ
8235
8236 sched_offline_group(tg);
8237}
8238
7e47682e 8239static void cpu_cgroup_fork(struct task_struct *task, void *private)
eeb61e53
KT
8240{
8241 sched_move_task(task);
8242}
8243
eb95419b 8244static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
bb9d97b6 8245 struct cgroup_taskset *tset)
68318b8e 8246{
bb9d97b6
TH
8247 struct task_struct *task;
8248
924f0d9a 8249 cgroup_taskset_for_each(task, tset) {
b68aa230 8250#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 8251 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 8252 return -EINVAL;
b68aa230 8253#else
bb9d97b6
TH
8254 /* We don't support RT-tasks being in separate groups */
8255 if (task->sched_class != &fair_sched_class)
8256 return -EINVAL;
b68aa230 8257#endif
bb9d97b6 8258 }
be367d09
BB
8259 return 0;
8260}
68318b8e 8261
eb95419b 8262static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
bb9d97b6 8263 struct cgroup_taskset *tset)
68318b8e 8264{
bb9d97b6
TH
8265 struct task_struct *task;
8266
924f0d9a 8267 cgroup_taskset_for_each(task, tset)
bb9d97b6 8268 sched_move_task(task);
68318b8e
SV
8269}
8270
052f1dc7 8271#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8272static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8273 struct cftype *cftype, u64 shareval)
68318b8e 8274{
182446d0 8275 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8276}
8277
182446d0
TH
8278static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8279 struct cftype *cft)
68318b8e 8280{
182446d0 8281 struct task_group *tg = css_tg(css);
68318b8e 8282
c8b28116 8283 return (u64) scale_load_down(tg->shares);
68318b8e 8284}
ab84d31e
PT
8285
8286#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8287static DEFINE_MUTEX(cfs_constraints_mutex);
8288
ab84d31e
PT
8289const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8290const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8291
a790de99
PT
8292static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8293
ab84d31e
PT
8294static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8295{
56f570e5 8296 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8297 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8298
8299 if (tg == &root_task_group)
8300 return -EINVAL;
8301
8302 /*
8303 * Ensure we have at some amount of bandwidth every period. This is
8304 * to prevent reaching a state of large arrears when throttled via
8305 * entity_tick() resulting in prolonged exit starvation.
8306 */
8307 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8308 return -EINVAL;
8309
8310 /*
8311 * Likewise, bound things on the otherside by preventing insane quota
8312 * periods. This also allows us to normalize in computing quota
8313 * feasibility.
8314 */
8315 if (period > max_cfs_quota_period)
8316 return -EINVAL;
8317
0e59bdae
KT
8318 /*
8319 * Prevent race between setting of cfs_rq->runtime_enabled and
8320 * unthrottle_offline_cfs_rqs().
8321 */
8322 get_online_cpus();
a790de99
PT
8323 mutex_lock(&cfs_constraints_mutex);
8324 ret = __cfs_schedulable(tg, period, quota);
8325 if (ret)
8326 goto out_unlock;
8327
58088ad0 8328 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8329 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
8330 /*
8331 * If we need to toggle cfs_bandwidth_used, off->on must occur
8332 * before making related changes, and on->off must occur afterwards
8333 */
8334 if (runtime_enabled && !runtime_was_enabled)
8335 cfs_bandwidth_usage_inc();
ab84d31e
PT
8336 raw_spin_lock_irq(&cfs_b->lock);
8337 cfs_b->period = ns_to_ktime(period);
8338 cfs_b->quota = quota;
58088ad0 8339
a9cf55b2 8340 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 8341 /* restart the period timer (if active) to handle new period expiry */
77a4d1a1
PZ
8342 if (runtime_enabled)
8343 start_cfs_bandwidth(cfs_b);
ab84d31e
PT
8344 raw_spin_unlock_irq(&cfs_b->lock);
8345
0e59bdae 8346 for_each_online_cpu(i) {
ab84d31e 8347 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8348 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
8349
8350 raw_spin_lock_irq(&rq->lock);
58088ad0 8351 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8352 cfs_rq->runtime_remaining = 0;
671fd9da 8353
029632fb 8354 if (cfs_rq->throttled)
671fd9da 8355 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
8356 raw_spin_unlock_irq(&rq->lock);
8357 }
1ee14e6c
BS
8358 if (runtime_was_enabled && !runtime_enabled)
8359 cfs_bandwidth_usage_dec();
a790de99
PT
8360out_unlock:
8361 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 8362 put_online_cpus();
ab84d31e 8363
a790de99 8364 return ret;
ab84d31e
PT
8365}
8366
8367int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8368{
8369 u64 quota, period;
8370
029632fb 8371 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8372 if (cfs_quota_us < 0)
8373 quota = RUNTIME_INF;
8374 else
8375 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8376
8377 return tg_set_cfs_bandwidth(tg, period, quota);
8378}
8379
8380long tg_get_cfs_quota(struct task_group *tg)
8381{
8382 u64 quota_us;
8383
029632fb 8384 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8385 return -1;
8386
029632fb 8387 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8388 do_div(quota_us, NSEC_PER_USEC);
8389
8390 return quota_us;
8391}
8392
8393int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8394{
8395 u64 quota, period;
8396
8397 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8398 quota = tg->cfs_bandwidth.quota;
ab84d31e 8399
ab84d31e
PT
8400 return tg_set_cfs_bandwidth(tg, period, quota);
8401}
8402
8403long tg_get_cfs_period(struct task_group *tg)
8404{
8405 u64 cfs_period_us;
8406
029632fb 8407 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8408 do_div(cfs_period_us, NSEC_PER_USEC);
8409
8410 return cfs_period_us;
8411}
8412
182446d0
TH
8413static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8414 struct cftype *cft)
ab84d31e 8415{
182446d0 8416 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
8417}
8418
182446d0
TH
8419static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8420 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 8421{
182446d0 8422 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
8423}
8424
182446d0
TH
8425static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8426 struct cftype *cft)
ab84d31e 8427{
182446d0 8428 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
8429}
8430
182446d0
TH
8431static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8432 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 8433{
182446d0 8434 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
8435}
8436
a790de99
PT
8437struct cfs_schedulable_data {
8438 struct task_group *tg;
8439 u64 period, quota;
8440};
8441
8442/*
8443 * normalize group quota/period to be quota/max_period
8444 * note: units are usecs
8445 */
8446static u64 normalize_cfs_quota(struct task_group *tg,
8447 struct cfs_schedulable_data *d)
8448{
8449 u64 quota, period;
8450
8451 if (tg == d->tg) {
8452 period = d->period;
8453 quota = d->quota;
8454 } else {
8455 period = tg_get_cfs_period(tg);
8456 quota = tg_get_cfs_quota(tg);
8457 }
8458
8459 /* note: these should typically be equivalent */
8460 if (quota == RUNTIME_INF || quota == -1)
8461 return RUNTIME_INF;
8462
8463 return to_ratio(period, quota);
8464}
8465
8466static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8467{
8468 struct cfs_schedulable_data *d = data;
029632fb 8469 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8470 s64 quota = 0, parent_quota = -1;
8471
8472 if (!tg->parent) {
8473 quota = RUNTIME_INF;
8474 } else {
029632fb 8475 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8476
8477 quota = normalize_cfs_quota(tg, d);
9c58c79a 8478 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
8479
8480 /*
8481 * ensure max(child_quota) <= parent_quota, inherit when no
8482 * limit is set
8483 */
8484 if (quota == RUNTIME_INF)
8485 quota = parent_quota;
8486 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8487 return -EINVAL;
8488 }
9c58c79a 8489 cfs_b->hierarchical_quota = quota;
a790de99
PT
8490
8491 return 0;
8492}
8493
8494static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8495{
8277434e 8496 int ret;
a790de99
PT
8497 struct cfs_schedulable_data data = {
8498 .tg = tg,
8499 .period = period,
8500 .quota = quota,
8501 };
8502
8503 if (quota != RUNTIME_INF) {
8504 do_div(data.period, NSEC_PER_USEC);
8505 do_div(data.quota, NSEC_PER_USEC);
8506 }
8507
8277434e
PT
8508 rcu_read_lock();
8509 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8510 rcu_read_unlock();
8511
8512 return ret;
a790de99 8513}
e8da1b18 8514
2da8ca82 8515static int cpu_stats_show(struct seq_file *sf, void *v)
e8da1b18 8516{
2da8ca82 8517 struct task_group *tg = css_tg(seq_css(sf));
029632fb 8518 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 8519
44ffc75b
TH
8520 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8521 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8522 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18
NR
8523
8524 return 0;
8525}
ab84d31e 8526#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8527#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8528
052f1dc7 8529#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
8530static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8531 struct cftype *cft, s64 val)
6f505b16 8532{
182446d0 8533 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
8534}
8535
182446d0
TH
8536static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8537 struct cftype *cft)
6f505b16 8538{
182446d0 8539 return sched_group_rt_runtime(css_tg(css));
6f505b16 8540}
d0b27fa7 8541
182446d0
TH
8542static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8543 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 8544{
182446d0 8545 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
8546}
8547
182446d0
TH
8548static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8549 struct cftype *cft)
d0b27fa7 8550{
182446d0 8551 return sched_group_rt_period(css_tg(css));
d0b27fa7 8552}
6d6bc0ad 8553#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8554
fe5c7cc2 8555static struct cftype cpu_files[] = {
052f1dc7 8556#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8557 {
8558 .name = "shares",
f4c753b7
PM
8559 .read_u64 = cpu_shares_read_u64,
8560 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8561 },
052f1dc7 8562#endif
ab84d31e
PT
8563#ifdef CONFIG_CFS_BANDWIDTH
8564 {
8565 .name = "cfs_quota_us",
8566 .read_s64 = cpu_cfs_quota_read_s64,
8567 .write_s64 = cpu_cfs_quota_write_s64,
8568 },
8569 {
8570 .name = "cfs_period_us",
8571 .read_u64 = cpu_cfs_period_read_u64,
8572 .write_u64 = cpu_cfs_period_write_u64,
8573 },
e8da1b18
NR
8574 {
8575 .name = "stat",
2da8ca82 8576 .seq_show = cpu_stats_show,
e8da1b18 8577 },
ab84d31e 8578#endif
052f1dc7 8579#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8580 {
9f0c1e56 8581 .name = "rt_runtime_us",
06ecb27c
PM
8582 .read_s64 = cpu_rt_runtime_read,
8583 .write_s64 = cpu_rt_runtime_write,
6f505b16 8584 },
d0b27fa7
PZ
8585 {
8586 .name = "rt_period_us",
f4c753b7
PM
8587 .read_u64 = cpu_rt_period_read_uint,
8588 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8589 },
052f1dc7 8590#endif
4baf6e33 8591 { } /* terminate */
68318b8e
SV
8592};
8593
073219e9 8594struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748
TH
8595 .css_alloc = cpu_cgroup_css_alloc,
8596 .css_free = cpu_cgroup_css_free,
ace783b9
LZ
8597 .css_online = cpu_cgroup_css_online,
8598 .css_offline = cpu_cgroup_css_offline,
eeb61e53 8599 .fork = cpu_cgroup_fork,
bb9d97b6
TH
8600 .can_attach = cpu_cgroup_can_attach,
8601 .attach = cpu_cgroup_attach,
5577964e 8602 .legacy_cftypes = cpu_files,
68318b8e
SV
8603 .early_init = 1,
8604};
8605
052f1dc7 8606#endif /* CONFIG_CGROUP_SCHED */
d842de87 8607
b637a328
PM
8608void dump_cpu_task(int cpu)
8609{
8610 pr_info("Task dump for CPU %d:\n", cpu);
8611 sched_show_task(cpu_curr(cpu));
8612}