git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - kernel/sched/core.c
Merge tag 'v5.5-rc3' into sched/core, to pick up fixes
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
391e43da 3 * kernel/sched/core.c
1da177e4 4 *
d1ccc66d 5 * Core kernel scheduler code and related syscalls
1da177e4 LT
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
1da177e4 8 */
325ea10c 9#include "sched.h"
1da177e4 10
7281c8de 11#include <linux/nospec.h>
85f1abe0 12
0ed557aa MR
13#include <linux/kcov.h>
14
96f951ed 15#include <asm/switch_to.h>
5517d86b 16#include <asm/tlb.h>
1da177e4 17
ea138446 18#include "../workqueue_internal.h"
771b53d0 19#include "../../fs/io-wq.h"
29d5e047 20#include "../smpboot.h"
6e0534f2 21
91c27493 VG
22#include "pelt.h"
23
a8d154b0 24#define CREATE_TRACE_POINTS
ad8d75ff 25#include <trace/events/sched.h>
a8d154b0 26
a056a5be QY
27/*
28 * Export tracepoints that act as a bare tracehook (ie: have no trace event
29 * associated with them) to allow external modules to probe them.
30 */
31EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
32EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
33EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
34EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
35EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
36EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
37
029632fb 38DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
dc61b1d6 39
e9666d10 40#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
bf5c91ba IM
41/*
42 * Debugging: various feature bits
765cc3a4 PB
43 *
44 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 45 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 46 * at compile time and compiler optimization based on the features' defaults.
bf5c91ba 47 */
f00b45c1
PZ
48#define SCHED_FEAT(name, enabled) \
49 (1UL << __SCHED_FEAT_##name) * enabled |
bf5c91ba 50const_debug unsigned int sysctl_sched_features =
391e43da 51#include "features.h"
f00b45c1 52 0;
f00b45c1 53#undef SCHED_FEAT
765cc3a4 54#endif
f00b45c1 55
b82d9fdd
PZ
56/*
57 * Number of tasks to iterate in a single balance run.
58 * Limited because this is done with IRQs disabled.
59 */
60const_debug unsigned int sysctl_sched_nr_migrate = 32;
61
fa85ae24 62/*
d1ccc66d 63 * period over which we measure -rt task CPU usage in us.
fa85ae24
PZ
64 * default: 1s
65 */
9f0c1e56 66unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 67
029632fb 68__read_mostly int scheduler_running;
6892b75e 69
9f0c1e56
PZ
70/*
71 * part of the period that we allow rt tasks to run in us.
72 * default: 0.95s
73 */
74int sysctl_sched_rt_runtime = 950000;
fa85ae24 75
3e71a462
PZ
76/*
77 * __task_rq_lock - lock the rq @p resides on.
78 */
eb580751 79struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
80 __acquires(rq->lock)
81{
82 struct rq *rq;
83
84 lockdep_assert_held(&p->pi_lock);
85
86 for (;;) {
87 rq = task_rq(p);
88 raw_spin_lock(&rq->lock);
89 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 90 rq_pin_lock(rq, rf);
3e71a462
PZ
91 return rq;
92 }
93 raw_spin_unlock(&rq->lock);
94
95 while (unlikely(task_on_rq_migrating(p)))
96 cpu_relax();
97 }
98}
99
100/*
101 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
102 */
eb580751 103struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
104 __acquires(p->pi_lock)
105 __acquires(rq->lock)
106{
107 struct rq *rq;
108
109 for (;;) {
eb580751 110 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
3e71a462
PZ
111 rq = task_rq(p);
112 raw_spin_lock(&rq->lock);
113 /*
114 * move_queued_task() task_rq_lock()
115 *
116 * ACQUIRE (rq->lock)
117 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
118 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
119 * [S] ->cpu = new_cpu [L] task_rq()
120 * [L] ->on_rq
121 * RELEASE (rq->lock)
122 *
c546951d 123 * If we observe the old CPU in task_rq_lock(), the acquire of
3e71a462
PZ
124 * the old rq->lock will fully serialize against the stores.
125 *
c546951d
AP
126 * If we observe the new CPU in task_rq_lock(), the address
127 * dependency headed by '[L] rq = task_rq()' and the acquire
128 * will pair with the WMB to ensure we then also see migrating.
3e71a462
PZ
129 */
130 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 131 rq_pin_lock(rq, rf);
3e71a462
PZ
132 return rq;
133 }
134 raw_spin_unlock(&rq->lock);
eb580751 135 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
3e71a462
PZ
136
137 while (unlikely(task_on_rq_migrating(p)))
138 cpu_relax();
139 }
140}
141
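The retry loop above is an instance of a general pattern: take the lock you believe covers the object, then re-check after acquiring it that the object still belongs to that lock and is not mid-migration; otherwise drop the lock and spin. A minimal single-file userspace sketch of the same idea using pthreads follows; the names (box, slot, box_lock) and the layout are invented for illustration and are not kernel interfaces.

#include <pthread.h>
#include <stdatomic.h>

struct slot { pthread_mutex_t lock; };
struct box  { _Atomic(struct slot *) home; _Atomic int migrating; };

/* Lock the slot that @b currently lives in, re-checking after acquire. */
static struct slot *box_lock(struct box *b)
{
	for (;;) {
		struct slot *s = atomic_load(&b->home);

		pthread_mutex_lock(&s->lock);
		if (s == atomic_load(&b->home) && !atomic_load(&b->migrating))
			return s;		/* stable: caller holds s->lock */
		pthread_mutex_unlock(&s->lock);

		while (atomic_load(&b->migrating))
			;			/* cpu_relax() stand-in */
	}
}

int main(void)
{
	struct slot s = { PTHREAD_MUTEX_INITIALIZER };
	struct box  b = { &s, 0 };

	pthread_mutex_unlock(box_lock(&b));	/* single-threaded demo */
	return 0;
}

The re-check is what makes the lock meaningful: acquiring s->lock only helps if the object still points at s once the lock is held.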
535b9552
IM
142/*
143 * RQ-clock updating methods:
144 */
145
146static void update_rq_clock_task(struct rq *rq, s64 delta)
147{
148/*
 149 * In theory, the compiler should just see 0 here, and optimize out the call
150 * to sched_rt_avg_update. But I don't trust it...
151 */
11d4afd4
VG
152 s64 __maybe_unused steal = 0, irq_delta = 0;
153
535b9552
IM
154#ifdef CONFIG_IRQ_TIME_ACCOUNTING
155 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
156
157 /*
158 * Since irq_time is only updated on {soft,}irq_exit, we might run into
159 * this case when a previous update_rq_clock() happened inside a
160 * {soft,}irq region.
161 *
162 * When this happens, we stop ->clock_task and only update the
163 * prev_irq_time stamp to account for the part that fit, so that a next
164 * update will consume the rest. This ensures ->clock_task is
165 * monotonic.
166 *
 167 * It does, however, cause some slight misattribution of {soft,}irq
 168 * time; a more accurate solution would be to update the irq_time using
169 * the current rq->clock timestamp, except that would require using
170 * atomic ops.
171 */
172 if (irq_delta > delta)
173 irq_delta = delta;
174
175 rq->prev_irq_time += irq_delta;
176 delta -= irq_delta;
177#endif
178#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
179 if (static_key_false((&paravirt_steal_rq_enabled))) {
180 steal = paravirt_steal_clock(cpu_of(rq));
181 steal -= rq->prev_steal_time_rq;
182
183 if (unlikely(steal > delta))
184 steal = delta;
185
186 rq->prev_steal_time_rq += steal;
187 delta -= steal;
188 }
189#endif
190
191 rq->clock_task += delta;
192
11d4afd4 193#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
535b9552 194 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
91c27493 195 update_irq_load_avg(rq, irq_delta + steal);
535b9552 196#endif
23127296 197 update_rq_clock_pelt(rq, delta);
535b9552
IM
198}
199
200void update_rq_clock(struct rq *rq)
201{
202 s64 delta;
203
204 lockdep_assert_held(&rq->lock);
205
206 if (rq->clock_update_flags & RQCF_ACT_SKIP)
207 return;
208
209#ifdef CONFIG_SCHED_DEBUG
26ae58d2
PZ
210 if (sched_feat(WARN_DOUBLE_CLOCK))
211 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
535b9552
IM
212 rq->clock_update_flags |= RQCF_UPDATED;
213#endif
26ae58d2 214
535b9552
IM
215 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
216 if (delta < 0)
217 return;
218 rq->clock += delta;
219 update_rq_clock_task(rq, delta);
220}
221
222
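The accounting above splits each raw clock delta into IRQ time, steal time, and task time, clamping so the per-task clock stays monotonic. Below is a self-contained userspace sketch of just that arithmetic for the IRQ part (steal time works the same way); the struct and function names are illustrative, not kernel symbols.

#include <stdint.h>
#include <stdio.h>

struct clocks {
	int64_t clock;        /* raw clock               */
	int64_t clock_task;   /* task-only clock         */
	int64_t prev_irq;     /* irq time already charged */
};

/* Advance the clocks by @delta ns, given @irq ns of cumulative IRQ time. */
static void advance(struct clocks *c, int64_t delta, int64_t irq)
{
	int64_t irq_delta = irq - c->prev_irq;

	if (irq_delta > delta)      /* keep clock_task monotonic */
		irq_delta = delta;
	c->prev_irq += irq_delta;

	c->clock      += delta;
	c->clock_task += delta - irq_delta;
}

int main(void)
{
	struct clocks c = { 0, 0, 0 };

	advance(&c, 1000, 300);     /* 300 ns of IRQ time so far         */
	advance(&c, 1000, 1600);    /* IRQ burst larger than the delta   */
	printf("clock=%lld task=%lld\n",
	       (long long)c.clock, (long long)c.clock_task);
	return 0;	/* prints clock=2000 task=700 */
}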
8f4d37ec
PZ
223#ifdef CONFIG_SCHED_HRTICK
224/*
225 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 226 */
8f4d37ec 227
8f4d37ec
PZ
228static void hrtick_clear(struct rq *rq)
229{
230 if (hrtimer_active(&rq->hrtick_timer))
231 hrtimer_cancel(&rq->hrtick_timer);
232}
233
8f4d37ec
PZ
234/*
235 * High-resolution timer tick.
236 * Runs from hardirq context with interrupts disabled.
237 */
238static enum hrtimer_restart hrtick(struct hrtimer *timer)
239{
240 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
8a8c69c3 241 struct rq_flags rf;
8f4d37ec
PZ
242
243 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
244
8a8c69c3 245 rq_lock(rq, &rf);
3e51f33f 246 update_rq_clock(rq);
8f4d37ec 247 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
8a8c69c3 248 rq_unlock(rq, &rf);
8f4d37ec
PZ
249
250 return HRTIMER_NORESTART;
251}
252
95e904c7 253#ifdef CONFIG_SMP
971ee28c 254
4961b6e1 255static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
256{
257 struct hrtimer *timer = &rq->hrtick_timer;
971ee28c 258
d5096aa6 259 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
971ee28c
PZ
260}
261
31656519
PZ
262/*
263 * called from hardirq (IPI) context
264 */
265static void __hrtick_start(void *arg)
b328ca18 266{
31656519 267 struct rq *rq = arg;
8a8c69c3 268 struct rq_flags rf;
b328ca18 269
8a8c69c3 270 rq_lock(rq, &rf);
971ee28c 271 __hrtick_restart(rq);
31656519 272 rq->hrtick_csd_pending = 0;
8a8c69c3 273 rq_unlock(rq, &rf);
b328ca18
PZ
274}
275
31656519
PZ
276/*
277 * Called to set the hrtick timer state.
278 *
279 * called with rq->lock held and irqs disabled
280 */
029632fb 281void hrtick_start(struct rq *rq, u64 delay)
b328ca18 282{
31656519 283 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 284 ktime_t time;
285 s64 delta;
286
287 /*
288 * Don't schedule slices shorter than 10000ns, that just
289 * doesn't make sense and can cause timer DoS.
290 */
291 delta = max_t(s64, delay, 10000LL);
292 time = ktime_add_ns(timer->base->get_time(), delta);
b328ca18 293
cc584b21 294 hrtimer_set_expires(timer, time);
31656519
PZ
295
296 if (rq == this_rq()) {
971ee28c 297 __hrtick_restart(rq);
31656519 298 } else if (!rq->hrtick_csd_pending) {
c46fff2a 299 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
31656519
PZ
300 rq->hrtick_csd_pending = 1;
301 }
b328ca18
PZ
302}
303
31656519
PZ
304#else
305/*
306 * Called to set the hrtick timer state.
307 *
308 * called with rq->lock held and irqs disabled
309 */
029632fb 310void hrtick_start(struct rq *rq, u64 delay)
31656519 311{
86893335
WL
312 /*
313 * Don't schedule slices shorter than 10000ns, that just
314 * doesn't make sense. Rely on vruntime for fairness.
315 */
316 delay = max_t(u64, delay, 10000LL);
4961b6e1 317 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
d5096aa6 318 HRTIMER_MODE_REL_PINNED_HARD);
31656519 319}
31656519 320#endif /* CONFIG_SMP */
8f4d37ec 321
77a021be 322static void hrtick_rq_init(struct rq *rq)
8f4d37ec 323{
31656519
PZ
324#ifdef CONFIG_SMP
325 rq->hrtick_csd_pending = 0;
8f4d37ec 326
31656519
PZ
327 rq->hrtick_csd.flags = 0;
328 rq->hrtick_csd.func = __hrtick_start;
329 rq->hrtick_csd.info = rq;
330#endif
8f4d37ec 331
d5096aa6 332 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
31656519 333 rq->hrtick_timer.function = hrtick;
8f4d37ec 334}
006c75f1 335#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
336static inline void hrtick_clear(struct rq *rq)
337{
338}
339
77a021be 340static inline void hrtick_rq_init(struct rq *rq)
8f4d37ec
PZ
341{
342}
006c75f1 343#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 344
5529578a
FW
345/*
346 * cmpxchg based fetch_or, macro so it works for different integer types
347 */
348#define fetch_or(ptr, mask) \
349 ({ \
350 typeof(ptr) _ptr = (ptr); \
351 typeof(mask) _mask = (mask); \
352 typeof(*_ptr) _old, _val = *_ptr; \
353 \
354 for (;;) { \
355 _old = cmpxchg(_ptr, _val, _val | _mask); \
356 if (_old == _val) \
357 break; \
358 _val = _old; \
359 } \
360 _old; \
361})
362
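fetch_or() above is a cmpxchg loop that ORs a mask into a word and returns the previous value. In userspace C11 the same operation exists as a builtin; the sketch below shows both the one-liner and an equivalent hand-rolled loop, purely as an illustration of what the macro does.

#include <stdatomic.h>
#include <stdio.h>

static unsigned int fetch_or_loop(_Atomic unsigned int *p, unsigned int mask)
{
	unsigned int old = atomic_load(p);

	/* Retry until we install old | mask over an unchanged old. */
	while (!atomic_compare_exchange_weak(p, &old, old | mask))
		;
	return old;
}

int main(void)
{
	_Atomic unsigned int flags = 0x1;

	unsigned int prev1 = fetch_or_loop(&flags, 0x4);
	unsigned int prev2 = atomic_fetch_or(&flags, 0x8);  /* C11 equivalent */

	printf("prev1=%#x prev2=%#x now=%#x\n", prev1, prev2,
	       (unsigned int)atomic_load(&flags));
	return 0;
}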
e3baac47 363#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
fd99f91a
PZ
364/*
365 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
366 * this avoids any races wrt polling state changes and thereby avoids
367 * spurious IPIs.
368 */
369static bool set_nr_and_not_polling(struct task_struct *p)
370{
371 struct thread_info *ti = task_thread_info(p);
372 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
373}
e3baac47
PZ
374
375/*
376 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
377 *
378 * If this returns true, then the idle task promises to call
379 * sched_ttwu_pending() and reschedule soon.
380 */
381static bool set_nr_if_polling(struct task_struct *p)
382{
383 struct thread_info *ti = task_thread_info(p);
316c1608 384 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
e3baac47
PZ
385
386 for (;;) {
387 if (!(val & _TIF_POLLING_NRFLAG))
388 return false;
389 if (val & _TIF_NEED_RESCHED)
390 return true;
391 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
392 if (old == val)
393 break;
394 val = old;
395 }
396 return true;
397}
398
fd99f91a
PZ
399#else
400static bool set_nr_and_not_polling(struct task_struct *p)
401{
402 set_tsk_need_resched(p);
403 return true;
404}
e3baac47
PZ
405
406#ifdef CONFIG_SMP
407static bool set_nr_if_polling(struct task_struct *p)
408{
409 return false;
410}
411#endif
fd99f91a
PZ
412#endif
413
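set_nr_if_polling() only sets TIF_NEED_RESCHED when the idle task advertises TIF_POLLING_NRFLAG, which lets the sender skip the IPI. The following userspace sketch models that conditional compare-and-swap; the flag values and names are made up for the example and do not correspond to the kernel's TIF_* bits.

#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_NEED_RESCHED  0x1	/* illustrative values only */
#define FLAG_POLLING       0x2

/* Set NEED_RESCHED only if the remote side is polling; return true if the
 * caller may skip sending an explicit notification (the IPI). */
static bool set_if_polling(_Atomic unsigned int *flags)
{
	unsigned int val = atomic_load(flags);

	for (;;) {
		if (!(val & FLAG_POLLING))
			return false;		/* not polling: must notify */
		if (val & FLAG_NEED_RESCHED)
			return true;		/* already set: nothing to do */
		if (atomic_compare_exchange_weak(flags, &val,
						 val | FLAG_NEED_RESCHED))
			return true;
		/* val was refreshed by the failed CAS; retry */
	}
}

int main(void)
{
	_Atomic unsigned int remote = FLAG_POLLING;

	return set_if_polling(&remote) ? 0 : 1;	/* exits 0: no IPI needed */
}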
07879c6a 414static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
76751049
PZ
415{
416 struct wake_q_node *node = &task->wake_q;
417
418 /*
 419 * Atomically grab the task; if ->wake_q is already !nil, it means it is
 420 * already queued (either by us or someone else) and will get the
421 * wakeup due to that.
422 *
4c4e3731
PZ
423 * In order to ensure that a pending wakeup will observe our pending
424 * state, even in the failed case, an explicit smp_mb() must be used.
76751049 425 */
4c4e3731 426 smp_mb__before_atomic();
87ff19cb 427 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
07879c6a 428 return false;
76751049
PZ
429
430 /*
431 * The head is context local, there can be no concurrency.
432 */
433 *head->lastp = node;
434 head->lastp = &node->next;
07879c6a
DB
435 return true;
436}
437
438/**
439 * wake_q_add() - queue a wakeup for 'later' waking.
440 * @head: the wake_q_head to add @task to
441 * @task: the task to queue for 'later' wakeup
442 *
443 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
444 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
445 * instantly.
446 *
447 * This function must be used as-if it were wake_up_process(); IOW the task
448 * must be ready to be woken at this location.
449 */
450void wake_q_add(struct wake_q_head *head, struct task_struct *task)
451{
452 if (__wake_q_add(head, task))
453 get_task_struct(task);
454}
455
456/**
457 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
458 * @head: the wake_q_head to add @task to
459 * @task: the task to queue for 'later' wakeup
460 *
461 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
462 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
463 * instantly.
464 *
465 * This function must be used as-if it were wake_up_process(); IOW the task
466 * must be ready to be woken at this location.
467 *
468 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
469 * that already hold reference to @task can call the 'safe' version and trust
470 * wake_q to do the right thing depending whether or not the @task is already
471 * queued for wakeup.
472 */
473void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
474{
475 if (!__wake_q_add(head, task))
476 put_task_struct(task);
76751049
PZ
477}
478
479void wake_up_q(struct wake_q_head *head)
480{
481 struct wake_q_node *node = head->first;
482
483 while (node != WAKE_Q_TAIL) {
484 struct task_struct *task;
485
486 task = container_of(node, struct task_struct, wake_q);
487 BUG_ON(!task);
d1ccc66d 488 /* Task can safely be re-inserted now: */
76751049
PZ
489 node = node->next;
490 task->wake_q.next = NULL;
491
492 /*
7696f991
AP
493 * wake_up_process() executes a full barrier, which pairs with
494 * the queueing in wake_q_add() so as not to miss wakeups.
76751049
PZ
495 */
496 wake_up_process(task);
497 put_task_struct(task);
498 }
499}
500
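wake_q is a context-local, single-linked list of tasks whose wakeups are deferred until wake_up_q(); a non-NULL ->next (including the tail sentinel) means the task is already queued somewhere. Here is a compact userspace model of that queueing discipline, single-threaded and with invented names, where printing stands in for the actual wakeup.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct wq_node { struct wq_node *next; };
#define WQ_TAIL ((struct wq_node *)0x1)	/* sentinel: queued, end of list */

struct wq_head { struct wq_node *first, **lastp; };
#define WQ_HEAD_INIT(h) { .first = WQ_TAIL, .lastp = &(h).first }

/* Queue @n unless it is already on some list. */
static bool wq_add(struct wq_head *h, struct wq_node *n)
{
	if (n->next)			/* already queued somewhere */
		return false;
	n->next = WQ_TAIL;
	*h->lastp = n;
	h->lastp = &n->next;
	return true;
}

static void wq_flush(struct wq_head *h)
{
	struct wq_node *n = h->first;

	while (n != WQ_TAIL) {
		struct wq_node *next = n->next;

		n->next = NULL;			/* node may be re-queued now  */
		printf("wake %p\n", (void *)n);	/* wake_up_process() stand-in */
		n = next;
	}
}

int main(void)
{
	struct wq_head head = WQ_HEAD_INIT(head);
	struct wq_node a = { NULL }, b = { NULL };

	wq_add(&head, &a);
	wq_add(&head, &b);
	wq_add(&head, &a);		/* duplicate add: ignored */
	wq_flush(&head);
	return 0;
}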
c24d20db 501/*
8875125e 502 * resched_curr - mark rq's current task 'to be rescheduled now'.
c24d20db
IM
503 *
504 * On UP this means the setting of the need_resched flag, on SMP it
505 * might also involve a cross-CPU call to trigger the scheduler on
506 * the target CPU.
507 */
8875125e 508void resched_curr(struct rq *rq)
c24d20db 509{
8875125e 510 struct task_struct *curr = rq->curr;
c24d20db
IM
511 int cpu;
512
8875125e 513 lockdep_assert_held(&rq->lock);
c24d20db 514
8875125e 515 if (test_tsk_need_resched(curr))
c24d20db
IM
516 return;
517
8875125e 518 cpu = cpu_of(rq);
fd99f91a 519
f27dde8d 520 if (cpu == smp_processor_id()) {
8875125e 521 set_tsk_need_resched(curr);
f27dde8d 522 set_preempt_need_resched();
c24d20db 523 return;
f27dde8d 524 }
c24d20db 525
8875125e 526 if (set_nr_and_not_polling(curr))
c24d20db 527 smp_send_reschedule(cpu);
dfc68f29
AL
528 else
529 trace_sched_wake_idle_without_ipi(cpu);
c24d20db
IM
530}
531
029632fb 532void resched_cpu(int cpu)
c24d20db
IM
533{
534 struct rq *rq = cpu_rq(cpu);
535 unsigned long flags;
536
7c2102e5 537 raw_spin_lock_irqsave(&rq->lock, flags);
a0982dfa
PM
538 if (cpu_online(cpu) || cpu == smp_processor_id())
539 resched_curr(rq);
05fa785c 540 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 541}
06d8308c 542
b021fe3e 543#ifdef CONFIG_SMP
3451d024 544#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2 545/*
d1ccc66d
IM
546 * In the semi idle case, use the nearest busy CPU for migrating timers
547 * from an idle CPU. This is good for power-savings.
83cd4fe2
VP
548 *
549 * We don't do similar optimization for completely idle system, as
d1ccc66d
IM
550 * selecting an idle CPU will add more delays to the timers than intended
 551 * (as that CPU's timer base may not be up to date wrt jiffies etc.).
83cd4fe2 552 */
bc7a34b8 553int get_nohz_timer_target(void)
83cd4fe2 554{
bc7a34b8 555 int i, cpu = smp_processor_id();
83cd4fe2
VP
556 struct sched_domain *sd;
557
de201559 558 if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
6201b4d6
VK
559 return cpu;
560
057f3fad 561 rcu_read_lock();
83cd4fe2 562 for_each_domain(cpu, sd) {
057f3fad 563 for_each_cpu(i, sched_domain_span(sd)) {
44496922
WL
564 if (cpu == i)
565 continue;
566
de201559 567 if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
057f3fad
PZ
568 cpu = i;
569 goto unlock;
570 }
571 }
83cd4fe2 572 }
9642d18e 573
de201559
FW
574 if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
575 cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
057f3fad
PZ
576unlock:
577 rcu_read_unlock();
83cd4fe2
VP
578 return cpu;
579}
d1ccc66d 580
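get_nohz_timer_target() prefers the current CPU when it is busy and allowed to handle timers, otherwise walks outward through the scheduler domains for the nearest busy housekeeping CPU, and finally falls back to any housekeeping CPU. The userspace sketch below keeps only that selection order over flat arrays; the data structures are stand-ins and the "nearest" walk is reduced to a simple linear scan.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool idle[NR_CPUS]         = { true,  false, true,  false };
static bool housekeeping[NR_CPUS] = { true,  true,  false, true  };

static int pick_timer_cpu(int this_cpu)
{
	int cpu;

	if (!idle[this_cpu] && housekeeping[this_cpu])
		return this_cpu;		/* cheapest choice */

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* "nearest busy" stand-in */
		if (cpu != this_cpu && !idle[cpu] && housekeeping[cpu])
			return cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* fallback: any housekeeping CPU */
		if (housekeeping[cpu])
			return cpu;

	return this_cpu;
}

int main(void)
{
	printf("timer target for cpu0: %d\n", pick_timer_cpu(0));
	return 0;
}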
06d8308c
TG
581/*
582 * When add_timer_on() enqueues a timer into the timer wheel of an
583 * idle CPU then this timer might expire before the next timer event
584 * which is scheduled to wake up that CPU. In case of a completely
585 * idle system the next event might even be infinite time into the
586 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
587 * leaves the inner idle loop so the newly added timer is taken into
588 * account when the CPU goes back to idle and evaluates the timer
589 * wheel for the next timer event.
590 */
1c20091e 591static void wake_up_idle_cpu(int cpu)
06d8308c
TG
592{
593 struct rq *rq = cpu_rq(cpu);
594
595 if (cpu == smp_processor_id())
596 return;
597
67b9ca70 598 if (set_nr_and_not_polling(rq->idle))
06d8308c 599 smp_send_reschedule(cpu);
dfc68f29
AL
600 else
601 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
602}
603
c5bfece2 604static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 605{
53c5fa16
FW
606 /*
607 * We just need the target to call irq_exit() and re-evaluate
608 * the next tick. The nohz full kick at least implies that.
609 * If needed we can still optimize that later with an
610 * empty IRQ.
611 */
379d9ecb
PM
612 if (cpu_is_offline(cpu))
613 return true; /* Don't try to wake offline CPUs. */
c5bfece2 614 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
615 if (cpu != smp_processor_id() ||
616 tick_nohz_tick_stopped())
53c5fa16 617 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
618 return true;
619 }
620
621 return false;
622}
623
379d9ecb
PM
624/*
625 * Wake up the specified CPU. If the CPU is going offline, it is the
626 * caller's responsibility to deal with the lost wakeup, for example,
627 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
628 */
1c20091e
FW
629void wake_up_nohz_cpu(int cpu)
630{
c5bfece2 631 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
632 wake_up_idle_cpu(cpu);
633}
634
ca38062e 635static inline bool got_nohz_idle_kick(void)
45bf76df 636{
1c792db7 637 int cpu = smp_processor_id();
873b4c65 638
b7031a02 639 if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK))
873b4c65
VG
640 return false;
641
642 if (idle_cpu(cpu) && !need_resched())
643 return true;
644
645 /*
646 * We can't run Idle Load Balance on this CPU for this time so we
647 * cancel it and clear NOHZ_BALANCE_KICK
648 */
b7031a02 649 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
873b4c65 650 return false;
45bf76df
IM
651}
652
3451d024 653#else /* CONFIG_NO_HZ_COMMON */
45bf76df 654
ca38062e 655static inline bool got_nohz_idle_kick(void)
2069dd75 656{
ca38062e 657 return false;
2069dd75
PZ
658}
659
3451d024 660#endif /* CONFIG_NO_HZ_COMMON */
d842de87 661
ce831b38 662#ifdef CONFIG_NO_HZ_FULL
76d92ac3 663bool sched_can_stop_tick(struct rq *rq)
ce831b38 664{
76d92ac3
FW
665 int fifo_nr_running;
666
667 /* Deadline tasks, even if single, need the tick */
668 if (rq->dl.dl_nr_running)
669 return false;
670
1e78cdbd 671 /*
2548d546
PZ
 672 * If there is more than one RR task, we need the tick to effect the
673 * actual RR behaviour.
1e78cdbd 674 */
76d92ac3
FW
675 if (rq->rt.rr_nr_running) {
676 if (rq->rt.rr_nr_running == 1)
677 return true;
678 else
679 return false;
1e78cdbd
RR
680 }
681
2548d546
PZ
682 /*
683 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
684 * forced preemption between FIFO tasks.
685 */
686 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
687 if (fifo_nr_running)
688 return true;
689
690 /*
691 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
692 * if there's more than one we need the tick for involuntary
693 * preemption.
694 */
695 if (rq->nr_running > 1)
541b8264 696 return false;
ce831b38 697
541b8264 698 return true;
ce831b38
FW
699}
700#endif /* CONFIG_NO_HZ_FULL */
6d6bc0ad 701#endif /* CONFIG_SMP */
18d95a28 702
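sched_can_stop_tick() reduces to a small decision table over the runnable-task counts: deadline tasks always need the tick, more than one round-robin task needs it for time slicing, FIFO-only runqueues do not, and more than one CFS task needs it for involuntary preemption. A userspace distillation of exactly that logic, with an invented struct in place of the runqueue counters:

#include <stdbool.h>
#include <stdio.h>

struct rq_counts {
	unsigned int dl_running;	/* SCHED_DEADLINE tasks   */
	unsigned int rr_running;	/* SCHED_RR tasks         */
	unsigned int rt_running;	/* all RT tasks (RR+FIFO) */
	unsigned int nr_running;	/* everything runnable    */
};

static bool can_stop_tick(const struct rq_counts *c)
{
	if (c->dl_running)
		return false;			/* deadline tasks need the tick  */
	if (c->rr_running)
		return c->rr_running == 1;	/* >1 RR task needs time slicing */
	if (c->rt_running - c->rr_running)
		return true;			/* FIFO only: no forced preemption */
	return c->nr_running <= 1;		/* >1 CFS task needs preemption  */
}

int main(void)
{
	struct rq_counts one_fifo = { 0, 0, 1, 1 };
	struct rq_counts two_cfs  = { 0, 0, 0, 2 };

	printf("%d %d\n", can_stop_tick(&one_fifo), can_stop_tick(&two_cfs));
	return 0;	/* prints "1 0" */
}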
a790de99
PT
703#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
704 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 705/*
8277434e
PT
706 * Iterate task_group tree rooted at *from, calling @down when first entering a
707 * node and @up when leaving it for the final time.
708 *
709 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 710 */
029632fb 711int walk_tg_tree_from(struct task_group *from,
8277434e 712 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
713{
714 struct task_group *parent, *child;
eb755805 715 int ret;
c09595f6 716
8277434e
PT
717 parent = from;
718
c09595f6 719down:
eb755805
PZ
720 ret = (*down)(parent, data);
721 if (ret)
8277434e 722 goto out;
c09595f6
PZ
723 list_for_each_entry_rcu(child, &parent->children, siblings) {
724 parent = child;
725 goto down;
726
727up:
728 continue;
729 }
eb755805 730 ret = (*up)(parent, data);
8277434e
PT
731 if (ret || parent == from)
732 goto out;
c09595f6
PZ
733
734 child = parent;
735 parent = parent->parent;
736 if (parent)
737 goto up;
8277434e 738out:
eb755805 739 return ret;
c09595f6
PZ
740}
741
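walk_tg_tree_from() is an iterative depth-first walk: @down is called on first entry to a node, @up when its subtree is exhausted, with no recursion. The standalone userspace version below reproduces the same control flow over an ordinary first-child/next-sibling tree; all types and names are invented for the example.

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *child, *sibling;	/* first-child / next-sibling */
};

typedef int (*visitor)(struct node *n, void *data);

/* Call @down on first entry to each node and @up when leaving it for the
 * final time, starting (and ending) at @from.  Stops early on non-zero. */
static int walk_from(struct node *from, visitor down, visitor up, void *data)
{
	struct node *n = from;
	int ret;

	for (;;) {
		ret = down(n, data);
		if (ret)
			return ret;
		if (n->child) {			/* descend first */
			n = n->child;
			continue;
		}
		for (;;) {			/* subtree done: unwind */
			ret = up(n, data);
			if (ret || n == from)
				return ret;
			if (n->sibling) {	/* then go sideways */
				n = n->sibling;
				break;
			}
			n = n->parent;		/* finally go back up */
		}
	}
}

static int print_down(struct node *n, void *d) { (void)d; printf("down %s\n", n->name); return 0; }
static int print_up(struct node *n, void *d)   { (void)d; printf("up   %s\n", n->name); return 0; }

int main(void)
{
	struct node root = { "root", NULL, NULL, NULL };
	struct node a = { "a", &root, NULL, NULL }, b = { "b", &root, NULL, NULL };

	root.child = &a;
	a.sibling = &b;
	return walk_from(&root, print_down, print_up, NULL);
}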
029632fb 742int tg_nop(struct task_group *tg, void *data)
eb755805 743{
e2b245f8 744 return 0;
eb755805 745}
18d95a28
PZ
746#endif
747
9059393e 748static void set_load_weight(struct task_struct *p, bool update_load)
45bf76df 749{
f05998d4
NR
750 int prio = p->static_prio - MAX_RT_PRIO;
751 struct load_weight *load = &p->se.load;
752
dd41f596
IM
753 /*
754 * SCHED_IDLE tasks get minimal weight:
755 */
1da1843f 756 if (task_has_idle_policy(p)) {
c8b28116 757 load->weight = scale_load(WEIGHT_IDLEPRIO);
f05998d4 758 load->inv_weight = WMULT_IDLEPRIO;
4a465e3e 759 p->se.runnable_weight = load->weight;
dd41f596
IM
760 return;
761 }
71f8bd46 762
9059393e
VG
763 /*
764 * SCHED_OTHER tasks have to update their load when changing their
765 * weight
766 */
767 if (update_load && p->sched_class == &fair_sched_class) {
768 reweight_task(p, prio);
769 } else {
770 load->weight = scale_load(sched_prio_to_weight[prio]);
771 load->inv_weight = sched_prio_to_wmult[prio];
4a465e3e 772 p->se.runnable_weight = load->weight;
9059393e 773 }
71f8bd46
IM
774}
775
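set_load_weight() maps the task's static priority into the sched_prio_to_weight[] table, where nice 0 corresponds to a weight of 1024 and each nice level changes a task's CPU share by roughly 25%. The small userspace sketch below reproduces the shape of that table from the 1.25 ratio; the real kernel table is precomputed and these values are only approximations of it.

/* build: cc weight.c -lm */
#include <math.h>
#include <stdio.h>

#define NICE_0_LOAD 1024.0

/* Approximate load weight for a nice level: each step is ~1.25x. */
static long weight_for_nice(int nice)
{
	return lround(NICE_0_LOAD / pow(1.25, nice));
}

int main(void)
{
	for (int nice = -20; nice <= 19; nice += 5)
		printf("nice %3d -> weight ~%ld\n", nice, weight_for_nice(nice));
	return 0;	/* nice -20 is 88761 in the real table, nice 19 is 15 */
}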
69842cba 776#ifdef CONFIG_UCLAMP_TASK
2480c093
PB
777/*
778 * Serializes updates of utilization clamp values
779 *
 780 * User-space (the slow path) triggers utilization clamp value updates which
 781 * can require updates to the scheduler's (fast-path) data structures used to
 782 * support enqueue/dequeue operations.
783 * While the per-CPU rq lock protects fast-path update operations, user-space
784 * requests are serialized using a mutex to reduce the risk of conflicting
785 * updates or API abuses.
786 */
787static DEFINE_MUTEX(uclamp_mutex);
788
e8f14172
PB
789/* Max allowed minimum utilization */
790unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
791
792/* Max allowed maximum utilization */
793unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
794
795/* All clamps are required to be less or equal than these values */
796static struct uclamp_se uclamp_default[UCLAMP_CNT];
69842cba
PB
797
798/* Integer rounded range for each bucket */
799#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
800
801#define for_each_clamp_id(clamp_id) \
802 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
803
804static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
805{
806 return clamp_value / UCLAMP_BUCKET_DELTA;
807}
808
60daf9c1
PB
809static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
810{
811 return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
812}
813
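The two helpers above quantize a clamp value in [0, SCHED_CAPACITY_SCALE] into one of UCLAMP_BUCKETS buckets and back to the bucket's base value. A quick userspace check of that arithmetic, assuming SCHED_CAPACITY_SCALE=1024 and 5 buckets (the bucket count is a Kconfig choice, so treat it as an assumption here):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_BUCKETS		5
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define UCLAMP_BUCKET_DELTA	DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

static unsigned int bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

int main(void)
{
	unsigned int v[] = { 0, 100, 205, 512, 1024 };

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("value %4u -> bucket %u (base %u)\n", v[i],
		       bucket_id(v[i]), bucket_id(v[i]) * UCLAMP_BUCKET_DELTA);
	return 0;
}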
7763baac 814static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
69842cba
PB
815{
816 if (clamp_id == UCLAMP_MIN)
817 return 0;
818 return SCHED_CAPACITY_SCALE;
819}
820
a509a7cd
PB
821static inline void uclamp_se_set(struct uclamp_se *uc_se,
822 unsigned int value, bool user_defined)
69842cba
PB
823{
824 uc_se->value = value;
825 uc_se->bucket_id = uclamp_bucket_id(value);
a509a7cd 826 uc_se->user_defined = user_defined;
69842cba
PB
827}
828
e496187d 829static inline unsigned int
0413d7f3 830uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
831 unsigned int clamp_value)
832{
833 /*
834 * Avoid blocked utilization pushing up the frequency when we go
835 * idle (which drops the max-clamp) by retaining the last known
836 * max-clamp.
837 */
838 if (clamp_id == UCLAMP_MAX) {
839 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
840 return clamp_value;
841 }
842
843 return uclamp_none(UCLAMP_MIN);
844}
845
0413d7f3 846static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
847 unsigned int clamp_value)
848{
849 /* Reset max-clamp retention only on idle exit */
850 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
851 return;
852
853 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
854}
855
69842cba 856static inline
7763baac 857unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
0413d7f3 858 unsigned int clamp_value)
69842cba
PB
859{
860 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
861 int bucket_id = UCLAMP_BUCKETS - 1;
862
863 /*
 864 * Since both min and max clamps are max-aggregated, find the
 865 * topmost bucket with tasks in it.
866 */
867 for ( ; bucket_id >= 0; bucket_id--) {
868 if (!bucket[bucket_id].tasks)
869 continue;
870 return bucket[bucket_id].value;
871 }
872
873 /* No tasks -- default clamp values */
e496187d 874 return uclamp_idle_value(rq, clamp_id, clamp_value);
69842cba
PB
875}
876
3eac870a 877static inline struct uclamp_se
0413d7f3 878uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
3eac870a
PB
879{
880 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
881#ifdef CONFIG_UCLAMP_TASK_GROUP
882 struct uclamp_se uc_max;
883
884 /*
885 * Tasks in autogroups or root task group will be
886 * restricted by system defaults.
887 */
888 if (task_group_is_autogroup(task_group(p)))
889 return uc_req;
890 if (task_group(p) == &root_task_group)
891 return uc_req;
892
893 uc_max = task_group(p)->uclamp[clamp_id];
894 if (uc_req.value > uc_max.value || !uc_req.user_defined)
895 return uc_max;
896#endif
897
898 return uc_req;
899}
900
e8f14172
PB
901/*
902 * The effective clamp bucket index of a task depends on, by increasing
903 * priority:
904 * - the task specific clamp value, when explicitly requested from userspace
3eac870a
PB
905 * - the task group effective clamp value, for tasks not either in the root
906 * group or in an autogroup
e8f14172
PB
907 * - the system default clamp value, defined by the sysadmin
908 */
909static inline struct uclamp_se
0413d7f3 910uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
e8f14172 911{
3eac870a 912 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
e8f14172
PB
913 struct uclamp_se uc_max = uclamp_default[clamp_id];
914
915 /* System default restrictions always apply */
916 if (unlikely(uc_req.value > uc_max.value))
917 return uc_max;
918
919 return uc_req;
920}
921
7763baac 922unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
9d20ad7d
PB
923{
924 struct uclamp_se uc_eff;
925
926 /* Task currently refcounted: use back-annotated (effective) value */
927 if (p->uclamp[clamp_id].active)
928 return p->uclamp[clamp_id].value;
929
930 uc_eff = uclamp_eff_get(p, clamp_id);
931
932 return uc_eff.value;
933}
934
69842cba
PB
935/*
936 * When a task is enqueued on a rq, the clamp bucket currently defined by the
937 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
938 * updates the rq's clamp value if required.
60daf9c1
PB
939 *
 940 * Tasks can have a task-specific value requested from user-space; track
 941 * within each bucket the maximum value for the tasks refcounted in it.
942 * This "local max aggregation" allows to track the exact "requested" value
943 * for each bucket when all its RUNNABLE tasks require the same clamp.
69842cba
PB
944 */
945static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
0413d7f3 946 enum uclamp_id clamp_id)
69842cba
PB
947{
948 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
949 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
950 struct uclamp_bucket *bucket;
951
952 lockdep_assert_held(&rq->lock);
953
e8f14172
PB
954 /* Update task effective clamp */
955 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
956
69842cba
PB
957 bucket = &uc_rq->bucket[uc_se->bucket_id];
958 bucket->tasks++;
e8f14172 959 uc_se->active = true;
69842cba 960
e496187d
PB
961 uclamp_idle_reset(rq, clamp_id, uc_se->value);
962
60daf9c1
PB
963 /*
964 * Local max aggregation: rq buckets always track the max
965 * "requested" clamp value of its RUNNABLE tasks.
966 */
967 if (bucket->tasks == 1 || uc_se->value > bucket->value)
968 bucket->value = uc_se->value;
969
69842cba 970 if (uc_se->value > READ_ONCE(uc_rq->value))
60daf9c1 971 WRITE_ONCE(uc_rq->value, uc_se->value);
69842cba
PB
972}
973
974/*
975 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
976 * is released. If this is the last task reference counting the rq's max
977 * active clamp value, then the rq's clamp value is updated.
978 *
979 * Both refcounted tasks and rq's cached clamp values are expected to be
980 * always valid. If it's detected they are not, as defensive programming,
981 * enforce the expected state and warn.
982 */
983static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
0413d7f3 984 enum uclamp_id clamp_id)
69842cba
PB
985{
986 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
987 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
988 struct uclamp_bucket *bucket;
e496187d 989 unsigned int bkt_clamp;
69842cba
PB
990 unsigned int rq_clamp;
991
992 lockdep_assert_held(&rq->lock);
993
994 bucket = &uc_rq->bucket[uc_se->bucket_id];
995 SCHED_WARN_ON(!bucket->tasks);
996 if (likely(bucket->tasks))
997 bucket->tasks--;
e8f14172 998 uc_se->active = false;
69842cba 999
60daf9c1
PB
1000 /*
 1001 * Keep "local max aggregation" simple and accept that we may (possibly)
1002 * overboost some RUNNABLE tasks in the same bucket.
1003 * The rq clamp bucket value is reset to its base value whenever
1004 * there are no more RUNNABLE tasks refcounting it.
1005 */
69842cba
PB
1006 if (likely(bucket->tasks))
1007 return;
1008
1009 rq_clamp = READ_ONCE(uc_rq->value);
1010 /*
1011 * Defensive programming: this should never happen. If it happens,
1012 * e.g. due to future modification, warn and fixup the expected value.
1013 */
1014 SCHED_WARN_ON(bucket->value > rq_clamp);
e496187d
PB
1015 if (bucket->value >= rq_clamp) {
1016 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1017 WRITE_ONCE(uc_rq->value, bkt_clamp);
1018 }
69842cba
PB
1019}
1020
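Together, uclamp_rq_inc_id()/uclamp_rq_dec_id() maintain a per-bucket refcount plus a cached per-rq maximum, so enqueue is O(1) and only dequeuing the last task of an occupied bucket has to rescan. The compact userspace model below keeps just that bookkeeping: it is single-threaded, uses invented types, always rescans when a bucket empties, and ignores the idle-retention logic, so it is a simplification of the kernel code rather than a copy of it.

#include <stdio.h>

#define NBUCKETS	5
#define BUCKET_DELTA	205	/* assumes SCHED_CAPACITY_SCALE=1024, 5 buckets */

struct clamp_rq {
	struct { unsigned int tasks, value; } bucket[NBUCKETS];
	unsigned int value;	/* cached max over all occupied buckets */
};

static void clamp_inc(struct clamp_rq *cr, unsigned int req)
{
	unsigned int id = req / BUCKET_DELTA;

	cr->bucket[id].tasks++;
	if (cr->bucket[id].tasks == 1 || req > cr->bucket[id].value)
		cr->bucket[id].value = req;	/* local max aggregation */
	if (req > cr->value)
		cr->value = req;
}

static void clamp_dec(struct clamp_rq *cr, unsigned int req)
{
	unsigned int id = req / BUCKET_DELTA;
	int i;

	if (--cr->bucket[id].tasks)
		return;			/* bucket still populated */

	/* Last task in its bucket: recompute the rq-wide max. */
	cr->value = 0;
	for (i = NBUCKETS - 1; i >= 0; i--) {
		if (cr->bucket[i].tasks) {
			cr->value = cr->bucket[i].value;
			break;
		}
	}
}

int main(void)
{
	struct clamp_rq cr = { 0 };

	clamp_inc(&cr, 300);
	clamp_inc(&cr, 800);
	printf("max=%u\n", cr.value);	/* 800 */
	clamp_dec(&cr, 800);
	printf("max=%u\n", cr.value);	/* back to 300 */
	return 0;
}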
1021static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1022{
0413d7f3 1023 enum uclamp_id clamp_id;
69842cba
PB
1024
1025 if (unlikely(!p->sched_class->uclamp_enabled))
1026 return;
1027
1028 for_each_clamp_id(clamp_id)
1029 uclamp_rq_inc_id(rq, p, clamp_id);
e496187d
PB
1030
1031 /* Reset clamp idle holding when there is one RUNNABLE task */
1032 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1033 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
69842cba
PB
1034}
1035
1036static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1037{
0413d7f3 1038 enum uclamp_id clamp_id;
69842cba
PB
1039
1040 if (unlikely(!p->sched_class->uclamp_enabled))
1041 return;
1042
1043 for_each_clamp_id(clamp_id)
1044 uclamp_rq_dec_id(rq, p, clamp_id);
1045}
1046
babbe170 1047static inline void
0413d7f3 1048uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
babbe170
PB
1049{
1050 struct rq_flags rf;
1051 struct rq *rq;
1052
1053 /*
1054 * Lock the task and the rq where the task is (or was) queued.
1055 *
1056 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1057 * price to pay to safely serialize util_{min,max} updates with
1058 * enqueues, dequeues and migration operations.
1059 * This is the same locking schema used by __set_cpus_allowed_ptr().
1060 */
1061 rq = task_rq_lock(p, &rf);
1062
1063 /*
1064 * Setting the clamp bucket is serialized by task_rq_lock().
1065 * If the task is not yet RUNNABLE and its task_struct is not
1066 * affecting a valid clamp bucket, the next time it's enqueued,
1067 * it will already see the updated clamp bucket value.
1068 */
6e1ff077 1069 if (p->uclamp[clamp_id].active) {
babbe170
PB
1070 uclamp_rq_dec_id(rq, p, clamp_id);
1071 uclamp_rq_inc_id(rq, p, clamp_id);
1072 }
1073
1074 task_rq_unlock(rq, p, &rf);
1075}
1076
e3b8b6a0 1077#ifdef CONFIG_UCLAMP_TASK_GROUP
babbe170
PB
1078static inline void
1079uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1080 unsigned int clamps)
1081{
0413d7f3 1082 enum uclamp_id clamp_id;
babbe170
PB
1083 struct css_task_iter it;
1084 struct task_struct *p;
babbe170
PB
1085
1086 css_task_iter_start(css, 0, &it);
1087 while ((p = css_task_iter_next(&it))) {
1088 for_each_clamp_id(clamp_id) {
1089 if ((0x1 << clamp_id) & clamps)
1090 uclamp_update_active(p, clamp_id);
1091 }
1092 }
1093 css_task_iter_end(&it);
1094}
1095
7274a5c1
PB
1096static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1097static void uclamp_update_root_tg(void)
1098{
1099 struct task_group *tg = &root_task_group;
1100
1101 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1102 sysctl_sched_uclamp_util_min, false);
1103 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1104 sysctl_sched_uclamp_util_max, false);
1105
1106 rcu_read_lock();
1107 cpu_util_update_eff(&root_task_group.css);
1108 rcu_read_unlock();
1109}
1110#else
1111static void uclamp_update_root_tg(void) { }
1112#endif
1113
e8f14172
PB
1114int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1115 void __user *buffer, size_t *lenp,
1116 loff_t *ppos)
1117{
7274a5c1 1118 bool update_root_tg = false;
e8f14172 1119 int old_min, old_max;
e8f14172
PB
1120 int result;
1121
2480c093 1122 mutex_lock(&uclamp_mutex);
e8f14172
PB
1123 old_min = sysctl_sched_uclamp_util_min;
1124 old_max = sysctl_sched_uclamp_util_max;
1125
1126 result = proc_dointvec(table, write, buffer, lenp, ppos);
1127 if (result)
1128 goto undo;
1129 if (!write)
1130 goto done;
1131
1132 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1133 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
1134 result = -EINVAL;
1135 goto undo;
1136 }
1137
1138 if (old_min != sysctl_sched_uclamp_util_min) {
1139 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
a509a7cd 1140 sysctl_sched_uclamp_util_min, false);
7274a5c1 1141 update_root_tg = true;
e8f14172
PB
1142 }
1143 if (old_max != sysctl_sched_uclamp_util_max) {
1144 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
a509a7cd 1145 sysctl_sched_uclamp_util_max, false);
7274a5c1 1146 update_root_tg = true;
e8f14172
PB
1147 }
1148
7274a5c1
PB
1149 if (update_root_tg)
1150 uclamp_update_root_tg();
1151
e8f14172 1152 /*
7274a5c1
PB
1153 * We update all RUNNABLE tasks only when task groups are in use.
1154 * Otherwise, keep it simple and do just a lazy update at each next
1155 * task enqueue time.
e8f14172 1156 */
7274a5c1 1157
e8f14172
PB
1158 goto done;
1159
1160undo:
1161 sysctl_sched_uclamp_util_min = old_min;
1162 sysctl_sched_uclamp_util_max = old_max;
1163done:
2480c093 1164 mutex_unlock(&uclamp_mutex);
e8f14172
PB
1165
1166 return result;
1167}
1168
a509a7cd
PB
1169static int uclamp_validate(struct task_struct *p,
1170 const struct sched_attr *attr)
1171{
1172 unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
1173 unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;
1174
1175 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
1176 lower_bound = attr->sched_util_min;
1177 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
1178 upper_bound = attr->sched_util_max;
1179
1180 if (lower_bound > upper_bound)
1181 return -EINVAL;
1182 if (upper_bound > SCHED_CAPACITY_SCALE)
1183 return -EINVAL;
1184
1185 return 0;
1186}
1187
1188static void __setscheduler_uclamp(struct task_struct *p,
1189 const struct sched_attr *attr)
1190{
0413d7f3 1191 enum uclamp_id clamp_id;
1a00d999
PB
1192
1193 /*
1194 * On scheduling class change, reset to default clamps for tasks
1195 * without a task-specific value.
1196 */
1197 for_each_clamp_id(clamp_id) {
1198 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1199 unsigned int clamp_value = uclamp_none(clamp_id);
1200
1201 /* Keep using defined clamps across class changes */
1202 if (uc_se->user_defined)
1203 continue;
1204
1205 /* By default, RT tasks always get 100% boost */
1206 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1207 clamp_value = uclamp_none(UCLAMP_MAX);
1208
1209 uclamp_se_set(uc_se, clamp_value, false);
1210 }
1211
a509a7cd
PB
1212 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1213 return;
1214
1215 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1216 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1217 attr->sched_util_min, true);
1218 }
1219
1220 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1221 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1222 attr->sched_util_max, true);
1223 }
1224}
1225
e8f14172
PB
1226static void uclamp_fork(struct task_struct *p)
1227{
0413d7f3 1228 enum uclamp_id clamp_id;
e8f14172
PB
1229
1230 for_each_clamp_id(clamp_id)
1231 p->uclamp[clamp_id].active = false;
a87498ac
PB
1232
1233 if (likely(!p->sched_reset_on_fork))
1234 return;
1235
1236 for_each_clamp_id(clamp_id) {
1a00d999
PB
1237 unsigned int clamp_value = uclamp_none(clamp_id);
1238
1239 /* By default, RT tasks always get 100% boost */
1240 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1241 clamp_value = uclamp_none(UCLAMP_MAX);
1242
1243 uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
a87498ac 1244 }
e8f14172
PB
1245}
1246
69842cba
PB
1247static void __init init_uclamp(void)
1248{
e8f14172 1249 struct uclamp_se uc_max = {};
0413d7f3 1250 enum uclamp_id clamp_id;
69842cba
PB
1251 int cpu;
1252
2480c093
PB
1253 mutex_init(&uclamp_mutex);
1254
e496187d 1255 for_each_possible_cpu(cpu) {
69842cba 1256 memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq));
e496187d
PB
1257 cpu_rq(cpu)->uclamp_flags = 0;
1258 }
69842cba 1259
69842cba 1260 for_each_clamp_id(clamp_id) {
e8f14172 1261 uclamp_se_set(&init_task.uclamp_req[clamp_id],
a509a7cd 1262 uclamp_none(clamp_id), false);
69842cba 1263 }
e8f14172
PB
1264
1265 /* System defaults allow max clamp values for both indexes */
a509a7cd 1266 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2480c093 1267 for_each_clamp_id(clamp_id) {
e8f14172 1268 uclamp_default[clamp_id] = uc_max;
2480c093
PB
1269#ifdef CONFIG_UCLAMP_TASK_GROUP
1270 root_task_group.uclamp_req[clamp_id] = uc_max;
0b60ba2d 1271 root_task_group.uclamp[clamp_id] = uc_max;
2480c093
PB
1272#endif
1273 }
69842cba
PB
1274}
1275
1276#else /* CONFIG_UCLAMP_TASK */
1277static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1278static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
a509a7cd
PB
1279static inline int uclamp_validate(struct task_struct *p,
1280 const struct sched_attr *attr)
1281{
1282 return -EOPNOTSUPP;
1283}
1284static void __setscheduler_uclamp(struct task_struct *p,
1285 const struct sched_attr *attr) { }
e8f14172 1286static inline void uclamp_fork(struct task_struct *p) { }
69842cba
PB
1287static inline void init_uclamp(void) { }
1288#endif /* CONFIG_UCLAMP_TASK */
1289
1de64443 1290static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 1291{
0a67d1ee
PZ
1292 if (!(flags & ENQUEUE_NOCLOCK))
1293 update_rq_clock(rq);
1294
eb414681 1295 if (!(flags & ENQUEUE_RESTORE)) {
1de64443 1296 sched_info_queued(rq, p);
eb414681
JW
1297 psi_enqueue(p, flags & ENQUEUE_WAKEUP);
1298 }
0a67d1ee 1299
69842cba 1300 uclamp_rq_inc(rq, p);
371fd7e7 1301 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
1302}
1303
1de64443 1304static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 1305{
0a67d1ee
PZ
1306 if (!(flags & DEQUEUE_NOCLOCK))
1307 update_rq_clock(rq);
1308
eb414681 1309 if (!(flags & DEQUEUE_SAVE)) {
1de64443 1310 sched_info_dequeued(rq, p);
eb414681
JW
1311 psi_dequeue(p, flags & DEQUEUE_SLEEP);
1312 }
0a67d1ee 1313
69842cba 1314 uclamp_rq_dec(rq, p);
371fd7e7 1315 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
1316}
1317
029632fb 1318void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
1319{
1320 if (task_contributes_to_load(p))
1321 rq->nr_uninterruptible--;
1322
371fd7e7 1323 enqueue_task(rq, p, flags);
7dd77884
PZ
1324
1325 p->on_rq = TASK_ON_RQ_QUEUED;
1e3c88bd
PZ
1326}
1327
029632fb 1328void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 1329{
7dd77884
PZ
1330 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
1331
1e3c88bd
PZ
1332 if (task_contributes_to_load(p))
1333 rq->nr_uninterruptible++;
1334
371fd7e7 1335 dequeue_task(rq, p, flags);
1e3c88bd
PZ
1336}
1337
14531189 1338/*
dd41f596 1339 * __normal_prio - return the priority that is based on the static prio
14531189 1340 */
14531189
IM
1341static inline int __normal_prio(struct task_struct *p)
1342{
dd41f596 1343 return p->static_prio;
14531189
IM
1344}
1345
b29739f9
IM
1346/*
1347 * Calculate the expected normal priority: i.e. priority
1348 * without taking RT-inheritance into account. Might be
1349 * boosted by interactivity modifiers. Changes upon fork,
1350 * setprio syscalls, and whenever the interactivity
1351 * estimator recalculates.
1352 */
36c8b586 1353static inline int normal_prio(struct task_struct *p)
b29739f9
IM
1354{
1355 int prio;
1356
aab03e05
DF
1357 if (task_has_dl_policy(p))
1358 prio = MAX_DL_PRIO-1;
1359 else if (task_has_rt_policy(p))
b29739f9
IM
1360 prio = MAX_RT_PRIO-1 - p->rt_priority;
1361 else
1362 prio = __normal_prio(p);
1363 return prio;
1364}
1365
1366/*
1367 * Calculate the current priority, i.e. the priority
1368 * taken into account by the scheduler. This value might
1369 * be boosted by RT tasks, or might be boosted by
1370 * interactivity modifiers. Will be RT if the task got
1371 * RT-boosted. If not then it returns p->normal_prio.
1372 */
36c8b586 1373static int effective_prio(struct task_struct *p)
b29739f9
IM
1374{
1375 p->normal_prio = normal_prio(p);
1376 /*
1377 * If we are RT tasks or we were boosted to RT priority,
1378 * keep the priority unchanged. Otherwise, update priority
1379 * to the normal priority:
1380 */
1381 if (!rt_prio(p->prio))
1382 return p->normal_prio;
1383 return p->prio;
1384}
1385
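normal_prio() collapses the scheduling classes onto one number line: deadline tasks sit at MAX_DL_PRIO-1 (i.e. -1), RT tasks map their rt_priority into 0..98, and fair tasks use their nice-derived static priority of 100..139. The userspace sketch below reproduces that mapping with the usual constants (MAX_RT_PRIO=100, nice 0 = 120); the enum and helper are illustrative, not kernel definitions.

#include <stdio.h>

#define MAX_RT_PRIO	100
#define MAX_DL_PRIO	0
#define DEFAULT_PRIO	(MAX_RT_PRIO + 20)	/* nice 0 -> 120 */

enum policy { FAIR, RT, DL };

/* "Normal" priority: lower number means more important. */
static int normal_prio_demo(enum policy pol, int rt_priority, int nice)
{
	switch (pol) {
	case DL:   return MAX_DL_PRIO - 1;			/* -1      */
	case RT:   return MAX_RT_PRIO - 1 - rt_priority;	/* 0..98   */
	default:   return DEFAULT_PRIO + nice;			/* 100..139 */
	}
}

int main(void)
{
	printf("deadline        -> %d\n", normal_prio_demo(DL, 0, 0));
	printf("SCHED_FIFO 50   -> %d\n", normal_prio_demo(RT, 50, 0));
	printf("SCHED_OTHER -5  -> %d\n", normal_prio_demo(FAIR, 0, -5));
	return 0;
}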
1da177e4
LT
1386/**
1387 * task_curr - is this task currently executing on a CPU?
1388 * @p: the task in question.
e69f6186
YB
1389 *
1390 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 1391 */
36c8b586 1392inline int task_curr(const struct task_struct *p)
1da177e4
LT
1393{
1394 return cpu_curr(task_cpu(p)) == p;
1395}
1396
67dfa1b7 1397/*
4c9a4bc8
PZ
1398 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1399 * use the balance_callback list if you want balancing.
1400 *
1401 * this means any call to check_class_changed() must be followed by a call to
1402 * balance_callback().
67dfa1b7 1403 */
cb469845
SR
1404static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1405 const struct sched_class *prev_class,
da7a735e 1406 int oldprio)
cb469845
SR
1407{
1408 if (prev_class != p->sched_class) {
1409 if (prev_class->switched_from)
da7a735e 1410 prev_class->switched_from(rq, p);
4c9a4bc8 1411
da7a735e 1412 p->sched_class->switched_to(rq, p);
2d3d891d 1413 } else if (oldprio != p->prio || dl_task(p))
da7a735e 1414 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
1415}
1416
029632fb 1417void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405
PZ
1418{
1419 const struct sched_class *class;
1420
1421 if (p->sched_class == rq->curr->sched_class) {
1422 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1423 } else {
1424 for_each_class(class) {
1425 if (class == rq->curr->sched_class)
1426 break;
1427 if (class == p->sched_class) {
8875125e 1428 resched_curr(rq);
1e5a7405
PZ
1429 break;
1430 }
1431 }
1432 }
1433
1434 /*
1435 * A queue event has occurred, and we're going to schedule. In
1436 * this case, we can save a useless back to back clock update.
1437 */
da0c1e65 1438 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
adcc8da8 1439 rq_clock_skip_update(rq);
1e5a7405
PZ
1440}
1441
1da177e4 1442#ifdef CONFIG_SMP
175f0e25
PZ
1443
1444static inline bool is_per_cpu_kthread(struct task_struct *p)
1445{
1446 if (!(p->flags & PF_KTHREAD))
1447 return false;
1448
1449 if (p->nr_cpus_allowed != 1)
1450 return false;
1451
1452 return true;
1453}
1454
1455/*
bee98539 1456 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
175f0e25
PZ
1457 * __set_cpus_allowed_ptr() and select_fallback_rq().
1458 */
1459static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1460{
3bd37062 1461 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
175f0e25
PZ
1462 return false;
1463
1464 if (is_per_cpu_kthread(p))
1465 return cpu_online(cpu);
1466
1467 return cpu_active(cpu);
1468}
1469
5cc389bc
PZ
1470/*
1471 * This is how migration works:
1472 *
1473 * 1) we invoke migration_cpu_stop() on the target CPU using
1474 * stop_one_cpu().
1475 * 2) stopper starts to run (implicitly forcing the migrated thread
1476 * off the CPU)
1477 * 3) it checks whether the migrated task is still in the wrong runqueue.
1478 * 4) if it's in the wrong runqueue then the migration thread removes
1479 * it and puts it into the right queue.
1480 * 5) stopper completes and stop_one_cpu() returns and the migration
1481 * is done.
1482 */
1483
1484/*
1485 * move_queued_task - move a queued task to new rq.
1486 *
1487 * Returns (locked) new rq. Old rq's lock is released.
1488 */
8a8c69c3
PZ
1489static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
1490 struct task_struct *p, int new_cpu)
5cc389bc 1491{
5cc389bc
PZ
1492 lockdep_assert_held(&rq->lock);
1493
c546951d 1494 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
15ff991e 1495 dequeue_task(rq, p, DEQUEUE_NOCLOCK);
5cc389bc 1496 set_task_cpu(p, new_cpu);
8a8c69c3 1497 rq_unlock(rq, rf);
5cc389bc
PZ
1498
1499 rq = cpu_rq(new_cpu);
1500
8a8c69c3 1501 rq_lock(rq, rf);
5cc389bc 1502 BUG_ON(task_cpu(p) != new_cpu);
5cc389bc 1503 enqueue_task(rq, p, 0);
3ea94de1 1504 p->on_rq = TASK_ON_RQ_QUEUED;
5cc389bc
PZ
1505 check_preempt_curr(rq, p, 0);
1506
1507 return rq;
1508}
1509
1510struct migration_arg {
1511 struct task_struct *task;
1512 int dest_cpu;
1513};
1514
1515/*
d1ccc66d 1516 * Move (not current) task off this CPU, onto the destination CPU. We're doing
5cc389bc
PZ
1517 * this because either it can't run here any more (set_cpus_allowed()
1518 * away from this CPU, or CPU going down), or because we're
1519 * attempting to rebalance this task on exec (sched_exec).
1520 *
1521 * So we race with normal scheduler movements, but that's OK, as long
1522 * as the task is no longer on this CPU.
5cc389bc 1523 */
8a8c69c3
PZ
1524static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1525 struct task_struct *p, int dest_cpu)
5cc389bc 1526{
5cc389bc 1527 /* Affinity changed (again). */
175f0e25 1528 if (!is_cpu_allowed(p, dest_cpu))
5e16bbc2 1529 return rq;
5cc389bc 1530
15ff991e 1531 update_rq_clock(rq);
8a8c69c3 1532 rq = move_queued_task(rq, rf, p, dest_cpu);
5e16bbc2
PZ
1533
1534 return rq;
5cc389bc
PZ
1535}
1536
1537/*
1538 * migration_cpu_stop - this will be executed by a highprio stopper thread
1539 * and performs thread migration by bumping thread off CPU then
1540 * 'pushing' onto another runqueue.
1541 */
1542static int migration_cpu_stop(void *data)
1543{
1544 struct migration_arg *arg = data;
5e16bbc2
PZ
1545 struct task_struct *p = arg->task;
1546 struct rq *rq = this_rq();
8a8c69c3 1547 struct rq_flags rf;
5cc389bc
PZ
1548
1549 /*
d1ccc66d
IM
1550 * The original target CPU might have gone down and we might
1551 * be on another CPU but it doesn't matter.
5cc389bc
PZ
1552 */
1553 local_irq_disable();
1554 /*
1555 * We need to explicitly wake pending tasks before running
3bd37062 1556 * __migrate_task() such that we will not miss enforcing cpus_ptr
5cc389bc
PZ
1557 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1558 */
1559 sched_ttwu_pending();
5e16bbc2
PZ
1560
1561 raw_spin_lock(&p->pi_lock);
8a8c69c3 1562 rq_lock(rq, &rf);
5e16bbc2
PZ
1563 /*
1564 * If task_rq(p) != rq, it cannot be migrated here, because we're
1565 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1566 * we're holding p->pi_lock.
1567 */
bf89a304
CC
1568 if (task_rq(p) == rq) {
1569 if (task_on_rq_queued(p))
8a8c69c3 1570 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
bf89a304
CC
1571 else
1572 p->wake_cpu = arg->dest_cpu;
1573 }
8a8c69c3 1574 rq_unlock(rq, &rf);
5e16bbc2
PZ
1575 raw_spin_unlock(&p->pi_lock);
1576
5cc389bc
PZ
1577 local_irq_enable();
1578 return 0;
1579}
1580
c5b28038
PZ
1581/*
1582 * sched_class::set_cpus_allowed must do the below, but is not required to
1583 * actually call this function.
1584 */
1585void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
5cc389bc 1586{
3bd37062 1587 cpumask_copy(&p->cpus_mask, new_mask);
5cc389bc
PZ
1588 p->nr_cpus_allowed = cpumask_weight(new_mask);
1589}
1590
c5b28038
PZ
1591void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1592{
6c37067e
PZ
1593 struct rq *rq = task_rq(p);
1594 bool queued, running;
1595
c5b28038 1596 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
1597
1598 queued = task_on_rq_queued(p);
1599 running = task_current(rq, p);
1600
1601 if (queued) {
1602 /*
1603 * Because __kthread_bind() calls this on blocked tasks without
1604 * holding rq->lock.
1605 */
1606 lockdep_assert_held(&rq->lock);
7a57f32a 1607 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
6c37067e
PZ
1608 }
1609 if (running)
1610 put_prev_task(rq, p);
1611
c5b28038 1612 p->sched_class->set_cpus_allowed(p, new_mask);
6c37067e 1613
6c37067e 1614 if (queued)
7134b3e9 1615 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 1616 if (running)
03b7fad1 1617 set_next_task(rq, p);
c5b28038
PZ
1618}
1619
5cc389bc
PZ
1620/*
1621 * Change a given task's CPU affinity. Migrate the thread to a
1622 * proper CPU and schedule it away if the CPU it's executing on
1623 * is removed from the allowed bitmask.
1624 *
1625 * NOTE: the caller must have a valid reference to the task, the
1626 * task must not exit() & deallocate itself prematurely. The
1627 * call is not atomic; no spinlocks may be held.
1628 */
25834c73
PZ
1629static int __set_cpus_allowed_ptr(struct task_struct *p,
1630 const struct cpumask *new_mask, bool check)
5cc389bc 1631{
e9d867a6 1632 const struct cpumask *cpu_valid_mask = cpu_active_mask;
5cc389bc 1633 unsigned int dest_cpu;
eb580751
PZ
1634 struct rq_flags rf;
1635 struct rq *rq;
5cc389bc
PZ
1636 int ret = 0;
1637
eb580751 1638 rq = task_rq_lock(p, &rf);
a499c3ea 1639 update_rq_clock(rq);
5cc389bc 1640
e9d867a6
PZI
1641 if (p->flags & PF_KTHREAD) {
1642 /*
1643 * Kernel threads are allowed on online && !active CPUs
1644 */
1645 cpu_valid_mask = cpu_online_mask;
1646 }
1647
25834c73
PZ
1648 /*
1649 * Must re-check here, to close a race against __kthread_bind(),
1650 * sched_setaffinity() is not guaranteed to observe the flag.
1651 */
1652 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1653 ret = -EINVAL;
1654 goto out;
1655 }
1656
3bd37062 1657 if (cpumask_equal(p->cpus_ptr, new_mask))
5cc389bc
PZ
1658 goto out;
1659
714e501e
KS
1660 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
1661 if (dest_cpu >= nr_cpu_ids) {
5cc389bc
PZ
1662 ret = -EINVAL;
1663 goto out;
1664 }
1665
1666 do_set_cpus_allowed(p, new_mask);
1667
e9d867a6
PZI
1668 if (p->flags & PF_KTHREAD) {
1669 /*
1670 * For kernel threads that do indeed end up on online &&
d1ccc66d 1671 * !active we want to ensure they are strict per-CPU threads.
e9d867a6
PZI
1672 */
1673 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
1674 !cpumask_intersects(new_mask, cpu_active_mask) &&
1675 p->nr_cpus_allowed != 1);
1676 }
1677
5cc389bc
PZ
1678 /* Can the task run on the task's current CPU? If so, we're done */
1679 if (cpumask_test_cpu(task_cpu(p), new_mask))
1680 goto out;
1681
5cc389bc
PZ
1682 if (task_running(rq, p) || p->state == TASK_WAKING) {
1683 struct migration_arg arg = { p, dest_cpu };
1684 /* Need help from migration thread: drop lock and wait. */
eb580751 1685 task_rq_unlock(rq, p, &rf);
5cc389bc 1686 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5cc389bc 1687 return 0;
cbce1a68
PZ
1688 } else if (task_on_rq_queued(p)) {
1689 /*
1690 * OK, since we're going to drop the lock immediately
1691 * afterwards anyway.
1692 */
8a8c69c3 1693 rq = move_queued_task(rq, &rf, p, dest_cpu);
cbce1a68 1694 }
5cc389bc 1695out:
eb580751 1696 task_rq_unlock(rq, p, &rf);
5cc389bc
PZ
1697
1698 return ret;
1699}
25834c73
PZ
1700
1701int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1702{
1703 return __set_cpus_allowed_ptr(p, new_mask, false);
1704}
5cc389bc
PZ
1705EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1706
dd41f596 1707void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 1708{
e2912009
PZ
1709#ifdef CONFIG_SCHED_DEBUG
1710 /*
1711 * We should never call set_task_cpu() on a blocked task,
1712 * ttwu() will sort out the placement.
1713 */
077614ee 1714 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
e2336f6e 1715 !p->on_rq);
0122ec5b 1716
3ea94de1
JP
1717 /*
1718 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
1719 * because schedstat_wait_{start,end} rebase migrating task's wait_start
1720 * time relying on p->on_rq.
1721 */
1722 WARN_ON_ONCE(p->state == TASK_RUNNING &&
1723 p->sched_class == &fair_sched_class &&
1724 (p->on_rq && !task_on_rq_migrating(p)));
1725
0122ec5b 1726#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
1727 /*
1728 * The caller should hold either p->pi_lock or rq->lock, when changing
1729 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1730 *
1731 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 1732 * see task_group().
6c6c54e1
PZ
1733 *
1734 * Furthermore, all task_rq users should acquire both locks, see
1735 * task_rq_lock().
1736 */
0122ec5b
PZ
1737 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1738 lockdep_is_held(&task_rq(p)->lock)));
1739#endif
4ff9083b
PZ
1740 /*
1741 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
1742 */
1743 WARN_ON_ONCE(!cpu_online(new_cpu));
e2912009
PZ
1744#endif
1745
de1d7286 1746 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 1747
0c69774e 1748 if (task_cpu(p) != new_cpu) {
0a74bef8 1749 if (p->sched_class->migrate_task_rq)
1327237a 1750 p->sched_class->migrate_task_rq(p, new_cpu);
0c69774e 1751 p->se.nr_migrations++;
d7822b1e 1752 rseq_migrate(p);
ff303e66 1753 perf_event_task_migrate(p);
0c69774e 1754 }
dd41f596
IM
1755
1756 __set_task_cpu(p, new_cpu);
c65cc870
IM
1757}
1758
0ad4e3df 1759#ifdef CONFIG_NUMA_BALANCING
ac66f547
PZ
1760static void __migrate_swap_task(struct task_struct *p, int cpu)
1761{
da0c1e65 1762 if (task_on_rq_queued(p)) {
ac66f547 1763 struct rq *src_rq, *dst_rq;
8a8c69c3 1764 struct rq_flags srf, drf;
ac66f547
PZ
1765
1766 src_rq = task_rq(p);
1767 dst_rq = cpu_rq(cpu);
1768
8a8c69c3
PZ
1769 rq_pin_lock(src_rq, &srf);
1770 rq_pin_lock(dst_rq, &drf);
1771
ac66f547
PZ
1772 deactivate_task(src_rq, p, 0);
1773 set_task_cpu(p, cpu);
1774 activate_task(dst_rq, p, 0);
1775 check_preempt_curr(dst_rq, p, 0);
8a8c69c3
PZ
1776
1777 rq_unpin_lock(dst_rq, &drf);
1778 rq_unpin_lock(src_rq, &srf);
1779
ac66f547
PZ
1780 } else {
1781 /*
1782 * Task isn't running anymore; make it appear like we migrated
1783 * it before it went to sleep. This means on wakeup we make the
d1ccc66d 1784 * previous CPU our target instead of where it really is.
ac66f547
PZ
1785 */
1786 p->wake_cpu = cpu;
1787 }
1788}
1789
1790struct migration_swap_arg {
1791 struct task_struct *src_task, *dst_task;
1792 int src_cpu, dst_cpu;
1793};
1794
1795static int migrate_swap_stop(void *data)
1796{
1797 struct migration_swap_arg *arg = data;
1798 struct rq *src_rq, *dst_rq;
1799 int ret = -EAGAIN;
1800
62694cd5
PZ
1801 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1802 return -EAGAIN;
1803
ac66f547
PZ
1804 src_rq = cpu_rq(arg->src_cpu);
1805 dst_rq = cpu_rq(arg->dst_cpu);
1806
74602315
PZ
1807 double_raw_lock(&arg->src_task->pi_lock,
1808 &arg->dst_task->pi_lock);
ac66f547 1809 double_rq_lock(src_rq, dst_rq);
62694cd5 1810
ac66f547
PZ
1811 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1812 goto unlock;
1813
1814 if (task_cpu(arg->src_task) != arg->src_cpu)
1815 goto unlock;
1816
3bd37062 1817 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
ac66f547
PZ
1818 goto unlock;
1819
3bd37062 1820 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
ac66f547
PZ
1821 goto unlock;
1822
1823 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1824 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1825
1826 ret = 0;
1827
1828unlock:
1829 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
1830 raw_spin_unlock(&arg->dst_task->pi_lock);
1831 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
1832
1833 return ret;
1834}
1835
1836/*
1837 * Cross migrate two tasks
1838 */
0ad4e3df
SD
1839int migrate_swap(struct task_struct *cur, struct task_struct *p,
1840 int target_cpu, int curr_cpu)
ac66f547
PZ
1841{
1842 struct migration_swap_arg arg;
1843 int ret = -EINVAL;
1844
ac66f547
PZ
1845 arg = (struct migration_swap_arg){
1846 .src_task = cur,
0ad4e3df 1847 .src_cpu = curr_cpu,
ac66f547 1848 .dst_task = p,
0ad4e3df 1849 .dst_cpu = target_cpu,
ac66f547
PZ
1850 };
1851
1852 if (arg.src_cpu == arg.dst_cpu)
1853 goto out;
1854
6acce3ef
PZ
1855 /*
1856 * These three tests are all lockless; this is OK since all of them
1857 * will be re-checked with proper locks held further down the line.
1858 */
ac66f547
PZ
1859 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1860 goto out;
1861
3bd37062 1862 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
ac66f547
PZ
1863 goto out;
1864
3bd37062 1865 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
ac66f547
PZ
1866 goto out;
1867
286549dc 1868 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
1869 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1870
1871out:
ac66f547
PZ
1872 return ret;
1873}
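
/*
 * [Editor's sketch, not part of the original file] Illustration of the
 * argument order of migrate_swap(); the NUMA balancer is the expected
 * caller, and numa_swap_with() is a hypothetical helper.
 */
static int numa_swap_with(struct task_struct *other)
{
	/* Trade places: current moves to @other's CPU, @other moves to ours. */
	return migrate_swap(current, other, task_cpu(other), task_cpu(current));
}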
0ad4e3df 1874#endif /* CONFIG_NUMA_BALANCING */
ac66f547 1875
1da177e4
LT
1876/*
1877 * wait_task_inactive - wait for a thread to unschedule.
1878 *
85ba2d86
RM
1879 * If @match_state is nonzero, it's the @p->state value just checked and
1880 * not expected to change. If it changes, i.e. @p might have woken up,
1881 * then return zero. When we succeed in waiting for @p to be off its CPU,
1882 * we return a positive number (its total switch count). If a second call
1883 * a short while later returns the same number, the caller can be sure that
1884 * @p has remained unscheduled the whole time.
1885 *
1da177e4
LT
1886 * The caller must ensure that the task *will* unschedule sometime soon,
1887 * else this function might spin for a *long* time. This function can't
1888 * be called with interrupts off, or it may introduce deadlock with
1889 * smp_call_function() if an IPI is sent by the same process we are
1890 * waiting to become inactive.
1891 */
85ba2d86 1892unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4 1893{
da0c1e65 1894 int running, queued;
eb580751 1895 struct rq_flags rf;
85ba2d86 1896 unsigned long ncsw;
70b97a7f 1897 struct rq *rq;
1da177e4 1898
3a5c359a
AK
1899 for (;;) {
1900 /*
1901 * We do the initial early heuristics without holding
1902 * any task-queue locks at all. We'll only try to get
1903 * the runqueue lock when things look like they will
1904 * work out!
1905 */
1906 rq = task_rq(p);
fa490cfd 1907
3a5c359a
AK
1908 /*
1909 * If the task is actively running on another CPU
1910 * still, just relax and busy-wait without holding
1911 * any locks.
1912 *
1913 * NOTE! Since we don't hold any locks, it's not
1914 * even sure that "rq" stays as the right runqueue!
1915 * But we don't care, since "task_running()" will
1916 * return false if the runqueue has changed and p
1917 * is actually now running somewhere else!
1918 */
85ba2d86
RM
1919 while (task_running(rq, p)) {
1920 if (match_state && unlikely(p->state != match_state))
1921 return 0;
3a5c359a 1922 cpu_relax();
85ba2d86 1923 }
fa490cfd 1924
3a5c359a
AK
1925 /*
1926 * Ok, time to look more closely! We need the rq
1927 * lock now, to be *sure*. If we're wrong, we'll
1928 * just go back and repeat.
1929 */
eb580751 1930 rq = task_rq_lock(p, &rf);
27a9da65 1931 trace_sched_wait_task(p);
3a5c359a 1932 running = task_running(rq, p);
da0c1e65 1933 queued = task_on_rq_queued(p);
85ba2d86 1934 ncsw = 0;
f31e11d8 1935 if (!match_state || p->state == match_state)
93dcf55f 1936 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
eb580751 1937 task_rq_unlock(rq, p, &rf);
fa490cfd 1938
85ba2d86
RM
1939 /*
1940 * If it changed from the expected state, bail out now.
1941 */
1942 if (unlikely(!ncsw))
1943 break;
1944
3a5c359a
AK
1945 /*
1946 * Was it really running after all now that we
1947 * checked with the proper locks actually held?
1948 *
1949 * Oops. Go back and try again..
1950 */
1951 if (unlikely(running)) {
1952 cpu_relax();
1953 continue;
1954 }
fa490cfd 1955
3a5c359a
AK
1956 /*
1957 * It's not enough that it's not actively running,
1958 * it must be off the runqueue _entirely_, and not
1959 * preempted!
1960 *
80dd99b3 1961 * So if it was still runnable (but just not actively
3a5c359a
AK
1962 * running right now), it's preempted, and we should
1963 * yield - it could be a while.
1964 */
da0c1e65 1965 if (unlikely(queued)) {
8b0e1953 1966 ktime_t to = NSEC_PER_SEC / HZ;
8eb90c30
TG
1967
1968 set_current_state(TASK_UNINTERRUPTIBLE);
1969 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
1970 continue;
1971 }
fa490cfd 1972
3a5c359a
AK
1973 /*
1974 * Ahh, all good. It wasn't running, and it wasn't
1975 * runnable, which means that it will never become
1976 * running in the future either. We're all done!
1977 */
1978 break;
1979 }
85ba2d86
RM
1980
1981 return ncsw;
1da177e4
LT
1982}
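
/*
 * [Editor's sketch, not part of the original file] The two-call contract
 * described above: a non-zero, unchanged switch count across two calls
 * means @p never ran in between. stayed_off_cpu() is a hypothetical helper.
 */
static bool stayed_off_cpu(struct task_struct *p)
{
	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);

	/* Zero means @p left TASK_UNINTERRUPTIBLE and may be running again. */
	if (!ncsw)
		return false;

	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
}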
1983
1984/***
1985 * kick_process - kick a running thread to enter/exit the kernel
1986 * @p: the to-be-kicked thread
1987 *
1988 * Cause a process which is running on another CPU to enter
1989 * kernel-mode, without any delay. (to get signals handled.)
1990 *
25985edc 1991 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
1992 * because all it wants to ensure is that the remote task enters
1993 * the kernel. If the IPI races and the task has been migrated
1994 * to another CPU then no harm is done and the purpose has been
1995 * achieved as well.
1996 */
36c8b586 1997void kick_process(struct task_struct *p)
1da177e4
LT
1998{
1999 int cpu;
2000
2001 preempt_disable();
2002 cpu = task_cpu(p);
2003 if ((cpu != smp_processor_id()) && task_curr(p))
2004 smp_send_reschedule(cpu);
2005 preempt_enable();
2006}
b43e3521 2007EXPORT_SYMBOL_GPL(kick_process);
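
/*
 * [Editor's sketch, not part of the original file] The usual pattern around
 * kick_process(): publish a request the task will notice once it is in the
 * kernel (here TIF_SIGPENDING, as the signal code does), then kick it so a
 * currently-running task re-enters the kernel promptly.
 */
static void poke_task(struct task_struct *p)
{
	set_tsk_thread_flag(p, TIF_SIGPENDING);
	kick_process(p);
}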
1da177e4 2008
30da688e 2009/*
3bd37062 2010 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
e9d867a6
PZI
2011 *
2012 * A few notes on cpu_active vs cpu_online:
2013 *
2014 * - cpu_active must be a subset of cpu_online
2015 *
97fb7a0a 2016 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
e9d867a6 2017 * see __set_cpus_allowed_ptr(). At this point the newly online
d1ccc66d 2018 * CPU isn't yet part of the sched domains, and balancing will not
e9d867a6
PZI
2019 * see it.
2020 *
d1ccc66d 2021 * - on CPU-down we clear cpu_active() to mask the sched domains and
e9d867a6 2022 * avoid the load balancer to place new tasks on the to be removed
d1ccc66d 2023 * CPU. Existing tasks will remain running there and will be taken
e9d867a6
PZI
2024 * off.
2025 *
2026 * This means that fallback selection must not select !active CPUs.
2027 * And can assume that any active CPU must be online. Conversely
2028 * select_task_rq() below may allow selection of !active CPUs in order
2029 * to satisfy the above rules.
30da688e 2030 */
5da9a0fb
PZ
2031static int select_fallback_rq(int cpu, struct task_struct *p)
2032{
aa00d89c
TC
2033 int nid = cpu_to_node(cpu);
2034 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
2035 enum { cpuset, possible, fail } state = cpuset;
2036 int dest_cpu;
5da9a0fb 2037
aa00d89c 2038 /*
d1ccc66d
IM
2039 * If the node that the CPU is on has been offlined, cpu_to_node()
2040 * will return -1. There is no CPU on the node, and we should
2041 * select the CPU on the other node.
aa00d89c
TC
2042 */
2043 if (nid != -1) {
2044 nodemask = cpumask_of_node(nid);
2045
2046 /* Look for allowed, online CPU in same node. */
2047 for_each_cpu(dest_cpu, nodemask) {
aa00d89c
TC
2048 if (!cpu_active(dest_cpu))
2049 continue;
3bd37062 2050 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
aa00d89c
TC
2051 return dest_cpu;
2052 }
2baab4e9 2053 }
5da9a0fb 2054
2baab4e9
PZ
2055 for (;;) {
2056 /* Any allowed, online CPU? */
3bd37062 2057 for_each_cpu(dest_cpu, p->cpus_ptr) {
175f0e25 2058 if (!is_cpu_allowed(p, dest_cpu))
2baab4e9 2059 continue;
175f0e25 2060
2baab4e9
PZ
2061 goto out;
2062 }
5da9a0fb 2063
e73e85f0 2064 /* No more Mr. Nice Guy. */
2baab4e9
PZ
2065 switch (state) {
2066 case cpuset:
e73e85f0
ON
2067 if (IS_ENABLED(CONFIG_CPUSETS)) {
2068 cpuset_cpus_allowed_fallback(p);
2069 state = possible;
2070 break;
2071 }
d1ccc66d 2072 /* Fall-through */
2baab4e9
PZ
2073 case possible:
2074 do_set_cpus_allowed(p, cpu_possible_mask);
2075 state = fail;
2076 break;
2077
2078 case fail:
2079 BUG();
2080 break;
2081 }
2082 }
2083
2084out:
2085 if (state != cpuset) {
2086 /*
2087 * Don't tell them about moving exiting tasks or
2088 * kernel threads (both mm NULL), since they never
2089 * leave the kernel.
2090 */
2091 if (p->mm && printk_ratelimit()) {
aac74dc4 2092 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
2093 task_pid_nr(p), p->comm, cpu);
2094 }
5da9a0fb
PZ
2095 }
2096
2097 return dest_cpu;
2098}
2099
e2912009 2100/*
3bd37062 2101 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
e2912009 2102 */
970b13ba 2103static inline
ac66f547 2104int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
970b13ba 2105{
cbce1a68
PZ
2106 lockdep_assert_held(&p->pi_lock);
2107
4b53a341 2108 if (p->nr_cpus_allowed > 1)
6c1d9410 2109 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
e9d867a6 2110 else
3bd37062 2111 cpu = cpumask_any(p->cpus_ptr);
e2912009
PZ
2112
2113 /*
2114 * In order not to call set_task_cpu() on a blocking task we need
3bd37062 2115 * to rely on ttwu() to place the task on a valid ->cpus_ptr
d1ccc66d 2116 * CPU.
e2912009
PZ
2117 *
2118 * Since this is common to all placement strategies, this lives here.
2119 *
2120 * [ this allows ->select_task() to simply return task_cpu(p) and
2121 * not worry about this generic constraint ]
2122 */
7af443ee 2123 if (unlikely(!is_cpu_allowed(p, cpu)))
5da9a0fb 2124 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
2125
2126 return cpu;
970b13ba 2127}
09a40af5
MG
2128
2129static void update_avg(u64 *avg, u64 sample)
2130{
2131 s64 diff = sample - *avg;
2132 *avg += diff >> 3;
2133}
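
/*
 * [Editor's note, not part of the original file] update_avg() is an
 * exponential moving average with weight 1/8: avg += (sample - avg) / 8.
 * Worked example: avg = 1000, sample = 1800 -> diff = 800 -> new avg = 1100.
 */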
25834c73 2134
f5832c19
NP
2135void sched_set_stop_task(int cpu, struct task_struct *stop)
2136{
2137 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2138 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2139
2140 if (stop) {
2141 /*
2142 * Make it appear like a SCHED_FIFO task, it's something
2143 * userspace knows about and won't get confused about.
2144 *
2145 * Also, it will make PI more or less work without too
2146 * much confusion -- but then, stop work should not
2147 * rely on PI working anyway.
2148 */
2149 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2150
2151 stop->sched_class = &stop_sched_class;
2152 }
2153
2154 cpu_rq(cpu)->stop = stop;
2155
2156 if (old_stop) {
2157 /*
2158 * Reset it back to a normal scheduling class so that
2159 * it can die in pieces.
2160 */
2161 old_stop->sched_class = &rt_sched_class;
2162 }
2163}
2164
25834c73
PZ
2165#else
2166
2167static inline int __set_cpus_allowed_ptr(struct task_struct *p,
2168 const struct cpumask *new_mask, bool check)
2169{
2170 return set_cpus_allowed_ptr(p, new_mask);
2171}
2172
5cc389bc 2173#endif /* CONFIG_SMP */
970b13ba 2174
d7c01d27 2175static void
b84cb5df 2176ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 2177{
4fa8d299 2178 struct rq *rq;
b84cb5df 2179
4fa8d299
JP
2180 if (!schedstat_enabled())
2181 return;
2182
2183 rq = this_rq();
d7c01d27 2184
4fa8d299
JP
2185#ifdef CONFIG_SMP
2186 if (cpu == rq->cpu) {
b85c8b71
PZ
2187 __schedstat_inc(rq->ttwu_local);
2188 __schedstat_inc(p->se.statistics.nr_wakeups_local);
d7c01d27
PZ
2189 } else {
2190 struct sched_domain *sd;
2191
b85c8b71 2192 __schedstat_inc(p->se.statistics.nr_wakeups_remote);
057f3fad 2193 rcu_read_lock();
4fa8d299 2194 for_each_domain(rq->cpu, sd) {
d7c01d27 2195 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
b85c8b71 2196 __schedstat_inc(sd->ttwu_wake_remote);
d7c01d27
PZ
2197 break;
2198 }
2199 }
057f3fad 2200 rcu_read_unlock();
d7c01d27 2201 }
f339b9dc
PZ
2202
2203 if (wake_flags & WF_MIGRATED)
b85c8b71 2204 __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
d7c01d27
PZ
2205#endif /* CONFIG_SMP */
2206
b85c8b71
PZ
2207 __schedstat_inc(rq->ttwu_count);
2208 __schedstat_inc(p->se.statistics.nr_wakeups);
d7c01d27
PZ
2209
2210 if (wake_flags & WF_SYNC)
b85c8b71 2211 __schedstat_inc(p->se.statistics.nr_wakeups_sync);
d7c01d27
PZ
2212}
2213
23f41eeb
PZ
2214/*
2215 * Mark the task runnable and perform wakeup-preemption.
2216 */
e7904a28 2217static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2218 struct rq_flags *rf)
9ed3811a 2219{
9ed3811a 2220 check_preempt_curr(rq, p, wake_flags);
9ed3811a 2221 p->state = TASK_RUNNING;
fbd705a0
PZ
2222 trace_sched_wakeup(p);
2223
9ed3811a 2224#ifdef CONFIG_SMP
4c9a4bc8
PZ
2225 if (p->sched_class->task_woken) {
2226 /*
cbce1a68
PZ
2227 * Our task @p is fully woken up and running; so it's safe to
2228 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 2229 */
d8ac8971 2230 rq_unpin_lock(rq, rf);
9ed3811a 2231 p->sched_class->task_woken(rq, p);
d8ac8971 2232 rq_repin_lock(rq, rf);
4c9a4bc8 2233 }
9ed3811a 2234
e69c6341 2235 if (rq->idle_stamp) {
78becc27 2236 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 2237 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 2238
abfafa54
JL
2239 update_avg(&rq->avg_idle, delta);
2240
2241 if (rq->avg_idle > max)
9ed3811a 2242 rq->avg_idle = max;
abfafa54 2243
9ed3811a
TH
2244 rq->idle_stamp = 0;
2245 }
2246#endif
2247}
2248
c05fbafb 2249static void
e7904a28 2250ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2251 struct rq_flags *rf)
c05fbafb 2252{
77558e4d 2253 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
b5179ac7 2254
cbce1a68
PZ
2255 lockdep_assert_held(&rq->lock);
2256
c05fbafb
PZ
2257#ifdef CONFIG_SMP
2258 if (p->sched_contributes_to_load)
2259 rq->nr_uninterruptible--;
b5179ac7 2260
b5179ac7 2261 if (wake_flags & WF_MIGRATED)
59efa0ba 2262 en_flags |= ENQUEUE_MIGRATED;
c05fbafb
PZ
2263#endif
2264
1b174a2c 2265 activate_task(rq, p, en_flags);
d8ac8971 2266 ttwu_do_wakeup(rq, p, wake_flags, rf);
c05fbafb
PZ
2267}
2268
2269/*
2270 * Called when the task @p isn't fully descheduled from its runqueue;
2271 * in that case we must do a remote wakeup. It's a 'light' wakeup though,
2272 * since all we need to do is flip p->state to TASK_RUNNING; the
2273 * task is still ->on_rq.
2274 */
2275static int ttwu_remote(struct task_struct *p, int wake_flags)
2276{
eb580751 2277 struct rq_flags rf;
c05fbafb
PZ
2278 struct rq *rq;
2279 int ret = 0;
2280
eb580751 2281 rq = __task_rq_lock(p, &rf);
da0c1e65 2282 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
2283 /* check_preempt_curr() may use rq clock */
2284 update_rq_clock(rq);
d8ac8971 2285 ttwu_do_wakeup(rq, p, wake_flags, &rf);
c05fbafb
PZ
2286 ret = 1;
2287 }
eb580751 2288 __task_rq_unlock(rq, &rf);
c05fbafb
PZ
2289
2290 return ret;
2291}
2292
317f3941 2293#ifdef CONFIG_SMP
e3baac47 2294void sched_ttwu_pending(void)
317f3941
PZ
2295{
2296 struct rq *rq = this_rq();
fa14ff4a 2297 struct llist_node *llist = llist_del_all(&rq->wake_list);
73215849 2298 struct task_struct *p, *t;
d8ac8971 2299 struct rq_flags rf;
317f3941 2300
e3baac47
PZ
2301 if (!llist)
2302 return;
2303
8a8c69c3 2304 rq_lock_irqsave(rq, &rf);
77558e4d 2305 update_rq_clock(rq);
317f3941 2306
73215849
BP
2307 llist_for_each_entry_safe(p, t, llist, wake_entry)
2308 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
317f3941 2309
8a8c69c3 2310 rq_unlock_irqrestore(rq, &rf);
317f3941
PZ
2311}
2312
2313void scheduler_ipi(void)
2314{
f27dde8d
PZ
2315 /*
2316 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
2317 * TIF_NEED_RESCHED remotely (for the first time) will also send
2318 * this IPI.
2319 */
8cb75e0c 2320 preempt_fold_need_resched();
f27dde8d 2321
fd2ac4f4 2322 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
c5d753a5
PZ
2323 return;
2324
2325 /*
2326 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
2327 * traditionally all their work was done from the interrupt return
2328 * path. Now that we actually do some work, we need to make sure
2329 * we do call them.
2330 *
2331 * Some archs already do call them, luckily irq_enter/exit nest
2332 * properly.
2333 *
2334 * Arguably we should visit all archs and update all handlers,
2335 * however a fair share of IPIs are still resched only so this would
2336 * somewhat pessimize the simple resched case.
2337 */
2338 irq_enter();
fa14ff4a 2339 sched_ttwu_pending();
ca38062e
SS
2340
2341 /*
2342 * Check if someone kicked us for doing the nohz idle load balance.
2343 */
873b4c65 2344 if (unlikely(got_nohz_idle_kick())) {
6eb57e0d 2345 this_rq()->idle_balance = 1;
ca38062e 2346 raise_softirq_irqoff(SCHED_SOFTIRQ);
6eb57e0d 2347 }
c5d753a5 2348 irq_exit();
317f3941
PZ
2349}
2350
b7e7ade3 2351static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
317f3941 2352{
e3baac47
PZ
2353 struct rq *rq = cpu_rq(cpu);
2354
b7e7ade3
PZ
2355 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
2356
e3baac47
PZ
2357 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
2358 if (!set_nr_if_polling(rq->idle))
2359 smp_send_reschedule(cpu);
2360 else
2361 trace_sched_wake_idle_without_ipi(cpu);
2362 }
317f3941 2363}
d6aa8f85 2364
f6be8af1
CL
2365void wake_up_if_idle(int cpu)
2366{
2367 struct rq *rq = cpu_rq(cpu);
8a8c69c3 2368 struct rq_flags rf;
f6be8af1 2369
fd7de1e8
AL
2370 rcu_read_lock();
2371
2372 if (!is_idle_task(rcu_dereference(rq->curr)))
2373 goto out;
f6be8af1
CL
2374
2375 if (set_nr_if_polling(rq->idle)) {
2376 trace_sched_wake_idle_without_ipi(cpu);
2377 } else {
8a8c69c3 2378 rq_lock_irqsave(rq, &rf);
f6be8af1
CL
2379 if (is_idle_task(rq->curr))
2380 smp_send_reschedule(cpu);
d1ccc66d 2381 /* Else CPU is not idle, do nothing here: */
8a8c69c3 2382 rq_unlock_irqrestore(rq, &rf);
f6be8af1 2383 }
fd7de1e8
AL
2384
2385out:
2386 rcu_read_unlock();
f6be8af1
CL
2387}
2388
39be3501 2389bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
2390{
2391 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
2392}
d6aa8f85 2393#endif /* CONFIG_SMP */
317f3941 2394
b5179ac7 2395static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
c05fbafb
PZ
2396{
2397 struct rq *rq = cpu_rq(cpu);
d8ac8971 2398 struct rq_flags rf;
c05fbafb 2399
17d9f311 2400#if defined(CONFIG_SMP)
39be3501 2401 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
d1ccc66d 2402 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
b7e7ade3 2403 ttwu_queue_remote(p, cpu, wake_flags);
317f3941
PZ
2404 return;
2405 }
2406#endif
2407
8a8c69c3 2408 rq_lock(rq, &rf);
77558e4d 2409 update_rq_clock(rq);
d8ac8971 2410 ttwu_do_activate(rq, p, wake_flags, &rf);
8a8c69c3 2411 rq_unlock(rq, &rf);
9ed3811a
TH
2412}
2413
8643cda5
PZ
2414/*
2415 * Notes on Program-Order guarantees on SMP systems.
2416 *
2417 * MIGRATION
2418 *
2419 * The basic program-order guarantee on SMP systems is that when a task [t]
d1ccc66d
IM
2420 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
2421 * execution on its new CPU [c1].
8643cda5
PZ
2422 *
2423 * For migration (of runnable tasks) this is provided by the following means:
2424 *
2425 * A) UNLOCK of the rq(c0)->lock scheduling out task t
2426 * B) migration for t is required to synchronize *both* rq(c0)->lock and
2427 * rq(c1)->lock (if not at the same time, then in that order).
2428 * C) LOCK of the rq(c1)->lock scheduling in task
2429 *
7696f991 2430 * Release/acquire chaining guarantees that B happens after A and C after B.
d1ccc66d 2431 * Note: the CPU doing B need not be c0 or c1
8643cda5
PZ
2432 *
2433 * Example:
2434 *
2435 * CPU0 CPU1 CPU2
2436 *
2437 * LOCK rq(0)->lock
2438 * sched-out X
2439 * sched-in Y
2440 * UNLOCK rq(0)->lock
2441 *
2442 * LOCK rq(0)->lock // orders against CPU0
2443 * dequeue X
2444 * UNLOCK rq(0)->lock
2445 *
2446 * LOCK rq(1)->lock
2447 * enqueue X
2448 * UNLOCK rq(1)->lock
2449 *
2450 * LOCK rq(1)->lock // orders against CPU2
2451 * sched-out Z
2452 * sched-in X
2453 * UNLOCK rq(1)->lock
2454 *
2455 *
2456 * BLOCKING -- aka. SLEEP + WAKEUP
2457 *
2458 * For blocking we (obviously) need to provide the same guarantee as for
2459 * migration. However the means are completely different as there is no lock
2460 * chain to provide order. Instead we do:
2461 *
2462 * 1) smp_store_release(X->on_cpu, 0)
1f03e8d2 2463 * 2) smp_cond_load_acquire(!X->on_cpu)
8643cda5
PZ
2464 *
2465 * Example:
2466 *
2467 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
2468 *
2469 * LOCK rq(0)->lock LOCK X->pi_lock
2470 * dequeue X
2471 * sched-out X
2472 * smp_store_release(X->on_cpu, 0);
2473 *
1f03e8d2 2474 * smp_cond_load_acquire(&X->on_cpu, !VAL);
8643cda5
PZ
2475 * X->state = WAKING
2476 * set_task_cpu(X,2)
2477 *
2478 * LOCK rq(2)->lock
2479 * enqueue X
2480 * X->state = RUNNING
2481 * UNLOCK rq(2)->lock
2482 *
2483 * LOCK rq(2)->lock // orders against CPU1
2484 * sched-out Z
2485 * sched-in X
2486 * UNLOCK rq(2)->lock
2487 *
2488 * UNLOCK X->pi_lock
2489 * UNLOCK rq(0)->lock
2490 *
2491 *
7696f991
AP
2492 * However, for wakeups there is a second guarantee we must provide, namely we
2493 * must ensure that CONDITION=1 done by the caller can not be reordered with
2494 * accesses to the task state; see try_to_wake_up() and set_current_state().
8643cda5
PZ
2495 */
2496
9ed3811a 2497/**
1da177e4 2498 * try_to_wake_up - wake up a thread
9ed3811a 2499 * @p: the thread to be awakened
1da177e4 2500 * @state: the mask of task states that can be woken
9ed3811a 2501 * @wake_flags: wake modifier flags (WF_*)
1da177e4 2502 *
a2250238 2503 * If (@state & @p->state) @p->state = TASK_RUNNING.
1da177e4 2504 *
a2250238
PZ
2505 * If the task was not queued/runnable, also place it back on a runqueue.
2506 *
2507 * Atomic against schedule() which would dequeue a task, also see
2508 * set_current_state().
2509 *
7696f991
AP
2510 * This function executes a full memory barrier before accessing the task
2511 * state; see set_current_state().
2512 *
a2250238
PZ
2513 * Return: %true if @p->state changes (an actual wakeup was done),
2514 * %false otherwise.
1da177e4 2515 */
e4a52bcb
PZ
2516static int
2517try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 2518{
1da177e4 2519 unsigned long flags;
c05fbafb 2520 int cpu, success = 0;
2398f2c6 2521
e3d85487 2522 preempt_disable();
aacedf26
PZ
2523 if (p == current) {
2524 /*
2525 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
2526 * == smp_processor_id()'. Together this means we can special
2527 * case the whole 'p->on_rq && ttwu_remote()' case below
2528 * without taking any locks.
2529 *
2530 * In particular:
2531 * - we rely on Program-Order guarantees for all the ordering,
2532 * - we're serialized against set_special_state() by virtue of
2533 * it disabling IRQs (this allows not taking ->pi_lock).
2534 */
2535 if (!(p->state & state))
e3d85487 2536 goto out;
aacedf26
PZ
2537
2538 success = 1;
2539 cpu = task_cpu(p);
2540 trace_sched_waking(p);
2541 p->state = TASK_RUNNING;
2542 trace_sched_wakeup(p);
2543 goto out;
2544 }
2545
e0acd0a6
ON
2546 /*
2547 * If we are going to wake up a thread waiting for CONDITION we
2548 * need to ensure that CONDITION=1 done by the caller can not be
2549 * reordered with p->state check below. This pairs with mb() in
2550 * set_current_state() the waiting thread does.
2551 */
013fdb80 2552 raw_spin_lock_irqsave(&p->pi_lock, flags);
d89e588c 2553 smp_mb__after_spinlock();
e9c84311 2554 if (!(p->state & state))
aacedf26 2555 goto unlock;
1da177e4 2556
fbd705a0
PZ
2557 trace_sched_waking(p);
2558
d1ccc66d
IM
2559 /* We're going to change ->state: */
2560 success = 1;
1da177e4 2561 cpu = task_cpu(p);
1da177e4 2562
135e8c92
BS
2563 /*
2564 * Ensure we load p->on_rq _after_ p->state, otherwise it would
2565 * be possible to, falsely, observe p->on_rq == 0 and get stuck
2566 * in smp_cond_load_acquire() below.
2567 *
3d85b270
AP
2568 * sched_ttwu_pending() try_to_wake_up()
2569 * STORE p->on_rq = 1 LOAD p->state
2570 * UNLOCK rq->lock
2571 *
2572 * __schedule() (switch to task 'p')
2573 * LOCK rq->lock smp_rmb();
2574 * smp_mb__after_spinlock();
2575 * UNLOCK rq->lock
135e8c92
BS
2576 *
2577 * [task p]
3d85b270 2578 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
135e8c92 2579 *
3d85b270
AP
2580 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
2581 * __schedule(). See the comment for smp_mb__after_spinlock().
135e8c92
BS
2582 */
2583 smp_rmb();
c05fbafb 2584 if (p->on_rq && ttwu_remote(p, wake_flags))
aacedf26 2585 goto unlock;
1da177e4 2586
1da177e4 2587#ifdef CONFIG_SMP
ecf7d01c
PZ
2588 /*
2589 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
2590 * possible to, falsely, observe p->on_cpu == 0.
2591 *
2592 * One must be running (->on_cpu == 1) in order to remove oneself
2593 * from the runqueue.
2594 *
3d85b270
AP
2595 * __schedule() (switch to task 'p') try_to_wake_up()
2596 * STORE p->on_cpu = 1 LOAD p->on_rq
2597 * UNLOCK rq->lock
2598 *
2599 * __schedule() (put 'p' to sleep)
2600 * LOCK rq->lock smp_rmb();
2601 * smp_mb__after_spinlock();
2602 * STORE p->on_rq = 0 LOAD p->on_cpu
ecf7d01c 2603 *
3d85b270
AP
2604 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
2605 * __schedule(). See the comment for smp_mb__after_spinlock().
ecf7d01c
PZ
2606 */
2607 smp_rmb();
2608
e9c84311 2609 /*
d1ccc66d 2610 * If the owning (remote) CPU is still in the middle of schedule() with
c05fbafb 2611 * this task as prev, wait until its done referencing the task.
b75a2253 2612 *
31cb1bc0 2613 * Pairs with the smp_store_release() in finish_task().
b75a2253
PZ
2614 *
2615 * This ensures that tasks getting woken will be fully ordered against
2616 * their previous state and preserve Program Order.
0970d299 2617 */
1f03e8d2 2618 smp_cond_load_acquire(&p->on_cpu, !VAL);
1da177e4 2619
a8e4f2ea 2620 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 2621 p->state = TASK_WAKING;
e7693a36 2622
e33a9bba 2623 if (p->in_iowait) {
c96f5471 2624 delayacct_blkio_end(p);
e33a9bba
TH
2625 atomic_dec(&task_rq(p)->nr_iowait);
2626 }
2627
ac66f547 2628 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
2629 if (task_cpu(p) != cpu) {
2630 wake_flags |= WF_MIGRATED;
eb414681 2631 psi_ttwu_dequeue(p);
e4a52bcb 2632 set_task_cpu(p, cpu);
f339b9dc 2633 }
e33a9bba
TH
2634
2635#else /* CONFIG_SMP */
2636
2637 if (p->in_iowait) {
c96f5471 2638 delayacct_blkio_end(p);
e33a9bba
TH
2639 atomic_dec(&task_rq(p)->nr_iowait);
2640 }
2641
1da177e4 2642#endif /* CONFIG_SMP */
1da177e4 2643
b5179ac7 2644 ttwu_queue(p, cpu, wake_flags);
aacedf26 2645unlock:
013fdb80 2646 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
aacedf26
PZ
2647out:
2648 if (success)
2649 ttwu_stat(p, cpu, wake_flags);
e3d85487 2650 preempt_enable();
1da177e4
LT
2651
2652 return success;
2653}
2654
50fa610a
DH
2655/**
2656 * wake_up_process - Wake up a specific process
2657 * @p: The process to be woken up.
2658 *
2659 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
2660 * processes.
2661 *
2662 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a 2663 *
7696f991 2664 * This function executes a full memory barrier before accessing the task state.
50fa610a 2665 */
7ad5b3a5 2666int wake_up_process(struct task_struct *p)
1da177e4 2667{
9067ac85 2668 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 2669}
1da177e4
LT
2670EXPORT_SYMBOL(wake_up_process);
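
/*
 * [Editor's sketch, not part of the original file] The canonical pairing
 * with set_current_state() that the ordering comments above describe;
 * struct foo and its fields are made-up names.
 */
struct foo {
	struct task_struct *waiter;
	bool data_ready;
};

static void foo_publish(struct foo *f)
{
	f->data_ready = true;		/* write the condition first */
	wake_up_process(f->waiter);	/* then wake; try_to_wake_up() provides the barrier */
}

static void foo_wait(struct foo *f)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (f->data_ready)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}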
2671
7ad5b3a5 2672int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2673{
2674 return try_to_wake_up(p, state, 0);
2675}
2676
1da177e4
LT
2677/*
2678 * Perform scheduler related setup for a newly forked process p.
2679 * p is forked by current.
dd41f596
IM
2680 *
2681 * __sched_fork() is basic setup used by init_idle() too:
2682 */
5e1576ed 2683static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2684{
fd2f4419
PZ
2685 p->on_rq = 0;
2686
2687 p->se.on_rq = 0;
dd41f596
IM
2688 p->se.exec_start = 0;
2689 p->se.sum_exec_runtime = 0;
f6cf891c 2690 p->se.prev_sum_exec_runtime = 0;
6c594c21 2691 p->se.nr_migrations = 0;
da7a735e 2692 p->se.vruntime = 0;
fd2f4419 2693 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d 2694
ad936d86
BP
2695#ifdef CONFIG_FAIR_GROUP_SCHED
2696 p->se.cfs_rq = NULL;
2697#endif
2698
6cfb0d5d 2699#ifdef CONFIG_SCHEDSTATS
cb251765 2700 /* Even if schedstat is disabled, there should not be garbage */
41acab88 2701 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2702#endif
476d139c 2703
aab03e05 2704 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 2705 init_dl_task_timer(&p->dl);
209a0cbd 2706 init_dl_inactive_task_timer(&p->dl);
a5e7be3b 2707 __dl_clear_params(p);
aab03e05 2708
fa717060 2709 INIT_LIST_HEAD(&p->rt.run_list);
ff77e468
PZ
2710 p->rt.timeout = 0;
2711 p->rt.time_slice = sched_rr_timeslice;
2712 p->rt.on_rq = 0;
2713 p->rt.on_list = 0;
476d139c 2714
e107be36
AK
2715#ifdef CONFIG_PREEMPT_NOTIFIERS
2716 INIT_HLIST_HEAD(&p->preempt_notifiers);
2717#endif
cbee9f88 2718
5e1f0f09
MG
2719#ifdef CONFIG_COMPACTION
2720 p->capture_control = NULL;
2721#endif
13784475 2722 init_numa_balancing(clone_flags, p);
dd41f596
IM
2723}
2724
2a595721
SD
2725DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2726
1a687c2e 2727#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 2728
1a687c2e
MG
2729void set_numabalancing_state(bool enabled)
2730{
2731 if (enabled)
2a595721 2732 static_branch_enable(&sched_numa_balancing);
1a687c2e 2733 else
2a595721 2734 static_branch_disable(&sched_numa_balancing);
1a687c2e 2735}
54a43d54
AK
2736
2737#ifdef CONFIG_PROC_SYSCTL
2738int sysctl_numa_balancing(struct ctl_table *table, int write,
2739 void __user *buffer, size_t *lenp, loff_t *ppos)
2740{
2741 struct ctl_table t;
2742 int err;
2a595721 2743 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
2744
2745 if (write && !capable(CAP_SYS_ADMIN))
2746 return -EPERM;
2747
2748 t = *table;
2749 t.data = &state;
2750 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2751 if (err < 0)
2752 return err;
2753 if (write)
2754 set_numabalancing_state(state);
2755 return err;
2756}
2757#endif
2758#endif
dd41f596 2759
4698f88c
JP
2760#ifdef CONFIG_SCHEDSTATS
2761
cb251765 2762DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4698f88c 2763static bool __initdata __sched_schedstats = false;
cb251765 2764
cb251765
MG
2765static void set_schedstats(bool enabled)
2766{
2767 if (enabled)
2768 static_branch_enable(&sched_schedstats);
2769 else
2770 static_branch_disable(&sched_schedstats);
2771}
2772
2773void force_schedstat_enabled(void)
2774{
2775 if (!schedstat_enabled()) {
2776 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2777 static_branch_enable(&sched_schedstats);
2778 }
2779}
2780
2781static int __init setup_schedstats(char *str)
2782{
2783 int ret = 0;
2784 if (!str)
2785 goto out;
2786
4698f88c
JP
2787 /*
2788 * This code is called before jump labels have been set up, so we can't
2789 * change the static branch directly just yet. Instead set a temporary
2790 * variable so init_schedstats() can do it later.
2791 */
cb251765 2792 if (!strcmp(str, "enable")) {
4698f88c 2793 __sched_schedstats = true;
cb251765
MG
2794 ret = 1;
2795 } else if (!strcmp(str, "disable")) {
4698f88c 2796 __sched_schedstats = false;
cb251765
MG
2797 ret = 1;
2798 }
2799out:
2800 if (!ret)
2801 pr_warn("Unable to parse schedstats=\n");
2802
2803 return ret;
2804}
2805__setup("schedstats=", setup_schedstats);
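
/*
 * [Editor's note, not part of the original file] Schedstats can thus be
 * enabled at boot with "schedstats=enable" on the kernel command line, or
 * toggled at runtime through the kernel.sched_schedstats sysctl handled
 * below.
 */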
2806
4698f88c
JP
2807static void __init init_schedstats(void)
2808{
2809 set_schedstats(__sched_schedstats);
2810}
2811
cb251765
MG
2812#ifdef CONFIG_PROC_SYSCTL
2813int sysctl_schedstats(struct ctl_table *table, int write,
2814 void __user *buffer, size_t *lenp, loff_t *ppos)
2815{
2816 struct ctl_table t;
2817 int err;
2818 int state = static_branch_likely(&sched_schedstats);
2819
2820 if (write && !capable(CAP_SYS_ADMIN))
2821 return -EPERM;
2822
2823 t = *table;
2824 t.data = &state;
2825 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2826 if (err < 0)
2827 return err;
2828 if (write)
2829 set_schedstats(state);
2830 return err;
2831}
4698f88c
JP
2832#endif /* CONFIG_PROC_SYSCTL */
2833#else /* !CONFIG_SCHEDSTATS */
2834static inline void init_schedstats(void) {}
2835#endif /* CONFIG_SCHEDSTATS */
dd41f596
IM
2836
2837/*
2838 * fork()/clone()-time setup:
2839 */
aab03e05 2840int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 2841{
0122ec5b 2842 unsigned long flags;
dd41f596 2843
5e1576ed 2844 __sched_fork(clone_flags, p);
06b83b5f 2845 /*
7dc603c9 2846 * We mark the process as NEW here. This guarantees that
06b83b5f
PZ
2847 * nobody will actually run it, and a signal or other external
2848 * event cannot wake it up and insert it on the runqueue either.
2849 */
7dc603c9 2850 p->state = TASK_NEW;
dd41f596 2851
c350a04e
MG
2852 /*
2853 * Make sure we do not leak PI boosting priority to the child.
2854 */
2855 p->prio = current->normal_prio;
2856
e8f14172
PB
2857 uclamp_fork(p);
2858
b9dc29e7
MG
2859 /*
2860 * Revert to default priority/policy on fork if requested.
2861 */
2862 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 2863 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 2864 p->policy = SCHED_NORMAL;
6c697bdf 2865 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
2866 p->rt_priority = 0;
2867 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2868 p->static_prio = NICE_TO_PRIO(0);
2869
2870 p->prio = p->normal_prio = __normal_prio(p);
9059393e 2871 set_load_weight(p, false);
6c697bdf 2872
b9dc29e7
MG
2873 /*
2874 * We don't need the reset flag anymore after the fork. It has
2875 * fulfilled its duty:
2876 */
2877 p->sched_reset_on_fork = 0;
2878 }
ca94c442 2879
af0fffd9 2880 if (dl_prio(p->prio))
aab03e05 2881 return -EAGAIN;
af0fffd9 2882 else if (rt_prio(p->prio))
aab03e05 2883 p->sched_class = &rt_sched_class;
af0fffd9 2884 else
2ddbf952 2885 p->sched_class = &fair_sched_class;
b29739f9 2886
7dc603c9 2887 init_entity_runnable_average(&p->se);
cd29fe6f 2888
86951599
PZ
2889 /*
2890 * The child is not yet in the pid-hash so no cgroup attach races,
2891 * and the cgroup is pinned to this child because cgroup_fork()
2892 * runs before sched_fork().
2893 *
2894 * Silence PROVE_RCU.
2895 */
0122ec5b 2896 raw_spin_lock_irqsave(&p->pi_lock, flags);
e210bffd 2897 /*
d1ccc66d 2898 * We're setting the CPU for the first time, we don't migrate,
e210bffd
PZ
2899 * so use __set_task_cpu().
2900 */
af0fffd9 2901 __set_task_cpu(p, smp_processor_id());
e210bffd
PZ
2902 if (p->sched_class->task_fork)
2903 p->sched_class->task_fork(p);
0122ec5b 2904 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2905
f6db8347 2906#ifdef CONFIG_SCHED_INFO
dd41f596 2907 if (likely(sched_info_on()))
52f17b6c 2908 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2909#endif
3ca7a440
PZ
2910#if defined(CONFIG_SMP)
2911 p->on_cpu = 0;
4866cde0 2912#endif
01028747 2913 init_task_preempt_count(p);
806c09a7 2914#ifdef CONFIG_SMP
917b627d 2915 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 2916 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 2917#endif
aab03e05 2918 return 0;
1da177e4
LT
2919}
2920
332ac17e
DF
2921unsigned long to_ratio(u64 period, u64 runtime)
2922{
2923 if (runtime == RUNTIME_INF)
c52f14d3 2924 return BW_UNIT;
332ac17e
DF
2925
2926 /*
2927 * Doing this here saves a lot of checks in all
2928 * the calling paths, and returning zero seems
2929 * safe for them anyway.
2930 */
2931 if (period == 0)
2932 return 0;
2933
c52f14d3 2934 return div64_u64(runtime << BW_SHIFT, period);
332ac17e
DF
2935}
2936
1da177e4
LT
2937/*
2938 * wake_up_new_task - wake up a newly created task for the first time.
2939 *
2940 * This function will do some initial scheduler statistics housekeeping
2941 * that must be done for every newly created context, then puts the task
2942 * on the runqueue and wakes it.
2943 */
3e51e3ed 2944void wake_up_new_task(struct task_struct *p)
1da177e4 2945{
eb580751 2946 struct rq_flags rf;
dd41f596 2947 struct rq *rq;
fabf318e 2948
eb580751 2949 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
7dc603c9 2950 p->state = TASK_RUNNING;
fabf318e
PZ
2951#ifdef CONFIG_SMP
2952 /*
2953 * Fork balancing, do it here and not earlier because:
3bd37062 2954 * - cpus_ptr can change in the fork path
d1ccc66d 2955 * - any previously selected CPU might disappear through hotplug
e210bffd
PZ
2956 *
2957 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
2958 * as we're not fully set-up yet.
fabf318e 2959 */
32e839dd 2960 p->recent_used_cpu = task_cpu(p);
e210bffd 2961 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735 2962#endif
b7fa30c9 2963 rq = __task_rq_lock(p, &rf);
4126bad6 2964 update_rq_clock(rq);
d0fe0b9c 2965 post_init_entity_util_avg(p);
0017d735 2966
7a57f32a 2967 activate_task(rq, p, ENQUEUE_NOCLOCK);
fbd705a0 2968 trace_sched_wakeup_new(p);
a7558e01 2969 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2970#ifdef CONFIG_SMP
0aaafaab
PZ
2971 if (p->sched_class->task_woken) {
2972 /*
2973 * Nothing relies on rq->lock after this, so it's fine to
2974 * drop it.
2975 */
d8ac8971 2976 rq_unpin_lock(rq, &rf);
efbbd05a 2977 p->sched_class->task_woken(rq, p);
d8ac8971 2978 rq_repin_lock(rq, &rf);
0aaafaab 2979 }
9a897c5a 2980#endif
eb580751 2981 task_rq_unlock(rq, p, &rf);
1da177e4
LT
2982}
2983
e107be36
AK
2984#ifdef CONFIG_PREEMPT_NOTIFIERS
2985
b7203428 2986static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
1cde2930 2987
2ecd9d29
PZ
2988void preempt_notifier_inc(void)
2989{
b7203428 2990 static_branch_inc(&preempt_notifier_key);
2ecd9d29
PZ
2991}
2992EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2993
2994void preempt_notifier_dec(void)
2995{
b7203428 2996 static_branch_dec(&preempt_notifier_key);
2ecd9d29
PZ
2997}
2998EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2999
e107be36 3000/**
80dd99b3 3001 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 3002 * @notifier: notifier struct to register
e107be36
AK
3003 */
3004void preempt_notifier_register(struct preempt_notifier *notifier)
3005{
b7203428 3006 if (!static_branch_unlikely(&preempt_notifier_key))
2ecd9d29
PZ
3007 WARN(1, "registering preempt_notifier while notifiers disabled\n");
3008
e107be36
AK
3009 hlist_add_head(&notifier->link, &current->preempt_notifiers);
3010}
3011EXPORT_SYMBOL_GPL(preempt_notifier_register);
3012
3013/**
3014 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 3015 * @notifier: notifier struct to unregister
e107be36 3016 *
d84525a8 3017 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
3018 */
3019void preempt_notifier_unregister(struct preempt_notifier *notifier)
3020{
3021 hlist_del(&notifier->link);
3022}
3023EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
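
/*
 * [Editor's sketch, not part of the original file] Typical registration
 * pattern, modelled loosely on how a hypervisor hooks context switches.
 * The my_* names are hypothetical; preempt_notifier_init() is assumed to
 * come from <linux/preempt.h>.
 */
static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next) { }

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_notifier;

static void my_hook_current_task(void)
{
	preempt_notifier_inc();				/* enable the static key */
	preempt_notifier_init(&my_notifier, &my_preempt_ops);
	preempt_notifier_register(&my_notifier);	/* notifies for 'current' only */
}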
3024
1cde2930 3025static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3026{
3027 struct preempt_notifier *notifier;
e107be36 3028
b67bfe0d 3029 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3030 notifier->ops->sched_in(notifier, raw_smp_processor_id());
3031}
3032
1cde2930
PZ
3033static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3034{
b7203428 3035 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3036 __fire_sched_in_preempt_notifiers(curr);
3037}
3038
e107be36 3039static void
1cde2930
PZ
3040__fire_sched_out_preempt_notifiers(struct task_struct *curr,
3041 struct task_struct *next)
e107be36
AK
3042{
3043 struct preempt_notifier *notifier;
e107be36 3044
b67bfe0d 3045 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3046 notifier->ops->sched_out(notifier, next);
3047}
3048
1cde2930
PZ
3049static __always_inline void
3050fire_sched_out_preempt_notifiers(struct task_struct *curr,
3051 struct task_struct *next)
3052{
b7203428 3053 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3054 __fire_sched_out_preempt_notifiers(curr, next);
3055}
3056
6d6bc0ad 3057#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 3058
1cde2930 3059static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3060{
3061}
3062
1cde2930 3063static inline void
e107be36
AK
3064fire_sched_out_preempt_notifiers(struct task_struct *curr,
3065 struct task_struct *next)
3066{
3067}
3068
6d6bc0ad 3069#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 3070
31cb1bc0 3071static inline void prepare_task(struct task_struct *next)
3072{
3073#ifdef CONFIG_SMP
3074 /*
3075 * Claim the task as running, we do this before switching to it
3076 * such that any running task will have this set.
3077 */
3078 next->on_cpu = 1;
3079#endif
3080}
3081
3082static inline void finish_task(struct task_struct *prev)
3083{
3084#ifdef CONFIG_SMP
3085 /*
3086 * After ->on_cpu is cleared, the task can be moved to a different CPU.
3087 * We must ensure this doesn't happen until the switch is completely
3088 * finished.
3089 *
3090 * In particular, the load of prev->state in finish_task_switch() must
3091 * happen before this.
3092 *
3093 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3094 */
3095 smp_store_release(&prev->on_cpu, 0);
3096#endif
3097}
3098
269d5992
PZ
3099static inline void
3100prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
31cb1bc0 3101{
269d5992
PZ
3102 /*
3103 * The runqueue lock will be released by the next
3104 * task (which is an invalid locking op, but in the case
3105 * of the scheduler it's an obvious special case), so we
3106 * do an early lockdep release here:
3107 */
3108 rq_unpin_lock(rq, rf);
5facae4f 3109 spin_release(&rq->lock.dep_map, _THIS_IP_);
31cb1bc0 3110#ifdef CONFIG_DEBUG_SPINLOCK
3111 /* this is a valid case when another task releases the spinlock */
269d5992 3112 rq->lock.owner = next;
31cb1bc0 3113#endif
269d5992
PZ
3114}
3115
3116static inline void finish_lock_switch(struct rq *rq)
3117{
31cb1bc0 3118 /*
3119 * If we are tracking spinlock dependencies then we have to
3120 * fix up the runqueue lock - which gets 'carried over' from
3121 * prev into current:
3122 */
3123 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
31cb1bc0 3124 raw_spin_unlock_irq(&rq->lock);
3125}
3126
325ea10c
IM
3127/*
3128 * NOP if the arch has not defined these:
3129 */
3130
3131#ifndef prepare_arch_switch
3132# define prepare_arch_switch(next) do { } while (0)
3133#endif
3134
3135#ifndef finish_arch_post_lock_switch
3136# define finish_arch_post_lock_switch() do { } while (0)
3137#endif
3138
4866cde0
NP
3139/**
3140 * prepare_task_switch - prepare to switch tasks
3141 * @rq: the runqueue preparing to switch
421cee29 3142 * @prev: the current task that is being switched out
4866cde0
NP
3143 * @next: the task we are going to switch to.
3144 *
3145 * This is called with the rq lock held and interrupts off. It must
3146 * be paired with a subsequent finish_task_switch after the context
3147 * switch.
3148 *
3149 * prepare_task_switch sets up locking and calls architecture specific
3150 * hooks.
3151 */
e107be36
AK
3152static inline void
3153prepare_task_switch(struct rq *rq, struct task_struct *prev,
3154 struct task_struct *next)
4866cde0 3155{
0ed557aa 3156 kcov_prepare_switch(prev);
43148951 3157 sched_info_switch(rq, prev, next);
fe4b04fa 3158 perf_event_task_sched_out(prev, next);
d7822b1e 3159 rseq_preempt(prev);
e107be36 3160 fire_sched_out_preempt_notifiers(prev, next);
31cb1bc0 3161 prepare_task(next);
4866cde0
NP
3162 prepare_arch_switch(next);
3163}
3164
1da177e4
LT
3165/**
3166 * finish_task_switch - clean up after a task-switch
3167 * @prev: the thread we just switched away from.
3168 *
4866cde0
NP
3169 * finish_task_switch must be called after the context switch, paired
3170 * with a prepare_task_switch call before the context switch.
3171 * finish_task_switch will reconcile locking set up by prepare_task_switch,
3172 * and do any other architecture-specific cleanup actions.
1da177e4
LT
3173 *
3174 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 3175 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
3176 * with the lock held can cause deadlocks; see schedule() for
3177 * details.)
dfa50b60
ON
3178 *
3179 * The context switch has flipped the stack from under us and restored the
3180 * local variables which were saved when this task called schedule() in the
3181 * past. prev == current is still correct but we need to recalculate this_rq
3182 * because prev may have moved to another CPU.
1da177e4 3183 */
dfa50b60 3184static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
3185 __releases(rq->lock)
3186{
dfa50b60 3187 struct rq *rq = this_rq();
1da177e4 3188 struct mm_struct *mm = rq->prev_mm;
55a101f8 3189 long prev_state;
1da177e4 3190
609ca066
PZ
3191 /*
3192 * The previous task will have left us with a preempt_count of 2
3193 * because it left us after:
3194 *
3195 * schedule()
3196 * preempt_disable(); // 1
3197 * __schedule()
3198 * raw_spin_lock_irq(&rq->lock) // 2
3199 *
3200 * Also, see FORK_PREEMPT_COUNT.
3201 */
e2bf1c4b
PZ
3202 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
3203 "corrupted preempt_count: %s/%d/0x%x\n",
3204 current->comm, current->pid, preempt_count()))
3205 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 3206
1da177e4
LT
3207 rq->prev_mm = NULL;
3208
3209 /*
3210 * A task struct has one reference for the use as "current".
c394cc9f 3211 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
3212 * schedule one last time. The schedule call will never return, and
3213 * the scheduled task must drop that reference.
95913d97
PZ
3214 *
3215 * We must observe prev->state before clearing prev->on_cpu (in
31cb1bc0 3216 * finish_task), otherwise a concurrent wakeup can get prev
95913d97
PZ
3217 * running on another CPU and we could race with its RUNNING -> DEAD
3218 * transition, resulting in a double drop.
1da177e4 3219 */
55a101f8 3220 prev_state = prev->state;
bf9fae9f 3221 vtime_task_switch(prev);
a8d757ef 3222 perf_event_task_sched_in(prev, current);
31cb1bc0 3223 finish_task(prev);
3224 finish_lock_switch(rq);
01f23e16 3225 finish_arch_post_lock_switch();
0ed557aa 3226 kcov_finish_switch(current);
e8fa1362 3227
e107be36 3228 fire_sched_in_preempt_notifiers(current);
306e0604 3229 /*
70216e18
MD
3230 * When switching through a kernel thread, the loop in
3231 * membarrier_{private,global}_expedited() may have observed that
3232 * kernel thread and not issued an IPI. It is therefore possible to
3233 * schedule between user->kernel->user threads without passing though
3234 * switch_mm(). Membarrier requires a barrier after storing to
3235 * rq->curr, before returning to userspace, so provide them here:
3236 *
3237 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
3238 * provided by mmdrop(),
3239 * - a sync_core for SYNC_CORE.
306e0604 3240 */
70216e18
MD
3241 if (mm) {
3242 membarrier_mm_sync_core_before_usermode(mm);
1da177e4 3243 mmdrop(mm);
70216e18 3244 }
1cef1150
PZ
3245 if (unlikely(prev_state == TASK_DEAD)) {
3246 if (prev->sched_class->task_dead)
3247 prev->sched_class->task_dead(prev);
68f24b08 3248
1cef1150
PZ
3249 /*
3250 * Remove function-return probe instances associated with this
3251 * task and put them back on the free list.
3252 */
3253 kprobe_flush_task(prev);
3254
3255 /* Task is done with its stack. */
3256 put_task_stack(prev);
3257
0ff7b2cf 3258 put_task_struct_rcu_user(prev);
c6fd91f0 3259 }
99e5ada9 3260
de734f89 3261 tick_nohz_task_switch();
dfa50b60 3262 return rq;
1da177e4
LT
3263}
3264
3f029d3c
GH
3265#ifdef CONFIG_SMP
3266
3f029d3c 3267/* rq->lock is NOT held, but preemption is disabled */
e3fca9e7 3268static void __balance_callback(struct rq *rq)
3f029d3c 3269{
e3fca9e7
PZ
3270 struct callback_head *head, *next;
3271 void (*func)(struct rq *rq);
3272 unsigned long flags;
3f029d3c 3273
e3fca9e7
PZ
3274 raw_spin_lock_irqsave(&rq->lock, flags);
3275 head = rq->balance_callback;
3276 rq->balance_callback = NULL;
3277 while (head) {
3278 func = (void (*)(struct rq *))head->func;
3279 next = head->next;
3280 head->next = NULL;
3281 head = next;
3f029d3c 3282
e3fca9e7 3283 func(rq);
3f029d3c 3284 }
e3fca9e7
PZ
3285 raw_spin_unlock_irqrestore(&rq->lock, flags);
3286}
3287
3288static inline void balance_callback(struct rq *rq)
3289{
3290 if (unlikely(rq->balance_callback))
3291 __balance_callback(rq);
3f029d3c
GH
3292}
3293
3294#else
da19ab51 3295
e3fca9e7 3296static inline void balance_callback(struct rq *rq)
3f029d3c 3297{
1da177e4
LT
3298}
3299
3f029d3c
GH
3300#endif
3301
1da177e4
LT
3302/**
3303 * schedule_tail - first thing a freshly forked thread must call.
3304 * @prev: the thread we just switched away from.
3305 */
722a9f92 3306asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
3307 __releases(rq->lock)
3308{
1a43a14a 3309 struct rq *rq;
da19ab51 3310
609ca066
PZ
3311 /*
3312 * New tasks start with FORK_PREEMPT_COUNT, see there and
3313 * finish_task_switch() for details.
3314 *
3315 * finish_task_switch() will drop rq->lock() and lower preempt_count
3316 * and the preempt_enable() will end up enabling preemption (on
3317 * PREEMPT_COUNT kernels).
3318 */
3319
dfa50b60 3320 rq = finish_task_switch(prev);
e3fca9e7 3321 balance_callback(rq);
1a43a14a 3322 preempt_enable();
70b97a7f 3323
1da177e4 3324 if (current->set_child_tid)
b488893a 3325 put_user(task_pid_vnr(current), current->set_child_tid);
088fe47c
EB
3326
3327 calculate_sigpending();
1da177e4
LT
3328}
3329
3330/*
dfa50b60 3331 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 3332 */
04936948 3333static __always_inline struct rq *
70b97a7f 3334context_switch(struct rq *rq, struct task_struct *prev,
d8ac8971 3335 struct task_struct *next, struct rq_flags *rf)
1da177e4 3336{
e107be36 3337 prepare_task_switch(rq, prev, next);
fe4b04fa 3338
9226d125
ZA
3339 /*
3340 * For paravirt, this is coupled with an exit in switch_to to
3341 * combine the page table reload and the switch backend into
3342 * one hypercall.
3343 */
224101ed 3344 arch_start_context_switch(prev);
9226d125 3345
306e0604 3346 /*
139d025c
PZ
3347 * kernel -> kernel lazy + transfer active
3348 * user -> kernel lazy + mmgrab() active
3349 *
3350 * kernel -> user switch + mmdrop() active
3351 * user -> user switch
306e0604 3352 */
139d025c
PZ
3353 if (!next->mm) { // to kernel
3354 enter_lazy_tlb(prev->active_mm, next);
3355
3356 next->active_mm = prev->active_mm;
3357 if (prev->mm) // from user
3358 mmgrab(prev->active_mm);
3359 else
3360 prev->active_mm = NULL;
3361 } else { // to user
227a4aad 3362 membarrier_switch_mm(rq, prev->active_mm, next->mm);
139d025c
PZ
3363 /*
3364 * sys_membarrier() requires an smp_mb() between setting
227a4aad 3365 * rq->curr / membarrier_switch_mm() and returning to userspace.
139d025c
PZ
3366 *
3367 * The below provides this either through switch_mm(), or in
3368 * case 'prev->active_mm == next->mm' through
3369 * finish_task_switch()'s mmdrop().
3370 */
139d025c 3371 switch_mm_irqs_off(prev->active_mm, next->mm, next);
1da177e4 3372
139d025c
PZ
3373 if (!prev->mm) { // from kernel
3374 /* will mmdrop() in finish_task_switch(). */
3375 rq->prev_mm = prev->active_mm;
3376 prev->active_mm = NULL;
3377 }
1da177e4 3378 }
92509b73 3379
cb42c9a3 3380 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
92509b73 3381
269d5992 3382 prepare_lock_switch(rq, next, rf);
1da177e4
LT
3383
3384 /* Here we just switch the register state and the stack. */
3385 switch_to(prev, next, prev);
dd41f596 3386 barrier();
dfa50b60
ON
3387
3388 return finish_task_switch(prev);
1da177e4
LT
3389}
3390
3391/*
1c3e8264 3392 * nr_running and nr_context_switches:
1da177e4
LT
3393 *
3394 * externally visible scheduler statistics: current number of runnable
1c3e8264 3395 * threads, total number of context switches performed since bootup.
1da177e4
LT
3396 */
3397unsigned long nr_running(void)
3398{
3399 unsigned long i, sum = 0;
3400
3401 for_each_online_cpu(i)
3402 sum += cpu_rq(i)->nr_running;
3403
3404 return sum;
f711f609 3405}
1da177e4 3406
2ee507c4 3407/*
d1ccc66d 3408 * Check if only the current task is running on the CPU.
00cc1633
DD
3409 *
3410 * Caution: this function does not check that the caller has disabled
3411 * preemption, thus the result might have a time-of-check-to-time-of-use
3412 * race. The caller is responsible to use it correctly, for example:
3413 *
dfcb245e 3414 * - from a non-preemptible section (of course)
00cc1633
DD
3415 *
3416 * - from a thread that is bound to a single CPU
3417 *
3418 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
3419 */
3420bool single_task_running(void)
3421{
00cc1633 3422 return raw_rq()->nr_running == 1;
2ee507c4
TC
3423}
3424EXPORT_SYMBOL(single_task_running);
3425
1da177e4 3426unsigned long long nr_context_switches(void)
46cb4b7c 3427{
cc94abfc
SR
3428 int i;
3429 unsigned long long sum = 0;
46cb4b7c 3430
0a945022 3431 for_each_possible_cpu(i)
1da177e4 3432 sum += cpu_rq(i)->nr_switches;
46cb4b7c 3433
1da177e4
LT
3434 return sum;
3435}
483b4ee6 3436
145d952a
DL
3437/*
3438 * Consumers of these two interfaces, such as the cpuidle menu
3439 * governor, are using nonsensical data: they prefer a shallow idle state for
3440 * a CPU that has IO-wait pending, even though that CPU might not end up
3441 * running the task when it does become runnable.
3442 */
3443
3444unsigned long nr_iowait_cpu(int cpu)
3445{
3446 return atomic_read(&cpu_rq(cpu)->nr_iowait);
3447}
3448
e33a9bba
TH
3449/*
3450 * IO-wait accounting, and how it's mostly bollocks (on SMP).
3451 *
3452 * The idea behind IO-wait accounting is to account the idle time that we could
3453 * have spent running if it were not for IO. That is, if we were to improve the
3454 * storage performance, we'd have a proportional reduction in IO-wait time.
3455 *
3456 * This all works nicely on UP, where, when a task blocks on IO, we account
3457 * idle time as IO-wait, because if the storage were faster, it could've been
3458 * running and we'd not be idle.
3459 *
3460 * This has been extended to SMP, by doing the same for each CPU. This however
3461 * is broken.
3462 *
3463 * Imagine for instance the case where two tasks block on one CPU, only the one
3464 * CPU will have IO-wait accounted, while the other has regular idle. Even
 3465 * though, if the storage were faster, both could've run at the same time,
3466 * utilising both CPUs.
3467 *
 3468 * This means that, when looking globally, the current IO-wait accounting on
 3469 * SMP is a lower bound, due to under-accounting.
3470 *
3471 * Worse, since the numbers are provided per CPU, they are sometimes
3472 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
 3473 * associated with any one particular CPU; it can wake up on a different CPU
 3474 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
3475 *
3476 * Task CPU affinities can make all that even more 'interesting'.
3477 */
3478
1da177e4
LT
3479unsigned long nr_iowait(void)
3480{
3481 unsigned long i, sum = 0;
483b4ee6 3482
0a945022 3483 for_each_possible_cpu(i)
145d952a 3484 sum += nr_iowait_cpu(i);
46cb4b7c 3485
1da177e4
LT
3486 return sum;
3487}
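
The nr_iowait*() values surface in userspace as the iowait column of /proc/stat; given the caveats above, the only defensible use is to sum them and treat the total as a lower bound. A sketch assuming the conventional field order (user, nice, system, idle, iowait, ...):

/* sketch: sum the per-CPU iowait ticks exposed in /proc/stat */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	unsigned long long total_iowait = 0;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned long long user, nice, sys, idle, iowait;
		int cpu;

		/* only the per-CPU "cpuN" lines, not the aggregate "cpu" line */
		if (strncmp(line, "cpu", 3) == 0 && isdigit((unsigned char)line[3]) &&
		    sscanf(line + 3, "%d %llu %llu %llu %llu %llu",
			   &cpu, &user, &nice, &sys, &idle, &iowait) == 6)
			total_iowait += iowait;
	}
	fclose(f);
	/* per the comment above: a lower bound; do not read meaning into any single CPU's share */
	printf("summed iowait ticks: %llu\n", total_iowait);
	return 0;
}
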
483b4ee6 3488
dd41f596 3489#ifdef CONFIG_SMP
8a0be9ef 3490
46cb4b7c 3491/*
38022906
PZ
3492 * sched_exec - execve() is a valuable balancing opportunity, because at
3493 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 3494 */
38022906 3495void sched_exec(void)
46cb4b7c 3496{
38022906 3497 struct task_struct *p = current;
1da177e4 3498 unsigned long flags;
0017d735 3499 int dest_cpu;
46cb4b7c 3500
8f42ced9 3501 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 3502 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
3503 if (dest_cpu == smp_processor_id())
3504 goto unlock;
38022906 3505
8f42ced9 3506 if (likely(cpu_active(dest_cpu))) {
969c7921 3507 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 3508
8f42ced9
PZ
3509 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3510 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
3511 return;
3512 }
0017d735 3513unlock:
8f42ced9 3514 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 3515}
dd41f596 3516
1da177e4
LT
3517#endif
3518
1da177e4 3519DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 3520DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
3521
3522EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 3523EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 3524
6075620b
GG
3525/*
3526 * The function fair_sched_class.update_curr accesses the struct curr
3527 * and its field curr->exec_start; when called from task_sched_runtime(),
3528 * we observe a high rate of cache misses in practice.
3529 * Prefetching this data results in improved performance.
3530 */
3531static inline void prefetch_curr_exec_start(struct task_struct *p)
3532{
3533#ifdef CONFIG_FAIR_GROUP_SCHED
3534 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
3535#else
3536 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
3537#endif
3538 prefetch(curr);
3539 prefetch(&curr->exec_start);
3540}
3541
c5f8d995
HS
3542/*
3543 * Return accounted runtime for the task.
3544 * In case the task is currently running, return the runtime plus current's
 3545 * pending runtime that has not been accounted yet.
3546 */
3547unsigned long long task_sched_runtime(struct task_struct *p)
3548{
eb580751 3549 struct rq_flags rf;
c5f8d995 3550 struct rq *rq;
6e998916 3551 u64 ns;
c5f8d995 3552
911b2898
PZ
3553#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
3554 /*
97fb7a0a 3555 * 64-bit doesn't need locks to atomically read a 64-bit value.
911b2898
PZ
 3556 * So we have an optimization chance when the task's delta_exec is 0.
3557 * Reading ->on_cpu is racy, but this is ok.
3558 *
d1ccc66d
IM
3559 * If we race with it leaving CPU, we'll take a lock. So we're correct.
3560 * If we race with it entering CPU, unaccounted time is 0. This is
911b2898 3561 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
3562 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
3563 * been accounted, so we're correct here as well.
911b2898 3564 */
da0c1e65 3565 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
3566 return p->se.sum_exec_runtime;
3567#endif
3568
eb580751 3569 rq = task_rq_lock(p, &rf);
6e998916
SG
3570 /*
3571 * Must be ->curr _and_ ->on_rq. If dequeued, we would
3572 * project cycles that may never be accounted to this
3573 * thread, breaking clock_gettime().
3574 */
3575 if (task_current(rq, p) && task_on_rq_queued(p)) {
6075620b 3576 prefetch_curr_exec_start(p);
6e998916
SG
3577 update_rq_clock(rq);
3578 p->sched_class->update_curr(rq);
3579 }
3580 ns = p->se.sum_exec_runtime;
eb580751 3581 task_rq_unlock(rq, p, &rf);
c5f8d995
HS
3582
3583 return ns;
3584}
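
Userspace typically reaches this accounting through the per-thread CPU-time clock; a small illustration (hedged: the exact kernel path depends on the clock id, but on Linux the thread clock is ultimately served by runtime accounting like task_sched_runtime()):

/* sketch: sample the calling thread's accumulated CPU time */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	volatile unsigned long i;

	for (i = 0; i < 100000000UL; i++)	/* burn a little CPU so there is something to account */
		;

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0)
		return 1;
	printf("thread cputime: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
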
48f24c4d 3585
7835b98b
CL
3586/*
3587 * This function gets called by the timer code, with HZ frequency.
3588 * We call it with interrupts disabled.
7835b98b
CL
3589 */
3590void scheduler_tick(void)
3591{
7835b98b
CL
3592 int cpu = smp_processor_id();
3593 struct rq *rq = cpu_rq(cpu);
dd41f596 3594 struct task_struct *curr = rq->curr;
8a8c69c3 3595 struct rq_flags rf;
3e51f33f
PZ
3596
3597 sched_clock_tick();
dd41f596 3598
8a8c69c3
PZ
3599 rq_lock(rq, &rf);
3600
3e51f33f 3601 update_rq_clock(rq);
fa85ae24 3602 curr->sched_class->task_tick(rq, curr, 0);
3289bdb4 3603 calc_global_load_tick(rq);
eb414681 3604 psi_task_tick(rq);
8a8c69c3
PZ
3605
3606 rq_unlock(rq, &rf);
7835b98b 3607
e9d2b064 3608 perf_event_task_tick();
e220d2dc 3609
e418e1c2 3610#ifdef CONFIG_SMP
6eb57e0d 3611 rq->idle_balance = idle_cpu(cpu);
7caff66f 3612 trigger_load_balance(rq);
e418e1c2 3613#endif
1da177e4
LT
3614}
3615
265f22a9 3616#ifdef CONFIG_NO_HZ_FULL
d84b3131
FW
3617
3618struct tick_work {
3619 int cpu;
b55bd585 3620 atomic_t state;
d84b3131
FW
3621 struct delayed_work work;
3622};
b55bd585
PM
3623/* Values for ->state, see diagram below. */
3624#define TICK_SCHED_REMOTE_OFFLINE 0
3625#define TICK_SCHED_REMOTE_OFFLINING 1
3626#define TICK_SCHED_REMOTE_RUNNING 2
3627
3628/*
3629 * State diagram for ->state:
3630 *
3631 *
3632 * TICK_SCHED_REMOTE_OFFLINE
3633 * | ^
3634 * | |
3635 * | | sched_tick_remote()
3636 * | |
3637 * | |
3638 * +--TICK_SCHED_REMOTE_OFFLINING
3639 * | ^
3640 * | |
3641 * sched_tick_start() | | sched_tick_stop()
3642 * | |
3643 * V |
3644 * TICK_SCHED_REMOTE_RUNNING
3645 *
3646 *
3647 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
3648 * and sched_tick_start() are happy to leave the state in RUNNING.
3649 */
d84b3131
FW
3650
3651static struct tick_work __percpu *tick_work_cpu;
3652
3653static void sched_tick_remote(struct work_struct *work)
3654{
3655 struct delayed_work *dwork = to_delayed_work(work);
3656 struct tick_work *twork = container_of(dwork, struct tick_work, work);
3657 int cpu = twork->cpu;
3658 struct rq *rq = cpu_rq(cpu);
d9c0ffca 3659 struct task_struct *curr;
d84b3131 3660 struct rq_flags rf;
d9c0ffca 3661 u64 delta;
b55bd585 3662 int os;
d84b3131
FW
3663
3664 /*
3665 * Handle the tick only if it appears the remote CPU is running in full
3666 * dynticks mode. The check is racy by nature, but missing a tick or
 3667 * having one too many is no big deal because the scheduler tick updates
3668 * statistics and checks timeslices in a time-independent way, regardless
3669 * of when exactly it is running.
3670 */
d9c0ffca
FW
3671 if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
3672 goto out_requeue;
d84b3131 3673
d9c0ffca
FW
3674 rq_lock_irq(rq, &rf);
3675 curr = rq->curr;
b55bd585 3676 if (is_idle_task(curr) || cpu_is_offline(cpu))
d9c0ffca 3677 goto out_unlock;
d84b3131 3678
d9c0ffca
FW
3679 update_rq_clock(rq);
3680 delta = rq_clock_task(rq) - curr->se.exec_start;
3681
3682 /*
3683 * Make sure the next tick runs within a reasonable
3684 * amount of time.
3685 */
3686 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
3687 curr->sched_class->task_tick(rq, curr, 0);
3688
3689out_unlock:
3690 rq_unlock_irq(rq, &rf);
d84b3131 3691
d9c0ffca 3692out_requeue:
d84b3131
FW
3693 /*
3694 * Run the remote tick once per second (1Hz). This arbitrary
3695 * frequency is large enough to avoid overload but short enough
b55bd585
PM
3696 * to keep scheduler internal stats reasonably up to date. But
3697 * first update state to reflect hotplug activity if required.
d84b3131 3698 */
b55bd585
PM
3699 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
3700 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
3701 if (os == TICK_SCHED_REMOTE_RUNNING)
3702 queue_delayed_work(system_unbound_wq, dwork, HZ);
d84b3131
FW
3703}
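
The OFFLINE/OFFLINING/RUNNING handshake drawn above can be modelled in isolation. The following standalone C11-atomics sketch mirrors the atomic_xchg() in start/stop and the conditional decrement in the work function; the names and the model itself are illustrative only, not kernel code:

/* sketch: three-state remote-tick handshake with C11 atomics */
#include <stdatomic.h>
#include <stdio.h>

enum { REMOTE_OFFLINE = 0, REMOTE_OFFLINING = 1, REMOTE_RUNNING = 2 };

static _Atomic int state = REMOTE_OFFLINE;

static void tick_start(void)
{
	int os = atomic_exchange(&state, REMOTE_RUNNING);

	if (os == REMOTE_OFFLINE)
		printf("start: queue the delayed work\n");
	/* os == REMOTE_OFFLINING: pending work will see RUNNING and re-queue itself */
}

static void tick_stop(void)
{
	/* the work function is responsible for stepping OFFLINING -> OFFLINE */
	atomic_exchange(&state, REMOTE_OFFLINING);
}

static void tick_work(void)
{
	/* equivalent of atomic_fetch_add_unless(&state, -1, RUNNING):
	 * decrement OFFLINING -> OFFLINE, but leave RUNNING untouched */
	int os = atomic_load(&state);

	while (os != REMOTE_RUNNING &&
	       !atomic_compare_exchange_weak(&state, &os, os - 1))
		;
	if (os == REMOTE_RUNNING)
		printf("work: still running, re-queue in 1s\n");
	else
		printf("work: offlining acknowledged, do not re-queue\n");
}

int main(void)
{
	tick_start();
	tick_work();
	tick_stop();
	tick_work();
	return 0;
}
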
3704
3705static void sched_tick_start(int cpu)
3706{
b55bd585 3707 int os;
d84b3131
FW
3708 struct tick_work *twork;
3709
3710 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
3711 return;
3712
3713 WARN_ON_ONCE(!tick_work_cpu);
3714
3715 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
3716 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
3717 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
3718 if (os == TICK_SCHED_REMOTE_OFFLINE) {
3719 twork->cpu = cpu;
3720 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
3721 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
3722 }
d84b3131
FW
3723}
3724
3725#ifdef CONFIG_HOTPLUG_CPU
3726static void sched_tick_stop(int cpu)
3727{
3728 struct tick_work *twork;
b55bd585 3729 int os;
d84b3131
FW
3730
3731 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
3732 return;
3733
3734 WARN_ON_ONCE(!tick_work_cpu);
3735
3736 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
3737 /* There cannot be competing actions, but don't rely on stop-machine. */
3738 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
3739 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
3740 /* Don't cancel, as this would mess up the state machine. */
d84b3131
FW
3741}
3742#endif /* CONFIG_HOTPLUG_CPU */
3743
3744int __init sched_tick_offload_init(void)
3745{
3746 tick_work_cpu = alloc_percpu(struct tick_work);
3747 BUG_ON(!tick_work_cpu);
d84b3131
FW
3748 return 0;
3749}
3750
3751#else /* !CONFIG_NO_HZ_FULL */
3752static inline void sched_tick_start(int cpu) { }
3753static inline void sched_tick_stop(int cpu) { }
265f22a9 3754#endif
1da177e4 3755
c1a280b6 3756#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
c3bc8fd6 3757 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
47252cfb
SR
3758/*
3759 * If the value passed in is equal to the current preempt count
3760 * then we just disabled preemption. Start timing the latency.
3761 */
3762static inline void preempt_latency_start(int val)
3763{
3764 if (preempt_count() == val) {
3765 unsigned long ip = get_lock_parent_ip();
3766#ifdef CONFIG_DEBUG_PREEMPT
3767 current->preempt_disable_ip = ip;
3768#endif
3769 trace_preempt_off(CALLER_ADDR0, ip);
3770 }
3771}
7e49fcce 3772
edafe3a5 3773void preempt_count_add(int val)
1da177e4 3774{
6cd8a4bb 3775#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3776 /*
3777 * Underflow?
3778 */
9a11b49a
IM
3779 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3780 return;
6cd8a4bb 3781#endif
bdb43806 3782 __preempt_count_add(val);
6cd8a4bb 3783#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3784 /*
3785 * Spinlock count overflowing soon?
3786 */
33859f7f
MOS
3787 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3788 PREEMPT_MASK - 10);
6cd8a4bb 3789#endif
47252cfb 3790 preempt_latency_start(val);
1da177e4 3791}
bdb43806 3792EXPORT_SYMBOL(preempt_count_add);
edafe3a5 3793NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 3794
47252cfb
SR
3795/*
 3796 * If the value passed in equals the current preempt count
3797 * then we just enabled preemption. Stop timing the latency.
3798 */
3799static inline void preempt_latency_stop(int val)
3800{
3801 if (preempt_count() == val)
3802 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3803}
3804
edafe3a5 3805void preempt_count_sub(int val)
1da177e4 3806{
6cd8a4bb 3807#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3808 /*
3809 * Underflow?
3810 */
01e3eb82 3811 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 3812 return;
1da177e4
LT
3813 /*
3814 * Is the spinlock portion underflowing?
3815 */
9a11b49a
IM
3816 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3817 !(preempt_count() & PREEMPT_MASK)))
3818 return;
6cd8a4bb 3819#endif
9a11b49a 3820
47252cfb 3821 preempt_latency_stop(val);
bdb43806 3822 __preempt_count_sub(val);
1da177e4 3823}
bdb43806 3824EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 3825NOKPROBE_SYMBOL(preempt_count_sub);
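
At its core the add/sub pair above is a nesting counter with debug checks; a tiny userspace model of the same discipline (purely illustrative, not the kernel API):

/* sketch: a nesting "preemption off" counter with an underflow check */
#include <assert.h>
#include <stdio.h>

static int fake_preempt_count;

static void fake_preempt_disable(void)
{
	fake_preempt_count++;		/* preemption stays off while the count is > 0 */
}

static void fake_preempt_enable(void)
{
	assert(fake_preempt_count > 0);	/* mirrors the DEBUG_LOCKS_WARN_ON underflow check */
	if (--fake_preempt_count == 0)
		printf("count hit zero: a reschedule may happen here\n");
}

int main(void)
{
	fake_preempt_disable();
	fake_preempt_disable();		/* nesting is allowed; only the outermost enable matters */
	fake_preempt_enable();
	fake_preempt_enable();
	return 0;
}
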
1da177e4 3826
47252cfb
SR
3827#else
3828static inline void preempt_latency_start(int val) { }
3829static inline void preempt_latency_stop(int val) { }
1da177e4
LT
3830#endif
3831
59ddbcb2
IM
3832static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
3833{
3834#ifdef CONFIG_DEBUG_PREEMPT
3835 return p->preempt_disable_ip;
3836#else
3837 return 0;
3838#endif
3839}
3840
1da177e4 3841/*
dd41f596 3842 * Print scheduling while atomic bug:
1da177e4 3843 */
dd41f596 3844static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3845{
d1c6d149
VN
3846 /* Save this before calling printk(), since that will clobber it */
3847 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
3848
664dfa65
DJ
3849 if (oops_in_progress)
3850 return;
3851
3df0fc5b
PZ
3852 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3853 prev->comm, prev->pid, preempt_count());
838225b4 3854
dd41f596 3855 debug_show_held_locks(prev);
e21f5b15 3856 print_modules();
dd41f596
IM
3857 if (irqs_disabled())
3858 print_irqtrace_events(prev);
d1c6d149
VN
3859 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
3860 && in_atomic_preempt_off()) {
8f47b187 3861 pr_err("Preemption disabled at:");
d1c6d149 3862 print_ip_sym(preempt_disable_ip);
8f47b187
TG
3863 pr_cont("\n");
3864 }
748c7201
DBO
3865 if (panic_on_warn)
3866 panic("scheduling while atomic\n");
3867
6135fc1e 3868 dump_stack();
373d4d09 3869 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 3870}
1da177e4 3871
dd41f596
IM
3872/*
3873 * Various schedule()-time debugging checks and statistics:
3874 */
312364f3 3875static inline void schedule_debug(struct task_struct *prev, bool preempt)
dd41f596 3876{
0d9e2632 3877#ifdef CONFIG_SCHED_STACK_END_CHECK
29d64551
JH
3878 if (task_stack_end_corrupted(prev))
3879 panic("corrupted stack end detected inside scheduler\n");
0d9e2632 3880#endif
b99def8b 3881
312364f3
DV
3882#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
3883 if (!preempt && prev->state && prev->non_block_count) {
3884 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
3885 prev->comm, prev->pid, prev->non_block_count);
3886 dump_stack();
3887 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3888 }
3889#endif
3890
1dc0fffc 3891 if (unlikely(in_atomic_preempt_off())) {
dd41f596 3892 __schedule_bug(prev);
1dc0fffc
PZ
3893 preempt_count_set(PREEMPT_DISABLED);
3894 }
b3fbab05 3895 rcu_sleep_check();
dd41f596 3896
1da177e4
LT
3897 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3898
ae92882e 3899 schedstat_inc(this_rq()->sched_count);
dd41f596
IM
3900}
3901
3902/*
3903 * Pick up the highest-prio task:
3904 */
3905static inline struct task_struct *
d8ac8971 3906pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
dd41f596 3907{
49ee5768 3908 const struct sched_class *class;
dd41f596 3909 struct task_struct *p;
1da177e4
LT
3910
3911 /*
0ba87bb2
PZ
3912 * Optimization: we know that if all tasks are in the fair class we can
3913 * call that function directly, but only if the @prev task wasn't of a
 3914 * higher scheduling class, because otherwise those lose the
3915 * opportunity to pull in more work from other CPUs.
1da177e4 3916 */
0ba87bb2
PZ
3917 if (likely((prev->sched_class == &idle_sched_class ||
3918 prev->sched_class == &fair_sched_class) &&
3919 rq->nr_running == rq->cfs.h_nr_running)) {
3920
5d7d6056 3921 p = pick_next_task_fair(rq, prev, rf);
6ccdc84b 3922 if (unlikely(p == RETRY_TASK))
67692435 3923 goto restart;
6ccdc84b 3924
d1ccc66d 3925 /* Assumes fair_sched_class->next == idle_sched_class */
5d7d6056 3926 if (!p) {
f488e105 3927 put_prev_task(rq, prev);
98c2f700 3928 p = pick_next_task_idle(rq);
f488e105 3929 }
6ccdc84b
PZ
3930
3931 return p;
1da177e4
LT
3932 }
3933
67692435 3934restart:
6e2df058 3935#ifdef CONFIG_SMP
67692435 3936 /*
6e2df058
PZ
3937 * We must do the balancing pass before put_next_task(), such
3938 * that when we release the rq->lock the task is in the same
3939 * state as before we took rq->lock.
3940 *
3941 * We can terminate the balance pass as soon as we know there is
3942 * a runnable task of @class priority or higher.
67692435 3943 */
6e2df058
PZ
3944 for_class_range(class, prev->sched_class, &idle_sched_class) {
3945 if (class->balance(rq, prev, rf))
3946 break;
3947 }
3948#endif
3949
3950 put_prev_task(rq, prev);
67692435 3951
34f971f6 3952 for_each_class(class) {
98c2f700 3953 p = class->pick_next_task(rq);
67692435 3954 if (p)
dd41f596 3955 return p;
dd41f596 3956 }
34f971f6 3957
d1ccc66d
IM
3958 /* The idle class should always have a runnable task: */
3959 BUG();
dd41f596 3960}
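
The slow path above is simply "ask each scheduling class, in priority order, for a task". A standalone sketch of that iteration using function pointers; the class names and array are illustrative only:

/* sketch: pick a task by walking fake classes from highest to lowest priority */
#include <stddef.h>
#include <stdio.h>

struct fake_task { const char *name; };

struct fake_class {
	const char *name;
	struct fake_task *(*pick)(void);
};

static struct fake_task rt_task = { "rt worker" };

static struct fake_task *pick_stop(void) { return NULL; }
static struct fake_task *pick_rt(void)   { return &rt_task; }
static struct fake_task *pick_fair(void) { return NULL; }
static struct fake_task *pick_idle(void) { return NULL; }	/* the real idle class never fails */

static const struct fake_class classes[] = {
	{ "stop", pick_stop }, { "rt", pick_rt }, { "fair", pick_fair }, { "idle", pick_idle },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct fake_task *p = classes[i].pick();

		if (p) {
			printf("picked '%s' from the %s class\n", p->name, classes[i].name);
			return 0;
		}
	}
	printf("no runnable task (the real idle class guarantees this cannot happen)\n");
	return 0;
}
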
1da177e4 3961
dd41f596 3962/*
c259e01a 3963 * __schedule() is the main scheduler function.
edde96ea
PE
3964 *
3965 * The main means of driving the scheduler and thus entering this function are:
3966 *
3967 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3968 *
3969 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3970 * paths. For example, see arch/x86/entry_64.S.
3971 *
3972 * To drive preemption between tasks, the scheduler sets the flag in timer
3973 * interrupt handler scheduler_tick().
3974 *
3975 * 3. Wakeups don't really cause entry into schedule(). They add a
3976 * task to the run-queue and that's it.
3977 *
3978 * Now, if the new task added to the run-queue preempts the current
3979 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3980 * called on the nearest possible occasion:
3981 *
c1a280b6 3982 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
edde96ea
PE
3983 *
 3984 * - in syscall or exception context, at the next outermost
3985 * preempt_enable(). (this might be as soon as the wake_up()'s
3986 * spin_unlock()!)
3987 *
3988 * - in IRQ context, return from interrupt-handler to
3989 * preemptible context
3990 *
c1a280b6 3991 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
edde96ea
PE
3992 * then at the next:
3993 *
3994 * - cond_resched() call
3995 * - explicit schedule() call
3996 * - return from syscall or exception to user-space
3997 * - return from interrupt-handler to user-space
bfd9b2b5 3998 *
b30f0e3f 3999 * WARNING: must be called with preemption disabled!
dd41f596 4000 */
499d7955 4001static void __sched notrace __schedule(bool preempt)
dd41f596
IM
4002{
4003 struct task_struct *prev, *next;
67ca7bde 4004 unsigned long *switch_count;
d8ac8971 4005 struct rq_flags rf;
dd41f596 4006 struct rq *rq;
31656519 4007 int cpu;
dd41f596 4008
dd41f596
IM
4009 cpu = smp_processor_id();
4010 rq = cpu_rq(cpu);
dd41f596 4011 prev = rq->curr;
dd41f596 4012
312364f3 4013 schedule_debug(prev, preempt);
1da177e4 4014
31656519 4015 if (sched_feat(HRTICK))
f333fdc9 4016 hrtick_clear(rq);
8f4d37ec 4017
46a5d164 4018 local_irq_disable();
bcbfdd01 4019 rcu_note_context_switch(preempt);
46a5d164 4020
e0acd0a6
ON
4021 /*
4022 * Make sure that signal_pending_state()->signal_pending() below
4023 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
4024 * done by the caller to avoid the race with signal_wake_up().
306e0604
MD
4025 *
4026 * The membarrier system call requires a full memory barrier
4027 * after coming from user-space, before storing to rq->curr.
e0acd0a6 4028 */
8a8c69c3 4029 rq_lock(rq, &rf);
d89e588c 4030 smp_mb__after_spinlock();
1da177e4 4031
d1ccc66d
IM
4032 /* Promote REQ to ACT */
4033 rq->clock_update_flags <<= 1;
bce4dc80 4034 update_rq_clock(rq);
9edfbfed 4035
246d86b5 4036 switch_count = &prev->nivcsw;
fc13aeba 4037 if (!preempt && prev->state) {
34ec35ad 4038 if (signal_pending_state(prev->state, prev)) {
1da177e4 4039 prev->state = TASK_RUNNING;
21aa9af0 4040 } else {
bce4dc80 4041 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
2acca55e 4042
e33a9bba
TH
4043 if (prev->in_iowait) {
4044 atomic_inc(&rq->nr_iowait);
4045 delayacct_blkio_start();
4046 }
21aa9af0 4047 }
dd41f596 4048 switch_count = &prev->nvcsw;
1da177e4
LT
4049 }
4050
d8ac8971 4051 next = pick_next_task(rq, prev, &rf);
f26f9aff 4052 clear_tsk_need_resched(prev);
f27dde8d 4053 clear_preempt_need_resched();
1da177e4 4054
1da177e4 4055 if (likely(prev != next)) {
1da177e4 4056 rq->nr_switches++;
5311a98f
EB
4057 /*
4058 * RCU users of rcu_dereference(rq->curr) may not see
4059 * changes to task_struct made by pick_next_task().
4060 */
4061 RCU_INIT_POINTER(rq->curr, next);
22e4ebb9
MD
4062 /*
4063 * The membarrier system call requires each architecture
4064 * to have a full memory barrier after updating
306e0604
MD
4065 * rq->curr, before returning to user-space.
4066 *
4067 * Here are the schemes providing that barrier on the
4068 * various architectures:
4069 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
4070 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
4071 * - finish_lock_switch() for weakly-ordered
4072 * architectures where spin_unlock is a full barrier,
4073 * - switch_to() for arm64 (weakly-ordered, spin_unlock
4074 * is a RELEASE barrier),
22e4ebb9 4075 */
1da177e4
LT
4076 ++*switch_count;
4077
c73464b1 4078 trace_sched_switch(preempt, prev, next);
d1ccc66d
IM
4079
4080 /* Also unlocks the rq: */
4081 rq = context_switch(rq, prev, next, &rf);
cbce1a68 4082 } else {
cb42c9a3 4083 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
8a8c69c3 4084 rq_unlock_irq(rq, &rf);
cbce1a68 4085 }
1da177e4 4086
e3fca9e7 4087 balance_callback(rq);
1da177e4 4088}
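
For reference, the ordering that the signal_pending_state() comment above relies on is provided by the canonical caller-side blocking pattern. A kernel-style sketch only (not compilable on its own; it assumes a driver-defined wait queue and a `condition` flag set by the waker before wake_up()):

for (;;) {
	set_current_state(TASK_INTERRUPTIBLE);	/* publish the state first ... */
	if (condition)				/* ... then re-check the condition */
		break;
	schedule();				/* __schedule() dequeues us only if still !condition */
}
__set_current_state(TASK_RUNNING);
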
c259e01a 4089
9af6528e
PZ
4090void __noreturn do_task_dead(void)
4091{
d1ccc66d 4092 /* Causes final put_task_struct in finish_task_switch(): */
b5bf9a90 4093 set_special_state(TASK_DEAD);
d1ccc66d
IM
4094
4095 /* Tell freezer to ignore us: */
4096 current->flags |= PF_NOFREEZE;
4097
9af6528e
PZ
4098 __schedule(false);
4099 BUG();
d1ccc66d
IM
4100
4101 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
9af6528e 4102 for (;;)
d1ccc66d 4103 cpu_relax();
9af6528e
PZ
4104}
4105
9c40cef2
TG
4106static inline void sched_submit_work(struct task_struct *tsk)
4107{
b0fdc013 4108 if (!tsk->state)
9c40cef2 4109 return;
6d25be57
TG
4110
4111 /*
4112 * If a worker went to sleep, notify and ask workqueue whether
4113 * it wants to wake up a task to maintain concurrency.
4114 * As this function is called inside the schedule() context,
4115 * we disable preemption to avoid it calling schedule() again
4116 * in the possible wakeup of a kworker.
4117 */
771b53d0 4118 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6d25be57 4119 preempt_disable();
771b53d0
JA
4120 if (tsk->flags & PF_WQ_WORKER)
4121 wq_worker_sleeping(tsk);
4122 else
4123 io_wq_worker_sleeping(tsk);
6d25be57
TG
4124 preempt_enable_no_resched();
4125 }
4126
b0fdc013
SAS
4127 if (tsk_is_pi_blocked(tsk))
4128 return;
4129
9c40cef2
TG
4130 /*
4131 * If we are going to sleep and we have plugged IO queued,
4132 * make sure to submit it to avoid deadlocks.
4133 */
4134 if (blk_needs_flush_plug(tsk))
4135 blk_schedule_flush_plug(tsk);
4136}
4137
6d25be57
TG
4138static void sched_update_worker(struct task_struct *tsk)
4139{
771b53d0
JA
4140 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
4141 if (tsk->flags & PF_WQ_WORKER)
4142 wq_worker_running(tsk);
4143 else
4144 io_wq_worker_running(tsk);
4145 }
6d25be57
TG
4146}
4147
722a9f92 4148asmlinkage __visible void __sched schedule(void)
c259e01a 4149{
9c40cef2
TG
4150 struct task_struct *tsk = current;
4151
4152 sched_submit_work(tsk);
bfd9b2b5 4153 do {
b30f0e3f 4154 preempt_disable();
fc13aeba 4155 __schedule(false);
b30f0e3f 4156 sched_preempt_enable_no_resched();
bfd9b2b5 4157 } while (need_resched());
6d25be57 4158 sched_update_worker(tsk);
c259e01a 4159}
1da177e4
LT
4160EXPORT_SYMBOL(schedule);
4161
8663effb
SRV
4162/*
4163 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
4164 * state (have scheduled out non-voluntarily) by making sure that all
4165 * tasks have either left the run queue or have gone into user space.
4166 * As idle tasks do not do either, they must not ever be preempted
4167 * (schedule out non-voluntarily).
4168 *
 4169 * schedule_idle() is similar to schedule_preempt_disabled() except that it
4170 * never enables preemption because it does not call sched_submit_work().
4171 */
4172void __sched schedule_idle(void)
4173{
4174 /*
4175 * As this skips calling sched_submit_work(), which the idle task does
4176 * regardless because that function is a nop when the task is in a
4177 * TASK_RUNNING state, make sure this isn't used someplace that the
4178 * current task can be in any other state. Note, idle is always in the
4179 * TASK_RUNNING state.
4180 */
4181 WARN_ON_ONCE(current->state);
4182 do {
4183 __schedule(false);
4184 } while (need_resched());
4185}
4186
91d1aa43 4187#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 4188asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
4189{
4190 /*
4191 * If we come here after a random call to set_need_resched(),
4192 * or we have been woken up remotely but the IPI has not yet arrived,
4193 * we haven't yet exited the RCU idle mode. Do it here manually until
4194 * we find a better solution.
7cc78f8f
AL
4195 *
4196 * NB: There are buggy callers of this function. Ideally we
c467ea76 4197 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 4198 * too frequently to make sense yet.
20ab65e3 4199 */
7cc78f8f 4200 enum ctx_state prev_state = exception_enter();
20ab65e3 4201 schedule();
7cc78f8f 4202 exception_exit(prev_state);
20ab65e3
FW
4203}
4204#endif
4205
c5491ea7
TG
4206/**
4207 * schedule_preempt_disabled - called with preemption disabled
4208 *
4209 * Returns with preemption disabled. Note: preempt_count must be 1
4210 */
4211void __sched schedule_preempt_disabled(void)
4212{
ba74c144 4213 sched_preempt_enable_no_resched();
c5491ea7
TG
4214 schedule();
4215 preempt_disable();
4216}
4217
06b1f808 4218static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
4219{
4220 do {
47252cfb
SR
4221 /*
4222 * Because the function tracer can trace preempt_count_sub()
4223 * and it also uses preempt_enable/disable_notrace(), if
4224 * NEED_RESCHED is set, the preempt_enable_notrace() called
4225 * by the function tracer will call this function again and
4226 * cause infinite recursion.
4227 *
4228 * Preemption must be disabled here before the function
4229 * tracer can trace. Break up preempt_disable() into two
4230 * calls. One to disable preemption without fear of being
4231 * traced. The other to still record the preemption latency,
4232 * which can also be traced by the function tracer.
4233 */
499d7955 4234 preempt_disable_notrace();
47252cfb 4235 preempt_latency_start(1);
fc13aeba 4236 __schedule(true);
47252cfb 4237 preempt_latency_stop(1);
499d7955 4238 preempt_enable_no_resched_notrace();
a18b5d01
FW
4239
4240 /*
4241 * Check again in case we missed a preemption opportunity
4242 * between schedule and now.
4243 */
a18b5d01
FW
4244 } while (need_resched());
4245}
4246
c1a280b6 4247#ifdef CONFIG_PREEMPTION
1da177e4 4248/*
a49b4f40
VS
4249 * This is the entry point to schedule() from in-kernel preemption
4250 * off of preempt_enable.
1da177e4 4251 */
722a9f92 4252asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 4253{
1da177e4
LT
4254 /*
4255 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 4256 * we do not want to preempt the current task. Just return..
1da177e4 4257 */
fbb00b56 4258 if (likely(!preemptible()))
1da177e4
LT
4259 return;
4260
a18b5d01 4261 preempt_schedule_common();
1da177e4 4262}
376e2424 4263NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 4264EXPORT_SYMBOL(preempt_schedule);
009f60e2 4265
009f60e2 4266/**
4eaca0a8 4267 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
4268 *
4269 * The tracing infrastructure uses preempt_enable_notrace to prevent
4270 * recursion and tracing preempt enabling caused by the tracing
4271 * infrastructure itself. But as tracing can happen in areas coming
4272 * from userspace or just about to enter userspace, a preempt enable
4273 * can occur before user_exit() is called. This will cause the scheduler
4274 * to be called when the system is still in usermode.
4275 *
4276 * To prevent this, the preempt_enable_notrace will use this function
4277 * instead of preempt_schedule() to exit user context if needed before
4278 * calling the scheduler.
4279 */
4eaca0a8 4280asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
4281{
4282 enum ctx_state prev_ctx;
4283
4284 if (likely(!preemptible()))
4285 return;
4286
4287 do {
47252cfb
SR
4288 /*
4289 * Because the function tracer can trace preempt_count_sub()
4290 * and it also uses preempt_enable/disable_notrace(), if
4291 * NEED_RESCHED is set, the preempt_enable_notrace() called
4292 * by the function tracer will call this function again and
4293 * cause infinite recursion.
4294 *
4295 * Preemption must be disabled here before the function
4296 * tracer can trace. Break up preempt_disable() into two
4297 * calls. One to disable preemption without fear of being
4298 * traced. The other to still record the preemption latency,
4299 * which can also be traced by the function tracer.
4300 */
3d8f74dd 4301 preempt_disable_notrace();
47252cfb 4302 preempt_latency_start(1);
009f60e2
ON
4303 /*
4304 * Needs preempt disabled in case user_exit() is traced
4305 * and the tracer calls preempt_enable_notrace() causing
4306 * an infinite recursion.
4307 */
4308 prev_ctx = exception_enter();
fc13aeba 4309 __schedule(true);
009f60e2
ON
4310 exception_exit(prev_ctx);
4311
47252cfb 4312 preempt_latency_stop(1);
3d8f74dd 4313 preempt_enable_no_resched_notrace();
009f60e2
ON
4314 } while (need_resched());
4315}
4eaca0a8 4316EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 4317
c1a280b6 4318#endif /* CONFIG_PREEMPTION */
1da177e4
LT
4319
4320/*
a49b4f40 4321 * This is the entry point to schedule() from kernel preemption
1da177e4
LT
4322 * off of irq context.
4323 * Note, that this is called and return with irqs disabled. This will
4324 * protect us against recursive calling from irq.
4325 */
722a9f92 4326asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 4327{
b22366cd 4328 enum ctx_state prev_state;
6478d880 4329
2ed6e34f 4330 /* Catch callers which need to be fixed */
f27dde8d 4331 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 4332
b22366cd
FW
4333 prev_state = exception_enter();
4334
3a5c359a 4335 do {
3d8f74dd 4336 preempt_disable();
3a5c359a 4337 local_irq_enable();
fc13aeba 4338 __schedule(true);
3a5c359a 4339 local_irq_disable();
3d8f74dd 4340 sched_preempt_enable_no_resched();
5ed0cec0 4341 } while (need_resched());
b22366cd
FW
4342
4343 exception_exit(prev_state);
1da177e4
LT
4344}
4345
ac6424b9 4346int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
95cdf3b7 4347 void *key)
1da177e4 4348{
63859d4f 4349 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 4350}
1da177e4
LT
4351EXPORT_SYMBOL(default_wake_function);
4352
b29739f9
IM
4353#ifdef CONFIG_RT_MUTEXES
4354
acd58620
PZ
4355static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
4356{
4357 if (pi_task)
4358 prio = min(prio, pi_task->prio);
4359
4360 return prio;
4361}
4362
4363static inline int rt_effective_prio(struct task_struct *p, int prio)
4364{
4365 struct task_struct *pi_task = rt_mutex_get_top_task(p);
4366
4367 return __rt_effective_prio(pi_task, prio);
4368}
4369
b29739f9
IM
4370/*
4371 * rt_mutex_setprio - set the current priority of a task
acd58620
PZ
4372 * @p: task to boost
4373 * @pi_task: donor task
b29739f9
IM
4374 *
4375 * This function changes the 'effective' priority of a task. It does
4376 * not touch ->normal_prio like __setscheduler().
4377 *
c365c292
TG
4378 * Used by the rt_mutex code to implement priority inheritance
4379 * logic. Call site only calls if the priority of the task changed.
b29739f9 4380 */
acd58620 4381void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
b29739f9 4382{
acd58620 4383 int prio, oldprio, queued, running, queue_flag =
7a57f32a 4384 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
83ab0aa0 4385 const struct sched_class *prev_class;
eb580751
PZ
4386 struct rq_flags rf;
4387 struct rq *rq;
b29739f9 4388
acd58620
PZ
4389 /* XXX used to be waiter->prio, not waiter->task->prio */
4390 prio = __rt_effective_prio(pi_task, p->normal_prio);
4391
4392 /*
4393 * If nothing changed; bail early.
4394 */
4395 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
4396 return;
b29739f9 4397
eb580751 4398 rq = __task_rq_lock(p, &rf);
80f5c1b8 4399 update_rq_clock(rq);
acd58620
PZ
4400 /*
4401 * Set under pi_lock && rq->lock, such that the value can be used under
4402 * either lock.
4403 *
 4404 * Note that there is a lot of trickiness in making this pointer cache work
4405 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
4406 * ensure a task is de-boosted (pi_task is set to NULL) before the
4407 * task is allowed to run again (and can exit). This ensures the pointer
 4408 * points to a blocked task -- which guarantees the task is present.
4409 */
4410 p->pi_top_task = pi_task;
4411
4412 /*
4413 * For FIFO/RR we only need to set prio, if that matches we're done.
4414 */
4415 if (prio == p->prio && !dl_prio(prio))
4416 goto out_unlock;
b29739f9 4417
1c4dd99b
TG
4418 /*
 4419 * Idle task boosting is a no-no in general. There is one
 4420 * exception, when PREEMPT_RT and NOHZ are active:
4421 *
4422 * The idle task calls get_next_timer_interrupt() and holds
4423 * the timer wheel base->lock on the CPU and another CPU wants
4424 * to access the timer (probably to cancel it). We can safely
4425 * ignore the boosting request, as the idle CPU runs this code
4426 * with interrupts disabled and will complete the lock
4427 * protected section without being interrupted. So there is no
4428 * real need to boost.
4429 */
4430 if (unlikely(p == rq->idle)) {
4431 WARN_ON(p != rq->curr);
4432 WARN_ON(p->pi_blocked_on);
4433 goto out_unlock;
4434 }
4435
b91473ff 4436 trace_sched_pi_setprio(p, pi_task);
d5f9f942 4437 oldprio = p->prio;
ff77e468
PZ
4438
4439 if (oldprio == prio)
4440 queue_flag &= ~DEQUEUE_MOVE;
4441
83ab0aa0 4442 prev_class = p->sched_class;
da0c1e65 4443 queued = task_on_rq_queued(p);
051a1d1a 4444 running = task_current(rq, p);
da0c1e65 4445 if (queued)
ff77e468 4446 dequeue_task(rq, p, queue_flag);
0e1f3483 4447 if (running)
f3cd1c4e 4448 put_prev_task(rq, p);
dd41f596 4449
2d3d891d
DF
4450 /*
 4451 * Boosting conditions are:
4452 * 1. -rt task is running and holds mutex A
4453 * --> -dl task blocks on mutex A
4454 *
4455 * 2. -dl task is running and holds mutex A
4456 * --> -dl task blocks on mutex A and could preempt the
4457 * running task
4458 */
4459 if (dl_prio(prio)) {
466af29b
ON
4460 if (!dl_prio(p->normal_prio) ||
4461 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 4462 p->dl.dl_boosted = 1;
ff77e468 4463 queue_flag |= ENQUEUE_REPLENISH;
2d3d891d
DF
4464 } else
4465 p->dl.dl_boosted = 0;
aab03e05 4466 p->sched_class = &dl_sched_class;
2d3d891d
DF
4467 } else if (rt_prio(prio)) {
4468 if (dl_prio(oldprio))
4469 p->dl.dl_boosted = 0;
4470 if (oldprio < prio)
ff77e468 4471 queue_flag |= ENQUEUE_HEAD;
dd41f596 4472 p->sched_class = &rt_sched_class;
2d3d891d
DF
4473 } else {
4474 if (dl_prio(oldprio))
4475 p->dl.dl_boosted = 0;
746db944
BS
4476 if (rt_prio(oldprio))
4477 p->rt.timeout = 0;
dd41f596 4478 p->sched_class = &fair_sched_class;
2d3d891d 4479 }
dd41f596 4480
b29739f9
IM
4481 p->prio = prio;
4482
da0c1e65 4483 if (queued)
ff77e468 4484 enqueue_task(rq, p, queue_flag);
a399d233 4485 if (running)
03b7fad1 4486 set_next_task(rq, p);
cb469845 4487
da7a735e 4488 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 4489out_unlock:
d1ccc66d
IM
4490 /* Avoid rq from going away on us: */
4491 preempt_disable();
eb580751 4492 __task_rq_unlock(rq, &rf);
4c9a4bc8
PZ
4493
4494 balance_callback(rq);
4495 preempt_enable();
b29739f9 4496}
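
From userspace, the usual way to engage this machinery is a priority-inheritance futex, for example a PTHREAD_PRIO_INHERIT mutex. A sketch; the boosting only becomes observable once the contending threads run with real-time policies, which typically needs elevated privileges:

/* sketch: create a priority-inheritance mutex (compile with -lpthread) */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t lock;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&lock, &attr);

	pthread_mutex_lock(&lock);
	/* While a higher-priority thread blocks on `lock`, the kernel boosts
	 * this owner's effective priority to the waiter's priority. */
	pthread_mutex_unlock(&lock);

	pthread_mutex_destroy(&lock);
	pthread_mutexattr_destroy(&attr);
	return 0;
}
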
acd58620
PZ
4497#else
4498static inline int rt_effective_prio(struct task_struct *p, int prio)
4499{
4500 return prio;
4501}
b29739f9 4502#endif
d50dde5a 4503
36c8b586 4504void set_user_nice(struct task_struct *p, long nice)
1da177e4 4505{
49bd21ef
PZ
4506 bool queued, running;
4507 int old_prio, delta;
eb580751 4508 struct rq_flags rf;
70b97a7f 4509 struct rq *rq;
1da177e4 4510
75e45d51 4511 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
4512 return;
4513 /*
4514 * We have to be careful, if called from sys_setpriority(),
4515 * the task might be in the middle of scheduling on another CPU.
4516 */
eb580751 4517 rq = task_rq_lock(p, &rf);
2fb8d367
PZ
4518 update_rq_clock(rq);
4519
1da177e4
LT
4520 /*
4521 * The RT priorities are set via sched_setscheduler(), but we still
4522 * allow the 'normal' nice value to be set - but as expected
 4523 * it won't have any effect on scheduling until the task is
aab03e05 4524 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 4525 */
aab03e05 4526 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
4527 p->static_prio = NICE_TO_PRIO(nice);
4528 goto out_unlock;
4529 }
da0c1e65 4530 queued = task_on_rq_queued(p);
49bd21ef 4531 running = task_current(rq, p);
da0c1e65 4532 if (queued)
7a57f32a 4533 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
49bd21ef
PZ
4534 if (running)
4535 put_prev_task(rq, p);
1da177e4 4536
1da177e4 4537 p->static_prio = NICE_TO_PRIO(nice);
9059393e 4538 set_load_weight(p, true);
b29739f9
IM
4539 old_prio = p->prio;
4540 p->prio = effective_prio(p);
4541 delta = p->prio - old_prio;
1da177e4 4542
5443a0be 4543 if (queued)
7134b3e9 4544 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
49bd21ef 4545 if (running)
03b7fad1 4546 set_next_task(rq, p);
5443a0be
FW
4547
4548 /*
4549 * If the task increased its priority or is running and
4550 * lowered its priority, then reschedule its CPU:
4551 */
4552 p->sched_class->prio_changed(rq, p, old_prio);
4553
1da177e4 4554out_unlock:
eb580751 4555 task_rq_unlock(rq, p, &rf);
1da177e4 4556}
1da177e4
LT
4557EXPORT_SYMBOL(set_user_nice);
4558
e43379f1
MM
4559/*
4560 * can_nice - check if a task can reduce its nice value
4561 * @p: task
4562 * @nice: nice value
4563 */
36c8b586 4564int can_nice(const struct task_struct *p, const int nice)
e43379f1 4565{
d1ccc66d 4566 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7aa2c016 4567 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 4568
78d7d407 4569 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
4570 capable(CAP_SYS_NICE));
4571}
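
The nice-to-rlimit mapping checked here works out to 20 - nice, so RLIMIT_NICE=1 allows nice 19 and RLIMIT_NICE=40 allows nice -20. A userspace sketch (assuming that mapping) that derives the lowest reachable nice value and tries to apply it:

/* sketch: how far can this process lower its nice value without CAP_SYS_NICE? */
#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	long lowest_nice;

	if (getrlimit(RLIMIT_NICE, &rl) != 0)
		return 1;

	lowest_nice = 20 - (long)rl.rlim_cur;	/* rlimit 1 -> nice 19, rlimit 40 -> nice -20 */
	printf("RLIMIT_NICE=%lu => lowest nice reachable without CAP_SYS_NICE: %ld\n",
	       (unsigned long)rl.rlim_cur, lowest_nice);

	if (setpriority(PRIO_PROCESS, 0, (int)lowest_nice) != 0)
		perror("setpriority");
	return 0;
}
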
4572
1da177e4
LT
4573#ifdef __ARCH_WANT_SYS_NICE
4574
4575/*
4576 * sys_nice - change the priority of the current process.
4577 * @increment: priority increment
4578 *
4579 * sys_setpriority is a more generic, but much slower function that
4580 * does similar things.
4581 */
5add95d4 4582SYSCALL_DEFINE1(nice, int, increment)
1da177e4 4583{
48f24c4d 4584 long nice, retval;
1da177e4
LT
4585
4586 /*
4587 * Setpriority might change our priority at the same moment.
4588 * We don't have to worry. Conceptually one call occurs first
4589 * and we have a single winner.
4590 */
a9467fa3 4591 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 4592 nice = task_nice(current) + increment;
1da177e4 4593
a9467fa3 4594 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
4595 if (increment < 0 && !can_nice(current, nice))
4596 return -EPERM;
4597
1da177e4
LT
4598 retval = security_task_setnice(current, nice);
4599 if (retval)
4600 return retval;
4601
4602 set_user_nice(current, nice);
4603 return 0;
4604}
4605
4606#endif
4607
4608/**
4609 * task_prio - return the priority value of a given task.
4610 * @p: the task in question.
4611 *
e69f6186 4612 * Return: The priority value as seen by users in /proc.
1da177e4
LT
4613 * RT tasks are offset by -200. Normal tasks are centered
4614 * around 0, value goes from -16 to +15.
4615 */
36c8b586 4616int task_prio(const struct task_struct *p)
1da177e4
LT
4617{
4618 return p->prio - MAX_RT_PRIO;
4619}
4620
1da177e4 4621/**
d1ccc66d 4622 * idle_cpu - is a given CPU idle currently?
1da177e4 4623 * @cpu: the processor in question.
e69f6186
YB
4624 *
4625 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
4626 */
4627int idle_cpu(int cpu)
4628{
908a3283
TG
4629 struct rq *rq = cpu_rq(cpu);
4630
4631 if (rq->curr != rq->idle)
4632 return 0;
4633
4634 if (rq->nr_running)
4635 return 0;
4636
4637#ifdef CONFIG_SMP
4638 if (!llist_empty(&rq->wake_list))
4639 return 0;
4640#endif
4641
4642 return 1;
1da177e4
LT
4643}
4644
943d355d
RJ
4645/**
4646 * available_idle_cpu - is a given CPU idle for enqueuing work.
4647 * @cpu: the CPU in question.
4648 *
4649 * Return: 1 if the CPU is currently idle. 0 otherwise.
4650 */
4651int available_idle_cpu(int cpu)
4652{
4653 if (!idle_cpu(cpu))
4654 return 0;
4655
247f2f6f
RJ
4656 if (vcpu_is_preempted(cpu))
4657 return 0;
4658
908a3283 4659 return 1;
1da177e4
LT
4660}
4661
1da177e4 4662/**
d1ccc66d 4663 * idle_task - return the idle task for a given CPU.
1da177e4 4664 * @cpu: the processor in question.
e69f6186 4665 *
d1ccc66d 4666 * Return: The idle task for the CPU @cpu.
1da177e4 4667 */
36c8b586 4668struct task_struct *idle_task(int cpu)
1da177e4
LT
4669{
4670 return cpu_rq(cpu)->idle;
4671}
4672
4673/**
4674 * find_process_by_pid - find a process with a matching PID value.
4675 * @pid: the pid in question.
e69f6186
YB
4676 *
4677 * The task of @pid, if found. %NULL otherwise.
1da177e4 4678 */
a9957449 4679static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 4680{
228ebcbe 4681 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
4682}
4683
c13db6b1
SR
4684/*
4685 * sched_setparam() passes in -1 for its policy, to let the functions
4686 * it calls know not to change it.
4687 */
4688#define SETPARAM_POLICY -1
4689
c365c292
TG
4690static void __setscheduler_params(struct task_struct *p,
4691 const struct sched_attr *attr)
1da177e4 4692{
d50dde5a
DF
4693 int policy = attr->sched_policy;
4694
c13db6b1 4695 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
4696 policy = p->policy;
4697
1da177e4 4698 p->policy = policy;
d50dde5a 4699
aab03e05
DF
4700 if (dl_policy(policy))
4701 __setparam_dl(p, attr);
39fd8fd2 4702 else if (fair_policy(policy))
d50dde5a
DF
4703 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
4704
39fd8fd2
PZ
4705 /*
4706 * __sched_setscheduler() ensures attr->sched_priority == 0 when
4707 * !rt_policy. Always setting this ensures that things like
4708 * getparam()/getattr() don't report silly values for !rt tasks.
4709 */
4710 p->rt_priority = attr->sched_priority;
383afd09 4711 p->normal_prio = normal_prio(p);
9059393e 4712 set_load_weight(p, true);
c365c292 4713}
39fd8fd2 4714
c365c292
TG
4715/* Actually do priority change: must hold pi & rq lock. */
4716static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 4717 const struct sched_attr *attr, bool keep_boost)
c365c292 4718{
a509a7cd
PB
4719 /*
4720 * If params can't change scheduling class changes aren't allowed
4721 * either.
4722 */
4723 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
4724 return;
4725
c365c292 4726 __setscheduler_params(p, attr);
d50dde5a 4727
383afd09 4728 /*
0782e63b
TG
4729 * Keep a potential priority boosting if called from
4730 * sched_setscheduler().
383afd09 4731 */
acd58620 4732 p->prio = normal_prio(p);
0782e63b 4733 if (keep_boost)
acd58620 4734 p->prio = rt_effective_prio(p, p->prio);
383afd09 4735
aab03e05
DF
4736 if (dl_prio(p->prio))
4737 p->sched_class = &dl_sched_class;
4738 else if (rt_prio(p->prio))
ffd44db5
PZ
4739 p->sched_class = &rt_sched_class;
4740 else
4741 p->sched_class = &fair_sched_class;
1da177e4 4742}
aab03e05 4743
c69e8d9c 4744/*
d1ccc66d 4745 * Check the target process has a UID that matches the current process's:
c69e8d9c
DH
4746 */
4747static bool check_same_owner(struct task_struct *p)
4748{
4749 const struct cred *cred = current_cred(), *pcred;
4750 bool match;
4751
4752 rcu_read_lock();
4753 pcred = __task_cred(p);
9c806aa0
EB
4754 match = (uid_eq(cred->euid, pcred->euid) ||
4755 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
4756 rcu_read_unlock();
4757 return match;
4758}
4759
d50dde5a
DF
4760static int __sched_setscheduler(struct task_struct *p,
4761 const struct sched_attr *attr,
dbc7f069 4762 bool user, bool pi)
1da177e4 4763{
383afd09
SR
4764 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
4765 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 4766 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 4767 int new_effective_prio, policy = attr->sched_policy;
83ab0aa0 4768 const struct sched_class *prev_class;
eb580751 4769 struct rq_flags rf;
ca94c442 4770 int reset_on_fork;
7a57f32a 4771 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
eb580751 4772 struct rq *rq;
1da177e4 4773
896bbb25
SRV
4774 /* The pi code expects interrupts enabled */
4775 BUG_ON(pi && in_interrupt());
1da177e4 4776recheck:
d1ccc66d 4777 /* Double check policy once rq lock held: */
ca94c442
LP
4778 if (policy < 0) {
4779 reset_on_fork = p->sched_reset_on_fork;
1da177e4 4780 policy = oldpolicy = p->policy;
ca94c442 4781 } else {
7479f3c9 4782 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 4783
20f9cd2a 4784 if (!valid_policy(policy))
ca94c442
LP
4785 return -EINVAL;
4786 }
4787
794a56eb 4788 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7479f3c9
PZ
4789 return -EINVAL;
4790
1da177e4
LT
4791 /*
4792 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
4793 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4794 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 4795 */
0bb040a4 4796 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 4797 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 4798 return -EINVAL;
aab03e05
DF
4799 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
4800 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
4801 return -EINVAL;
4802
37e4ab3f
OC
4803 /*
4804 * Allow unprivileged RT tasks to decrease priority:
4805 */
961ccddd 4806 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 4807 if (fair_policy(policy)) {
d0ea0268 4808 if (attr->sched_nice < task_nice(p) &&
eaad4513 4809 !can_nice(p, attr->sched_nice))
d50dde5a
DF
4810 return -EPERM;
4811 }
4812
e05606d3 4813 if (rt_policy(policy)) {
a44702e8
ON
4814 unsigned long rlim_rtprio =
4815 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909 4816
d1ccc66d 4817 /* Can't set/change the rt policy: */
8dc3e909
ON
4818 if (policy != p->policy && !rlim_rtprio)
4819 return -EPERM;
4820
d1ccc66d 4821 /* Can't increase priority: */
d50dde5a
DF
4822 if (attr->sched_priority > p->rt_priority &&
4823 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
4824 return -EPERM;
4825 }
c02aa73b 4826
d44753b8
JL
4827 /*
4828 * Can't set/change SCHED_DEADLINE policy at all for now
4829 * (safest behavior); in the future we would like to allow
4830 * unprivileged DL tasks to increase their relative deadline
4831 * or reduce their runtime (both ways reducing utilization)
4832 */
4833 if (dl_policy(policy))
4834 return -EPERM;
4835
dd41f596 4836 /*
c02aa73b
DH
4837 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4838 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 4839 */
1da1843f 4840 if (task_has_idle_policy(p) && !idle_policy(policy)) {
d0ea0268 4841 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
4842 return -EPERM;
4843 }
5fe1d75f 4844
d1ccc66d 4845 /* Can't change other user's priorities: */
c69e8d9c 4846 if (!check_same_owner(p))
37e4ab3f 4847 return -EPERM;
ca94c442 4848
d1ccc66d 4849 /* Normal users shall not reset the sched_reset_on_fork flag: */
ca94c442
LP
4850 if (p->sched_reset_on_fork && !reset_on_fork)
4851 return -EPERM;
37e4ab3f 4852 }
1da177e4 4853
725aad24 4854 if (user) {
794a56eb
JL
4855 if (attr->sched_flags & SCHED_FLAG_SUGOV)
4856 return -EINVAL;
4857
b0ae1981 4858 retval = security_task_setscheduler(p);
725aad24
JF
4859 if (retval)
4860 return retval;
4861 }
4862
a509a7cd
PB
4863 /* Update task specific "requested" clamps */
4864 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
4865 retval = uclamp_validate(p, attr);
4866 if (retval)
4867 return retval;
4868 }
4869
710da3c8
JL
4870 if (pi)
4871 cpuset_read_lock();
4872
b29739f9 4873 /*
d1ccc66d 4874 * Make sure no PI-waiters arrive (or leave) while we are
b29739f9 4875 * changing the priority of the task:
0122ec5b 4876 *
25985edc 4877 * To be able to change p->policy safely, the appropriate
1da177e4
LT
4878 * runqueue lock must be held.
4879 */
eb580751 4880 rq = task_rq_lock(p, &rf);
80f5c1b8 4881 update_rq_clock(rq);
dc61b1d6 4882
34f971f6 4883 /*
d1ccc66d 4884 * Changing the policy of the stop threads is a very bad idea:
34f971f6
PZ
4885 */
4886 if (p == rq->stop) {
4b211f2b
MP
4887 retval = -EINVAL;
4888 goto unlock;
34f971f6
PZ
4889 }
4890
a51e9198 4891 /*
d6b1e911
TG
4892 * If not changing anything there's no need to proceed further,
4893 * but store a possible modification of reset_on_fork.
a51e9198 4894 */
d50dde5a 4895 if (unlikely(policy == p->policy)) {
d0ea0268 4896 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
4897 goto change;
4898 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
4899 goto change;
75381608 4900 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 4901 goto change;
a509a7cd
PB
4902 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
4903 goto change;
d50dde5a 4904
d6b1e911 4905 p->sched_reset_on_fork = reset_on_fork;
4b211f2b
MP
4906 retval = 0;
4907 goto unlock;
a51e9198 4908 }
d50dde5a 4909change:
a51e9198 4910
dc61b1d6 4911 if (user) {
332ac17e 4912#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
4913 /*
4914 * Do not allow realtime tasks into groups that have no runtime
4915 * assigned.
4916 */
4917 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
4918 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4919 !task_group_is_autogroup(task_group(p))) {
4b211f2b
MP
4920 retval = -EPERM;
4921 goto unlock;
dc61b1d6 4922 }
dc61b1d6 4923#endif
332ac17e 4924#ifdef CONFIG_SMP
794a56eb
JL
4925 if (dl_bandwidth_enabled() && dl_policy(policy) &&
4926 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
332ac17e 4927 cpumask_t *span = rq->rd->span;
332ac17e
DF
4928
4929 /*
4930 * Don't allow tasks with an affinity mask smaller than
4931 * the entire root_domain to become SCHED_DEADLINE. We
4932 * will also fail if there's no bandwidth available.
4933 */
3bd37062 4934 if (!cpumask_subset(span, p->cpus_ptr) ||
e4099a5e 4935 rq->rd->dl_bw.bw == 0) {
4b211f2b
MP
4936 retval = -EPERM;
4937 goto unlock;
332ac17e
DF
4938 }
4939 }
4940#endif
4941 }
dc61b1d6 4942
d1ccc66d 4943 /* Re-check policy now with rq lock held: */
1da177e4
LT
4944 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4945 policy = oldpolicy = -1;
eb580751 4946 task_rq_unlock(rq, p, &rf);
710da3c8
JL
4947 if (pi)
4948 cpuset_read_unlock();
1da177e4
LT
4949 goto recheck;
4950 }
332ac17e
DF
4951
4952 /*
4953 * If setscheduling to SCHED_DEADLINE (or changing the parameters
4954 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4955 * is available.
4956 */
06a76fe0 4957 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
4b211f2b
MP
4958 retval = -EBUSY;
4959 goto unlock;
332ac17e
DF
4960 }
4961
c365c292
TG
4962 p->sched_reset_on_fork = reset_on_fork;
4963 oldprio = p->prio;
4964
dbc7f069
PZ
4965 if (pi) {
4966 /*
4967 * Take priority boosted tasks into account. If the new
4968 * effective priority is unchanged, we just store the new
4969 * normal parameters and do not touch the scheduler class and
 4970 * the runqueue. This will be done when the task deboosts
4971 * itself.
4972 */
acd58620 4973 new_effective_prio = rt_effective_prio(p, newprio);
ff77e468
PZ
4974 if (new_effective_prio == oldprio)
4975 queue_flags &= ~DEQUEUE_MOVE;
c365c292
TG
4976 }
4977
da0c1e65 4978 queued = task_on_rq_queued(p);
051a1d1a 4979 running = task_current(rq, p);
da0c1e65 4980 if (queued)
ff77e468 4981 dequeue_task(rq, p, queue_flags);
0e1f3483 4982 if (running)
f3cd1c4e 4983 put_prev_task(rq, p);
f6b53205 4984
83ab0aa0 4985 prev_class = p->sched_class;
a509a7cd 4986
dbc7f069 4987 __setscheduler(rq, p, attr, pi);
a509a7cd 4988 __setscheduler_uclamp(p, attr);
f6b53205 4989
da0c1e65 4990 if (queued) {
81a44c54
TG
4991 /*
4992 * We enqueue to tail when the priority of a task is
4993 * increased (user space view).
4994 */
ff77e468
PZ
4995 if (oldprio < p->prio)
4996 queue_flags |= ENQUEUE_HEAD;
1de64443 4997
ff77e468 4998 enqueue_task(rq, p, queue_flags);
81a44c54 4999 }
a399d233 5000 if (running)
03b7fad1 5001 set_next_task(rq, p);
cb469845 5002
da7a735e 5003 check_class_changed(rq, p, prev_class, oldprio);
d1ccc66d
IM
5004
5005 /* Avoid rq from going away on us: */
5006 preempt_disable();
eb580751 5007 task_rq_unlock(rq, p, &rf);
b29739f9 5008
710da3c8
JL
5009 if (pi) {
5010 cpuset_read_unlock();
dbc7f069 5011 rt_mutex_adjust_pi(p);
710da3c8 5012 }
95e02ca9 5013
d1ccc66d 5014 /* Run balance callbacks after we've adjusted the PI chain: */
4c9a4bc8
PZ
5015 balance_callback(rq);
5016 preempt_enable();
95e02ca9 5017
1da177e4 5018 return 0;
4b211f2b
MP
5019
5020unlock:
5021 task_rq_unlock(rq, p, &rf);
710da3c8
JL
5022 if (pi)
5023 cpuset_read_unlock();
4b211f2b 5024 return retval;
1da177e4 5025}
961ccddd 5026
7479f3c9
PZ
5027static int _sched_setscheduler(struct task_struct *p, int policy,
5028 const struct sched_param *param, bool check)
5029{
5030 struct sched_attr attr = {
5031 .sched_policy = policy,
5032 .sched_priority = param->sched_priority,
5033 .sched_nice = PRIO_TO_NICE(p->static_prio),
5034 };
5035
c13db6b1
SR
5036 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
5037 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
5038 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
5039 policy &= ~SCHED_RESET_ON_FORK;
5040 attr.sched_policy = policy;
5041 }
5042
dbc7f069 5043 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 5044}
961ccddd
RR
5045/**
5046 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5047 * @p: the task in question.
5048 * @policy: new policy.
5049 * @param: structure containing the new RT priority.
5050 *
e69f6186
YB
5051 * Return: 0 on success. An error code otherwise.
5052 *
961ccddd
RR
5053 * NOTE that the task may be already dead.
5054 */
5055int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 5056 const struct sched_param *param)
961ccddd 5057{
7479f3c9 5058 return _sched_setscheduler(p, policy, param, true);
961ccddd 5059}
1da177e4
LT
5060EXPORT_SYMBOL_GPL(sched_setscheduler);
5061
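
The userspace counterpart of this path is the sched_setscheduler() system call; a sketch that requests SCHED_FIFO priority 10 for the caller. Without CAP_SYS_NICE or an RLIMIT_RTPRIO allowance this fails with EPERM, which is exactly the permission logic enforced in __sched_setscheduler() above:

/* sketch: switch the calling process to SCHED_FIFO, priority 10 */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}
	printf("now running SCHED_FIFO, priority %d\n", sp.sched_priority);
	return 0;
}
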
d50dde5a
DF
5062int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
5063{
dbc7f069 5064 return __sched_setscheduler(p, attr, true, true);
d50dde5a
DF
5065}
5066EXPORT_SYMBOL_GPL(sched_setattr);
5067
794a56eb
JL
5068int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
5069{
5070 return __sched_setscheduler(p, attr, false, true);
5071}
5072
961ccddd
RR
5073/**
5074 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5075 * @p: the task in question.
5076 * @policy: new policy.
5077 * @param: structure containing the new RT priority.
5078 *
5079 * Just like sched_setscheduler, only don't bother checking if the
5080 * current context has permission. For example, this is needed in
5081 * stop_machine(): we create temporary high priority worker threads,
5082 * but our caller might not have that capability.
e69f6186
YB
5083 *
5084 * Return: 0 on success. An error code otherwise.
961ccddd
RR
5085 */
5086int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 5087 const struct sched_param *param)
961ccddd 5088{
7479f3c9 5089 return _sched_setscheduler(p, policy, param, false);
961ccddd 5090}
84778472 5091EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
961ccddd 5092
95cdf3b7
IM
5093static int
5094do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 5095{
1da177e4
LT
5096 struct sched_param lparam;
5097 struct task_struct *p;
36c8b586 5098 int retval;
1da177e4
LT
5099
5100 if (!param || pid < 0)
5101 return -EINVAL;
5102 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5103 return -EFAULT;
5fe1d75f
ON
5104
5105 rcu_read_lock();
5106 retval = -ESRCH;
1da177e4 5107 p = find_process_by_pid(pid);
710da3c8
JL
5108 if (likely(p))
5109 get_task_struct(p);
5fe1d75f 5110 rcu_read_unlock();
36c8b586 5111
710da3c8
JL
5112 if (likely(p)) {
5113 retval = sched_setscheduler(p, policy, &lparam);
5114 put_task_struct(p);
5115 }
5116
1da177e4
LT
5117 return retval;
5118}
5119
d50dde5a
DF
5120/*
5121 * Mimics kernel/events/core.c perf_copy_attr().
5122 */
d1ccc66d 5123static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
d50dde5a
DF
5124{
5125 u32 size;
5126 int ret;
5127
d1ccc66d 5128 /* Zero the full structure, so that a short copy will be nice: */
d50dde5a
DF
5129 memset(attr, 0, sizeof(*attr));
5130
5131 ret = get_user(size, &uattr->size);
5132 if (ret)
5133 return ret;
5134
d1ccc66d
IM
5135 /* ABI compatibility quirk: */
5136 if (!size)
d50dde5a 5137 size = SCHED_ATTR_SIZE_VER0;
dff3a85f 5138 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
d50dde5a
DF
5139 goto err_size;
5140
dff3a85f
AS
5141 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
5142 if (ret) {
5143 if (ret == -E2BIG)
5144 goto err_size;
5145 return ret;
d50dde5a
DF
5146 }
5147
a509a7cd
PB
5148 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
5149 size < SCHED_ATTR_SIZE_VER1)
5150 return -EINVAL;
5151
d50dde5a 5152 /*
d1ccc66d 5153 * XXX: Do we want to be lenient like existing syscalls; or do we want
d50dde5a
DF
5154 * to be strict and return an error on out-of-bounds values?
5155 */
75e45d51 5156 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 5157
e78c7bca 5158 return 0;
d50dde5a
DF
5159
5160err_size:
5161 put_user(sizeof(*attr), &uattr->size);
e78c7bca 5162 return -E2BIG;
d50dde5a
DF
5163}
5164
1da177e4
LT
5165/**
5166 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5167 * @pid: the pid in question.
5168 * @policy: new policy.
5169 * @param: structure containing the new RT priority.
e69f6186
YB
5170 *
5171 * Return: 0 on success. An error code otherwise.
1da177e4 5172 */
d1ccc66d 5173SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
1da177e4 5174{
c21761f1
JB
5175 if (policy < 0)
5176 return -EINVAL;
5177
1da177e4
LT
5178 return do_sched_setscheduler(pid, policy, param);
5179}
5180
5181/**
5182 * sys_sched_setparam - set/change the RT priority of a thread
5183 * @pid: the pid in question.
5184 * @param: structure containing the new RT priority.
e69f6186
YB
5185 *
5186 * Return: 0 on success. An error code otherwise.
1da177e4 5187 */
5add95d4 5188SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 5189{
c13db6b1 5190 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
5191}
5192
d50dde5a
DF
5193/**
5194 * sys_sched_setattr - same as above, but with extended sched_attr
5195 * @pid: the pid in question.
5778fccf 5196 * @uattr: structure containing the extended parameters.
db66d756 5197 * @flags: for future extension.
d50dde5a 5198 */
6d35ab48
PZ
5199SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
5200 unsigned int, flags)
d50dde5a
DF
5201{
5202 struct sched_attr attr;
5203 struct task_struct *p;
5204 int retval;
5205
6d35ab48 5206 if (!uattr || pid < 0 || flags)
d50dde5a
DF
5207 return -EINVAL;
5208
143cf23d
MK
5209 retval = sched_copy_attr(uattr, &attr);
5210 if (retval)
5211 return retval;
d50dde5a 5212
b14ed2c2 5213 if ((int)attr.sched_policy < 0)
dbdb2275 5214 return -EINVAL;
1d6362fa
PB
5215 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
5216 attr.sched_policy = SETPARAM_POLICY;
d50dde5a
DF
5217
5218 rcu_read_lock();
5219 retval = -ESRCH;
5220 p = find_process_by_pid(pid);
a509a7cd
PB
5221 if (likely(p))
5222 get_task_struct(p);
d50dde5a
DF
5223 rcu_read_unlock();
5224
a509a7cd
PB
5225 if (likely(p)) {
5226 retval = sched_setattr(p, &attr);
5227 put_task_struct(p);
5228 }
5229
d50dde5a
DF
5230 return retval;
5231}
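/*
 * User-space has no glibc wrapper for this syscall, so callers typically use
 * syscall(2) directly. A hedged sketch setting SCHED_DEADLINE on the calling
 * thread; the runtime/deadline/period values are illustrative and the struct
 * definition is assumed to come from the UAPI headers (older code defines
 * struct sched_attr by hand):
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/sched.h>
 *	#include <linux/sched/types.h>
 *
 *	static int set_deadline_self(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_policy	= SCHED_DEADLINE,
 *			.sched_runtime	=  10 * 1000 * 1000,	// 10 ms
 *			.sched_deadline	=  30 * 1000 * 1000,	// 30 ms
 *			.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *		};
 *
 *		// pid 0 means the calling thread; flags must currently be 0.
 *		return syscall(SYS_sched_setattr, 0, &attr, 0);
 *	}
 */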
5232
1da177e4
LT
5233/**
5234 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5235 * @pid: the pid in question.
e69f6186
YB
5236 *
5237 * Return: On success, the policy of the thread. Otherwise, a negative error
5238 * code.
1da177e4 5239 */
5add95d4 5240SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 5241{
36c8b586 5242 struct task_struct *p;
3a5c359a 5243 int retval;
1da177e4
LT
5244
5245 if (pid < 0)
3a5c359a 5246 return -EINVAL;
1da177e4
LT
5247
5248 retval = -ESRCH;
5fe85be0 5249 rcu_read_lock();
1da177e4
LT
5250 p = find_process_by_pid(pid);
5251 if (p) {
5252 retval = security_task_getscheduler(p);
5253 if (!retval)
ca94c442
LP
5254 retval = p->policy
5255 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 5256 }
5fe85be0 5257 rcu_read_unlock();
1da177e4
LT
5258 return retval;
5259}
5260
5261/**
ca94c442 5262 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
5263 * @pid: the pid in question.
5264 * @param: structure containing the RT priority.
e69f6186
YB
5265 *
5266 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
5267 * code.
1da177e4 5268 */
5add95d4 5269SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 5270{
ce5f7f82 5271 struct sched_param lp = { .sched_priority = 0 };
36c8b586 5272 struct task_struct *p;
3a5c359a 5273 int retval;
1da177e4
LT
5274
5275 if (!param || pid < 0)
3a5c359a 5276 return -EINVAL;
1da177e4 5277
5fe85be0 5278 rcu_read_lock();
1da177e4
LT
5279 p = find_process_by_pid(pid);
5280 retval = -ESRCH;
5281 if (!p)
5282 goto out_unlock;
5283
5284 retval = security_task_getscheduler(p);
5285 if (retval)
5286 goto out_unlock;
5287
ce5f7f82
PZ
5288 if (task_has_rt_policy(p))
5289 lp.sched_priority = p->rt_priority;
5fe85be0 5290 rcu_read_unlock();
1da177e4
LT
5291
5292 /*
5293 * This one might sleep, we cannot do it with a spinlock held ...
5294 */
5295 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5296
1da177e4
LT
5297 return retval;
5298
5299out_unlock:
5fe85be0 5300 rcu_read_unlock();
1da177e4
LT
5301 return retval;
5302}
5303
1251201c
IM
5304/*
5305 * Copy the kernel size attribute structure (which might be larger
5306 * than what user-space knows about) to user-space.
5307 *
5308 * Note that all cases are valid: user-space buffer can be larger or
5309 * smaller than the kernel-space buffer. The usual case is that both
5310 * have the same size.
5311 */
5312static int
5313sched_attr_copy_to_user(struct sched_attr __user *uattr,
5314 struct sched_attr *kattr,
5315 unsigned int usize)
d50dde5a 5316{
1251201c 5317 unsigned int ksize = sizeof(*kattr);
d50dde5a 5318
96d4f267 5319 if (!access_ok(uattr, usize))
d50dde5a
DF
5320 return -EFAULT;
5321
5322 /*
1251201c
IM
5323 * sched_getattr() ABI forwards and backwards compatibility:
5324 *
5325 * If usize == ksize then we just copy everything to user-space and all is good.
5326 *
5327 * If usize < ksize then we only copy as much as user-space has space for,
5328 * this keeps ABI compatibility as well. We skip the rest.
5329 *
5330 * If usize > ksize then user-space is using a newer version of the ABI,
 5331 * parts of which the kernel does not know about. Just ignore them - tooling can
5332 * detect the kernel's knowledge of attributes from the attr->size value
5333 * which is set to ksize in this case.
d50dde5a 5334 */
1251201c 5335 kattr->size = min(usize, ksize);
d50dde5a 5336
1251201c 5337 if (copy_to_user(uattr, kattr, kattr->size))
d50dde5a
DF
5338 return -EFAULT;
5339
22400674 5340 return 0;
d50dde5a
DF
5341}
5342
5343/**
aab03e05 5344 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 5345 * @pid: the pid in question.
5778fccf 5346 * @uattr: structure containing the extended parameters.
dff3a85f 5347 * @usize: sizeof(attr) for fwd/bwd comp.
db66d756 5348 * @flags: for future extension.
d50dde5a 5349 */
6d35ab48 5350SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1251201c 5351 unsigned int, usize, unsigned int, flags)
d50dde5a 5352{
1251201c 5353 struct sched_attr kattr = { };
d50dde5a
DF
5354 struct task_struct *p;
5355 int retval;
5356
1251201c
IM
5357 if (!uattr || pid < 0 || usize > PAGE_SIZE ||
5358 usize < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
5359 return -EINVAL;
5360
5361 rcu_read_lock();
5362 p = find_process_by_pid(pid);
5363 retval = -ESRCH;
5364 if (!p)
5365 goto out_unlock;
5366
5367 retval = security_task_getscheduler(p);
5368 if (retval)
5369 goto out_unlock;
5370
1251201c 5371 kattr.sched_policy = p->policy;
7479f3c9 5372 if (p->sched_reset_on_fork)
1251201c 5373 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05 5374 if (task_has_dl_policy(p))
1251201c 5375 __getparam_dl(p, &kattr);
aab03e05 5376 else if (task_has_rt_policy(p))
1251201c 5377 kattr.sched_priority = p->rt_priority;
d50dde5a 5378 else
1251201c 5379 kattr.sched_nice = task_nice(p);
d50dde5a 5380
a509a7cd 5381#ifdef CONFIG_UCLAMP_TASK
1251201c
IM
5382 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
5383 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
a509a7cd
PB
5384#endif
5385
d50dde5a
DF
5386 rcu_read_unlock();
5387
1251201c 5388 return sched_attr_copy_to_user(uattr, &kattr, usize);
d50dde5a
DF
5389
5390out_unlock:
5391 rcu_read_unlock();
5392 return retval;
5393}
5394
96f874e2 5395long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 5396{
5a16f3d3 5397 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
5398 struct task_struct *p;
5399 int retval;
1da177e4 5400
23f5d142 5401 rcu_read_lock();
1da177e4
LT
5402
5403 p = find_process_by_pid(pid);
5404 if (!p) {
23f5d142 5405 rcu_read_unlock();
1da177e4
LT
5406 return -ESRCH;
5407 }
5408
23f5d142 5409 /* Prevent p going away */
1da177e4 5410 get_task_struct(p);
23f5d142 5411 rcu_read_unlock();
1da177e4 5412
14a40ffc
TH
5413 if (p->flags & PF_NO_SETAFFINITY) {
5414 retval = -EINVAL;
5415 goto out_put_task;
5416 }
5a16f3d3
RR
5417 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5418 retval = -ENOMEM;
5419 goto out_put_task;
5420 }
5421 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5422 retval = -ENOMEM;
5423 goto out_free_cpus_allowed;
5424 }
1da177e4 5425 retval = -EPERM;
4c44aaaf
EB
5426 if (!check_same_owner(p)) {
5427 rcu_read_lock();
5428 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
5429 rcu_read_unlock();
16303ab2 5430 goto out_free_new_mask;
4c44aaaf
EB
5431 }
5432 rcu_read_unlock();
5433 }
1da177e4 5434
b0ae1981 5435 retval = security_task_setscheduler(p);
e7834f8f 5436 if (retval)
16303ab2 5437 goto out_free_new_mask;
e7834f8f 5438
e4099a5e
PZ
5439
5440 cpuset_cpus_allowed(p, cpus_allowed);
5441 cpumask_and(new_mask, in_mask, cpus_allowed);
5442
332ac17e
DF
5443 /*
 5444 * Since bandwidth control happens on a per-root_domain basis,
 5445 * if the admission test is enabled, we only admit -deadline
 5446 * tasks that are allowed to run on all the CPUs in the task's
5447 * root_domain.
5448 */
5449#ifdef CONFIG_SMP
f1e3a093
KT
5450 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
5451 rcu_read_lock();
5452 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 5453 retval = -EBUSY;
f1e3a093 5454 rcu_read_unlock();
16303ab2 5455 goto out_free_new_mask;
332ac17e 5456 }
f1e3a093 5457 rcu_read_unlock();
332ac17e
DF
5458 }
5459#endif
49246274 5460again:
25834c73 5461 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 5462
8707d8b8 5463 if (!retval) {
5a16f3d3
RR
5464 cpuset_cpus_allowed(p, cpus_allowed);
5465 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
5466 /*
5467 * We must have raced with a concurrent cpuset
5468 * update. Just reset the cpus_allowed to the
5469 * cpuset's cpus_allowed
5470 */
5a16f3d3 5471 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
5472 goto again;
5473 }
5474 }
16303ab2 5475out_free_new_mask:
5a16f3d3
RR
5476 free_cpumask_var(new_mask);
5477out_free_cpus_allowed:
5478 free_cpumask_var(cpus_allowed);
5479out_put_task:
1da177e4 5480 put_task_struct(p);
1da177e4
LT
5481 return retval;
5482}
5483
5484static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 5485 struct cpumask *new_mask)
1da177e4 5486{
96f874e2
RR
5487 if (len < cpumask_size())
5488 cpumask_clear(new_mask);
5489 else if (len > cpumask_size())
5490 len = cpumask_size();
5491
1da177e4
LT
5492 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5493}
5494
5495/**
d1ccc66d 5496 * sys_sched_setaffinity - set the CPU affinity of a process
1da177e4
LT
5497 * @pid: pid of the process
5498 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 5499 * @user_mask_ptr: user-space pointer to the new CPU mask
e69f6186
YB
5500 *
5501 * Return: 0 on success. An error code otherwise.
1da177e4 5502 */
5add95d4
HC
5503SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5504 unsigned long __user *, user_mask_ptr)
1da177e4 5505{
5a16f3d3 5506 cpumask_var_t new_mask;
1da177e4
LT
5507 int retval;
5508
5a16f3d3
RR
5509 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5510 return -ENOMEM;
1da177e4 5511
5a16f3d3
RR
5512 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5513 if (retval == 0)
5514 retval = sched_setaffinity(pid, new_mask);
5515 free_cpumask_var(new_mask);
5516 return retval;
1da177e4
LT
5517}
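/*
 * From user-space the usual entry point is the glibc wrapper, which sizes the
 * bitmask for the caller. A minimal sketch; pin_self_to_cpu() and its cpu
 * argument are illustrative assumptions:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	static int pin_self_to_cpu(int cpu)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(cpu, &set);
 *		// pid 0 affects the calling thread only.
 *		return sched_setaffinity(0, sizeof(set), &set);
 *	}
 */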
5518
96f874e2 5519long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 5520{
36c8b586 5521 struct task_struct *p;
31605683 5522 unsigned long flags;
1da177e4 5523 int retval;
1da177e4 5524
23f5d142 5525 rcu_read_lock();
1da177e4
LT
5526
5527 retval = -ESRCH;
5528 p = find_process_by_pid(pid);
5529 if (!p)
5530 goto out_unlock;
5531
e7834f8f
DQ
5532 retval = security_task_getscheduler(p);
5533 if (retval)
5534 goto out_unlock;
5535
013fdb80 5536 raw_spin_lock_irqsave(&p->pi_lock, flags);
3bd37062 5537 cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
013fdb80 5538 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
5539
5540out_unlock:
23f5d142 5541 rcu_read_unlock();
1da177e4 5542
9531b62f 5543 return retval;
1da177e4
LT
5544}
5545
5546/**
d1ccc66d 5547 * sys_sched_getaffinity - get the CPU affinity of a process
1da177e4
LT
5548 * @pid: pid of the process
5549 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 5550 * @user_mask_ptr: user-space pointer to hold the current CPU mask
e69f6186 5551 *
599b4840
ZW
5552 * Return: size of CPU mask copied to user_mask_ptr on success. An
5553 * error code otherwise.
1da177e4 5554 */
5add95d4
HC
5555SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5556 unsigned long __user *, user_mask_ptr)
1da177e4
LT
5557{
5558 int ret;
f17c8607 5559 cpumask_var_t mask;
1da177e4 5560
84fba5ec 5561 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
5562 return -EINVAL;
5563 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
5564 return -EINVAL;
5565
f17c8607
RR
5566 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5567 return -ENOMEM;
1da177e4 5568
f17c8607
RR
5569 ret = sched_getaffinity(pid, mask);
5570 if (ret == 0) {
4de373a1 5571 unsigned int retlen = min(len, cpumask_size());
cd3d8031
KM
5572
5573 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
5574 ret = -EFAULT;
5575 else
cd3d8031 5576 ret = retlen;
f17c8607
RR
5577 }
5578 free_cpumask_var(mask);
1da177e4 5579
f17c8607 5580 return ret;
1da177e4
LT
5581}
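/*
 * Note the asymmetry documented above: the raw syscall returns the number of
 * bytes copied, while the glibc sched_getaffinity() wrapper returns 0 on
 * success. A minimal user-space sketch using the wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	static void print_affinity_width(void)
 *	{
 *		cpu_set_t set;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *			printf("may run on %d CPUs\n", CPU_COUNT(&set));
 *	}
 */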
5582
5583/**
5584 * sys_sched_yield - yield the current processor to other threads.
5585 *
dd41f596
IM
5586 * This function yields the current CPU to other tasks. If there are no
5587 * other threads running on this CPU then this function will return.
e69f6186
YB
5588 *
5589 * Return: 0.
1da177e4 5590 */
7d4dd4f1 5591static void do_sched_yield(void)
1da177e4 5592{
8a8c69c3
PZ
5593 struct rq_flags rf;
5594 struct rq *rq;
5595
246b3b33 5596 rq = this_rq_lock_irq(&rf);
1da177e4 5597
ae92882e 5598 schedstat_inc(rq->yld_count);
4530d7ab 5599 current->sched_class->yield_task(rq);
1da177e4
LT
5600
5601 /*
5602 * Since we are going to call schedule() anyway, there's
5603 * no need to preempt or enable interrupts:
5604 */
8a8c69c3
PZ
5605 preempt_disable();
5606 rq_unlock(rq, &rf);
ba74c144 5607 sched_preempt_enable_no_resched();
1da177e4
LT
5608
5609 schedule();
7d4dd4f1 5610}
1da177e4 5611
7d4dd4f1
DB
5612SYSCALL_DEFINE0(sched_yield)
5613{
5614 do_sched_yield();
1da177e4
LT
5615 return 0;
5616}
5617
c1a280b6 5618#ifndef CONFIG_PREEMPTION
02b67cc3 5619int __sched _cond_resched(void)
1da177e4 5620{
fe32d3cd 5621 if (should_resched(0)) {
a18b5d01 5622 preempt_schedule_common();
1da177e4
LT
5623 return 1;
5624 }
f79c3ad6 5625 rcu_all_qs();
1da177e4
LT
5626 return 0;
5627}
02b67cc3 5628EXPORT_SYMBOL(_cond_resched);
35a773a0 5629#endif
1da177e4
LT
5630
5631/*
613afbf8 5632 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
5633 * call schedule, and on return reacquire the lock.
5634 *
c1a280b6 5635 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
1da177e4
LT
5636 * operations here to prevent schedule() from being called twice (once via
5637 * spin_unlock(), once by hand).
5638 */
613afbf8 5639int __cond_resched_lock(spinlock_t *lock)
1da177e4 5640{
fe32d3cd 5641 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
5642 int ret = 0;
5643
f607c668
PZ
5644 lockdep_assert_held(lock);
5645
4a81e832 5646 if (spin_needbreak(lock) || resched) {
1da177e4 5647 spin_unlock(lock);
d86ee480 5648 if (resched)
a18b5d01 5649 preempt_schedule_common();
95c354fe
NP
5650 else
5651 cpu_relax();
6df3cecb 5652 ret = 1;
1da177e4 5653 spin_lock(lock);
1da177e4 5654 }
6df3cecb 5655 return ret;
1da177e4 5656}
613afbf8 5657EXPORT_SYMBOL(__cond_resched_lock);
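/*
 * A sketch of the typical caller pattern for the cond_resched_lock() wrapper
 * built on this helper; the lock, the list and shrink_one_entry() are
 * illustrative assumptions:
 *
 *	spin_lock(&cache_lock);
 *	while (!list_empty(&cache_list)) {
 *		shrink_one_entry(&cache_list);
 *		// May drop cache_lock, schedule, and re-take it - do not rely
 *		// on state protected by the lock across this call.
 *		cond_resched_lock(&cache_lock);
 *	}
 *	spin_unlock(&cache_lock);
 */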
1da177e4 5658
1da177e4
LT
5659/**
5660 * yield - yield the current processor to other threads.
5661 *
8e3fabfd
PZ
5662 * Do not ever use this function, there's a 99% chance you're doing it wrong.
5663 *
5664 * The scheduler is at all times free to pick the calling task as the most
 5665 * eligible task to run; if removing the yield() call from your code breaks
 5666 * it, it's already broken.
5667 *
5668 * Typical broken usage is:
5669 *
5670 * while (!event)
d1ccc66d 5671 * yield();
8e3fabfd
PZ
5672 *
5673 * where one assumes that yield() will let 'the other' process run that will
5674 * make event true. If the current task is a SCHED_FIFO task that will never
5675 * happen. Never use yield() as a progress guarantee!!
5676 *
5677 * If you want to use yield() to wait for something, use wait_event().
5678 * If you want to use yield() to be 'nice' for others, use cond_resched().
5679 * If you still want to use yield(), do not!
1da177e4
LT
5680 */
5681void __sched yield(void)
5682{
5683 set_current_state(TASK_RUNNING);
7d4dd4f1 5684 do_sched_yield();
1da177e4 5685}
1da177e4
LT
5686EXPORT_SYMBOL(yield);
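/*
 * A sketch of the wait_event() pattern recommended above instead of a yield()
 * loop; the waitqueue and flag are illustrative assumptions:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(event_wq);
 *	static bool event_done;
 *
 *	// Waiter: sleeps until woken instead of burning CPU in yield().
 *	wait_event(event_wq, event_done);
 *
 *	// Producer: publish the condition, then wake the waiter.
 *	event_done = true;
 *	wake_up(&event_wq);
 */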
5687
d95f4122
MG
5688/**
5689 * yield_to - yield the current processor to another thread in
5690 * your thread group, or accelerate that thread toward the
5691 * processor it's on.
16addf95
RD
5692 * @p: target task
5693 * @preempt: whether task preemption is allowed or not
d95f4122
MG
5694 *
5695 * It's the caller's job to ensure that the target task struct
5696 * can't go away on us before we can do any checks.
5697 *
e69f6186 5698 * Return:
7b270f60
PZ
5699 * true (>0) if we indeed boosted the target task.
5700 * false (0) if we failed to boost the target.
5701 * -ESRCH if there's no task to yield to.
d95f4122 5702 */
fa93384f 5703int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
5704{
5705 struct task_struct *curr = current;
5706 struct rq *rq, *p_rq;
5707 unsigned long flags;
c3c18640 5708 int yielded = 0;
d95f4122
MG
5709
5710 local_irq_save(flags);
5711 rq = this_rq();
5712
5713again:
5714 p_rq = task_rq(p);
7b270f60
PZ
5715 /*
5716 * If we're the only runnable task on the rq and target rq also
5717 * has only one task, there's absolutely no point in yielding.
5718 */
5719 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
5720 yielded = -ESRCH;
5721 goto out_irq;
5722 }
5723
d95f4122 5724 double_rq_lock(rq, p_rq);
39e24d8f 5725 if (task_rq(p) != p_rq) {
d95f4122
MG
5726 double_rq_unlock(rq, p_rq);
5727 goto again;
5728 }
5729
5730 if (!curr->sched_class->yield_to_task)
7b270f60 5731 goto out_unlock;
d95f4122
MG
5732
5733 if (curr->sched_class != p->sched_class)
7b270f60 5734 goto out_unlock;
d95f4122
MG
5735
5736 if (task_running(p_rq, p) || p->state)
7b270f60 5737 goto out_unlock;
d95f4122
MG
5738
5739 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 5740 if (yielded) {
ae92882e 5741 schedstat_inc(rq->yld_count);
6d1cafd8
VP
5742 /*
5743 * Make p's CPU reschedule; pick_next_entity takes care of
5744 * fairness.
5745 */
5746 if (preempt && rq != p_rq)
8875125e 5747 resched_curr(p_rq);
6d1cafd8 5748 }
d95f4122 5749
7b270f60 5750out_unlock:
d95f4122 5751 double_rq_unlock(rq, p_rq);
7b270f60 5752out_irq:
d95f4122
MG
5753 local_irq_restore(flags);
5754
7b270f60 5755 if (yielded > 0)
d95f4122
MG
5756 schedule();
5757
5758 return yielded;
5759}
5760EXPORT_SYMBOL_GPL(yield_to);
5761
10ab5643
TH
5762int io_schedule_prepare(void)
5763{
5764 int old_iowait = current->in_iowait;
5765
5766 current->in_iowait = 1;
5767 blk_schedule_flush_plug(current);
5768
5769 return old_iowait;
5770}
5771
5772void io_schedule_finish(int token)
5773{
5774 current->in_iowait = token;
5775}
5776
1da177e4 5777/*
41a2d6cf 5778 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 5779 * that process accounting knows that this is a task in IO wait state.
1da177e4 5780 */
1da177e4
LT
5781long __sched io_schedule_timeout(long timeout)
5782{
10ab5643 5783 int token;
1da177e4
LT
5784 long ret;
5785
10ab5643 5786 token = io_schedule_prepare();
1da177e4 5787 ret = schedule_timeout(timeout);
10ab5643 5788 io_schedule_finish(token);
9cff8ade 5789
1da177e4
LT
5790 return ret;
5791}
9cff8ade 5792EXPORT_SYMBOL(io_schedule_timeout);
1da177e4 5793
e3b929b0 5794void __sched io_schedule(void)
10ab5643
TH
5795{
5796 int token;
5797
5798 token = io_schedule_prepare();
5799 schedule();
5800 io_schedule_finish(token);
5801}
5802EXPORT_SYMBOL(io_schedule);
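/*
 * io_schedule_prepare()/io_schedule_finish() let code that sleeps through some
 * other primitive still be accounted as iowait. A sketch assuming a
 * completion-based wait:
 *
 *	static void wait_for_io(struct completion *done)
 *	{
 *		int token = io_schedule_prepare();	// sets in_iowait, flushes plug
 *
 *		wait_for_completion(done);
 *		io_schedule_finish(token);		// restores previous in_iowait
 *	}
 */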
5803
1da177e4
LT
5804/**
5805 * sys_sched_get_priority_max - return maximum RT priority.
5806 * @policy: scheduling class.
5807 *
e69f6186
YB
5808 * Return: On success, this syscall returns the maximum
5809 * rt_priority that can be used by a given scheduling class.
5810 * On failure, a negative error code is returned.
1da177e4 5811 */
5add95d4 5812SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
5813{
5814 int ret = -EINVAL;
5815
5816 switch (policy) {
5817 case SCHED_FIFO:
5818 case SCHED_RR:
5819 ret = MAX_USER_RT_PRIO-1;
5820 break;
aab03e05 5821 case SCHED_DEADLINE:
1da177e4 5822 case SCHED_NORMAL:
b0a9499c 5823 case SCHED_BATCH:
dd41f596 5824 case SCHED_IDLE:
1da177e4
LT
5825 ret = 0;
5826 break;
5827 }
5828 return ret;
5829}
5830
5831/**
5832 * sys_sched_get_priority_min - return minimum RT priority.
5833 * @policy: scheduling class.
5834 *
e69f6186
YB
5835 * Return: On success, this syscall returns the minimum
5836 * rt_priority that can be used by a given scheduling class.
5837 * On failure, a negative error code is returned.
1da177e4 5838 */
5add95d4 5839SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
5840{
5841 int ret = -EINVAL;
5842
5843 switch (policy) {
5844 case SCHED_FIFO:
5845 case SCHED_RR:
5846 ret = 1;
5847 break;
aab03e05 5848 case SCHED_DEADLINE:
1da177e4 5849 case SCHED_NORMAL:
b0a9499c 5850 case SCHED_BATCH:
dd41f596 5851 case SCHED_IDLE:
1da177e4
LT
5852 ret = 0;
5853 }
5854 return ret;
5855}
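/*
 * User-space can query the valid static priority range instead of hard-coding
 * it; a minimal sketch using the POSIX wrappers (the printed values reflect
 * the switch statements above):
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	printf("SCHED_FIFO priorities: %d..%d\n",
 *	       sched_get_priority_min(SCHED_FIFO),	// 1
 *	       sched_get_priority_max(SCHED_FIFO));	// MAX_USER_RT_PRIO-1
 */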
5856
abca5fc5 5857static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1da177e4 5858{
36c8b586 5859 struct task_struct *p;
a4ec24b4 5860 unsigned int time_slice;
eb580751 5861 struct rq_flags rf;
dba091b9 5862 struct rq *rq;
3a5c359a 5863 int retval;
1da177e4
LT
5864
5865 if (pid < 0)
3a5c359a 5866 return -EINVAL;
1da177e4
LT
5867
5868 retval = -ESRCH;
1a551ae7 5869 rcu_read_lock();
1da177e4
LT
5870 p = find_process_by_pid(pid);
5871 if (!p)
5872 goto out_unlock;
5873
5874 retval = security_task_getscheduler(p);
5875 if (retval)
5876 goto out_unlock;
5877
eb580751 5878 rq = task_rq_lock(p, &rf);
a57beec5
PZ
5879 time_slice = 0;
5880 if (p->sched_class->get_rr_interval)
5881 time_slice = p->sched_class->get_rr_interval(rq, p);
eb580751 5882 task_rq_unlock(rq, p, &rf);
a4ec24b4 5883
1a551ae7 5884 rcu_read_unlock();
abca5fc5
AV
5885 jiffies_to_timespec64(time_slice, t);
5886 return 0;
3a5c359a 5887
1da177e4 5888out_unlock:
1a551ae7 5889 rcu_read_unlock();
1da177e4
LT
5890 return retval;
5891}
5892
2064a5ab
RD
5893/**
5894 * sys_sched_rr_get_interval - return the default timeslice of a process.
5895 * @pid: pid of the process.
5896 * @interval: userspace pointer to the timeslice value.
5897 *
 5898 * This syscall writes the default timeslice value of a given process
5899 * into the user-space timespec buffer. A value of '0' means infinity.
5900 *
5901 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5902 * an error code.
5903 */
abca5fc5 5904SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
474b9c77 5905 struct __kernel_timespec __user *, interval)
abca5fc5
AV
5906{
5907 struct timespec64 t;
5908 int retval = sched_rr_get_interval(pid, &t);
5909
5910 if (retval == 0)
5911 retval = put_timespec64(&t, interval);
5912
5913 return retval;
5914}
5915
474b9c77 5916#ifdef CONFIG_COMPAT_32BIT_TIME
8dabe724
AB
5917SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
5918 struct old_timespec32 __user *, interval)
abca5fc5
AV
5919{
5920 struct timespec64 t;
5921 int retval = sched_rr_get_interval(pid, &t);
5922
5923 if (retval == 0)
9afc5eee 5924 retval = put_old_timespec32(&t, interval);
abca5fc5
AV
5925 return retval;
5926}
5927#endif
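/*
 * A minimal user-space sketch for this syscall via its POSIX wrapper; per the
 * kernel-doc above, a reported value of 0 means "no finite timeslice":
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 */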
5928
82a1fcb9 5929void sched_show_task(struct task_struct *p)
1da177e4 5930{
1da177e4 5931 unsigned long free = 0;
4e79752c 5932 int ppid;
c930b2c0 5933
38200502
TH
5934 if (!try_get_task_stack(p))
5935 return;
20435d84
XX
5936
5937 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
5938
5939 if (p->state == TASK_RUNNING)
3df0fc5b 5940 printk(KERN_CONT " running task ");
1da177e4 5941#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 5942 free = stack_not_used(p);
1da177e4 5943#endif
a90e984c 5944 ppid = 0;
4e79752c 5945 rcu_read_lock();
a90e984c
ON
5946 if (pid_alive(p))
5947 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 5948 rcu_read_unlock();
3df0fc5b 5949 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 5950 task_pid_nr(p), ppid,
aa47b7e0 5951 (unsigned long)task_thread_info(p)->flags);
1da177e4 5952
3d1cb205 5953 print_worker_info(KERN_INFO, p);
5fb5e6de 5954 show_stack(p, NULL);
38200502 5955 put_task_stack(p);
1da177e4 5956}
0032f4e8 5957EXPORT_SYMBOL_GPL(sched_show_task);
1da177e4 5958
5d68cc95
PZ
5959static inline bool
5960state_filter_match(unsigned long state_filter, struct task_struct *p)
5961{
5962 /* no filter, everything matches */
5963 if (!state_filter)
5964 return true;
5965
5966 /* filter, but doesn't match */
5967 if (!(p->state & state_filter))
5968 return false;
5969
5970 /*
5971 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
5972 * TASK_KILLABLE).
5973 */
5974 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
5975 return false;
5976
5977 return true;
5978}
5979
5980
e59e2ae2 5981void show_state_filter(unsigned long state_filter)
1da177e4 5982{
36c8b586 5983 struct task_struct *g, *p;
1da177e4 5984
4bd77321 5985#if BITS_PER_LONG == 32
3df0fc5b
PZ
5986 printk(KERN_INFO
5987 " task PC stack pid father\n");
1da177e4 5988#else
3df0fc5b
PZ
5989 printk(KERN_INFO
5990 " task PC stack pid father\n");
1da177e4 5991#endif
510f5acc 5992 rcu_read_lock();
5d07f420 5993 for_each_process_thread(g, p) {
1da177e4
LT
5994 /*
 5995 * reset the NMI-timeout; listing all tasks on a slow
25985edc 5996 * console might take a lot of time:
57675cb9
AR
5997 * Also, reset softlockup watchdogs on all CPUs, because
5998 * another CPU might be blocked waiting for us to process
5999 * an IPI.
1da177e4
LT
6000 */
6001 touch_nmi_watchdog();
57675cb9 6002 touch_all_softlockup_watchdogs();
5d68cc95 6003 if (state_filter_match(state_filter, p))
82a1fcb9 6004 sched_show_task(p);
5d07f420 6005 }
1da177e4 6006
dd41f596 6007#ifdef CONFIG_SCHED_DEBUG
fb90a6e9
RV
6008 if (!state_filter)
6009 sysrq_sched_debug_show();
dd41f596 6010#endif
510f5acc 6011 rcu_read_unlock();
e59e2ae2
IM
6012 /*
6013 * Only show locks if all tasks are dumped:
6014 */
93335a21 6015 if (!state_filter)
e59e2ae2 6016 debug_show_all_locks();
1da177e4
LT
6017}
6018
f340c0d1
IM
6019/**
6020 * init_idle - set up an idle thread for a given CPU
6021 * @idle: task in question
d1ccc66d 6022 * @cpu: CPU the idle task belongs to
f340c0d1
IM
6023 *
6024 * NOTE: this function does not set the idle thread's NEED_RESCHED
6025 * flag, to make booting more robust.
6026 */
0db0628d 6027void init_idle(struct task_struct *idle, int cpu)
1da177e4 6028{
70b97a7f 6029 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
6030 unsigned long flags;
6031
ff51ff84
PZ
6032 __sched_fork(0, idle);
6033
25834c73
PZ
6034 raw_spin_lock_irqsave(&idle->pi_lock, flags);
6035 raw_spin_lock(&rq->lock);
5cbd54ef 6036
06b83b5f 6037 idle->state = TASK_RUNNING;
dd41f596 6038 idle->se.exec_start = sched_clock();
c1de45ca 6039 idle->flags |= PF_IDLE;
dd41f596 6040
e1b77c92
MR
6041 kasan_unpoison_task_stack(idle);
6042
de9b8f5d
PZ
6043#ifdef CONFIG_SMP
6044 /*
 6045 * It's possible that init_idle() gets called multiple times on a task;
6046 * in that case do_set_cpus_allowed() will not do the right thing.
6047 *
6048 * And since this is boot we can forgo the serialization.
6049 */
6050 set_cpus_allowed_common(idle, cpumask_of(cpu));
6051#endif
6506cf6c
PZ
6052 /*
 6053 * We're having a chicken-and-egg problem: even though we are
d1ccc66d 6054 * holding rq->lock, the CPU isn't yet set to this CPU so the
6506cf6c
PZ
6055 * lockdep check in task_group() will fail.
6056 *
 6057 * Similar case to sched_fork(). Alternatively we could
6058 * use task_rq_lock() here and obtain the other rq->lock.
6059 *
6060 * Silence PROVE_RCU
6061 */
6062 rcu_read_lock();
dd41f596 6063 __set_task_cpu(idle, cpu);
6506cf6c 6064 rcu_read_unlock();
1da177e4 6065
5311a98f
EB
6066 rq->idle = idle;
6067 rcu_assign_pointer(rq->curr, idle);
da0c1e65 6068 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 6069#ifdef CONFIG_SMP
3ca7a440 6070 idle->on_cpu = 1;
4866cde0 6071#endif
25834c73
PZ
6072 raw_spin_unlock(&rq->lock);
6073 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
6074
6075 /* Set the preempt count _outside_ the spinlocks! */
01028747 6076 init_idle_preempt_count(idle, cpu);
55cd5340 6077
dd41f596
IM
6078 /*
6079 * The idle tasks have their own, simple scheduling class:
6080 */
6081 idle->sched_class = &idle_sched_class;
868baf07 6082 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 6083 vtime_init_idle(idle, cpu);
de9b8f5d 6084#ifdef CONFIG_SMP
f1c6f1a7
CE
6085 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
6086#endif
19978ca6
IM
6087}
6088
e1d4eeec
NP
6089#ifdef CONFIG_SMP
6090
f82f8042
JL
6091int cpuset_cpumask_can_shrink(const struct cpumask *cur,
6092 const struct cpumask *trial)
6093{
06a76fe0 6094 int ret = 1;
f82f8042 6095
bb2bc55a
MG
6096 if (!cpumask_weight(cur))
6097 return ret;
6098
06a76fe0 6099 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
f82f8042
JL
6100
6101 return ret;
6102}
6103
7f51412a
JL
6104int task_can_attach(struct task_struct *p,
6105 const struct cpumask *cs_cpus_allowed)
6106{
6107 int ret = 0;
6108
6109 /*
6110 * Kthreads which disallow setaffinity shouldn't be moved
d1ccc66d 6111 * to a new cpuset; we don't want to change their CPU
7f51412a
JL
6112 * affinity and isolating such threads by their set of
6113 * allowed nodes is unnecessary. Thus, cpusets are not
6114 * applicable for such threads. This prevents checking for
6115 * success of set_cpus_allowed_ptr() on all attached tasks
3bd37062 6116 * before cpus_mask may be changed.
7f51412a
JL
6117 */
6118 if (p->flags & PF_NO_SETAFFINITY) {
6119 ret = -EINVAL;
6120 goto out;
6121 }
6122
7f51412a 6123 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
06a76fe0
NP
6124 cs_cpus_allowed))
6125 ret = dl_task_can_attach(p, cs_cpus_allowed);
7f51412a 6126
7f51412a
JL
6127out:
6128 return ret;
6129}
6130
f2cb1360 6131bool sched_smp_initialized __read_mostly;
e26fbffd 6132
e6628d5b
MG
6133#ifdef CONFIG_NUMA_BALANCING
6134/* Migrate current task p to target_cpu */
6135int migrate_task_to(struct task_struct *p, int target_cpu)
6136{
6137 struct migration_arg arg = { p, target_cpu };
6138 int curr_cpu = task_cpu(p);
6139
6140 if (curr_cpu == target_cpu)
6141 return 0;
6142
3bd37062 6143 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
e6628d5b
MG
6144 return -EINVAL;
6145
6146 /* TODO: This is not properly updating schedstats */
6147
286549dc 6148 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
6149 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
6150}
0ec8aa00
PZ
6151
6152/*
6153 * Requeue a task on a given node and accurately track the number of NUMA
6154 * tasks on the runqueues
6155 */
6156void sched_setnuma(struct task_struct *p, int nid)
6157{
da0c1e65 6158 bool queued, running;
eb580751
PZ
6159 struct rq_flags rf;
6160 struct rq *rq;
0ec8aa00 6161
eb580751 6162 rq = task_rq_lock(p, &rf);
da0c1e65 6163 queued = task_on_rq_queued(p);
0ec8aa00
PZ
6164 running = task_current(rq, p);
6165
da0c1e65 6166 if (queued)
1de64443 6167 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 6168 if (running)
f3cd1c4e 6169 put_prev_task(rq, p);
0ec8aa00
PZ
6170
6171 p->numa_preferred_nid = nid;
0ec8aa00 6172
da0c1e65 6173 if (queued)
7134b3e9 6174 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 6175 if (running)
03b7fad1 6176 set_next_task(rq, p);
eb580751 6177 task_rq_unlock(rq, p, &rf);
0ec8aa00 6178}
5cc389bc 6179#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 6180
1da177e4 6181#ifdef CONFIG_HOTPLUG_CPU
054b9108 6182/*
d1ccc66d 6183 * Ensure that the idle task is using init_mm right before its CPU goes
48c5ccae 6184 * offline.
054b9108 6185 */
48c5ccae 6186void idle_task_exit(void)
1da177e4 6187{
48c5ccae 6188 struct mm_struct *mm = current->active_mm;
e76bd8d9 6189
48c5ccae 6190 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 6191
a53efe5f 6192 if (mm != &init_mm) {
252d2a41 6193 switch_mm(mm, &init_mm, current);
3eda69c9 6194 current->active_mm = &init_mm;
a53efe5f
MS
6195 finish_arch_post_lock_switch();
6196 }
48c5ccae 6197 mmdrop(mm);
1da177e4
LT
6198}
6199
6200/*
5d180232
PZ
6201 * Since this CPU is going 'away' for a while, fold any nr_active delta
6202 * we might have. Assumes we're called after migrate_tasks() so that the
d60585c5
TG
6203 * nr_active count is stable. We need to take the teardown thread which
6204 * is calling this into account, so we hand in adjust = 1 to the load
6205 * calculation.
5d180232
PZ
6206 *
6207 * Also see the comment "Global load-average calculations".
1da177e4 6208 */
5d180232 6209static void calc_load_migrate(struct rq *rq)
1da177e4 6210{
d60585c5 6211 long delta = calc_load_fold_active(rq, 1);
5d180232
PZ
6212 if (delta)
6213 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
6214}
6215
10e7071b 6216static struct task_struct *__pick_migrate_task(struct rq *rq)
3f1d2a31 6217{
10e7071b
PZ
6218 const struct sched_class *class;
6219 struct task_struct *next;
3f1d2a31 6220
10e7071b 6221 for_each_class(class) {
98c2f700 6222 next = class->pick_next_task(rq);
10e7071b 6223 if (next) {
6e2df058 6224 next->sched_class->put_prev_task(rq, next);
10e7071b
PZ
6225 return next;
6226 }
6227 }
3f1d2a31 6228
10e7071b
PZ
6229 /* The idle class should always have a runnable task */
6230 BUG();
6231}
3f1d2a31 6232
48f24c4d 6233/*
48c5ccae
PZ
 6234 * Migrate all tasks from the rq; sleeping tasks will be migrated by
6235 * try_to_wake_up()->select_task_rq().
6236 *
 6237 * Called with rq->lock held even though we're in stop_machine() and
 6238 * there's no concurrency possible; we hold the required locks anyway
6239 * because of lock validation efforts.
1da177e4 6240 */
8a8c69c3 6241static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
1da177e4 6242{
5e16bbc2 6243 struct rq *rq = dead_rq;
48c5ccae 6244 struct task_struct *next, *stop = rq->stop;
8a8c69c3 6245 struct rq_flags orf = *rf;
48c5ccae 6246 int dest_cpu;
1da177e4
LT
6247
6248 /*
48c5ccae
PZ
6249 * Fudge the rq selection such that the below task selection loop
6250 * doesn't get stuck on the currently eligible stop task.
6251 *
6252 * We're currently inside stop_machine() and the rq is either stuck
6253 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6254 * either way we should never end up calling schedule() until we're
6255 * done here.
1da177e4 6256 */
48c5ccae 6257 rq->stop = NULL;
48f24c4d 6258
77bd3970
FW
6259 /*
6260 * put_prev_task() and pick_next_task() sched
 6261 * class methods both need to have an up-to-date
6262 * value of rq->clock[_task]
6263 */
6264 update_rq_clock(rq);
6265
5e16bbc2 6266 for (;;) {
48c5ccae
PZ
6267 /*
6268 * There's this thread running, bail when that's the only
d1ccc66d 6269 * remaining thread:
48c5ccae
PZ
6270 */
6271 if (rq->nr_running == 1)
dd41f596 6272 break;
48c5ccae 6273
10e7071b 6274 next = __pick_migrate_task(rq);
e692ab53 6275
5473e0cc 6276 /*
3bd37062 6277 * Rules for changing task_struct::cpus_mask are holding
5473e0cc
WL
6278 * both pi_lock and rq->lock, such that holding either
6279 * stabilizes the mask.
6280 *
 6281 * Dropping rq->lock is not quite as disastrous as it usually is
6282 * because !cpu_active at this point, which means load-balance
6283 * will not interfere. Also, stop-machine.
6284 */
8a8c69c3 6285 rq_unlock(rq, rf);
5473e0cc 6286 raw_spin_lock(&next->pi_lock);
8a8c69c3 6287 rq_relock(rq, rf);
5473e0cc
WL
6288
6289 /*
6290 * Since we're inside stop-machine, _nothing_ should have
6291 * changed the task, WARN if weird stuff happened, because in
6292 * that case the above rq->lock drop is a fail too.
6293 */
6294 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
6295 raw_spin_unlock(&next->pi_lock);
6296 continue;
6297 }
6298
48c5ccae 6299 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 6300 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
8a8c69c3 6301 rq = __migrate_task(rq, rf, next, dest_cpu);
5e16bbc2 6302 if (rq != dead_rq) {
8a8c69c3 6303 rq_unlock(rq, rf);
5e16bbc2 6304 rq = dead_rq;
8a8c69c3
PZ
6305 *rf = orf;
6306 rq_relock(rq, rf);
5e16bbc2 6307 }
5473e0cc 6308 raw_spin_unlock(&next->pi_lock);
1da177e4 6309 }
dce48a84 6310
48c5ccae 6311 rq->stop = stop;
dce48a84 6312}
1da177e4
LT
6313#endif /* CONFIG_HOTPLUG_CPU */
6314
f2cb1360 6315void set_rq_online(struct rq *rq)
1f11eb6a
GH
6316{
6317 if (!rq->online) {
6318 const struct sched_class *class;
6319
c6c4927b 6320 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6321 rq->online = 1;
6322
6323 for_each_class(class) {
6324 if (class->rq_online)
6325 class->rq_online(rq);
6326 }
6327 }
6328}
6329
f2cb1360 6330void set_rq_offline(struct rq *rq)
1f11eb6a
GH
6331{
6332 if (rq->online) {
6333 const struct sched_class *class;
6334
6335 for_each_class(class) {
6336 if (class->rq_offline)
6337 class->rq_offline(rq);
6338 }
6339
c6c4927b 6340 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6341 rq->online = 0;
6342 }
6343}
6344
d1ccc66d
IM
6345/*
6346 * used to mark begin/end of suspend/resume:
6347 */
6348static int num_cpus_frozen;
d35be8ba 6349
1da177e4 6350/*
3a101d05
TH
6351 * Update cpusets according to cpu_active mask. If cpusets are
6352 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6353 * around partition_sched_domains().
d35be8ba
SB
6354 *
6355 * If we come here as part of a suspend/resume, don't touch cpusets because we
6356 * want to restore it back to its original state upon resume anyway.
1da177e4 6357 */
40190a78 6358static void cpuset_cpu_active(void)
e761b772 6359{
40190a78 6360 if (cpuhp_tasks_frozen) {
d35be8ba
SB
6361 /*
6362 * num_cpus_frozen tracks how many CPUs are involved in suspend
6363 * resume sequence. As long as this is not the last online
6364 * operation in the resume sequence, just build a single sched
6365 * domain, ignoring cpusets.
6366 */
50e76632
PZ
6367 partition_sched_domains(1, NULL, NULL);
6368 if (--num_cpus_frozen)
135fb3e1 6369 return;
d35be8ba
SB
6370 /*
6371 * This is the last CPU online operation. So fall through and
6372 * restore the original sched domains by considering the
6373 * cpuset configurations.
6374 */
50e76632 6375 cpuset_force_rebuild();
3a101d05 6376 }
30e03acd 6377 cpuset_update_active_cpus();
3a101d05 6378}
e761b772 6379
40190a78 6380static int cpuset_cpu_inactive(unsigned int cpu)
3a101d05 6381{
40190a78 6382 if (!cpuhp_tasks_frozen) {
06a76fe0 6383 if (dl_cpu_busy(cpu))
135fb3e1 6384 return -EBUSY;
30e03acd 6385 cpuset_update_active_cpus();
135fb3e1 6386 } else {
d35be8ba
SB
6387 num_cpus_frozen++;
6388 partition_sched_domains(1, NULL, NULL);
e761b772 6389 }
135fb3e1 6390 return 0;
e761b772 6391}
e761b772 6392
40190a78 6393int sched_cpu_activate(unsigned int cpu)
135fb3e1 6394{
7d976699 6395 struct rq *rq = cpu_rq(cpu);
8a8c69c3 6396 struct rq_flags rf;
7d976699 6397
ba2591a5
PZ
6398#ifdef CONFIG_SCHED_SMT
6399 /*
c5511d03 6400 * When going up, increment the number of cores with SMT present.
ba2591a5 6401 */
c5511d03
PZI
6402 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
6403 static_branch_inc_cpuslocked(&sched_smt_present);
ba2591a5 6404#endif
40190a78 6405 set_cpu_active(cpu, true);
135fb3e1 6406
40190a78 6407 if (sched_smp_initialized) {
135fb3e1 6408 sched_domains_numa_masks_set(cpu);
40190a78 6409 cpuset_cpu_active();
e761b772 6410 }
7d976699
TG
6411
6412 /*
6413 * Put the rq online, if not already. This happens:
6414 *
6415 * 1) In the early boot process, because we build the real domains
d1ccc66d 6416 * after all CPUs have been brought up.
7d976699
TG
6417 *
6418 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
6419 * domains.
6420 */
8a8c69c3 6421 rq_lock_irqsave(rq, &rf);
7d976699
TG
6422 if (rq->rd) {
6423 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6424 set_rq_online(rq);
6425 }
8a8c69c3 6426 rq_unlock_irqrestore(rq, &rf);
7d976699 6427
40190a78 6428 return 0;
135fb3e1
TG
6429}
6430
40190a78 6431int sched_cpu_deactivate(unsigned int cpu)
135fb3e1 6432{
135fb3e1
TG
6433 int ret;
6434
40190a78 6435 set_cpu_active(cpu, false);
b2454caa
PZ
6436 /*
 6437 * We've cleared cpu_active_mask; wait for all preempt-disabled and RCU
6438 * users of this state to go away such that all new such users will
6439 * observe it.
6440 *
b2454caa
PZ
 6441 * Do sync before parking smpboot threads to take care of the RCU boost case.
6442 */
309ba859 6443 synchronize_rcu();
40190a78 6444
c5511d03
PZI
6445#ifdef CONFIG_SCHED_SMT
6446 /*
6447 * When going down, decrement the number of cores with SMT present.
6448 */
6449 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
6450 static_branch_dec_cpuslocked(&sched_smt_present);
6451#endif
6452
40190a78
TG
6453 if (!sched_smp_initialized)
6454 return 0;
6455
6456 ret = cpuset_cpu_inactive(cpu);
6457 if (ret) {
6458 set_cpu_active(cpu, true);
6459 return ret;
135fb3e1 6460 }
40190a78
TG
6461 sched_domains_numa_masks_clear(cpu);
6462 return 0;
135fb3e1
TG
6463}
6464
94baf7a5
TG
6465static void sched_rq_cpu_starting(unsigned int cpu)
6466{
6467 struct rq *rq = cpu_rq(cpu);
6468
6469 rq->calc_load_update = calc_load_update;
94baf7a5
TG
6470 update_max_interval();
6471}
6472
135fb3e1
TG
6473int sched_cpu_starting(unsigned int cpu)
6474{
94baf7a5 6475 sched_rq_cpu_starting(cpu);
d84b3131 6476 sched_tick_start(cpu);
135fb3e1 6477 return 0;
e761b772 6478}
e761b772 6479
f2785ddb
TG
6480#ifdef CONFIG_HOTPLUG_CPU
6481int sched_cpu_dying(unsigned int cpu)
6482{
6483 struct rq *rq = cpu_rq(cpu);
8a8c69c3 6484 struct rq_flags rf;
f2785ddb
TG
6485
6486 /* Handle pending wakeups and then migrate everything off */
6487 sched_ttwu_pending();
d84b3131 6488 sched_tick_stop(cpu);
8a8c69c3
PZ
6489
6490 rq_lock_irqsave(rq, &rf);
f2785ddb
TG
6491 if (rq->rd) {
6492 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6493 set_rq_offline(rq);
6494 }
8a8c69c3 6495 migrate_tasks(rq, &rf);
f2785ddb 6496 BUG_ON(rq->nr_running != 1);
8a8c69c3
PZ
6497 rq_unlock_irqrestore(rq, &rf);
6498
f2785ddb
TG
6499 calc_load_migrate(rq);
6500 update_max_interval();
00357f5e 6501 nohz_balance_exit_idle(rq);
e5ef27d0 6502 hrtick_clear(rq);
f2785ddb
TG
6503 return 0;
6504}
6505#endif
6506
1da177e4
LT
6507void __init sched_init_smp(void)
6508{
cb83b629
PZ
6509 sched_init_numa();
6510
6acce3ef
PZ
6511 /*
6512 * There's no userspace yet to cause hotplug operations; hence all the
d1ccc66d 6513 * CPU masks are stable and all blatant races in the below code cannot
b5a4e2bb 6514 * happen.
6acce3ef 6515 */
712555ee 6516 mutex_lock(&sched_domains_mutex);
8d5dc512 6517 sched_init_domains(cpu_active_mask);
712555ee 6518 mutex_unlock(&sched_domains_mutex);
e761b772 6519
5c1e1767 6520 /* Move init over to a non-isolated CPU */
edb93821 6521 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
5c1e1767 6522 BUG();
19978ca6 6523 sched_init_granularity();
4212823f 6524
0e3900e6 6525 init_sched_rt_class();
1baca4ce 6526 init_sched_dl_class();
1b568f0a 6527
e26fbffd 6528 sched_smp_initialized = true;
1da177e4 6529}
e26fbffd
TG
6530
6531static int __init migration_init(void)
6532{
77a5352b 6533 sched_cpu_starting(smp_processor_id());
e26fbffd 6534 return 0;
1da177e4 6535}
e26fbffd
TG
6536early_initcall(migration_init);
6537
1da177e4
LT
6538#else
6539void __init sched_init_smp(void)
6540{
19978ca6 6541 sched_init_granularity();
1da177e4
LT
6542}
6543#endif /* CONFIG_SMP */
6544
6545int in_sched_functions(unsigned long addr)
6546{
1da177e4
LT
6547 return in_lock_functions(addr) ||
6548 (addr >= (unsigned long)__sched_text_start
6549 && addr < (unsigned long)__sched_text_end);
6550}
6551
029632fb 6552#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
6553/*
6554 * Default task group.
6555 * Every task in system belongs to this group at bootup.
6556 */
029632fb 6557struct task_group root_task_group;
35cf4e50 6558LIST_HEAD(task_groups);
b0367629
WL
6559
6560/* Cacheline aligned slab cache for task_group */
6561static struct kmem_cache *task_group_cache __read_mostly;
052f1dc7 6562#endif
6f505b16 6563
e6252c3e 6564DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
10e2f1ac 6565DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
6f505b16 6566
1da177e4
LT
6567void __init sched_init(void)
6568{
a1dc0446 6569 unsigned long ptr = 0;
55627e3c 6570 int i;
434d53b0 6571
5822a454 6572 wait_bit_init();
9dcb8b68 6573
434d53b0 6574#ifdef CONFIG_FAIR_GROUP_SCHED
a1dc0446 6575 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0
MT
6576#endif
6577#ifdef CONFIG_RT_GROUP_SCHED
a1dc0446 6578 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0 6579#endif
a1dc0446
QC
6580 if (ptr) {
6581 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
434d53b0
MT
6582
6583#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 6584 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
6585 ptr += nr_cpu_ids * sizeof(void **);
6586
07e06b01 6587 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 6588 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 6589
6d6bc0ad 6590#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 6591#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6592 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
6593 ptr += nr_cpu_ids * sizeof(void **);
6594
07e06b01 6595 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
6596 ptr += nr_cpu_ids * sizeof(void **);
6597
6d6bc0ad 6598#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 6599 }
df7c8e84 6600#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
6601 for_each_possible_cpu(i) {
6602 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
6603 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
10e2f1ac
PZ
6604 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
6605 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 6606 }
b74e6278 6607#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 6608
d1ccc66d
IM
6609 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
6610 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
332ac17e 6611
57d885fe
GH
6612#ifdef CONFIG_SMP
6613 init_defrootdomain();
6614#endif
6615
d0b27fa7 6616#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6617 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 6618 global_rt_period(), global_rt_runtime());
6d6bc0ad 6619#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 6620
7c941438 6621#ifdef CONFIG_CGROUP_SCHED
b0367629
WL
6622 task_group_cache = KMEM_CACHE(task_group, 0);
6623
07e06b01
YZ
6624 list_add(&root_task_group.list, &task_groups);
6625 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 6626 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 6627 autogroup_init(&init_task);
7c941438 6628#endif /* CONFIG_CGROUP_SCHED */
6f505b16 6629
0a945022 6630 for_each_possible_cpu(i) {
70b97a7f 6631 struct rq *rq;
1da177e4
LT
6632
6633 rq = cpu_rq(i);
05fa785c 6634 raw_spin_lock_init(&rq->lock);
7897986b 6635 rq->nr_running = 0;
dce48a84
TG
6636 rq->calc_load_active = 0;
6637 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 6638 init_cfs_rq(&rq->cfs);
07c54f7a
AV
6639 init_rt_rq(&rq->rt);
6640 init_dl_rq(&rq->dl);
dd41f596 6641#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 6642 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 6643 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
9c2791f9 6644 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
354d60c2 6645 /*
d1ccc66d 6646 * How much CPU bandwidth does root_task_group get?
354d60c2
DG
6647 *
 6648 * In case of task-groups formed through the cgroup filesystem, it
d1ccc66d
IM
6649 * gets 100% of the CPU resources in the system. This overall
6650 * system CPU resource is divided among the tasks of
07e06b01 6651 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
6652 * based on each entity's (task or task-group's) weight
6653 * (se->load.weight).
6654 *
07e06b01 6655 * In other words, if root_task_group has 10 tasks (of weight
354d60c2 6656 * 1024) and two child groups A0 and A1 (of weight 1024 each),
d1ccc66d 6657 * then A0's share of the CPU resource is:
354d60c2 6658 *
0d905bca 6659 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 6660 *
07e06b01
YZ
6661 * We achieve this by letting root_task_group's tasks sit
6662 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 6663 */
ab84d31e 6664 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 6665 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
6666#endif /* CONFIG_FAIR_GROUP_SCHED */
6667
6668 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 6669#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6670 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 6671#endif
1da177e4 6672#ifdef CONFIG_SMP
41c7ce9a 6673 rq->sd = NULL;
57d885fe 6674 rq->rd = NULL;
ca6d75e6 6675 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 6676 rq->balance_callback = NULL;
1da177e4 6677 rq->active_balance = 0;
dd41f596 6678 rq->next_balance = jiffies;
1da177e4 6679 rq->push_cpu = 0;
0a2966b4 6680 rq->cpu = i;
1f11eb6a 6681 rq->online = 0;
eae0c9df
MG
6682 rq->idle_stamp = 0;
6683 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 6684 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
6685
6686 INIT_LIST_HEAD(&rq->cfs_tasks);
6687
dc938520 6688 rq_attach_root(rq, &def_root_domain);
3451d024 6689#ifdef CONFIG_NO_HZ_COMMON
9fd81dd5 6690 rq->last_load_update_tick = jiffies;
e022e0d3 6691 rq->last_blocked_load_update_tick = jiffies;
a22e47a4 6692 atomic_set(&rq->nohz_flags, 0);
83cd4fe2 6693#endif
9fd81dd5 6694#endif /* CONFIG_SMP */
77a021be 6695 hrtick_rq_init(rq);
1da177e4 6696 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
6697 }
6698
9059393e 6699 set_load_weight(&init_task, false);
b50f60ce 6700
1da177e4
LT
6701 /*
6702 * The boot idle thread does lazy MMU switching as well:
6703 */
f1f10076 6704 mmgrab(&init_mm);
1da177e4
LT
6705 enter_lazy_tlb(&init_mm, current);
6706
6707 /*
6708 * Make us the idle thread. Technically, schedule() should not be
6709 * called from this thread, however somewhere below it might be,
6710 * but because we are the idle thread, we just pick up running again
6711 * when this runqueue becomes "idle".
6712 */
6713 init_idle(current, smp_processor_id());
dce48a84
TG
6714
6715 calc_load_update = jiffies + LOAD_FREQ;
6716
bf4d83f6 6717#ifdef CONFIG_SMP
29d5e047 6718 idle_thread_set_boot_cpu();
029632fb
PZ
6719#endif
6720 init_sched_fair_class();
6a7b3dc3 6721
4698f88c
JP
6722 init_schedstats();
6723
eb414681
JW
6724 psi_init();
6725
69842cba
PB
6726 init_uclamp();
6727
6892b75e 6728 scheduler_running = 1;
1da177e4
LT
6729}
6730
d902db1e 6731#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
6732static inline int preempt_count_equals(int preempt_offset)
6733{
da7142e2 6734 int nested = preempt_count() + rcu_preempt_depth();
e4aafea2 6735
4ba8216c 6736 return (nested == preempt_offset);
e4aafea2
FW
6737}
6738
d894837f 6739void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6740{
8eb23b9f
PZ
6741 /*
6742 * Blocking primitives will set (and therefore destroy) current->state,
 6743 * since we will exit with TASK_RUNNING, make sure we enter with it;
 6744 * otherwise we will destroy that state.
6745 */
00845eb9 6746 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
6747 "do not call blocking ops when !TASK_RUNNING; "
6748 "state=%lx set at [<%p>] %pS\n",
6749 current->state,
6750 (void *)current->task_state_change,
00845eb9 6751 (void *)current->task_state_change);
8eb23b9f 6752
3427445a
PZ
6753 ___might_sleep(file, line, preempt_offset);
6754}
6755EXPORT_SYMBOL(__might_sleep);
6756
6757void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6758{
d1ccc66d
IM
6759 /* Ratelimiting timestamp: */
6760 static unsigned long prev_jiffy;
6761
d1c6d149 6762 unsigned long preempt_disable_ip;
1da177e4 6763
d1ccc66d
IM
6764 /* WARN_ON_ONCE() by default, no rate limit required: */
6765 rcu_sleep_check();
6766
db273be2 6767 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
312364f3 6768 !is_idle_task(current) && !current->non_block_count) ||
1c3c5eab
TG
6769 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
6770 oops_in_progress)
aef745fc 6771 return;
1c3c5eab 6772
aef745fc
IM
6773 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6774 return;
6775 prev_jiffy = jiffies;
6776
d1ccc66d 6777 /* Save this before calling printk(), since that will clobber it: */
d1c6d149
VN
6778 preempt_disable_ip = get_preempt_disable_ip(current);
6779
3df0fc5b
PZ
6780 printk(KERN_ERR
6781 "BUG: sleeping function called from invalid context at %s:%d\n",
6782 file, line);
6783 printk(KERN_ERR
312364f3
DV
6784 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
6785 in_atomic(), irqs_disabled(), current->non_block_count,
3df0fc5b 6786 current->pid, current->comm);
aef745fc 6787
a8b686b3
ES
6788 if (task_stack_end_corrupted(current))
6789 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
6790
aef745fc
IM
6791 debug_show_held_locks(current);
6792 if (irqs_disabled())
6793 print_irqtrace_events(current);
d1c6d149
VN
6794 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
6795 && !preempt_count_equals(preempt_offset)) {
8f47b187 6796 pr_err("Preemption disabled at:");
d1c6d149 6797 print_ip_sym(preempt_disable_ip);
8f47b187
TG
6798 pr_cont("\n");
6799 }
aef745fc 6800 dump_stack();
f0b22e39 6801 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
1da177e4 6802}
3427445a 6803EXPORT_SYMBOL(___might_sleep);
568f1967
PZ
6804
6805void __cant_sleep(const char *file, int line, int preempt_offset)
6806{
6807 static unsigned long prev_jiffy;
6808
6809 if (irqs_disabled())
6810 return;
6811
6812 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
6813 return;
6814
6815 if (preempt_count() > preempt_offset)
6816 return;
6817
6818 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6819 return;
6820 prev_jiffy = jiffies;
6821
6822 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
6823 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6824 in_atomic(), irqs_disabled(),
6825 current->pid, current->comm);
6826
6827 debug_show_held_locks(current);
6828 dump_stack();
6829 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
6830}
6831EXPORT_SYMBOL_GPL(__cant_sleep);
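/*
 * These helpers back the might_sleep() and cant_sleep() annotations (the
 * wrapping macros live in a header, not here). A sketch of how a sleeping API
 * typically annotates itself; the function, mutex and do_work_locked() are
 * illustrative assumptions:
 *
 *	void my_blocking_api(struct mutex *lock)
 *	{
 *		might_sleep();		// warn if called from atomic context
 *		mutex_lock(lock);	// may sleep waiting for the owner
 *		do_work_locked();
 *		mutex_unlock(lock);
 *	}
 */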
1da177e4
LT
6832#endif
6833
6834#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 6835void normalize_rt_tasks(void)
3a5e4dc1 6836{
dbc7f069 6837 struct task_struct *g, *p;
d50dde5a
DF
6838 struct sched_attr attr = {
6839 .sched_policy = SCHED_NORMAL,
6840 };
1da177e4 6841
3472eaa1 6842 read_lock(&tasklist_lock);
5d07f420 6843 for_each_process_thread(g, p) {
178be793
IM
6844 /*
6845 * Only normalize user tasks:
6846 */
3472eaa1 6847 if (p->flags & PF_KTHREAD)
178be793
IM
6848 continue;
6849
4fa8d299
JP
6850 p->se.exec_start = 0;
6851 schedstat_set(p->se.statistics.wait_start, 0);
6852 schedstat_set(p->se.statistics.sleep_start, 0);
6853 schedstat_set(p->se.statistics.block_start, 0);
dd41f596 6854
aab03e05 6855 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
6856 /*
6857 * Renice negative nice level userspace
6858 * tasks back to 0:
6859 */
3472eaa1 6860 if (task_nice(p) < 0)
dd41f596 6861 set_user_nice(p, 0);
1da177e4 6862 continue;
dd41f596 6863 }
1da177e4 6864
dbc7f069 6865 __sched_setscheduler(p, &attr, false, false);
5d07f420 6866 }
3472eaa1 6867 read_unlock(&tasklist_lock);
1da177e4
LT
6868}
6869
6870#endif /* CONFIG_MAGIC_SYSRQ */
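normalize_rt_tasks() is reached through the magic SysRq interface (SysRq-n). The sketch below shows how such a handler can be registered; it mirrors the pattern used in drivers/tty/sysrq.c, but treat the exact strings and enable flag as illustrative.

/* Illustrative SysRq hook invoking normalize_rt_tasks(). */
#include <linux/sched.h>
#include <linux/sysrq.h>

static void sysrq_handle_unrt(int key)
{
	normalize_rt_tasks();
}

static struct sysrq_key_op sysrq_unrt_op = {
	.handler	= sysrq_handle_unrt,
	.help_msg	= "nice-all-RT-tasks(n)",
	.action_msg	= "Nice All RT Tasks",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};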
1df5c10a 6871
67fc4e0c 6872#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 6873/*
67fc4e0c 6874 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
6875 *
6876 * They can only be called when the whole system has been
6877 * stopped - every CPU needs to be quiescent, and no scheduling
6878 * activity can take place. Using them for anything else would
6879 * be a serious bug, and as a result, they aren't even visible
6880 * under any other configuration.
6881 */
6882
6883/**
d1ccc66d 6884 * curr_task - return the current task for a given CPU.
1df5c10a
LT
6885 * @cpu: the processor in question.
6886 *
6887 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
6888 *
6889 * Return: The current task for @cpu.
1df5c10a 6890 */
36c8b586 6891struct task_struct *curr_task(int cpu)
1df5c10a
LT
6892{
6893 return cpu_curr(cpu);
6894}
6895
67fc4e0c
JW
6896#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6897
6898#ifdef CONFIG_IA64
1df5c10a 6899/**
5feeb783 6900 * ia64_set_curr_task - set the current task for a given CPU.
1df5c10a
LT
6901 * @cpu: the processor in question.
6902 * @p: the task pointer to set.
6903 *
6904 * Description: This function must only be used when non-maskable interrupts
41a2d6cf 6905 * are serviced on a separate stack. It allows the architecture to switch the
d1ccc66d 6906 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before re-enabling interrupts
 * and restarting the system.
6911 *
6912 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6913 */
a458ae2e 6914void ia64_set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
6915{
6916 cpu_curr(cpu) = p;
6917}
6918
6919#endif
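The save/restore discipline described above could look like the following hypothetical stop-the-world handler; the example_* names are invented for illustration, and only curr_task() and ia64_set_curr_task() come from this file.

/* Hypothetical MCA/kdb-style usage, for illustration only. */
static void example_run_as(int cpu, struct task_struct *handler_task)
{
	struct task_struct *orig = curr_task(cpu);

	/* Whole system stopped, interrupts disabled on every CPU. */
	ia64_set_curr_task(cpu, handler_task);

	/* ... recovery work that must appear to run as handler_task ... */

	/* Restore before interrupts are re-enabled and the system resumes. */
	ia64_set_curr_task(cpu, orig);
}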
29f59db3 6920
7c941438 6921#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
6922/* task_group_lock serializes the addition/removal of task groups */
6923static DEFINE_SPINLOCK(task_group_lock);
6924
2480c093
PB
6925static inline void alloc_uclamp_sched_group(struct task_group *tg,
6926 struct task_group *parent)
6927{
6928#ifdef CONFIG_UCLAMP_TASK_GROUP
0413d7f3 6929 enum uclamp_id clamp_id;
2480c093
PB
6930
6931 for_each_clamp_id(clamp_id) {
6932 uclamp_se_set(&tg->uclamp_req[clamp_id],
6933 uclamp_none(clamp_id), false);
0b60ba2d 6934 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
2480c093
PB
6935 }
6936#endif
6937}
6938
2f5177f0 6939static void sched_free_group(struct task_group *tg)
bccbe08a
PZ
6940{
6941 free_fair_sched_group(tg);
6942 free_rt_sched_group(tg);
e9aa1dd1 6943 autogroup_free(tg);
b0367629 6944 kmem_cache_free(task_group_cache, tg);
bccbe08a
PZ
6945}
6946
6947/* allocate runqueue etc for a new task group */
ec7dc8ac 6948struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
6949{
6950 struct task_group *tg;
bccbe08a 6951
b0367629 6952 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
bccbe08a
PZ
6953 if (!tg)
6954 return ERR_PTR(-ENOMEM);
6955
ec7dc8ac 6956 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
6957 goto err;
6958
ec7dc8ac 6959 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
6960 goto err;
6961
2480c093
PB
6962 alloc_uclamp_sched_group(tg, parent);
6963
ace783b9
LZ
6964 return tg;
6965
6966err:
2f5177f0 6967 sched_free_group(tg);
ace783b9
LZ
6968 return ERR_PTR(-ENOMEM);
6969}
6970
6971void sched_online_group(struct task_group *tg, struct task_group *parent)
6972{
6973 unsigned long flags;
6974
8ed36996 6975 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 6976 list_add_rcu(&tg->list, &task_groups);
f473aa5e 6977
d1ccc66d
IM
6978 /* Root should already exist: */
6979 WARN_ON(!parent);
f473aa5e
PZ
6980
6981 tg->parent = parent;
f473aa5e 6982 INIT_LIST_HEAD(&tg->children);
09f2724a 6983 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 6984 spin_unlock_irqrestore(&task_group_lock, flags);
8663e24d
PZ
6985
6986 online_fair_sched_group(tg);
29f59db3
SV
6987}
6988
9b5b7751 6989/* rcu callback to free various structures associated with a task group */
2f5177f0 6990static void sched_free_group_rcu(struct rcu_head *rhp)
29f59db3 6991{
d1ccc66d 6992 /* Now it should be safe to free those cfs_rqs: */
2f5177f0 6993 sched_free_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
6994}
6995
4cf86d77 6996void sched_destroy_group(struct task_group *tg)
ace783b9 6997{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
2f5177f0 6999 call_rcu(&tg->rcu, sched_free_group_rcu);
ace783b9
LZ
7000}
7001
7002void sched_offline_group(struct task_group *tg)
29f59db3 7003{
8ed36996 7004 unsigned long flags;
29f59db3 7005
d1ccc66d 7006 /* End participation in shares distribution: */
6fe1f348 7007 unregister_fair_sched_group(tg);
3d4b47b4
PZ
7008
7009 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7010 list_del_rcu(&tg->list);
f473aa5e 7011 list_del_rcu(&tg->siblings);
8ed36996 7012 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7013}
7014
ea86cb4b 7015static void sched_change_group(struct task_struct *tsk, int type)
29f59db3 7016{
8323f26c 7017 struct task_group *tg;
29f59db3 7018
f7b8a47d
KT
7019 /*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
	 * which would be pointless here. Thus, we pass "true" to task_css_check()
7022 * to prevent lockdep warnings.
7023 */
7024 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7025 struct task_group, css);
7026 tg = autogroup_task_group(tsk, tg);
7027 tsk->sched_task_group = tg;
7028
810b3817 7029#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
7030 if (tsk->sched_class->task_change_group)
7031 tsk->sched_class->task_change_group(tsk, type);
b2b5ce02 7032 else
810b3817 7033#endif
b2b5ce02 7034 set_task_rq(tsk, task_cpu(tsk));
ea86cb4b
VG
7035}
7036
7037/*
7038 * Change task's runqueue when it moves between groups.
7039 *
7040 * The caller of this function should have put the task in its new group by
7041 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
7042 * its new group.
7043 */
7044void sched_move_task(struct task_struct *tsk)
7045{
7a57f32a
PZ
7046 int queued, running, queue_flags =
7047 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
ea86cb4b
VG
7048 struct rq_flags rf;
7049 struct rq *rq;
7050
7051 rq = task_rq_lock(tsk, &rf);
1b1d6225 7052 update_rq_clock(rq);
ea86cb4b
VG
7053
7054 running = task_current(rq, tsk);
7055 queued = task_on_rq_queued(tsk);
7056
7057 if (queued)
7a57f32a 7058 dequeue_task(rq, tsk, queue_flags);
bb3bac2c 7059 if (running)
ea86cb4b
VG
7060 put_prev_task(rq, tsk);
7061
7062 sched_change_group(tsk, TASK_MOVE_GROUP);
810b3817 7063
da0c1e65 7064 if (queued)
7a57f32a 7065 enqueue_task(rq, tsk, queue_flags);
bb3bac2c 7066 if (running)
03b7fad1 7067 set_next_task(rq, tsk);
29f59db3 7068
eb580751 7069 task_rq_unlock(rq, tsk, &rf);
29f59db3 7070}
68318b8e 7071
a7c6d554 7072static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 7073{
a7c6d554 7074 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
7075}
7076
eb95419b
TH
7077static struct cgroup_subsys_state *
7078cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 7079{
eb95419b
TH
7080 struct task_group *parent = css_tg(parent_css);
7081 struct task_group *tg;
68318b8e 7082
eb95419b 7083 if (!parent) {
68318b8e 7084 /* This is early initialization for the top cgroup */
07e06b01 7085 return &root_task_group.css;
68318b8e
SV
7086 }
7087
ec7dc8ac 7088 tg = sched_create_group(parent);
68318b8e
SV
7089 if (IS_ERR(tg))
7090 return ERR_PTR(-ENOMEM);
7091
68318b8e
SV
7092 return &tg->css;
7093}
7094
96b77745
KK
7095/* Expose task group only after completing cgroup initialization */
7096static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
7097{
7098 struct task_group *tg = css_tg(css);
7099 struct task_group *parent = css_tg(css->parent);
7100
7101 if (parent)
7102 sched_online_group(tg, parent);
7103 return 0;
7104}
7105
2f5177f0 7106static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
ace783b9 7107{
eb95419b 7108 struct task_group *tg = css_tg(css);
ace783b9 7109
2f5177f0 7110 sched_offline_group(tg);
ace783b9
LZ
7111}
7112
eb95419b 7113static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 7114{
eb95419b 7115 struct task_group *tg = css_tg(css);
68318b8e 7116
2f5177f0
PZ
7117 /*
7118 * Relies on the RCU grace period between css_released() and this.
7119 */
7120 sched_free_group(tg);
ace783b9
LZ
7121}
7122
ea86cb4b
VG
7123/*
7124 * This is called before wake_up_new_task(), therefore we really only
7125 * have to set its group bits, all the other stuff does not apply.
7126 */
b53202e6 7127static void cpu_cgroup_fork(struct task_struct *task)
eeb61e53 7128{
ea86cb4b
VG
7129 struct rq_flags rf;
7130 struct rq *rq;
7131
7132 rq = task_rq_lock(task, &rf);
7133
80f5c1b8 7134 update_rq_clock(rq);
ea86cb4b
VG
7135 sched_change_group(task, TASK_SET_GROUP);
7136
7137 task_rq_unlock(rq, task, &rf);
eeb61e53
KT
7138}
7139
1f7dd3e5 7140static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
68318b8e 7141{
bb9d97b6 7142 struct task_struct *task;
1f7dd3e5 7143 struct cgroup_subsys_state *css;
7dc603c9 7144 int ret = 0;
bb9d97b6 7145
1f7dd3e5 7146 cgroup_taskset_for_each(task, css, tset) {
b68aa230 7147#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 7148 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 7149 return -EINVAL;
b68aa230 7150#endif
7dc603c9
PZ
7151 /*
		 * Serialize against wake_up_new_task() such that if it's
		 * running, we're sure to observe its full state.
7154 */
7155 raw_spin_lock_irq(&task->pi_lock);
7156 /*
7157 * Avoid calling sched_move_task() before wake_up_new_task()
7158 * has happened. This would lead to problems with PELT, due to
7159 * move wanting to detach+attach while we're not attached yet.
7160 */
7161 if (task->state == TASK_NEW)
7162 ret = -EINVAL;
7163 raw_spin_unlock_irq(&task->pi_lock);
7164
7165 if (ret)
7166 break;
bb9d97b6 7167 }
7dc603c9 7168 return ret;
be367d09 7169}
68318b8e 7170
1f7dd3e5 7171static void cpu_cgroup_attach(struct cgroup_taskset *tset)
68318b8e 7172{
bb9d97b6 7173 struct task_struct *task;
1f7dd3e5 7174 struct cgroup_subsys_state *css;
bb9d97b6 7175
1f7dd3e5 7176 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 7177 sched_move_task(task);
68318b8e
SV
7178}
7179
2480c093 7180#ifdef CONFIG_UCLAMP_TASK_GROUP
0b60ba2d
PB
7181static void cpu_util_update_eff(struct cgroup_subsys_state *css)
7182{
7183 struct cgroup_subsys_state *top_css = css;
7184 struct uclamp_se *uc_parent = NULL;
7185 struct uclamp_se *uc_se = NULL;
7186 unsigned int eff[UCLAMP_CNT];
0413d7f3 7187 enum uclamp_id clamp_id;
0b60ba2d
PB
7188 unsigned int clamps;
7189
7190 css_for_each_descendant_pre(css, top_css) {
7191 uc_parent = css_tg(css)->parent
7192 ? css_tg(css)->parent->uclamp : NULL;
7193
7194 for_each_clamp_id(clamp_id) {
			/* Assume effective clamps match requested clamps */
7196 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
7197 /* Cap effective clamps with parent's effective clamps */
7198 if (uc_parent &&
7199 eff[clamp_id] > uc_parent[clamp_id].value) {
7200 eff[clamp_id] = uc_parent[clamp_id].value;
7201 }
7202 }
7203 /* Ensure protection is always capped by limit */
7204 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
7205
7206 /* Propagate most restrictive effective clamps */
7207 clamps = 0x0;
7208 uc_se = css_tg(css)->uclamp;
7209 for_each_clamp_id(clamp_id) {
7210 if (eff[clamp_id] == uc_se[clamp_id].value)
7211 continue;
7212 uc_se[clamp_id].value = eff[clamp_id];
7213 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
7214 clamps |= (0x1 << clamp_id);
7215 }
babbe170 7216 if (!clamps) {
0b60ba2d 7217 css = css_rightmost_descendant(css);
babbe170
PB
7218 continue;
7219 }
7220
7221 /* Immediately update descendants RUNNABLE tasks */
7222 uclamp_update_active_tasks(css, clamps);
0b60ba2d
PB
7223 }
7224}
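The propagation rule above boils down to two min() operations per level: cap each requested clamp by the parent's effective clamp, then cap the protection by the limit. The standalone userspace sketch below replays that arithmetic on made-up request values; it models only the algorithm, not the kernel data structures.

/* Standalone demonstration of the effective-clamp capping rules. */
#include <stdio.h>

#define UCLAMP_MIN 0
#define UCLAMP_MAX 1
#define UCLAMP_CNT 2

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Parent's already-computed effective clamps (0..1024 scale). */
	unsigned int parent[UCLAMP_CNT] = { [UCLAMP_MIN] = 512, [UCLAMP_MAX] = 768 };
	/* Child's requested clamps. */
	unsigned int req[UCLAMP_CNT]    = { [UCLAMP_MIN] = 600, [UCLAMP_MAX] = 1024 };
	unsigned int eff[UCLAMP_CNT];
	int id;

	/* Cap each requested clamp by the parent's effective clamp. */
	for (id = 0; id < UCLAMP_CNT; id++)
		eff[id] = min_u(req[id], parent[id]);

	/* Ensure the protection (min) never exceeds the limit (max). */
	eff[UCLAMP_MIN] = min_u(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

	printf("effective min=%u max=%u\n", eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
	/* Prints: effective min=512 max=768 */
	return 0;
}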
2480c093
PB
7225
7226/*
 * Compute the integer 10^N for a given exponent N by casting the literal "1eN"
 * C expression to an integer. Since there is no way to convert a macro
 * argument (N) into a character constant, use two levels of macros.
7230 */
7231#define _POW10(exp) ((unsigned int)1e##exp)
7232#define POW10(exp) _POW10(exp)
7233
7234struct uclamp_request {
7235#define UCLAMP_PERCENT_SHIFT 2
7236#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
7237 s64 percent;
7238 u64 util;
7239 int ret;
7240};
7241
7242static inline struct uclamp_request
7243capacity_from_percent(char *buf)
7244{
7245 struct uclamp_request req = {
7246 .percent = UCLAMP_PERCENT_SCALE,
7247 .util = SCHED_CAPACITY_SCALE,
7248 .ret = 0,
7249 };
7250
7251 buf = strim(buf);
7252 if (strcmp(buf, "max")) {
7253 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
7254 &req.percent);
7255 if (req.ret)
7256 return req;
7257 if (req.percent > UCLAMP_PERCENT_SCALE) {
7258 req.ret = -ERANGE;
7259 return req;
7260 }
7261
7262 req.util = req.percent << SCHED_CAPACITY_SHIFT;
7263 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
7264 }
7265
7266 return req;
7267}
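As a concrete example, writing "50.00" parses to a percent value of 5000 on the POW10(2) fixed-point scale, and the conversion above yields a utilization clamp of 512 out of SCHED_CAPACITY_SCALE (1024). The standalone sketch below redoes that arithmetic with plain integers.

/* Standalone check of the percent -> capacity conversion used above. */
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* SCHED_CAPACITY_SCALE == 1024 */
#define UCLAMP_PERCENT_SCALE	10000	/* 100 * 10^UCLAMP_PERCENT_SHIFT */

/* Round-to-nearest division, mirroring DIV_ROUND_CLOSEST_ULL for positive values. */
static uint64_t div_round_closest(uint64_t x, uint64_t d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	int64_t percent = 5000;	/* "50.00" parsed on a 10^2 fixed-point scale */
	uint64_t util;

	util = (uint64_t)percent << SCHED_CAPACITY_SHIFT;
	util = div_round_closest(util, UCLAMP_PERCENT_SCALE);

	printf("50.00%% -> util clamp %llu / 1024\n", (unsigned long long)util);
	/* Prints: 50.00% -> util clamp 512 / 1024 */
	return 0;
}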
7268
7269static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
7270 size_t nbytes, loff_t off,
7271 enum uclamp_id clamp_id)
7272{
7273 struct uclamp_request req;
7274 struct task_group *tg;
7275
7276 req = capacity_from_percent(buf);
7277 if (req.ret)
7278 return req.ret;
7279
7280 mutex_lock(&uclamp_mutex);
7281 rcu_read_lock();
7282
7283 tg = css_tg(of_css(of));
7284 if (tg->uclamp_req[clamp_id].value != req.util)
7285 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
7286
7287 /*
	 * Because the conversion rounding is not recoverable, keep track of
	 * the exact requested value.
7290 */
7291 tg->uclamp_pct[clamp_id] = req.percent;
7292
0b60ba2d
PB
7293 /* Update effective clamps to track the most restrictive value */
7294 cpu_util_update_eff(of_css(of));
7295
2480c093
PB
7296 rcu_read_unlock();
7297 mutex_unlock(&uclamp_mutex);
7298
7299 return nbytes;
7300}
7301
7302static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
7303 char *buf, size_t nbytes,
7304 loff_t off)
7305{
7306 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
7307}
7308
7309static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
7310 char *buf, size_t nbytes,
7311 loff_t off)
7312{
7313 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
7314}
7315
7316static inline void cpu_uclamp_print(struct seq_file *sf,
7317 enum uclamp_id clamp_id)
7318{
7319 struct task_group *tg;
7320 u64 util_clamp;
7321 u64 percent;
7322 u32 rem;
7323
7324 rcu_read_lock();
7325 tg = css_tg(seq_css(sf));
7326 util_clamp = tg->uclamp_req[clamp_id].value;
7327 rcu_read_unlock();
7328
7329 if (util_clamp == SCHED_CAPACITY_SCALE) {
7330 seq_puts(sf, "max\n");
7331 return;
7332 }
7333
7334 percent = tg->uclamp_pct[clamp_id];
7335 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
7336 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
7337}
7338
7339static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
7340{
7341 cpu_uclamp_print(sf, UCLAMP_MIN);
7342 return 0;
7343}
7344
7345static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
7346{
7347 cpu_uclamp_print(sf, UCLAMP_MAX);
7348 return 0;
7349}
7350#endif /* CONFIG_UCLAMP_TASK_GROUP */
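From userspace these handlers appear as the cpu.uclamp.min and cpu.uclamp.max files of a cgroup-v2 group (with CONFIG_UCLAMP_TASK_GROUP enabled). The sketch below writes a 75.25% minimum clamp; the /sys/fs/cgroup/demo path is an assumption made for the example.

/* Userspace sketch: set cpu.uclamp.min for an assumed cgroup-v2 group. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.uclamp.min"; /* assumed path */
	const char *val = "75.25\n";	/* percent, up to two decimals, or "max" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}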
7351
052f1dc7 7352#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
7353static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
7354 struct cftype *cftype, u64 shareval)
68318b8e 7355{
5b61d50a
KK
7356 if (shareval > scale_load_down(ULONG_MAX))
7357 shareval = MAX_SHARES;
182446d0 7358 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
7359}
7360
182446d0
TH
7361static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
7362 struct cftype *cft)
68318b8e 7363{
182446d0 7364 struct task_group *tg = css_tg(css);
68318b8e 7365
c8b28116 7366 return (u64) scale_load_down(tg->shares);
68318b8e 7367}
ab84d31e
PT
7368
7369#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
7370static DEFINE_MUTEX(cfs_constraints_mutex);
7371
ab84d31e 7372const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
b1546edc 7373static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
ab84d31e 7374
a790de99
PT
7375static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7376
ab84d31e
PT
7377static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7378{
56f570e5 7379 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 7380 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
7381
7382 if (tg == &root_task_group)
7383 return -EINVAL;
7384
7385 /*
	 * Ensure we have some amount of bandwidth every period. This is
7387 * to prevent reaching a state of large arrears when throttled via
7388 * entity_tick() resulting in prolonged exit starvation.
7389 */
7390 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7391 return -EINVAL;
7392
7393 /*
	 * Likewise, bound things on the other side by preventing insane quota
7395 * periods. This also allows us to normalize in computing quota
7396 * feasibility.
7397 */
7398 if (period > max_cfs_quota_period)
7399 return -EINVAL;
7400
0e59bdae
KT
7401 /*
7402 * Prevent race between setting of cfs_rq->runtime_enabled and
7403 * unthrottle_offline_cfs_rqs().
7404 */
7405 get_online_cpus();
a790de99
PT
7406 mutex_lock(&cfs_constraints_mutex);
7407 ret = __cfs_schedulable(tg, period, quota);
7408 if (ret)
7409 goto out_unlock;
7410
58088ad0 7411 runtime_enabled = quota != RUNTIME_INF;
56f570e5 7412 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
7413 /*
7414 * If we need to toggle cfs_bandwidth_used, off->on must occur
7415 * before making related changes, and on->off must occur afterwards
7416 */
7417 if (runtime_enabled && !runtime_was_enabled)
7418 cfs_bandwidth_usage_inc();
ab84d31e
PT
7419 raw_spin_lock_irq(&cfs_b->lock);
7420 cfs_b->period = ns_to_ktime(period);
7421 cfs_b->quota = quota;
58088ad0 7422
a9cf55b2 7423 __refill_cfs_bandwidth_runtime(cfs_b);
d1ccc66d
IM
7424
7425 /* Restart the period timer (if active) to handle new period expiry: */
77a4d1a1
PZ
7426 if (runtime_enabled)
7427 start_cfs_bandwidth(cfs_b);
d1ccc66d 7428
ab84d31e
PT
7429 raw_spin_unlock_irq(&cfs_b->lock);
7430
0e59bdae 7431 for_each_online_cpu(i) {
ab84d31e 7432 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 7433 struct rq *rq = cfs_rq->rq;
8a8c69c3 7434 struct rq_flags rf;
ab84d31e 7435
8a8c69c3 7436 rq_lock_irq(rq, &rf);
58088ad0 7437 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 7438 cfs_rq->runtime_remaining = 0;
671fd9da 7439
029632fb 7440 if (cfs_rq->throttled)
671fd9da 7441 unthrottle_cfs_rq(cfs_rq);
8a8c69c3 7442 rq_unlock_irq(rq, &rf);
ab84d31e 7443 }
1ee14e6c
BS
7444 if (runtime_was_enabled && !runtime_enabled)
7445 cfs_bandwidth_usage_dec();
a790de99
PT
7446out_unlock:
7447 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 7448 put_online_cpus();
ab84d31e 7449
a790de99 7450 return ret;
ab84d31e
PT
7451}
7452
b1546edc 7453static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
ab84d31e
PT
7454{
7455 u64 quota, period;
7456
029632fb 7457 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7458 if (cfs_quota_us < 0)
7459 quota = RUNTIME_INF;
1a8b4540 7460 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
ab84d31e 7461 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
1a8b4540
KK
7462 else
7463 return -EINVAL;
ab84d31e
PT
7464
7465 return tg_set_cfs_bandwidth(tg, period, quota);
7466}
7467
b1546edc 7468static long tg_get_cfs_quota(struct task_group *tg)
ab84d31e
PT
7469{
7470 u64 quota_us;
7471
029632fb 7472 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
7473 return -1;
7474
029632fb 7475 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
7476 do_div(quota_us, NSEC_PER_USEC);
7477
7478 return quota_us;
7479}
7480
b1546edc 7481static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
ab84d31e
PT
7482{
7483 u64 quota, period;
7484
1a8b4540
KK
7485 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
7486 return -EINVAL;
7487
ab84d31e 7488 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 7489 quota = tg->cfs_bandwidth.quota;
ab84d31e 7490
ab84d31e
PT
7491 return tg_set_cfs_bandwidth(tg, period, quota);
7492}
7493
b1546edc 7494static long tg_get_cfs_period(struct task_group *tg)
ab84d31e
PT
7495{
7496 u64 cfs_period_us;
7497
029632fb 7498 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7499 do_div(cfs_period_us, NSEC_PER_USEC);
7500
7501 return cfs_period_us;
7502}
7503
182446d0
TH
7504static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
7505 struct cftype *cft)
ab84d31e 7506{
182446d0 7507 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
7508}
7509
182446d0
TH
7510static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
7511 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 7512{
182446d0 7513 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
7514}
7515
182446d0
TH
7516static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
7517 struct cftype *cft)
ab84d31e 7518{
182446d0 7519 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
7520}
7521
182446d0
TH
7522static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
7523 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 7524{
182446d0 7525 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
7526}
7527
a790de99
PT
7528struct cfs_schedulable_data {
7529 struct task_group *tg;
7530 u64 period, quota;
7531};
7532
7533/*
7534 * normalize group quota/period to be quota/max_period
7535 * note: units are usecs
7536 */
7537static u64 normalize_cfs_quota(struct task_group *tg,
7538 struct cfs_schedulable_data *d)
7539{
7540 u64 quota, period;
7541
7542 if (tg == d->tg) {
7543 period = d->period;
7544 quota = d->quota;
7545 } else {
7546 period = tg_get_cfs_period(tg);
7547 quota = tg_get_cfs_quota(tg);
7548 }
7549
7550 /* note: these should typically be equivalent */
7551 if (quota == RUNTIME_INF || quota == -1)
7552 return RUNTIME_INF;
7553
7554 return to_ratio(period, quota);
7555}
7556
7557static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7558{
7559 struct cfs_schedulable_data *d = data;
029632fb 7560 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
7561 s64 quota = 0, parent_quota = -1;
7562
7563 if (!tg->parent) {
7564 quota = RUNTIME_INF;
7565 } else {
029632fb 7566 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
7567
7568 quota = normalize_cfs_quota(tg, d);
9c58c79a 7569 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
7570
7571 /*
c53593e5
TH
7572 * Ensure max(child_quota) <= parent_quota. On cgroup2,
7573 * always take the min. On cgroup1, only inherit when no
d1ccc66d 7574 * limit is set:
a790de99 7575 */
c53593e5
TH
7576 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
7577 quota = min(quota, parent_quota);
7578 } else {
7579 if (quota == RUNTIME_INF)
7580 quota = parent_quota;
7581 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7582 return -EINVAL;
7583 }
a790de99 7584 }
9c58c79a 7585 cfs_b->hierarchical_quota = quota;
a790de99
PT
7586
7587 return 0;
7588}
7589
7590static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7591{
8277434e 7592 int ret;
a790de99
PT
7593 struct cfs_schedulable_data data = {
7594 .tg = tg,
7595 .period = period,
7596 .quota = quota,
7597 };
7598
7599 if (quota != RUNTIME_INF) {
7600 do_div(data.period, NSEC_PER_USEC);
7601 do_div(data.quota, NSEC_PER_USEC);
7602 }
7603
8277434e
PT
7604 rcu_read_lock();
7605 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7606 rcu_read_unlock();
7607
7608 return ret;
a790de99 7609}
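To make the normalization concrete: a child with 25000us of quota per 100000us period and a parent with 100000us per 250000us normalize to 0.25 and 0.40 of a CPU, so the child already fits under the parent. The standalone sketch below reproduces that arithmetic; the BW_SHIFT fixed point is assumed to mirror the kernel's to_ratio() helper.

/* Standalone sketch of normalize_cfs_quota()-style arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20	/* assumed to mirror kernel/sched/sched.h */
#define BW_UNIT		(1ULL << BW_SHIFT)

/* quota/period in microseconds -> fixed-point fraction of one CPU */
static uint64_t to_ratio(uint64_t period, uint64_t quota)
{
	if (!period)
		return 0;
	return (quota << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t child  = to_ratio(100000, 25000);	/* 0.25 CPU */
	uint64_t parent = to_ratio(250000, 100000);	/* 0.40 CPU */

	printf("child  = %.2f CPUs\n", (double)child / BW_UNIT);
	printf("parent = %.2f CPUs\n", (double)parent / BW_UNIT);
	printf("capped = %.2f CPUs\n",
	       (double)(child < parent ? child : parent) / BW_UNIT);
	return 0;
}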
e8da1b18 7610
a1f7164c 7611static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
e8da1b18 7612{
2da8ca82 7613 struct task_group *tg = css_tg(seq_css(sf));
029632fb 7614 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 7615
44ffc75b
TH
7616 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
7617 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
7618 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18 7619
3d6c50c2
YW
7620 if (schedstat_enabled() && tg != &root_task_group) {
7621 u64 ws = 0;
7622 int i;
7623
7624 for_each_possible_cpu(i)
7625 ws += schedstat_val(tg->se[i]->statistics.wait_sum);
7626
7627 seq_printf(sf, "wait_sum %llu\n", ws);
7628 }
7629
e8da1b18
NR
7630 return 0;
7631}
ab84d31e 7632#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 7633#endif /* CONFIG_FAIR_GROUP_SCHED */
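Through the legacy (cgroup-v1) hierarchy the knobs above surface as cpu.cfs_quota_us, cpu.cfs_period_us and cpu.stat. The userspace sketch below limits a group to half a CPU and dumps its throttling statistics; the /sys/fs/cgroup/cpu/demo path is an assumption made for the example.

/* Userspace sketch: limit an assumed cgroup-v1 cpu group to 0.5 CPU. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* 50000us of runtime every 100000us period == half of one CPU. */
	write_str("/sys/fs/cgroup/cpu/demo/cpu.cfs_period_us", "100000");
	write_str("/sys/fs/cgroup/cpu/demo/cpu.cfs_quota_us", "50000");

	/* nr_periods / nr_throttled / throttled_time, as printed above. */
	fd = open("/sys/fs/cgroup/cpu/demo/cpu.stat", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}
		close(fd);
	}
	return 0;
}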
68318b8e 7634
052f1dc7 7635#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
7636static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
7637 struct cftype *cft, s64 val)
6f505b16 7638{
182446d0 7639 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
7640}
7641
182446d0
TH
7642static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
7643 struct cftype *cft)
6f505b16 7644{
182446d0 7645 return sched_group_rt_runtime(css_tg(css));
6f505b16 7646}
d0b27fa7 7647
182446d0
TH
7648static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
7649 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 7650{
182446d0 7651 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
7652}
7653
182446d0
TH
7654static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
7655 struct cftype *cft)
d0b27fa7 7656{
182446d0 7657 return sched_group_rt_period(css_tg(css));
d0b27fa7 7658}
6d6bc0ad 7659#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 7660
a1f7164c 7661static struct cftype cpu_legacy_files[] = {
052f1dc7 7662#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
7663 {
7664 .name = "shares",
f4c753b7
PM
7665 .read_u64 = cpu_shares_read_u64,
7666 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 7667 },
052f1dc7 7668#endif
ab84d31e
PT
7669#ifdef CONFIG_CFS_BANDWIDTH
7670 {
7671 .name = "cfs_quota_us",
7672 .read_s64 = cpu_cfs_quota_read_s64,
7673 .write_s64 = cpu_cfs_quota_write_s64,
7674 },
7675 {
7676 .name = "cfs_period_us",
7677 .read_u64 = cpu_cfs_period_read_u64,
7678 .write_u64 = cpu_cfs_period_write_u64,
7679 },
e8da1b18
NR
7680 {
7681 .name = "stat",
a1f7164c 7682 .seq_show = cpu_cfs_stat_show,
e8da1b18 7683 },
ab84d31e 7684#endif
052f1dc7 7685#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7686 {
9f0c1e56 7687 .name = "rt_runtime_us",
06ecb27c
PM
7688 .read_s64 = cpu_rt_runtime_read,
7689 .write_s64 = cpu_rt_runtime_write,
6f505b16 7690 },
d0b27fa7
PZ
7691 {
7692 .name = "rt_period_us",
f4c753b7
PM
7693 .read_u64 = cpu_rt_period_read_uint,
7694 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 7695 },
2480c093
PB
7696#endif
7697#ifdef CONFIG_UCLAMP_TASK_GROUP
7698 {
7699 .name = "uclamp.min",
7700 .flags = CFTYPE_NOT_ON_ROOT,
7701 .seq_show = cpu_uclamp_min_show,
7702 .write = cpu_uclamp_min_write,
7703 },
7704 {
7705 .name = "uclamp.max",
7706 .flags = CFTYPE_NOT_ON_ROOT,
7707 .seq_show = cpu_uclamp_max_show,
7708 .write = cpu_uclamp_max_write,
7709 },
052f1dc7 7710#endif
d1ccc66d 7711 { } /* Terminate */
68318b8e
SV
7712};
7713
d41bf8c9
TH
7714static int cpu_extra_stat_show(struct seq_file *sf,
7715 struct cgroup_subsys_state *css)
0d593634 7716{
0d593634
TH
7717#ifdef CONFIG_CFS_BANDWIDTH
7718 {
d41bf8c9 7719 struct task_group *tg = css_tg(css);
0d593634
TH
7720 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7721 u64 throttled_usec;
7722
7723 throttled_usec = cfs_b->throttled_time;
7724 do_div(throttled_usec, NSEC_PER_USEC);
7725
7726 seq_printf(sf, "nr_periods %d\n"
7727 "nr_throttled %d\n"
7728 "throttled_usec %llu\n",
7729 cfs_b->nr_periods, cfs_b->nr_throttled,
7730 throttled_usec);
7731 }
7732#endif
7733 return 0;
7734}
7735
7736#ifdef CONFIG_FAIR_GROUP_SCHED
7737static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
7738 struct cftype *cft)
7739{
7740 struct task_group *tg = css_tg(css);
7741 u64 weight = scale_load_down(tg->shares);
7742
7743 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
7744}
7745
7746static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
7747 struct cftype *cft, u64 weight)
7748{
7749 /*
7750 * cgroup weight knobs should use the common MIN, DFL and MAX
7751 * values which are 1, 100 and 10000 respectively. While it loses
	 * a bit of range on both ends, it maps pretty well onto the shares
	 * value used by the scheduler and the round-trip conversions preserve
7754 * the original value over the entire range.
7755 */
7756 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
7757 return -ERANGE;
7758
7759 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
7760
7761 return sched_group_set_shares(css_tg(css), scale_load(weight));
7762}
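The weight/shares mapping round-trips through DIV_ROUND_CLOSEST: the default weight of 100 maps to 1024 shares and back, weight 50 maps to 512, and so on. The standalone sketch below checks a few values; the constants mirror CGROUP_WEIGHT_DFL and the nice-0 share unit used above.

/* Standalone check of the cpu.weight <-> shares round trip. */
#include <stdio.h>
#include <stdint.h>

#define CGROUP_WEIGHT_DFL	100
#define SHARES_UNIT		1024	/* nice-0 load weight */

static uint64_t div_round_closest(uint64_t x, uint64_t d)
{
	return (x + d / 2) / d;
}

static uint64_t weight_to_shares(uint64_t weight)
{
	return div_round_closest(weight * SHARES_UNIT, CGROUP_WEIGHT_DFL);
}

static uint64_t shares_to_weight(uint64_t shares)
{
	return div_round_closest(shares * CGROUP_WEIGHT_DFL, SHARES_UNIT);
}

int main(void)
{
	uint64_t weights[] = { 1, 50, 100, 10000 };

	for (unsigned int i = 0; i < sizeof(weights) / sizeof(weights[0]); i++) {
		uint64_t s = weight_to_shares(weights[i]);

		printf("weight %5llu -> shares %6llu -> weight %5llu\n",
		       (unsigned long long)weights[i],
		       (unsigned long long)s,
		       (unsigned long long)shares_to_weight(s));
	}
	return 0;
}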
7763
7764static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
7765 struct cftype *cft)
7766{
7767 unsigned long weight = scale_load_down(css_tg(css)->shares);
7768 int last_delta = INT_MAX;
7769 int prio, delta;
7770
7771 /* find the closest nice value to the current weight */
7772 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
7773 delta = abs(sched_prio_to_weight[prio] - weight);
7774 if (delta >= last_delta)
7775 break;
7776 last_delta = delta;
7777 }
7778
7779 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
7780}
7781
7782static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
7783 struct cftype *cft, s64 nice)
7784{
7785 unsigned long weight;
7281c8de 7786 int idx;
0d593634
TH
7787
7788 if (nice < MIN_NICE || nice > MAX_NICE)
7789 return -ERANGE;
7790
7281c8de
PZ
7791 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
7792 idx = array_index_nospec(idx, 40);
7793 weight = sched_prio_to_weight[idx];
7794
0d593634
TH
7795 return sched_group_set_shares(css_tg(css), scale_load(weight));
7796}
7797#endif
7798
7799static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
7800 long period, long quota)
7801{
7802 if (quota < 0)
7803 seq_puts(sf, "max");
7804 else
7805 seq_printf(sf, "%ld", quota);
7806
7807 seq_printf(sf, " %ld\n", period);
7808}
7809
7810/* caller should put the current value in *@periodp before calling */
7811static int __maybe_unused cpu_period_quota_parse(char *buf,
7812 u64 *periodp, u64 *quotap)
7813{
7814 char tok[21]; /* U64_MAX */
7815
4c47acd8 7816 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
0d593634
TH
7817 return -EINVAL;
7818
7819 *periodp *= NSEC_PER_USEC;
7820
7821 if (sscanf(tok, "%llu", quotap))
7822 *quotap *= NSEC_PER_USEC;
7823 else if (!strcmp(tok, "max"))
7824 *quotap = RUNTIME_INF;
7825 else
7826 return -EINVAL;
7827
7828 return 0;
7829}
7830
7831#ifdef CONFIG_CFS_BANDWIDTH
7832static int cpu_max_show(struct seq_file *sf, void *v)
7833{
7834 struct task_group *tg = css_tg(seq_css(sf));
7835
7836 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
7837 return 0;
7838}
7839
7840static ssize_t cpu_max_write(struct kernfs_open_file *of,
7841 char *buf, size_t nbytes, loff_t off)
7842{
7843 struct task_group *tg = css_tg(of_css(of));
7844 u64 period = tg_get_cfs_period(tg);
7845 u64 quota;
7846 int ret;
7847
7848 ret = cpu_period_quota_parse(buf, &period, &quota);
7849 if (!ret)
7850 ret = tg_set_cfs_bandwidth(tg, period, quota);
7851 return ret ?: nbytes;
7852}
7853#endif
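On the unified hierarchy the same bandwidth control is the single cpu.max file in "$MAX $PERIOD" form, parsed by the helpers above. The userspace sketch below caps a group at half a CPU and then lifts the limit; the /sys/fs/cgroup/demo path is an assumption made for the example.

/* Userspace sketch: drive the cgroup-v2 cpu.max interface parsed above. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static void set_cpu_max(const char *val)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.max"; /* assumed path */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
}

int main(void)
{
	set_cpu_max("50000 100000\n");	/* 50ms of quota per 100ms period */
	set_cpu_max("max 100000\n");	/* back to unlimited */
	return 0;
}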
7854
7855static struct cftype cpu_files[] = {
0d593634
TH
7856#ifdef CONFIG_FAIR_GROUP_SCHED
7857 {
7858 .name = "weight",
7859 .flags = CFTYPE_NOT_ON_ROOT,
7860 .read_u64 = cpu_weight_read_u64,
7861 .write_u64 = cpu_weight_write_u64,
7862 },
7863 {
7864 .name = "weight.nice",
7865 .flags = CFTYPE_NOT_ON_ROOT,
7866 .read_s64 = cpu_weight_nice_read_s64,
7867 .write_s64 = cpu_weight_nice_write_s64,
7868 },
7869#endif
7870#ifdef CONFIG_CFS_BANDWIDTH
7871 {
7872 .name = "max",
7873 .flags = CFTYPE_NOT_ON_ROOT,
7874 .seq_show = cpu_max_show,
7875 .write = cpu_max_write,
7876 },
2480c093
PB
7877#endif
7878#ifdef CONFIG_UCLAMP_TASK_GROUP
7879 {
7880 .name = "uclamp.min",
7881 .flags = CFTYPE_NOT_ON_ROOT,
7882 .seq_show = cpu_uclamp_min_show,
7883 .write = cpu_uclamp_min_write,
7884 },
7885 {
7886 .name = "uclamp.max",
7887 .flags = CFTYPE_NOT_ON_ROOT,
7888 .seq_show = cpu_uclamp_max_show,
7889 .write = cpu_uclamp_max_write,
7890 },
0d593634
TH
7891#endif
7892 { } /* terminate */
7893};
7894
073219e9 7895struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748 7896 .css_alloc = cpu_cgroup_css_alloc,
96b77745 7897 .css_online = cpu_cgroup_css_online,
2f5177f0 7898 .css_released = cpu_cgroup_css_released,
92fb9748 7899 .css_free = cpu_cgroup_css_free,
d41bf8c9 7900 .css_extra_stat_show = cpu_extra_stat_show,
eeb61e53 7901 .fork = cpu_cgroup_fork,
bb9d97b6
TH
7902 .can_attach = cpu_cgroup_can_attach,
7903 .attach = cpu_cgroup_attach,
a1f7164c 7904 .legacy_cftypes = cpu_legacy_files,
0d593634 7905 .dfl_cftypes = cpu_files,
b38e42e9 7906 .early_init = true,
0d593634 7907 .threaded = true,
68318b8e
SV
7908};
7909
052f1dc7 7910#endif /* CONFIG_CGROUP_SCHED */
d842de87 7911
b637a328
PM
7912void dump_cpu_task(int cpu)
7913{
7914 pr_info("Task dump for CPU %d:\n", cpu);
7915 sched_show_task(cpu_curr(cpu));
7916}
ed82b8a1
AK
7917
7918/*
7919 * Nice levels are multiplicative, with a gentle 10% change for every
7920 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
7921 * nice 1, it will get ~10% less CPU time than another CPU-bound task
7922 * that remained on nice 0.
7923 *
7924 * The "10% effect" is relative and cumulative: from _any_ nice level,
7925 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
7926 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
7927 * If a task goes up by ~10% and another task goes down by ~10% then
7928 * the relative distance between them is ~25%.)
7929 */
7930const int sched_prio_to_weight[40] = {
7931 /* -20 */ 88761, 71755, 56483, 46273, 36291,
7932 /* -15 */ 29154, 23254, 18705, 14949, 11916,
7933 /* -10 */ 9548, 7620, 6100, 4904, 3906,
7934 /* -5 */ 3121, 2501, 1991, 1586, 1277,
7935 /* 0 */ 1024, 820, 655, 526, 423,
7936 /* 5 */ 335, 272, 215, 172, 137,
7937 /* 10 */ 110, 87, 70, 56, 45,
7938 /* 15 */ 36, 29, 23, 18, 15,
7939};
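As a numeric check of the ~10% rule: two CPU-bound tasks at nice 0 and nice 1 carry weights 1024 and 820, so they receive about 55.5% and 44.5% of the CPU, i.e. roughly the ~25% relative gap described above. The standalone sketch below computes those shares from the table.

/* Standalone check of the ~10% per nice level rule using the table above. */
#include <stdio.h>

static const int prio_to_weight[40] = {
	/* -20 */ 88761, 71755, 56483, 46273, 36291,
	/* -15 */ 29154, 23254, 18705, 14949, 11916,
	/* -10 */  9548,  7620,  6100,  4904,  3906,
	/*  -5 */  3121,  2501,  1991,  1586,  1277,
	/*   0 */  1024,   820,   655,   526,   423,
	/*   5 */   335,   272,   215,   172,   137,
	/*  10 */   110,    87,    70,    56,    45,
	/*  15 */    36,    29,    23,    18,    15,
};

static int nice_to_weight(int nice)
{
	return prio_to_weight[nice + 20];
}

int main(void)
{
	int wa = nice_to_weight(0);	/* 1024 */
	int wb = nice_to_weight(1);	/*  820 */

	printf("nice 0 gets %.1f%%, nice 1 gets %.1f%% of the CPU\n",
	       100.0 * wa / (wa + wb), 100.0 * wb / (wa + wb));
	/* Prints roughly 55.5% vs 44.5%. */
	return 0;
}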
7940
7941/*
7942 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
7943 *
7944 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
7946 * into multiplications:
7947 */
7948const u32 sched_prio_to_wmult[40] = {
7949 /* -20 */ 48388, 59856, 76040, 92818, 118348,
7950 /* -15 */ 147320, 184698, 229616, 287308, 360437,
7951 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
7952 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
7953 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
7954 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
7955 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
7956 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
7957};
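Each entry is simply 2^32 divided by the corresponding weight (4194304 == 2^32/1024, for example), so a division by the weight can be replaced by a multiplication by the inverse followed by a 32-bit shift. The standalone sketch below recomputes a few entries; the table values are rounded, so agreement is within one unit.

/* Standalone check that wmult[i] ~= 2^32 / weight[i]. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t weight[] = { 88761, 1024, 820, 15 };
	const uint32_t wmult[]  = { 48388, 4194304, 5237765, 286331153 };

	for (unsigned int i = 0; i < sizeof(weight) / sizeof(weight[0]); i++) {
		uint64_t inv = (1ULL << 32) / weight[i];

		/* Table values are rounded, so expect agreement within +/-1. */
		printf("weight %6u: 2^32/w = %9llu, table = %9u\n",
		       weight[i], (unsigned long long)inv, wmult[i]);
	}
	return 0;
}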
7958
7959#undef CREATE_TRACE_POINTS