git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git / blame - kernel/sched/core.c
sched: Fix migration_cpu_stop() requeueing
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
391e43da 3 * kernel/sched/core.c
1da177e4 4 *
d1ccc66d 5 * Core kernel scheduler code and related syscalls
1da177e4
LT
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
1da177e4 8 */
9d246053
PA
9#define CREATE_TRACE_POINTS
10#include <trace/events/sched.h>
11#undef CREATE_TRACE_POINTS
12
325ea10c 13#include "sched.h"
1da177e4 14
7281c8de 15#include <linux/nospec.h>
85f1abe0 16
0ed557aa 17#include <linux/kcov.h>
d08b9f0c 18#include <linux/scs.h>
0ed557aa 19
96f951ed 20#include <asm/switch_to.h>
5517d86b 21#include <asm/tlb.h>
1da177e4 22
ea138446 23#include "../workqueue_internal.h"
771b53d0 24#include "../../fs/io-wq.h"
29d5e047 25#include "../smpboot.h"
6e0534f2 26
91c27493 27#include "pelt.h"
1f8db415 28#include "smp.h"
91c27493 29
a056a5be
QY
30/*
31 * Export tracepoints that act as a bare tracehook (ie: have no trace event
32 * associated with them) to allow external modules to probe them.
33 */
34EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
35EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
36EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
37EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
38EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
51cf18c9 39EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
a056a5be 40EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
4581bea8
VD
41EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
42EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
9d246053 43EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
a056a5be 44
029632fb 45DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
dc61b1d6 46
a73f863a 47#ifdef CONFIG_SCHED_DEBUG
bf5c91ba
IM
48/*
49 * Debugging: various feature bits
765cc3a4
PB
50 *
51 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
52 * sysctl_sched_features, defined in sched.h, to allow constants propagation
53 * at compile time and compiler optimization based on features default.
bf5c91ba 54 */
f00b45c1
PZ
55#define SCHED_FEAT(name, enabled) \
56 (1UL << __SCHED_FEAT_##name) * enabled |
bf5c91ba 57const_debug unsigned int sysctl_sched_features =
391e43da 58#include "features.h"
f00b45c1 59 0;
f00b45c1 60#undef SCHED_FEAT
765cc3a4 61#endif
f00b45c1 62
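/*
 * Illustrative expansion (a sketch, not present in the source): if
 * features.h contains, for example,
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(START_DEBIT, true)
 *
 * then the #include above expands to
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		(1UL << __SCHED_FEAT_START_DEBIT) * true |
 *		0;
 *
 * i.e. every enabled feature contributes its bit, a disabled feature
 * contributes 0, and the trailing 0 terminates the OR expression.
 */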
b82d9fdd
PZ
63/*
64 * Number of tasks to iterate in a single balance run.
65 * Limited because this is done with IRQs disabled.
66 */
67const_debug unsigned int sysctl_sched_nr_migrate = 32;
68
fa85ae24 69/*
d1ccc66d 70 * period over which we measure -rt task CPU usage in us.
fa85ae24
PZ
71 * default: 1s
72 */
9f0c1e56 73unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 74
029632fb 75__read_mostly int scheduler_running;
6892b75e 76
9f0c1e56
PZ
77/*
78 * part of the period that we allow rt tasks to run in us.
79 * default: 0.95s
80 */
81int sysctl_sched_rt_runtime = 950000;
fa85ae24 82
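/*
 * Worked example (sketch, assuming the defaults above): RT tasks may
 * consume at most 950000us out of every 1000000us period, i.e. 95% of
 * CPU time, leaving ~5% for non-RT tasks. Writing -1 to
 * /proc/sys/kernel/sched_rt_runtime_us disables this throttling.
 */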
58877d34
PZ
83
84/*
85 * Serialization rules:
86 *
87 * Lock order:
88 *
89 * p->pi_lock
90 * rq->lock
91 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
92 *
93 * rq1->lock
94 * rq2->lock where: rq1 < rq2
95 *
96 * Regular state:
97 *
98 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
99 * local CPU's rq->lock, it optionally removes the task from the runqueue and
b19a888c 100 * always looks at the local rq data structures to find the most eligible task
58877d34
PZ
101 * to run next.
102 *
103 * Task enqueue is also under rq->lock, possibly taken from another CPU.
104 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
105 * the local CPU to avoid bouncing the runqueue state around [ see
106 * ttwu_queue_wakelist() ]
107 *
108 * Task wakeup, specifically wakeups that involve migration, are horribly
109 * complicated to avoid having to take two rq->locks.
110 *
111 * Special state:
112 *
113 * System-calls and anything external will use task_rq_lock() which acquires
114 * both p->pi_lock and rq->lock. As a consequence the state they change is
115 * stable while holding either lock:
116 *
117 * - sched_setaffinity()/
118 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
119 * - set_user_nice(): p->se.load, p->*prio
120 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
121 * p->se.load, p->rt_priority,
122 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
123 * - sched_setnuma(): p->numa_preferred_nid
124 * - sched_move_task()/
125 * cpu_cgroup_fork(): p->sched_task_group
126 * - uclamp_update_active() p->uclamp*
127 *
128 * p->state <- TASK_*:
129 *
130 * is changed locklessly using set_current_state(), __set_current_state() or
131 * set_special_state(), see their respective comments, or by
132 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
133 * concurrent self.
134 *
135 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
136 *
137 * is set by activate_task() and cleared by deactivate_task(), under
138 * rq->lock. Non-zero indicates the task is runnable, the special
139 * ON_RQ_MIGRATING state is used for migration without holding both
140 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
141 *
142 * p->on_cpu <- { 0, 1 }:
143 *
144 * is set by prepare_task() and cleared by finish_task() such that it will be
145 * set before p is scheduled-in and cleared after p is scheduled-out, both
146 * under rq->lock. Non-zero indicates the task is running on its CPU.
147 *
148 * [ The astute reader will observe that it is possible for two tasks on one
149 * CPU to have ->on_cpu = 1 at the same time. ]
150 *
151 * task_cpu(p): is changed by set_task_cpu(), the rules are:
152 *
153 * - Don't call set_task_cpu() on a blocked task:
154 *
155 * We don't care what CPU we're not running on, this simplifies hotplug,
156 * the CPU assignment of blocked tasks isn't required to be valid.
157 *
158 * - for try_to_wake_up(), called under p->pi_lock:
159 *
160 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
161 *
162 * - for migration called under rq->lock:
163 * [ see task_on_rq_migrating() in task_rq_lock() ]
164 *
165 * o move_queued_task()
166 * o detach_task()
167 *
168 * - for migration called under double_rq_lock():
169 *
170 * o __migrate_swap_task()
171 * o push_rt_task() / pull_rt_task()
172 * o push_dl_task() / pull_dl_task()
173 * o dl_task_offline_migration()
174 *
175 */
176
3e71a462
PZ
177/*
178 * __task_rq_lock - lock the rq @p resides on.
179 */
eb580751 180struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
181 __acquires(rq->lock)
182{
183 struct rq *rq;
184
185 lockdep_assert_held(&p->pi_lock);
186
187 for (;;) {
188 rq = task_rq(p);
189 raw_spin_lock(&rq->lock);
190 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 191 rq_pin_lock(rq, rf);
3e71a462
PZ
192 return rq;
193 }
194 raw_spin_unlock(&rq->lock);
195
196 while (unlikely(task_on_rq_migrating(p)))
197 cpu_relax();
198 }
199}
200
201/*
202 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
203 */
eb580751 204struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
205 __acquires(p->pi_lock)
206 __acquires(rq->lock)
207{
208 struct rq *rq;
209
210 for (;;) {
eb580751 211 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
3e71a462
PZ
212 rq = task_rq(p);
213 raw_spin_lock(&rq->lock);
214 /*
215 * move_queued_task() task_rq_lock()
216 *
217 * ACQUIRE (rq->lock)
218 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
219 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
220 * [S] ->cpu = new_cpu [L] task_rq()
221 * [L] ->on_rq
222 * RELEASE (rq->lock)
223 *
c546951d 224 * If we observe the old CPU in task_rq_lock(), the acquire of
3e71a462
PZ
225 * the old rq->lock will fully serialize against the stores.
226 *
c546951d
AP
227 * If we observe the new CPU in task_rq_lock(), the address
228 * dependency headed by '[L] rq = task_rq()' and the acquire
229 * will pair with the WMB to ensure we then also see migrating.
3e71a462
PZ
230 */
231 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 232 rq_pin_lock(rq, rf);
3e71a462
PZ
233 return rq;
234 }
235 raw_spin_unlock(&rq->lock);
eb580751 236 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
3e71a462
PZ
237
238 while (unlikely(task_on_rq_migrating(p)))
239 cpu_relax();
240 }
241}
242
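/*
 * Illustrative usage sketch (task_rq_lock_usage_sketch() is a
 * hypothetical helper, not part of the scheduler): the canonical
 * pattern for touching the "special state" listed above. Both
 * p->pi_lock and rq->lock are held across the update, so the fields
 * stay stable for concurrent readers holding either lock.
 */
static inline void task_rq_lock_usage_sketch(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* acquires p->pi_lock + rq->lock */
	update_rq_clock(rq);
	/* ... inspect or modify p's scheduling state here ... */
	task_rq_unlock(rq, p, &rf);
}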
535b9552
IM
243/*
244 * RQ-clock updating methods:
245 */
246
247static void update_rq_clock_task(struct rq *rq, s64 delta)
248{
249/*
250 * In theory, the compiler should just see 0 here, and optimize out the call
251 * to sched_rt_avg_update. But I don't trust it...
252 */
11d4afd4
VG
253 s64 __maybe_unused steal = 0, irq_delta = 0;
254
535b9552
IM
255#ifdef CONFIG_IRQ_TIME_ACCOUNTING
256 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
257
258 /*
259 * Since irq_time is only updated on {soft,}irq_exit, we might run into
260 * this case when a previous update_rq_clock() happened inside a
261 * {soft,}irq region.
262 *
263 * When this happens, we stop ->clock_task and only update the
264 * prev_irq_time stamp to account for the part that fit, so that a next
265 * update will consume the rest. This ensures ->clock_task is
266 * monotonic.
267 *
268 * It does however cause some slight misattribution of {soft,}irq
269 * time, a more accurate solution would be to update the irq_time using
270 * the current rq->clock timestamp, except that would require using
271 * atomic ops.
272 */
273 if (irq_delta > delta)
274 irq_delta = delta;
275
276 rq->prev_irq_time += irq_delta;
277 delta -= irq_delta;
278#endif
279#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
280 if (static_key_false((&paravirt_steal_rq_enabled))) {
281 steal = paravirt_steal_clock(cpu_of(rq));
282 steal -= rq->prev_steal_time_rq;
283
284 if (unlikely(steal > delta))
285 steal = delta;
286
287 rq->prev_steal_time_rq += steal;
288 delta -= steal;
289 }
290#endif
291
292 rq->clock_task += delta;
293
11d4afd4 294#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
535b9552 295 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
91c27493 296 update_irq_load_avg(rq, irq_delta + steal);
535b9552 297#endif
23127296 298 update_rq_clock_pelt(rq, delta);
535b9552
IM
299}
300
301void update_rq_clock(struct rq *rq)
302{
303 s64 delta;
304
305 lockdep_assert_held(&rq->lock);
306
307 if (rq->clock_update_flags & RQCF_ACT_SKIP)
308 return;
309
310#ifdef CONFIG_SCHED_DEBUG
26ae58d2
PZ
311 if (sched_feat(WARN_DOUBLE_CLOCK))
312 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
535b9552
IM
313 rq->clock_update_flags |= RQCF_UPDATED;
314#endif
26ae58d2 315
535b9552
IM
316 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
317 if (delta < 0)
318 return;
319 rq->clock += delta;
320 update_rq_clock_task(rq, delta);
321}
322
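/*
 * Worked example (sketch): if 3,000,000ns of wall time passed since the
 * last update (delta), of which 1,000,000ns were spent in IRQ context
 * (irq_delta) and 500,000ns were stolen by the hypervisor (steal), then
 * rq->clock advances by the full 3ms while rq->clock_task only advances
 * by 1.5ms - task runtime accounting is not charged for IRQ or stolen
 * time.
 */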
8f4d37ec
PZ
323#ifdef CONFIG_SCHED_HRTICK
324/*
325 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 326 */
8f4d37ec 327
8f4d37ec
PZ
328static void hrtick_clear(struct rq *rq)
329{
330 if (hrtimer_active(&rq->hrtick_timer))
331 hrtimer_cancel(&rq->hrtick_timer);
332}
333
8f4d37ec
PZ
334/*
335 * High-resolution timer tick.
336 * Runs from hardirq context with interrupts disabled.
337 */
338static enum hrtimer_restart hrtick(struct hrtimer *timer)
339{
340 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
8a8c69c3 341 struct rq_flags rf;
8f4d37ec
PZ
342
343 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
344
8a8c69c3 345 rq_lock(rq, &rf);
3e51f33f 346 update_rq_clock(rq);
8f4d37ec 347 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
8a8c69c3 348 rq_unlock(rq, &rf);
8f4d37ec
PZ
349
350 return HRTIMER_NORESTART;
351}
352
95e904c7 353#ifdef CONFIG_SMP
971ee28c 354
4961b6e1 355static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
356{
357 struct hrtimer *timer = &rq->hrtick_timer;
156ec6f4 358 ktime_t time = rq->hrtick_time;
971ee28c 359
156ec6f4 360 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
971ee28c
PZ
361}
362
31656519
PZ
363/*
364 * called from hardirq (IPI) context
365 */
366static void __hrtick_start(void *arg)
b328ca18 367{
31656519 368 struct rq *rq = arg;
8a8c69c3 369 struct rq_flags rf;
b328ca18 370
8a8c69c3 371 rq_lock(rq, &rf);
971ee28c 372 __hrtick_restart(rq);
8a8c69c3 373 rq_unlock(rq, &rf);
b328ca18
PZ
374}
375
31656519
PZ
376/*
377 * Called to set the hrtick timer state.
378 *
379 * called with rq->lock held and irqs disabled
380 */
029632fb 381void hrtick_start(struct rq *rq, u64 delay)
b328ca18 382{
31656519 383 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 384 s64 delta;
385
386 /*
387 * Don't schedule slices shorter than 10000ns, that just
388 * doesn't make sense and can cause timer DoS.
389 */
390 delta = max_t(s64, delay, 10000LL);
156ec6f4 391 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
31656519 392
fd3eafda 393 if (rq == this_rq())
971ee28c 394 __hrtick_restart(rq);
fd3eafda 395 else
c46fff2a 396 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
b328ca18
PZ
397}
398
31656519
PZ
399#else
400/*
401 * Called to set the hrtick timer state.
402 *
403 * called with rq->lock held and irqs disabled
404 */
029632fb 405void hrtick_start(struct rq *rq, u64 delay)
31656519 406{
86893335
WL
407 /*
408 * Don't schedule slices shorter than 10000ns, that just
409 * doesn't make sense. Rely on vruntime for fairness.
410 */
411 delay = max_t(u64, delay, 10000LL);
4961b6e1 412 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
d5096aa6 413 HRTIMER_MODE_REL_PINNED_HARD);
31656519 414}
90b5363a 415
31656519 416#endif /* CONFIG_SMP */
8f4d37ec 417
77a021be 418static void hrtick_rq_init(struct rq *rq)
8f4d37ec 419{
31656519 420#ifdef CONFIG_SMP
545b8c8d 421 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
31656519 422#endif
d5096aa6 423 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
31656519 424 rq->hrtick_timer.function = hrtick;
8f4d37ec 425}
006c75f1 426#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
427static inline void hrtick_clear(struct rq *rq)
428{
429}
430
77a021be 431static inline void hrtick_rq_init(struct rq *rq)
8f4d37ec
PZ
432{
433}
006c75f1 434#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 435
5529578a
FW
436/*
437 * cmpxchg based fetch_or, macro so it works for different integer types
438 */
439#define fetch_or(ptr, mask) \
440 ({ \
441 typeof(ptr) _ptr = (ptr); \
442 typeof(mask) _mask = (mask); \
443 typeof(*_ptr) _old, _val = *_ptr; \
444 \
445 for (;;) { \
446 _old = cmpxchg(_ptr, _val, _val | _mask); \
447 if (_old == _val) \
448 break; \
449 _val = _old; \
450 } \
451 _old; \
452})
453
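/*
 * Semantics sketch (not part of the source): fetch_or() behaves like an
 * atomic "*ptr |= mask" that returns the previous value, e.g.
 *
 *	unsigned long flags = 0x1;
 *	unsigned long old = fetch_or(&flags, 0x4);
 *	// now old == 0x1 and flags == 0x5
 *
 * set_nr_and_not_polling() below uses the returned old value to decide
 * whether the remote CPU was polling, in which case the IPI can be
 * skipped.
 */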
e3baac47 454#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
fd99f91a
PZ
455/*
456 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
457 * this avoids any races wrt polling state changes and thereby avoids
458 * spurious IPIs.
459 */
460static bool set_nr_and_not_polling(struct task_struct *p)
461{
462 struct thread_info *ti = task_thread_info(p);
463 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
464}
e3baac47
PZ
465
466/*
467 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
468 *
469 * If this returns true, then the idle task promises to call
470 * sched_ttwu_pending() and reschedule soon.
471 */
472static bool set_nr_if_polling(struct task_struct *p)
473{
474 struct thread_info *ti = task_thread_info(p);
316c1608 475 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
e3baac47
PZ
476
477 for (;;) {
478 if (!(val & _TIF_POLLING_NRFLAG))
479 return false;
480 if (val & _TIF_NEED_RESCHED)
481 return true;
482 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
483 if (old == val)
484 break;
485 val = old;
486 }
487 return true;
488}
489
fd99f91a
PZ
490#else
491static bool set_nr_and_not_polling(struct task_struct *p)
492{
493 set_tsk_need_resched(p);
494 return true;
495}
e3baac47
PZ
496
497#ifdef CONFIG_SMP
498static bool set_nr_if_polling(struct task_struct *p)
499{
500 return false;
501}
502#endif
fd99f91a
PZ
503#endif
504
07879c6a 505static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
76751049
PZ
506{
507 struct wake_q_node *node = &task->wake_q;
508
509 /*
510 * Atomically grab the task; if ->wake_q is already !nil it means
b19a888c 511 * it's already queued (either by us or someone else) and will get the
76751049
PZ
512 * wakeup due to that.
513 *
4c4e3731
PZ
514 * In order to ensure that a pending wakeup will observe our pending
515 * state, even in the failed case, an explicit smp_mb() must be used.
76751049 516 */
4c4e3731 517 smp_mb__before_atomic();
87ff19cb 518 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
07879c6a 519 return false;
76751049
PZ
520
521 /*
522 * The head is context local, there can be no concurrency.
523 */
524 *head->lastp = node;
525 head->lastp = &node->next;
07879c6a
DB
526 return true;
527}
528
529/**
530 * wake_q_add() - queue a wakeup for 'later' waking.
531 * @head: the wake_q_head to add @task to
532 * @task: the task to queue for 'later' wakeup
533 *
534 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
535 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
536 * instantly.
537 *
538 * This function must be used as-if it were wake_up_process(); IOW the task
539 * must be ready to be woken at this location.
540 */
541void wake_q_add(struct wake_q_head *head, struct task_struct *task)
542{
543 if (__wake_q_add(head, task))
544 get_task_struct(task);
545}
546
547/**
548 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
549 * @head: the wake_q_head to add @task to
550 * @task: the task to queue for 'later' wakeup
551 *
552 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
553 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
554 * instantly.
555 *
556 * This function must be used as-if it were wake_up_process(); IOW the task
557 * must be ready to be woken at this location.
558 *
559 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
560 * that already hold reference to @task can call the 'safe' version and trust
561 * wake_q to do the right thing depending whether or not the @task is already
562 * queued for wakeup.
563 */
564void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
565{
566 if (!__wake_q_add(head, task))
567 put_task_struct(task);
76751049
PZ
568}
569
570void wake_up_q(struct wake_q_head *head)
571{
572 struct wake_q_node *node = head->first;
573
574 while (node != WAKE_Q_TAIL) {
575 struct task_struct *task;
576
577 task = container_of(node, struct task_struct, wake_q);
578 BUG_ON(!task);
d1ccc66d 579 /* Task can safely be re-inserted now: */
76751049
PZ
580 node = node->next;
581 task->wake_q.next = NULL;
582
583 /*
7696f991
AP
584 * wake_up_process() executes a full barrier, which pairs with
585 * the queueing in wake_q_add() so as not to miss wakeups.
76751049
PZ
586 */
587 wake_up_process(task);
588 put_task_struct(task);
589 }
590}
591
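/*
 * Illustrative usage sketch (wake_q_usage_sketch(), its lock and waiter
 * are hypothetical, not part of the scheduler): wakeups are queued
 * while a spinlock is held and only issued after it is dropped, so
 * wake_up_process() never runs under that lock.
 */
static inline void wake_q_usage_sketch(raw_spinlock_t *lock,
				       struct task_struct *waiter)
{
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock(lock);
	wake_q_add(&wake_q, waiter);	/* grabs a reference on @waiter */
	raw_spin_unlock(lock);

	wake_up_q(&wake_q);		/* actual wakeup, drops the reference */
}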
c24d20db 592/*
8875125e 593 * resched_curr - mark rq's current task 'to be rescheduled now'.
c24d20db
IM
594 *
595 * On UP this means the setting of the need_resched flag, on SMP it
596 * might also involve a cross-CPU call to trigger the scheduler on
597 * the target CPU.
598 */
8875125e 599void resched_curr(struct rq *rq)
c24d20db 600{
8875125e 601 struct task_struct *curr = rq->curr;
c24d20db
IM
602 int cpu;
603
8875125e 604 lockdep_assert_held(&rq->lock);
c24d20db 605
8875125e 606 if (test_tsk_need_resched(curr))
c24d20db
IM
607 return;
608
8875125e 609 cpu = cpu_of(rq);
fd99f91a 610
f27dde8d 611 if (cpu == smp_processor_id()) {
8875125e 612 set_tsk_need_resched(curr);
f27dde8d 613 set_preempt_need_resched();
c24d20db 614 return;
f27dde8d 615 }
c24d20db 616
8875125e 617 if (set_nr_and_not_polling(curr))
c24d20db 618 smp_send_reschedule(cpu);
dfc68f29
AL
619 else
620 trace_sched_wake_idle_without_ipi(cpu);
c24d20db
IM
621}
622
029632fb 623void resched_cpu(int cpu)
c24d20db
IM
624{
625 struct rq *rq = cpu_rq(cpu);
626 unsigned long flags;
627
7c2102e5 628 raw_spin_lock_irqsave(&rq->lock, flags);
a0982dfa
PM
629 if (cpu_online(cpu) || cpu == smp_processor_id())
630 resched_curr(rq);
05fa785c 631 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 632}
06d8308c 633
b021fe3e 634#ifdef CONFIG_SMP
3451d024 635#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2 636/*
d1ccc66d
IM
637 * In the semi idle case, use the nearest busy CPU for migrating timers
638 * from an idle CPU. This is good for power-savings.
83cd4fe2
VP
639 *
640 * We don't do similar optimization for completely idle system, as
d1ccc66d
IM
641 * selecting an idle CPU will add more delays to the timers than intended
642 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
83cd4fe2 643 */
bc7a34b8 644int get_nohz_timer_target(void)
83cd4fe2 645{
e938b9c9 646 int i, cpu = smp_processor_id(), default_cpu = -1;
83cd4fe2
VP
647 struct sched_domain *sd;
648
e938b9c9
WL
649 if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
650 if (!idle_cpu(cpu))
651 return cpu;
652 default_cpu = cpu;
653 }
6201b4d6 654
057f3fad 655 rcu_read_lock();
83cd4fe2 656 for_each_domain(cpu, sd) {
e938b9c9
WL
657 for_each_cpu_and(i, sched_domain_span(sd),
658 housekeeping_cpumask(HK_FLAG_TIMER)) {
44496922
WL
659 if (cpu == i)
660 continue;
661
e938b9c9 662 if (!idle_cpu(i)) {
057f3fad
PZ
663 cpu = i;
664 goto unlock;
665 }
666 }
83cd4fe2 667 }
9642d18e 668
e938b9c9
WL
669 if (default_cpu == -1)
670 default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
671 cpu = default_cpu;
057f3fad
PZ
672unlock:
673 rcu_read_unlock();
83cd4fe2
VP
674 return cpu;
675}
d1ccc66d 676
06d8308c
TG
677/*
678 * When add_timer_on() enqueues a timer into the timer wheel of an
679 * idle CPU then this timer might expire before the next timer event
680 * which is scheduled to wake up that CPU. In case of a completely
681 * idle system the next event might even be infinite time into the
682 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
683 * leaves the inner idle loop so the newly added timer is taken into
684 * account when the CPU goes back to idle and evaluates the timer
685 * wheel for the next timer event.
686 */
1c20091e 687static void wake_up_idle_cpu(int cpu)
06d8308c
TG
688{
689 struct rq *rq = cpu_rq(cpu);
690
691 if (cpu == smp_processor_id())
692 return;
693
67b9ca70 694 if (set_nr_and_not_polling(rq->idle))
06d8308c 695 smp_send_reschedule(cpu);
dfc68f29
AL
696 else
697 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
698}
699
c5bfece2 700static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 701{
53c5fa16
FW
702 /*
703 * We just need the target to call irq_exit() and re-evaluate
704 * the next tick. The nohz full kick at least implies that.
705 * If needed we can still optimize that later with an
706 * empty IRQ.
707 */
379d9ecb
PM
708 if (cpu_is_offline(cpu))
709 return true; /* Don't try to wake offline CPUs. */
c5bfece2 710 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
711 if (cpu != smp_processor_id() ||
712 tick_nohz_tick_stopped())
53c5fa16 713 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
714 return true;
715 }
716
717 return false;
718}
719
379d9ecb
PM
720/*
721 * Wake up the specified CPU. If the CPU is going offline, it is the
722 * caller's responsibility to deal with the lost wakeup, for example,
723 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
724 */
1c20091e
FW
725void wake_up_nohz_cpu(int cpu)
726{
c5bfece2 727 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
728 wake_up_idle_cpu(cpu);
729}
730
19a1f5ec 731static void nohz_csd_func(void *info)
45bf76df 732{
19a1f5ec
PZ
733 struct rq *rq = info;
734 int cpu = cpu_of(rq);
735 unsigned int flags;
873b4c65
VG
736
737 /*
19a1f5ec 738 * Release the rq::nohz_csd.
873b4c65 739 */
19a1f5ec
PZ
740 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
741 WARN_ON(!(flags & NOHZ_KICK_MASK));
45bf76df 742
19a1f5ec
PZ
743 rq->idle_balance = idle_cpu(cpu);
744 if (rq->idle_balance && !need_resched()) {
745 rq->nohz_idle_balance = flags;
90b5363a
PZI
746 raise_softirq_irqoff(SCHED_SOFTIRQ);
747 }
2069dd75
PZ
748}
749
3451d024 750#endif /* CONFIG_NO_HZ_COMMON */
d842de87 751
ce831b38 752#ifdef CONFIG_NO_HZ_FULL
76d92ac3 753bool sched_can_stop_tick(struct rq *rq)
ce831b38 754{
76d92ac3
FW
755 int fifo_nr_running;
756
757 /* Deadline tasks, even if single, need the tick */
758 if (rq->dl.dl_nr_running)
759 return false;
760
1e78cdbd 761 /*
b19a888c 762 * If there is more than one RR task, we need the tick to affect the
2548d546 763 * actual RR behaviour.
1e78cdbd 764 */
76d92ac3
FW
765 if (rq->rt.rr_nr_running) {
766 if (rq->rt.rr_nr_running == 1)
767 return true;
768 else
769 return false;
1e78cdbd
RR
770 }
771
2548d546
PZ
772 /*
773 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
774 * forced preemption between FIFO tasks.
775 */
776 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
777 if (fifo_nr_running)
778 return true;
779
780 /*
781 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
782 * if there's more than one we need the tick for involuntary
783 * preemption.
784 */
785 if (rq->nr_running > 1)
541b8264 786 return false;
ce831b38 787
541b8264 788 return true;
ce831b38
FW
789}
790#endif /* CONFIG_NO_HZ_FULL */
6d6bc0ad 791#endif /* CONFIG_SMP */
18d95a28 792
a790de99
PT
793#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
794 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 795/*
8277434e
PT
796 * Iterate task_group tree rooted at *from, calling @down when first entering a
797 * node and @up when leaving it for the final time.
798 *
799 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 800 */
029632fb 801int walk_tg_tree_from(struct task_group *from,
8277434e 802 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
803{
804 struct task_group *parent, *child;
eb755805 805 int ret;
c09595f6 806
8277434e
PT
807 parent = from;
808
c09595f6 809down:
eb755805
PZ
810 ret = (*down)(parent, data);
811 if (ret)
8277434e 812 goto out;
c09595f6
PZ
813 list_for_each_entry_rcu(child, &parent->children, siblings) {
814 parent = child;
815 goto down;
816
817up:
818 continue;
819 }
eb755805 820 ret = (*up)(parent, data);
8277434e
PT
821 if (ret || parent == from)
822 goto out;
c09595f6
PZ
823
824 child = parent;
825 parent = parent->parent;
826 if (parent)
827 goto up;
8277434e 828out:
eb755805 829 return ret;
c09595f6
PZ
830}
831
029632fb 832int tg_nop(struct task_group *tg, void *data)
eb755805 833{
e2b245f8 834 return 0;
eb755805 835}
18d95a28
PZ
836#endif
837
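/*
 * Illustrative usage sketch (count_groups() is a hypothetical callback,
 * not part of the source): walk_tg_tree_from() visits every group in a
 * subtree, calling @down on the way down and @up on the way back up;
 * tg_nop() is the no-op visitor used when only one direction matters:
 *
 *	static int count_groups(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;		// non-zero aborts the walk
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, count_groups, tg_nop, &nr);
 *	rcu_read_unlock();
 */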
9059393e 838static void set_load_weight(struct task_struct *p, bool update_load)
45bf76df 839{
f05998d4
NR
840 int prio = p->static_prio - MAX_RT_PRIO;
841 struct load_weight *load = &p->se.load;
842
dd41f596
IM
843 /*
844 * SCHED_IDLE tasks get minimal weight:
845 */
1da1843f 846 if (task_has_idle_policy(p)) {
c8b28116 847 load->weight = scale_load(WEIGHT_IDLEPRIO);
f05998d4 848 load->inv_weight = WMULT_IDLEPRIO;
dd41f596
IM
849 return;
850 }
71f8bd46 851
9059393e
VG
852 /*
853 * SCHED_OTHER tasks have to update their load when changing their
854 * weight
855 */
856 if (update_load && p->sched_class == &fair_sched_class) {
857 reweight_task(p, prio);
858 } else {
859 load->weight = scale_load(sched_prio_to_weight[prio]);
860 load->inv_weight = sched_prio_to_wmult[prio];
861 }
71f8bd46
IM
862}
863
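/*
 * Worked example (sketch, assuming the default weight tables): for a
 * nice-0 task, prio = static_prio - MAX_RT_PRIO = 120 - 100 = 20 and
 * sched_prio_to_weight[20] == 1024 (NICE_0_LOAD); a nice -5 task maps
 * to index 15 with weight 3121, so it gets roughly 3x the CPU share of
 * a nice-0 task when both are runnable on the same CPU.
 */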
69842cba 864#ifdef CONFIG_UCLAMP_TASK
2480c093
PB
865/*
866 * Serializes updates of utilization clamp values
867 *
868 * The (slow-path) user-space triggers utilization clamp value updates which
869 * can require updates on (fast-path) scheduler's data structures used to
870 * support enqueue/dequeue operations.
871 * While the per-CPU rq lock protects fast-path update operations, user-space
872 * requests are serialized using a mutex to reduce the risk of conflicting
873 * updates or API abuses.
874 */
875static DEFINE_MUTEX(uclamp_mutex);
876
e8f14172
PB
877/* Max allowed minimum utilization */
878unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
879
880/* Max allowed maximum utilization */
881unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
882
13685c4a
QY
883/*
884 * By default RT tasks run at the maximum performance point/capacity of the
885 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
886 * SCHED_CAPACITY_SCALE.
887 *
888 * This knob allows admins to change the default behavior when uclamp is being
889 * used. In battery powered devices, particularly, running at the maximum
890 * capacity and frequency will increase energy consumption and shorten the
891 * battery life.
892 *
893 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
894 *
895 * This knob will not override the system default sched_util_clamp_min defined
896 * above.
897 */
898unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
899
e8f14172
PB
900/* All clamps are required to be less or equal than these values */
901static struct uclamp_se uclamp_default[UCLAMP_CNT];
69842cba 902
46609ce2
QY
903/*
904 * This static key is used to reduce the uclamp overhead in the fast path. It
905 * primarily disables the call to uclamp_rq_{inc, dec}() in
906 * enqueue/dequeue_task().
907 *
908 * This allows users to continue to enable uclamp in their kernel config with
909 * minimum uclamp overhead in the fast path.
910 *
911 * As soon as userspace modifies any of the uclamp knobs, the static key is
912 * enabled, since we have actual users that make use of uclamp
913 * functionality.
914 *
915 * The knobs that would enable this static key are:
916 *
917 * * A task modifying its uclamp value with sched_setattr().
918 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
919 * * An admin modifying the cgroup cpu.uclamp.{min, max}
920 */
921DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
922
69842cba
PB
923/* Integer rounded range for each bucket */
924#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
925
926#define for_each_clamp_id(clamp_id) \
927 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
928
929static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
930{
931 return clamp_value / UCLAMP_BUCKET_DELTA;
932}
933
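/*
 * Worked example (sketch, assuming SCHED_CAPACITY_SCALE == 1024 and the
 * default of 5 clamp buckets): UCLAMP_BUCKET_DELTA ==
 * DIV_ROUND_CLOSEST(1024, 5) == 205, so
 *
 *	uclamp_bucket_id(0)    == 0
 *	uclamp_bucket_id(204)  == 0
 *	uclamp_bucket_id(205)  == 1
 *	uclamp_bucket_id(1024) == 4
 *
 * i.e. clamp values are grouped into a small fixed number of buckets,
 * trading clamp granularity for a cheap per-rq refcounting scheme.
 */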
7763baac 934static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
69842cba
PB
935{
936 if (clamp_id == UCLAMP_MIN)
937 return 0;
938 return SCHED_CAPACITY_SCALE;
939}
940
a509a7cd
PB
941static inline void uclamp_se_set(struct uclamp_se *uc_se,
942 unsigned int value, bool user_defined)
69842cba
PB
943{
944 uc_se->value = value;
945 uc_se->bucket_id = uclamp_bucket_id(value);
a509a7cd 946 uc_se->user_defined = user_defined;
69842cba
PB
947}
948
e496187d 949static inline unsigned int
0413d7f3 950uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
951 unsigned int clamp_value)
952{
953 /*
954 * Avoid blocked utilization pushing up the frequency when we go
955 * idle (which drops the max-clamp) by retaining the last known
956 * max-clamp.
957 */
958 if (clamp_id == UCLAMP_MAX) {
959 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
960 return clamp_value;
961 }
962
963 return uclamp_none(UCLAMP_MIN);
964}
965
0413d7f3 966static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
967 unsigned int clamp_value)
968{
969 /* Reset max-clamp retention only on idle exit */
970 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
971 return;
972
973 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
974}
975
69842cba 976static inline
7763baac 977unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
0413d7f3 978 unsigned int clamp_value)
69842cba
PB
979{
980 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
981 int bucket_id = UCLAMP_BUCKETS - 1;
982
983 /*
984 * Since both min and max clamps are max aggregated, find the
985 * top most bucket with tasks in.
986 */
987 for ( ; bucket_id >= 0; bucket_id--) {
988 if (!bucket[bucket_id].tasks)
989 continue;
990 return bucket[bucket_id].value;
991 }
992
993 /* No tasks -- default clamp values */
e496187d 994 return uclamp_idle_value(rq, clamp_id, clamp_value);
69842cba
PB
995}
996
13685c4a
QY
997static void __uclamp_update_util_min_rt_default(struct task_struct *p)
998{
999 unsigned int default_util_min;
1000 struct uclamp_se *uc_se;
1001
1002 lockdep_assert_held(&p->pi_lock);
1003
1004 uc_se = &p->uclamp_req[UCLAMP_MIN];
1005
1006 /* Only sync if user didn't override the default */
1007 if (uc_se->user_defined)
1008 return;
1009
1010 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1011 uclamp_se_set(uc_se, default_util_min, false);
1012}
1013
1014static void uclamp_update_util_min_rt_default(struct task_struct *p)
1015{
1016 struct rq_flags rf;
1017 struct rq *rq;
1018
1019 if (!rt_task(p))
1020 return;
1021
1022 /* Protect updates to p->uclamp_* */
1023 rq = task_rq_lock(p, &rf);
1024 __uclamp_update_util_min_rt_default(p);
1025 task_rq_unlock(rq, p, &rf);
1026}
1027
1028static void uclamp_sync_util_min_rt_default(void)
1029{
1030 struct task_struct *g, *p;
1031
1032 /*
1033 * copy_process() sysctl_uclamp
1034 * uclamp_min_rt = X;
1035 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1036 * // link thread smp_mb__after_spinlock()
1037 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1038 * sched_post_fork() for_each_process_thread()
1039 * __uclamp_sync_rt() __uclamp_sync_rt()
1040 *
1041 * Ensures that either sched_post_fork() will observe the new
1042 * uclamp_min_rt or for_each_process_thread() will observe the new
1043 * task.
1044 */
1045 read_lock(&tasklist_lock);
1046 smp_mb__after_spinlock();
1047 read_unlock(&tasklist_lock);
1048
1049 rcu_read_lock();
1050 for_each_process_thread(g, p)
1051 uclamp_update_util_min_rt_default(p);
1052 rcu_read_unlock();
1053}
1054
3eac870a 1055static inline struct uclamp_se
0413d7f3 1056uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
3eac870a
PB
1057{
1058 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1059#ifdef CONFIG_UCLAMP_TASK_GROUP
1060 struct uclamp_se uc_max;
1061
1062 /*
1063 * Tasks in autogroups or root task group will be
1064 * restricted by system defaults.
1065 */
1066 if (task_group_is_autogroup(task_group(p)))
1067 return uc_req;
1068 if (task_group(p) == &root_task_group)
1069 return uc_req;
1070
1071 uc_max = task_group(p)->uclamp[clamp_id];
1072 if (uc_req.value > uc_max.value || !uc_req.user_defined)
1073 return uc_max;
1074#endif
1075
1076 return uc_req;
1077}
1078
e8f14172
PB
1079/*
1080 * The effective clamp bucket index of a task depends on, by increasing
1081 * priority:
1082 * - the task specific clamp value, when explicitly requested from userspace
3eac870a
PB
1083 * - the task group effective clamp value, for tasks not either in the root
1084 * group or in an autogroup
e8f14172
PB
1085 * - the system default clamp value, defined by the sysadmin
1086 */
1087static inline struct uclamp_se
0413d7f3 1088uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
e8f14172 1089{
3eac870a 1090 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
e8f14172
PB
1091 struct uclamp_se uc_max = uclamp_default[clamp_id];
1092
1093 /* System default restrictions always apply */
1094 if (unlikely(uc_req.value > uc_max.value))
1095 return uc_max;
1096
1097 return uc_req;
1098}
1099
686516b5 1100unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
9d20ad7d
PB
1101{
1102 struct uclamp_se uc_eff;
1103
1104 /* Task currently refcounted: use back-annotated (effective) value */
1105 if (p->uclamp[clamp_id].active)
686516b5 1106 return (unsigned long)p->uclamp[clamp_id].value;
9d20ad7d
PB
1107
1108 uc_eff = uclamp_eff_get(p, clamp_id);
1109
686516b5 1110 return (unsigned long)uc_eff.value;
9d20ad7d
PB
1111}
1112
69842cba
PB
1113/*
1114 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1115 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1116 * updates the rq's clamp value if required.
60daf9c1
PB
1117 *
1118 * Tasks can have a task-specific value requested from user-space, track
1119 * within each bucket the maximum value for tasks refcounted in it.
1120 * This "local max aggregation" allows tracking the exact "requested" value
1121 * for each bucket when all its RUNNABLE tasks require the same clamp.
69842cba
PB
1122 */
1123static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
0413d7f3 1124 enum uclamp_id clamp_id)
69842cba
PB
1125{
1126 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1127 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1128 struct uclamp_bucket *bucket;
1129
1130 lockdep_assert_held(&rq->lock);
1131
e8f14172
PB
1132 /* Update task effective clamp */
1133 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1134
69842cba
PB
1135 bucket = &uc_rq->bucket[uc_se->bucket_id];
1136 bucket->tasks++;
e8f14172 1137 uc_se->active = true;
69842cba 1138
e496187d
PB
1139 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1140
60daf9c1
PB
1141 /*
1142 * Local max aggregation: rq buckets always track the max
1143 * "requested" clamp value of its RUNNABLE tasks.
1144 */
1145 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1146 bucket->value = uc_se->value;
1147
69842cba 1148 if (uc_se->value > READ_ONCE(uc_rq->value))
60daf9c1 1149 WRITE_ONCE(uc_rq->value, uc_se->value);
69842cba
PB
1150}
1151
1152/*
1153 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1154 * is released. If this is the last task reference counting the rq's max
1155 * active clamp value, then the rq's clamp value is updated.
1156 *
1157 * Both refcounted tasks and rq's cached clamp values are expected to be
1158 * always valid. If it's detected they are not, as defensive programming,
1159 * enforce the expected state and warn.
1160 */
1161static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
0413d7f3 1162 enum uclamp_id clamp_id)
69842cba
PB
1163{
1164 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1165 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1166 struct uclamp_bucket *bucket;
e496187d 1167 unsigned int bkt_clamp;
69842cba
PB
1168 unsigned int rq_clamp;
1169
1170 lockdep_assert_held(&rq->lock);
1171
46609ce2
QY
1172 /*
1173 * If sched_uclamp_used was enabled after task @p was enqueued,
1174 * we could end up with unbalanced call to uclamp_rq_dec_id().
1175 *
1176 * In this case the uc_se->active flag should be false since no uclamp
1177 * accounting was performed at enqueue time and we can just return
1178 * here.
1179 *
b19a888c 1180 * Need to be careful of the following enqueue/dequeue ordering
46609ce2
QY
1181 * problem too
1182 *
1183 * enqueue(taskA)
1184 * // sched_uclamp_used gets enabled
1185 * enqueue(taskB)
1186 * dequeue(taskA)
b19a888c 1187 * // Must not decrement bucket->tasks here
46609ce2
QY
1188 * dequeue(taskB)
1189 *
1190 * where we could end up with stale data in uc_se and
1191 * bucket[uc_se->bucket_id].
1192 *
1193 * The following check here eliminates the possibility of such race.
1194 */
1195 if (unlikely(!uc_se->active))
1196 return;
1197
69842cba 1198 bucket = &uc_rq->bucket[uc_se->bucket_id];
46609ce2 1199
69842cba
PB
1200 SCHED_WARN_ON(!bucket->tasks);
1201 if (likely(bucket->tasks))
1202 bucket->tasks--;
46609ce2 1203
e8f14172 1204 uc_se->active = false;
69842cba 1205
60daf9c1
PB
1206 /*
1207 * Keep "local max aggregation" simple and accept possibly overboosting
1208 * some RUNNABLE tasks in the same bucket.
1209 * The rq clamp bucket value is reset to its base value whenever
1210 * there are no more RUNNABLE tasks refcounting it.
1211 */
69842cba
PB
1212 if (likely(bucket->tasks))
1213 return;
1214
1215 rq_clamp = READ_ONCE(uc_rq->value);
1216 /*
1217 * Defensive programming: this should never happen. If it happens,
1218 * e.g. due to future modification, warn and fixup the expected value.
1219 */
1220 SCHED_WARN_ON(bucket->value > rq_clamp);
e496187d
PB
1221 if (bucket->value >= rq_clamp) {
1222 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1223 WRITE_ONCE(uc_rq->value, bkt_clamp);
1224 }
69842cba
PB
1225}
1226
1227static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1228{
0413d7f3 1229 enum uclamp_id clamp_id;
69842cba 1230
46609ce2
QY
1231 /*
1232 * Avoid any overhead until uclamp is actually used by the userspace.
1233 *
1234 * The condition is constructed such that a NOP is generated when
1235 * sched_uclamp_used is disabled.
1236 */
1237 if (!static_branch_unlikely(&sched_uclamp_used))
1238 return;
1239
69842cba
PB
1240 if (unlikely(!p->sched_class->uclamp_enabled))
1241 return;
1242
1243 for_each_clamp_id(clamp_id)
1244 uclamp_rq_inc_id(rq, p, clamp_id);
e496187d
PB
1245
1246 /* Reset clamp idle holding when there is one RUNNABLE task */
1247 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1248 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
69842cba
PB
1249}
1250
1251static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1252{
0413d7f3 1253 enum uclamp_id clamp_id;
69842cba 1254
46609ce2
QY
1255 /*
1256 * Avoid any overhead until uclamp is actually used by the userspace.
1257 *
1258 * The condition is constructed such that a NOP is generated when
1259 * sched_uclamp_used is disabled.
1260 */
1261 if (!static_branch_unlikely(&sched_uclamp_used))
1262 return;
1263
69842cba
PB
1264 if (unlikely(!p->sched_class->uclamp_enabled))
1265 return;
1266
1267 for_each_clamp_id(clamp_id)
1268 uclamp_rq_dec_id(rq, p, clamp_id);
1269}
1270
babbe170 1271static inline void
0413d7f3 1272uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
babbe170
PB
1273{
1274 struct rq_flags rf;
1275 struct rq *rq;
1276
1277 /*
1278 * Lock the task and the rq where the task is (or was) queued.
1279 *
1280 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1281 * price to pay to safely serialize util_{min,max} updates with
1282 * enqueues, dequeues and migration operations.
1283 * This is the same locking schema used by __set_cpus_allowed_ptr().
1284 */
1285 rq = task_rq_lock(p, &rf);
1286
1287 /*
1288 * Setting the clamp bucket is serialized by task_rq_lock().
1289 * If the task is not yet RUNNABLE and its task_struct is not
1290 * affecting a valid clamp bucket, the next time it's enqueued,
1291 * it will already see the updated clamp bucket value.
1292 */
6e1ff077 1293 if (p->uclamp[clamp_id].active) {
babbe170
PB
1294 uclamp_rq_dec_id(rq, p, clamp_id);
1295 uclamp_rq_inc_id(rq, p, clamp_id);
1296 }
1297
1298 task_rq_unlock(rq, p, &rf);
1299}
1300
e3b8b6a0 1301#ifdef CONFIG_UCLAMP_TASK_GROUP
babbe170
PB
1302static inline void
1303uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1304 unsigned int clamps)
1305{
0413d7f3 1306 enum uclamp_id clamp_id;
babbe170
PB
1307 struct css_task_iter it;
1308 struct task_struct *p;
babbe170
PB
1309
1310 css_task_iter_start(css, 0, &it);
1311 while ((p = css_task_iter_next(&it))) {
1312 for_each_clamp_id(clamp_id) {
1313 if ((0x1 << clamp_id) & clamps)
1314 uclamp_update_active(p, clamp_id);
1315 }
1316 }
1317 css_task_iter_end(&it);
1318}
1319
7274a5c1
PB
1320static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1321static void uclamp_update_root_tg(void)
1322{
1323 struct task_group *tg = &root_task_group;
1324
1325 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1326 sysctl_sched_uclamp_util_min, false);
1327 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1328 sysctl_sched_uclamp_util_max, false);
1329
1330 rcu_read_lock();
1331 cpu_util_update_eff(&root_task_group.css);
1332 rcu_read_unlock();
1333}
1334#else
1335static void uclamp_update_root_tg(void) { }
1336#endif
1337
e8f14172 1338int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
32927393 1339 void *buffer, size_t *lenp, loff_t *ppos)
e8f14172 1340{
7274a5c1 1341 bool update_root_tg = false;
13685c4a 1342 int old_min, old_max, old_min_rt;
e8f14172
PB
1343 int result;
1344
2480c093 1345 mutex_lock(&uclamp_mutex);
e8f14172
PB
1346 old_min = sysctl_sched_uclamp_util_min;
1347 old_max = sysctl_sched_uclamp_util_max;
13685c4a 1348 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
e8f14172
PB
1349
1350 result = proc_dointvec(table, write, buffer, lenp, ppos);
1351 if (result)
1352 goto undo;
1353 if (!write)
1354 goto done;
1355
1356 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
13685c4a
QY
1357 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1358 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1359
e8f14172
PB
1360 result = -EINVAL;
1361 goto undo;
1362 }
1363
1364 if (old_min != sysctl_sched_uclamp_util_min) {
1365 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
a509a7cd 1366 sysctl_sched_uclamp_util_min, false);
7274a5c1 1367 update_root_tg = true;
e8f14172
PB
1368 }
1369 if (old_max != sysctl_sched_uclamp_util_max) {
1370 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
a509a7cd 1371 sysctl_sched_uclamp_util_max, false);
7274a5c1 1372 update_root_tg = true;
e8f14172
PB
1373 }
1374
46609ce2
QY
1375 if (update_root_tg) {
1376 static_branch_enable(&sched_uclamp_used);
7274a5c1 1377 uclamp_update_root_tg();
46609ce2 1378 }
7274a5c1 1379
13685c4a
QY
1380 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1381 static_branch_enable(&sched_uclamp_used);
1382 uclamp_sync_util_min_rt_default();
1383 }
7274a5c1 1384
e8f14172 1385 /*
7274a5c1
PB
1386 * We update all RUNNABLE tasks only when task groups are in use.
1387 * Otherwise, keep it simple and do just a lazy update at each next
1388 * task enqueue time.
e8f14172 1389 */
7274a5c1 1390
e8f14172
PB
1391 goto done;
1392
1393undo:
1394 sysctl_sched_uclamp_util_min = old_min;
1395 sysctl_sched_uclamp_util_max = old_max;
13685c4a 1396 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
e8f14172 1397done:
2480c093 1398 mutex_unlock(&uclamp_mutex);
e8f14172
PB
1399
1400 return result;
1401}
1402
a509a7cd
PB
1403static int uclamp_validate(struct task_struct *p,
1404 const struct sched_attr *attr)
1405{
480a6ca2
DE
1406 int util_min = p->uclamp_req[UCLAMP_MIN].value;
1407 int util_max = p->uclamp_req[UCLAMP_MAX].value;
a509a7cd 1408
480a6ca2
DE
1409 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1410 util_min = attr->sched_util_min;
a509a7cd 1411
480a6ca2
DE
1412 if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1413 return -EINVAL;
1414 }
1415
1416 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1417 util_max = attr->sched_util_max;
1418
1419 if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1420 return -EINVAL;
1421 }
1422
1423 if (util_min != -1 && util_max != -1 && util_min > util_max)
a509a7cd
PB
1424 return -EINVAL;
1425
e65855a5
QY
1426 /*
1427 * We have valid uclamp attributes; make sure uclamp is enabled.
1428 *
1429 * We need to do that here, because enabling static branches is a
1430 * blocking operation which obviously cannot be done while holding
1431 * scheduler locks.
1432 */
1433 static_branch_enable(&sched_uclamp_used);
1434
a509a7cd
PB
1435 return 0;
1436}
1437
480a6ca2
DE
1438static bool uclamp_reset(const struct sched_attr *attr,
1439 enum uclamp_id clamp_id,
1440 struct uclamp_se *uc_se)
1441{
1442 /* Reset on sched class change for a non user-defined clamp value. */
1443 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1444 !uc_se->user_defined)
1445 return true;
1446
1447 /* Reset on sched_util_{min,max} == -1. */
1448 if (clamp_id == UCLAMP_MIN &&
1449 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1450 attr->sched_util_min == -1) {
1451 return true;
1452 }
1453
1454 if (clamp_id == UCLAMP_MAX &&
1455 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1456 attr->sched_util_max == -1) {
1457 return true;
1458 }
1459
1460 return false;
1461}
1462
a509a7cd
PB
1463static void __setscheduler_uclamp(struct task_struct *p,
1464 const struct sched_attr *attr)
1465{
0413d7f3 1466 enum uclamp_id clamp_id;
1a00d999 1467
1a00d999
PB
1468 for_each_clamp_id(clamp_id) {
1469 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
480a6ca2 1470 unsigned int value;
1a00d999 1471
480a6ca2 1472 if (!uclamp_reset(attr, clamp_id, uc_se))
1a00d999
PB
1473 continue;
1474
13685c4a
QY
1475 /*
1476 * RT by default have a 100% boost value that could be modified
1477 * at runtime.
1478 */
1a00d999 1479 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
480a6ca2 1480 value = sysctl_sched_uclamp_util_min_rt_default;
13685c4a 1481 else
480a6ca2
DE
1482 value = uclamp_none(clamp_id);
1483
1484 uclamp_se_set(uc_se, value, false);
1a00d999 1485
1a00d999
PB
1486 }
1487
a509a7cd
PB
1488 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1489 return;
1490
480a6ca2
DE
1491 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1492 attr->sched_util_min != -1) {
a509a7cd
PB
1493 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1494 attr->sched_util_min, true);
1495 }
1496
480a6ca2
DE
1497 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1498 attr->sched_util_max != -1) {
a509a7cd
PB
1499 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1500 attr->sched_util_max, true);
1501 }
1502}
1503
e8f14172
PB
1504static void uclamp_fork(struct task_struct *p)
1505{
0413d7f3 1506 enum uclamp_id clamp_id;
e8f14172 1507
13685c4a
QY
1508 /*
1509 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1510 * as the task is still at its early fork stages.
1511 */
e8f14172
PB
1512 for_each_clamp_id(clamp_id)
1513 p->uclamp[clamp_id].active = false;
a87498ac
PB
1514
1515 if (likely(!p->sched_reset_on_fork))
1516 return;
1517
1518 for_each_clamp_id(clamp_id) {
eaf5a92e
QP
1519 uclamp_se_set(&p->uclamp_req[clamp_id],
1520 uclamp_none(clamp_id), false);
a87498ac 1521 }
e8f14172
PB
1522}
1523
13685c4a
QY
1524static void uclamp_post_fork(struct task_struct *p)
1525{
1526 uclamp_update_util_min_rt_default(p);
1527}
1528
d81ae8aa
QY
1529static void __init init_uclamp_rq(struct rq *rq)
1530{
1531 enum uclamp_id clamp_id;
1532 struct uclamp_rq *uc_rq = rq->uclamp;
1533
1534 for_each_clamp_id(clamp_id) {
1535 uc_rq[clamp_id] = (struct uclamp_rq) {
1536 .value = uclamp_none(clamp_id)
1537 };
1538 }
1539
1540 rq->uclamp_flags = 0;
1541}
1542
69842cba
PB
1543static void __init init_uclamp(void)
1544{
e8f14172 1545 struct uclamp_se uc_max = {};
0413d7f3 1546 enum uclamp_id clamp_id;
69842cba
PB
1547 int cpu;
1548
d81ae8aa
QY
1549 for_each_possible_cpu(cpu)
1550 init_uclamp_rq(cpu_rq(cpu));
69842cba 1551
69842cba 1552 for_each_clamp_id(clamp_id) {
e8f14172 1553 uclamp_se_set(&init_task.uclamp_req[clamp_id],
a509a7cd 1554 uclamp_none(clamp_id), false);
69842cba 1555 }
e8f14172
PB
1556
1557 /* System defaults allow max clamp values for both indexes */
a509a7cd 1558 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2480c093 1559 for_each_clamp_id(clamp_id) {
e8f14172 1560 uclamp_default[clamp_id] = uc_max;
2480c093
PB
1561#ifdef CONFIG_UCLAMP_TASK_GROUP
1562 root_task_group.uclamp_req[clamp_id] = uc_max;
0b60ba2d 1563 root_task_group.uclamp[clamp_id] = uc_max;
2480c093
PB
1564#endif
1565 }
69842cba
PB
1566}
1567
1568#else /* CONFIG_UCLAMP_TASK */
1569static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1570static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
a509a7cd
PB
1571static inline int uclamp_validate(struct task_struct *p,
1572 const struct sched_attr *attr)
1573{
1574 return -EOPNOTSUPP;
1575}
1576static void __setscheduler_uclamp(struct task_struct *p,
1577 const struct sched_attr *attr) { }
e8f14172 1578static inline void uclamp_fork(struct task_struct *p) { }
13685c4a 1579static inline void uclamp_post_fork(struct task_struct *p) { }
69842cba
PB
1580static inline void init_uclamp(void) { }
1581#endif /* CONFIG_UCLAMP_TASK */
1582
1de64443 1583static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 1584{
0a67d1ee
PZ
1585 if (!(flags & ENQUEUE_NOCLOCK))
1586 update_rq_clock(rq);
1587
eb414681 1588 if (!(flags & ENQUEUE_RESTORE)) {
1de64443 1589 sched_info_queued(rq, p);
eb414681
JW
1590 psi_enqueue(p, flags & ENQUEUE_WAKEUP);
1591 }
0a67d1ee 1592
69842cba 1593 uclamp_rq_inc(rq, p);
371fd7e7 1594 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
1595}
1596
1de64443 1597static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 1598{
0a67d1ee
PZ
1599 if (!(flags & DEQUEUE_NOCLOCK))
1600 update_rq_clock(rq);
1601
eb414681 1602 if (!(flags & DEQUEUE_SAVE)) {
1de64443 1603 sched_info_dequeued(rq, p);
eb414681
JW
1604 psi_dequeue(p, flags & DEQUEUE_SLEEP);
1605 }
0a67d1ee 1606
69842cba 1607 uclamp_rq_dec(rq, p);
371fd7e7 1608 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
1609}
1610
029632fb 1611void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 1612{
371fd7e7 1613 enqueue_task(rq, p, flags);
7dd77884
PZ
1614
1615 p->on_rq = TASK_ON_RQ_QUEUED;
1e3c88bd
PZ
1616}
1617
029632fb 1618void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 1619{
7dd77884
PZ
1620 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
1621
371fd7e7 1622 dequeue_task(rq, p, flags);
1e3c88bd
PZ
1623}
1624
14531189 1625/*
dd41f596 1626 * __normal_prio - return the priority that is based on the static prio
14531189 1627 */
14531189
IM
1628static inline int __normal_prio(struct task_struct *p)
1629{
dd41f596 1630 return p->static_prio;
14531189
IM
1631}
1632
b29739f9
IM
1633/*
1634 * Calculate the expected normal priority: i.e. priority
1635 * without taking RT-inheritance into account. Might be
1636 * boosted by interactivity modifiers. Changes upon fork,
1637 * setprio syscalls, and whenever the interactivity
1638 * estimator recalculates.
1639 */
36c8b586 1640static inline int normal_prio(struct task_struct *p)
b29739f9
IM
1641{
1642 int prio;
1643
aab03e05
DF
1644 if (task_has_dl_policy(p))
1645 prio = MAX_DL_PRIO-1;
1646 else if (task_has_rt_policy(p))
b29739f9
IM
1647 prio = MAX_RT_PRIO-1 - p->rt_priority;
1648 else
1649 prio = __normal_prio(p);
1650 return prio;
1651}
1652
1653/*
1654 * Calculate the current priority, i.e. the priority
1655 * taken into account by the scheduler. This value might
1656 * be boosted by RT tasks, or might be boosted by
1657 * interactivity modifiers. Will be RT if the task got
1658 * RT-boosted. If not then it returns p->normal_prio.
1659 */
36c8b586 1660static int effective_prio(struct task_struct *p)
b29739f9
IM
1661{
1662 p->normal_prio = normal_prio(p);
1663 /*
1664 * If we are RT tasks or we were boosted to RT priority,
1665 * keep the priority unchanged. Otherwise, update priority
1666 * to the normal priority:
1667 */
1668 if (!rt_prio(p->prio))
1669 return p->normal_prio;
1670 return p->prio;
1671}
1672
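/*
 * Worked example (sketch): with MAX_RT_PRIO == 100, a SCHED_FIFO task
 * with rt_priority == 50 gets normal_prio() == 100 - 1 - 50 == 49,
 * while a SCHED_NORMAL task at nice 0 keeps its static_prio of 120.
 * Lower numerical prio means higher scheduling priority, and a
 * PI-boosted task keeps its boosted p->prio in effective_prio().
 */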
1da177e4
LT
1673/**
1674 * task_curr - is this task currently executing on a CPU?
1675 * @p: the task in question.
e69f6186
YB
1676 *
1677 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 1678 */
36c8b586 1679inline int task_curr(const struct task_struct *p)
1da177e4
LT
1680{
1681 return cpu_curr(task_cpu(p)) == p;
1682}
1683
67dfa1b7 1684/*
4c9a4bc8
PZ
1685 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1686 * use the balance_callback list if you want balancing.
1687 *
1688 * this means any call to check_class_changed() must be followed by a call to
1689 * balance_callback().
67dfa1b7 1690 */
cb469845
SR
1691static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1692 const struct sched_class *prev_class,
da7a735e 1693 int oldprio)
cb469845
SR
1694{
1695 if (prev_class != p->sched_class) {
1696 if (prev_class->switched_from)
da7a735e 1697 prev_class->switched_from(rq, p);
4c9a4bc8 1698
da7a735e 1699 p->sched_class->switched_to(rq, p);
2d3d891d 1700 } else if (oldprio != p->prio || dl_task(p))
da7a735e 1701 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
1702}
1703
029632fb 1704void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405 1705{
aa93cd53 1706 if (p->sched_class == rq->curr->sched_class)
1e5a7405 1707 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
aa93cd53
KT
1708 else if (p->sched_class > rq->curr->sched_class)
1709 resched_curr(rq);
1e5a7405
PZ
1710
1711 /*
1712 * A queue event has occurred, and we're going to schedule. In
1713 * this case, we can save a useless back to back clock update.
1714 */
da0c1e65 1715 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
adcc8da8 1716 rq_clock_skip_update(rq);
1e5a7405
PZ
1717}
1718
1da177e4 1719#ifdef CONFIG_SMP
175f0e25 1720
af449901
PZ
1721static void
1722__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
1723
1724static int __set_cpus_allowed_ptr(struct task_struct *p,
1725 const struct cpumask *new_mask,
1726 u32 flags);
1727
1728static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
1729{
1730 if (likely(!p->migration_disabled))
1731 return;
1732
1733 if (p->cpus_ptr != &p->cpus_mask)
1734 return;
1735
1736 /*
1737 * Violates locking rules! see comment in __do_set_cpus_allowed().
1738 */
1739 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
1740}
1741
1742void migrate_disable(void)
1743{
3015ef4b
TG
1744 struct task_struct *p = current;
1745
1746 if (p->migration_disabled) {
1747 p->migration_disabled++;
af449901 1748 return;
3015ef4b 1749 }
af449901 1750
3015ef4b
TG
1751 preempt_disable();
1752 this_rq()->nr_pinned++;
1753 p->migration_disabled = 1;
1754 preempt_enable();
af449901
PZ
1755}
1756EXPORT_SYMBOL_GPL(migrate_disable);
1757
1758void migrate_enable(void)
1759{
1760 struct task_struct *p = current;
1761
6d337eab
PZ
1762 if (p->migration_disabled > 1) {
1763 p->migration_disabled--;
af449901 1764 return;
6d337eab 1765 }
af449901 1766
6d337eab
PZ
1767 /*
1768 * Ensure stop_task runs either before or after this, and that
1769 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
1770 */
1771 preempt_disable();
1772 if (p->cpus_ptr != &p->cpus_mask)
1773 __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
1774 /*
1775 * Mustn't clear migration_disabled() until cpus_ptr points back at the
1776 * regular cpus_mask, otherwise things that race (eg.
1777 * select_fallback_rq) get confused.
1778 */
af449901 1779 barrier();
6d337eab 1780 p->migration_disabled = 0;
3015ef4b 1781 this_rq()->nr_pinned--;
6d337eab 1782 preempt_enable();
af449901
PZ
1783}
1784EXPORT_SYMBOL_GPL(migrate_enable);
1785
3015ef4b
TG
1786static inline bool rq_has_pinned_tasks(struct rq *rq)
1787{
1788 return rq->nr_pinned;
1789}
1790
175f0e25 1791/*
bee98539 1792 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
175f0e25
PZ
1793 * __set_cpus_allowed_ptr() and select_fallback_rq().
1794 */
1795static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1796{
5ba2ffba 1797 /* When not in the task's cpumask, no point in looking further. */
3bd37062 1798 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
175f0e25
PZ
1799 return false;
1800
5ba2ffba
PZ
1801 /* migrate_disabled() must be allowed to finish. */
1802 if (is_migration_disabled(p))
175f0e25
PZ
1803 return cpu_online(cpu);
1804
5ba2ffba
PZ
 1805 /* Non-kernel threads are not allowed during either online or offline. */
1806 if (!(p->flags & PF_KTHREAD))
1807 return cpu_active(cpu);
1808
1809 /* KTHREAD_IS_PER_CPU is always allowed. */
1810 if (kthread_is_per_cpu(p))
1811 return cpu_online(cpu);
1812
1813 /* Regular kernel threads don't get to stay during offline. */
1814 if (cpu_rq(cpu)->balance_push)
1815 return false;
1816
1817 /* But are allowed during online. */
1818 return cpu_online(cpu);
175f0e25
PZ
1819}
1820
5cc389bc
PZ
1821/*
1822 * This is how migration works:
1823 *
1824 * 1) we invoke migration_cpu_stop() on the target CPU using
1825 * stop_one_cpu().
1826 * 2) stopper starts to run (implicitly forcing the migrated thread
1827 * off the CPU)
1828 * 3) it checks whether the migrated task is still in the wrong runqueue.
1829 * 4) if it's in the wrong runqueue then the migration thread removes
1830 * it and puts it into the right queue.
1831 * 5) stopper completes and stop_one_cpu() returns and the migration
1832 * is done.
1833 */
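/*
 * Editor's sketch (not part of the original file): a caller that merely wants
 * the task moved somewhere valid drives the machinery above roughly the way
 * sched_exec() does, with no affinity-pending state attached. The helper name
 * below is made up for illustration and is not compiled.
 */
#if 0	/* illustration only */
static void example_force_migration(struct task_struct *p, int dest_cpu)
{
	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu, .pending = NULL };

	/* Runs migration_cpu_stop() on p's current CPU and waits for it to finish. */
	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}
#endif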
1834
1835/*
1836 * move_queued_task - move a queued task to new rq.
1837 *
1838 * Returns (locked) new rq. Old rq's lock is released.
1839 */
8a8c69c3
PZ
1840static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
1841 struct task_struct *p, int new_cpu)
5cc389bc 1842{
5cc389bc
PZ
1843 lockdep_assert_held(&rq->lock);
1844
58877d34 1845 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
5cc389bc 1846 set_task_cpu(p, new_cpu);
8a8c69c3 1847 rq_unlock(rq, rf);
5cc389bc
PZ
1848
1849 rq = cpu_rq(new_cpu);
1850
8a8c69c3 1851 rq_lock(rq, rf);
5cc389bc 1852 BUG_ON(task_cpu(p) != new_cpu);
58877d34 1853 activate_task(rq, p, 0);
5cc389bc
PZ
1854 check_preempt_curr(rq, p, 0);
1855
1856 return rq;
1857}
1858
1859struct migration_arg {
6d337eab
PZ
1860 struct task_struct *task;
1861 int dest_cpu;
1862 struct set_affinity_pending *pending;
1863};
1864
1865struct set_affinity_pending {
1866 refcount_t refs;
1867 struct completion done;
1868 struct cpu_stop_work stop_work;
1869 struct migration_arg arg;
5cc389bc
PZ
1870};
1871
1872/*
d1ccc66d 1873 * Move (not current) task off this CPU, onto the destination CPU. We're doing
5cc389bc
PZ
1874 * this because either it can't run here any more (set_cpus_allowed()
1875 * away from this CPU, or CPU going down), or because we're
1876 * attempting to rebalance this task on exec (sched_exec).
1877 *
1878 * So we race with normal scheduler movements, but that's OK, as long
1879 * as the task is no longer on this CPU.
5cc389bc 1880 */
8a8c69c3
PZ
1881static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1882 struct task_struct *p, int dest_cpu)
5cc389bc 1883{
5cc389bc 1884 /* Affinity changed (again). */
175f0e25 1885 if (!is_cpu_allowed(p, dest_cpu))
5e16bbc2 1886 return rq;
5cc389bc 1887
15ff991e 1888 update_rq_clock(rq);
8a8c69c3 1889 rq = move_queued_task(rq, rf, p, dest_cpu);
5e16bbc2
PZ
1890
1891 return rq;
5cc389bc
PZ
1892}
1893
1894/*
1895 * migration_cpu_stop - this will be executed by a highprio stopper thread
1896 * and performs thread migration by bumping thread off CPU then
1897 * 'pushing' onto another runqueue.
1898 */
1899static int migration_cpu_stop(void *data)
1900{
6d337eab 1901 struct set_affinity_pending *pending;
5cc389bc 1902 struct migration_arg *arg = data;
5e16bbc2 1903 struct task_struct *p = arg->task;
6d337eab 1904 int dest_cpu = arg->dest_cpu;
5e16bbc2 1905 struct rq *rq = this_rq();
6d337eab 1906 bool complete = false;
8a8c69c3 1907 struct rq_flags rf;
5cc389bc
PZ
1908
1909 /*
d1ccc66d
IM
1910 * The original target CPU might have gone down and we might
1911 * be on another CPU but it doesn't matter.
5cc389bc 1912 */
6d337eab 1913 local_irq_save(rf.flags);
5cc389bc
PZ
1914 /*
1915 * We need to explicitly wake pending tasks before running
3bd37062 1916 * __migrate_task() such that we will not miss enforcing cpus_ptr
5cc389bc
PZ
1917 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1918 */
a1488664 1919 flush_smp_call_function_from_idle();
5e16bbc2
PZ
1920
1921 raw_spin_lock(&p->pi_lock);
8a8c69c3 1922 rq_lock(rq, &rf);
6d337eab
PZ
1923
1924 pending = p->migration_pending;
8a6edb52
PZ
1925 if (pending && !arg->pending) {
1926 /*
1927 * This happens from sched_exec() and migrate_task_to(),
 1928 * neither of them cares about pending; both just want a task to
1929 * maybe move about.
1930 *
1931 * Even if there is a pending, we can ignore it, since
 1932 * affine_move_task() will have its own stop_work(s) in flight
1933 * which will manage the completion.
1934 *
1935 * Notably, pending doesn't need to match arg->pending. This can
 1936 * happen when triple concurrent affine_move_task() first sets
1937 * pending, then clears pending and eventually sets another
1938 * pending.
1939 */
1940 pending = NULL;
1941 }
1942
5e16bbc2
PZ
1943 /*
1944 * If task_rq(p) != rq, it cannot be migrated here, because we're
1945 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1946 * we're holding p->pi_lock.
1947 */
bf89a304 1948 if (task_rq(p) == rq) {
6d337eab
PZ
1949 if (is_migration_disabled(p))
1950 goto out;
1951
1952 if (pending) {
1953 p->migration_pending = NULL;
1954 complete = true;
1955 }
1956
1957 /* migrate_enable() -- we must not race against SCA */
1958 if (dest_cpu < 0) {
1959 /*
1960 * When this was migrate_enable() but we no longer
1961 * have a @pending, a concurrent SCA 'fixed' things
1962 * and we should be valid again. Nothing to do.
1963 */
1964 if (!pending) {
1293771e 1965 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
6d337eab
PZ
1966 goto out;
1967 }
1968
1969 dest_cpu = cpumask_any_distribute(&p->cpus_mask);
1970 }
1971
bf89a304 1972 if (task_on_rq_queued(p))
6d337eab 1973 rq = __migrate_task(rq, &rf, p, dest_cpu);
bf89a304 1974 else
6d337eab
PZ
1975 p->wake_cpu = dest_cpu;
1976
d707faa6 1977 } else if (dest_cpu < 0 || pending) {
6d337eab
PZ
1978 /*
1979 * This happens when we get migrated between migrate_enable()'s
1980 * preempt_enable() and scheduling the stopper task. At that
1981 * point we're a regular task again and not current anymore.
1982 *
1983 * A !PREEMPT kernel has a giant hole here, which makes it far
1984 * more likely.
1985 */
1986
d707faa6
VS
1987 /*
1988 * The task moved before the stopper got to run. We're holding
1989 * ->pi_lock, so the allowed mask is stable - if it got
1990 * somewhere allowed, we're done.
1991 */
1992 if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
1993 p->migration_pending = NULL;
1994 complete = true;
1995 goto out;
1996 }
1997
6d337eab
PZ
1998 /*
1999 * When this was migrate_enable() but we no longer have an
2000 * @pending, a concurrent SCA 'fixed' things and we should be
2001 * valid again. Nothing to do.
2002 */
2003 if (!pending) {
1293771e 2004 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
6d337eab
PZ
2005 goto out;
2006 }
2007
2008 /*
 2009 * When migrate_enable() hits an rq mismatch we can't reliably
2010 * determine is_migration_disabled() and so have to chase after
2011 * it.
2012 */
2013 task_rq_unlock(rq, p, &rf);
2014 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2015 &pending->arg, &pending->stop_work);
2016 return 0;
bf89a304 2017 }
6d337eab
PZ
2018out:
2019 task_rq_unlock(rq, p, &rf);
2020
2021 if (complete)
2022 complete_all(&pending->done);
2023
2024 /* For pending->{arg,stop_work} */
2025 pending = arg->pending;
2026 if (pending && refcount_dec_and_test(&pending->refs))
2027 wake_up_var(&pending->refs);
5e16bbc2 2028
5cc389bc
PZ
2029 return 0;
2030}
2031
a7c81556
PZ
2032int push_cpu_stop(void *arg)
2033{
2034 struct rq *lowest_rq = NULL, *rq = this_rq();
2035 struct task_struct *p = arg;
2036
2037 raw_spin_lock_irq(&p->pi_lock);
2038 raw_spin_lock(&rq->lock);
2039
2040 if (task_rq(p) != rq)
2041 goto out_unlock;
2042
2043 if (is_migration_disabled(p)) {
2044 p->migration_flags |= MDF_PUSH;
2045 goto out_unlock;
2046 }
2047
2048 p->migration_flags &= ~MDF_PUSH;
2049
2050 if (p->sched_class->find_lock_rq)
2051 lowest_rq = p->sched_class->find_lock_rq(p, rq);
5e16bbc2 2052
a7c81556
PZ
2053 if (!lowest_rq)
2054 goto out_unlock;
2055
2056 // XXX validate p is still the highest prio task
2057 if (task_rq(p) == rq) {
2058 deactivate_task(rq, p, 0);
2059 set_task_cpu(p, lowest_rq->cpu);
2060 activate_task(lowest_rq, p, 0);
2061 resched_curr(lowest_rq);
2062 }
2063
2064 double_unlock_balance(rq, lowest_rq);
2065
2066out_unlock:
2067 rq->push_busy = false;
2068 raw_spin_unlock(&rq->lock);
2069 raw_spin_unlock_irq(&p->pi_lock);
2070
2071 put_task_struct(p);
5cc389bc
PZ
2072 return 0;
2073}
2074
c5b28038
PZ
2075/*
2076 * sched_class::set_cpus_allowed must do the below, but is not required to
2077 * actually call this function.
2078 */
9cfc3e18 2079void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
5cc389bc 2080{
af449901
PZ
2081 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2082 p->cpus_ptr = new_mask;
2083 return;
2084 }
2085
3bd37062 2086 cpumask_copy(&p->cpus_mask, new_mask);
5cc389bc
PZ
2087 p->nr_cpus_allowed = cpumask_weight(new_mask);
2088}
2089
9cfc3e18
PZ
2090static void
2091__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
c5b28038 2092{
6c37067e
PZ
2093 struct rq *rq = task_rq(p);
2094 bool queued, running;
2095
af449901
PZ
2096 /*
2097 * This here violates the locking rules for affinity, since we're only
2098 * supposed to change these variables while holding both rq->lock and
2099 * p->pi_lock.
2100 *
2101 * HOWEVER, it magically works, because ttwu() is the only code that
2102 * accesses these variables under p->pi_lock and only does so after
2103 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2104 * before finish_task().
2105 *
2106 * XXX do further audits, this smells like something putrid.
2107 */
2108 if (flags & SCA_MIGRATE_DISABLE)
2109 SCHED_WARN_ON(!p->on_cpu);
2110 else
2111 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
2112
2113 queued = task_on_rq_queued(p);
2114 running = task_current(rq, p);
2115
2116 if (queued) {
2117 /*
2118 * Because __kthread_bind() calls this on blocked tasks without
2119 * holding rq->lock.
2120 */
2121 lockdep_assert_held(&rq->lock);
7a57f32a 2122 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
6c37067e
PZ
2123 }
2124 if (running)
2125 put_prev_task(rq, p);
2126
9cfc3e18 2127 p->sched_class->set_cpus_allowed(p, new_mask, flags);
6c37067e 2128
6c37067e 2129 if (queued)
7134b3e9 2130 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 2131 if (running)
03b7fad1 2132 set_next_task(rq, p);
c5b28038
PZ
2133}
2134
9cfc3e18
PZ
2135void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2136{
2137 __do_set_cpus_allowed(p, new_mask, 0);
2138}
2139
6d337eab 2140/*
c777d847
VS
2141 * This function is wildly self concurrent; here be dragons.
2142 *
2143 *
2144 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2145 * designated task is enqueued on an allowed CPU. If that task is currently
2146 * running, we have to kick it out using the CPU stopper.
2147 *
2148 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2149 * Consider:
2150 *
2151 * Initial conditions: P0->cpus_mask = [0, 1]
2152 *
2153 * P0@CPU0 P1
2154 *
2155 * migrate_disable();
2156 * <preempted>
2157 * set_cpus_allowed_ptr(P0, [1]);
2158 *
2159 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2160 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2161 * This means we need the following scheme:
2162 *
2163 * P0@CPU0 P1
2164 *
2165 * migrate_disable();
2166 * <preempted>
2167 * set_cpus_allowed_ptr(P0, [1]);
2168 * <blocks>
2169 * <resumes>
2170 * migrate_enable();
2171 * __set_cpus_allowed_ptr();
2172 * <wakes local stopper>
2173 * `--> <woken on migration completion>
2174 *
2175 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2176 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2177 * task p are serialized by p->pi_lock, which we can leverage: the one that
2178 * should come into effect at the end of the Migrate-Disable region is the last
2179 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2180 * but we still need to properly signal those waiting tasks at the appropriate
2181 * moment.
2182 *
2183 * This is implemented using struct set_affinity_pending. The first
2184 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2185 * setup an instance of that struct and install it on the targeted task_struct.
2186 * Any and all further callers will reuse that instance. Those then wait for
2187 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2188 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2189 *
2190 *
2191 * (1) In the cases covered above. There is one more where the completion is
2192 * signaled within affine_move_task() itself: when a subsequent affinity request
2193 * cancels the need for an active migration. Consider:
2194 *
2195 * Initial conditions: P0->cpus_mask = [0, 1]
2196 *
2197 * P0@CPU0 P1 P2
2198 *
2199 * migrate_disable();
2200 * <preempted>
2201 * set_cpus_allowed_ptr(P0, [1]);
2202 * <blocks>
2203 * set_cpus_allowed_ptr(P0, [0, 1]);
2204 * <signal completion>
2205 * <awakes>
2206 *
2207 * Note that the above is safe vs a concurrent migrate_enable(), as any
2208 * pending affinity completion is preceded by an uninstallation of
2209 * p->migration_pending done with p->pi_lock held.
6d337eab
PZ
2210 */
2211static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2212 int dest_cpu, unsigned int flags)
2213{
2214 struct set_affinity_pending my_pending = { }, *pending = NULL;
6d337eab
PZ
2215 bool complete = false;
2216
2217 /* Can the task run on the task's current CPU? If so, we're done */
2218 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
a7c81556
PZ
2219 struct task_struct *push_task = NULL;
2220
2221 if ((flags & SCA_MIGRATE_ENABLE) &&
2222 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2223 rq->push_busy = true;
2224 push_task = get_task_struct(p);
2225 }
2226
6d337eab
PZ
2227 pending = p->migration_pending;
2228 if (pending) {
2229 refcount_inc(&pending->refs);
2230 p->migration_pending = NULL;
2231 complete = true;
2232 }
2233 task_rq_unlock(rq, p, rf);
2234
a7c81556
PZ
2235 if (push_task) {
2236 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2237 p, &rq->push_work);
2238 }
2239
6d337eab
PZ
2240 if (complete)
2241 goto do_complete;
2242
2243 return 0;
2244 }
2245
2246 if (!(flags & SCA_MIGRATE_ENABLE)) {
2247 /* serialized by p->pi_lock */
2248 if (!p->migration_pending) {
c777d847 2249 /* Install the request */
6d337eab
PZ
2250 refcount_set(&my_pending.refs, 1);
2251 init_completion(&my_pending.done);
8a6edb52
PZ
2252 my_pending.arg = (struct migration_arg) {
2253 .task = p,
2254 .dest_cpu = -1, /* any */
2255 .pending = &my_pending,
2256 };
2257
6d337eab
PZ
2258 p->migration_pending = &my_pending;
2259 } else {
2260 pending = p->migration_pending;
2261 refcount_inc(&pending->refs);
2262 }
2263 }
2264 pending = p->migration_pending;
2265 /*
2266 * - !MIGRATE_ENABLE:
2267 * we'll have installed a pending if there wasn't one already.
2268 *
2269 * - MIGRATE_ENABLE:
2270 * we're here because the current CPU isn't matching anymore,
2271 * the only way that can happen is because of a concurrent
2272 * set_cpus_allowed_ptr() call, which should then still be
2273 * pending completion.
2274 *
2275 * Either way, we really should have a @pending here.
2276 */
2277 if (WARN_ON_ONCE(!pending)) {
2278 task_rq_unlock(rq, p, rf);
2279 return -EINVAL;
2280 }
2281
2282 if (flags & SCA_MIGRATE_ENABLE) {
2283
2284 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
a7c81556 2285 p->migration_flags &= ~MDF_PUSH;
6d337eab
PZ
2286 task_rq_unlock(rq, p, rf);
2287
6d337eab
PZ
2288 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2289 &pending->arg, &pending->stop_work);
2290
2291 return 0;
2292 }
2293
2294 if (task_running(rq, p) || p->state == TASK_WAKING) {
c777d847
VS
2295 /*
2296 * Lessen races (and headaches) by delegating
2297 * is_migration_disabled(p) checks to the stopper, which will
2298 * run on the same CPU as said p.
2299 */
8a6edb52 2300 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
6d337eab 2301 task_rq_unlock(rq, p, rf);
8a6edb52
PZ
2302
2303 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2304 &pending->arg, &pending->stop_work);
6d337eab
PZ
2305
2306 } else {
2307
2308 if (!is_migration_disabled(p)) {
2309 if (task_on_rq_queued(p))
2310 rq = move_queued_task(rq, rf, p, dest_cpu);
2311
2312 p->migration_pending = NULL;
2313 complete = true;
2314 }
2315 task_rq_unlock(rq, p, rf);
2316
2317do_complete:
2318 if (complete)
2319 complete_all(&pending->done);
2320 }
2321
2322 wait_for_completion(&pending->done);
2323
2324 if (refcount_dec_and_test(&pending->refs))
2325 wake_up_var(&pending->refs);
2326
c777d847
VS
2327 /*
2328 * Block the original owner of &pending until all subsequent callers
2329 * have seen the completion and decremented the refcount
2330 */
6d337eab
PZ
2331 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2332
2333 return 0;
2334}
2335
5cc389bc
PZ
2336/*
2337 * Change a given task's CPU affinity. Migrate the thread to a
2338 * proper CPU and schedule it away if the CPU it's executing on
2339 * is removed from the allowed bitmask.
2340 *
2341 * NOTE: the caller must have a valid reference to the task, the
2342 * task must not exit() & deallocate itself prematurely. The
2343 * call is not atomic; no spinlocks may be held.
2344 */
25834c73 2345static int __set_cpus_allowed_ptr(struct task_struct *p,
9cfc3e18
PZ
2346 const struct cpumask *new_mask,
2347 u32 flags)
5cc389bc 2348{
e9d867a6 2349 const struct cpumask *cpu_valid_mask = cpu_active_mask;
5cc389bc 2350 unsigned int dest_cpu;
eb580751
PZ
2351 struct rq_flags rf;
2352 struct rq *rq;
5cc389bc
PZ
2353 int ret = 0;
2354
eb580751 2355 rq = task_rq_lock(p, &rf);
a499c3ea 2356 update_rq_clock(rq);
5cc389bc 2357
af449901 2358 if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
e9d867a6 2359 /*
741ba80f
PZ
2360 * Kernel threads are allowed on online && !active CPUs,
2361 * however, during cpu-hot-unplug, even these might get pushed
2362 * away if not KTHREAD_IS_PER_CPU.
af449901
PZ
2363 *
2364 * Specifically, migration_disabled() tasks must not fail the
2365 * cpumask_any_and_distribute() pick below, esp. so on
2366 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2367 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
e9d867a6
PZI
2368 */
2369 cpu_valid_mask = cpu_online_mask;
2370 }
2371
25834c73
PZ
2372 /*
2373 * Must re-check here, to close a race against __kthread_bind(),
2374 * sched_setaffinity() is not guaranteed to observe the flag.
2375 */
9cfc3e18 2376 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
25834c73
PZ
2377 ret = -EINVAL;
2378 goto out;
2379 }
2380
885b3ba4
VS
2381 if (!(flags & SCA_MIGRATE_ENABLE)) {
2382 if (cpumask_equal(&p->cpus_mask, new_mask))
2383 goto out;
2384
2385 if (WARN_ON_ONCE(p == current &&
2386 is_migration_disabled(p) &&
2387 !cpumask_test_cpu(task_cpu(p), new_mask))) {
2388 ret = -EBUSY;
2389 goto out;
2390 }
2391 }
5cc389bc 2392
46a87b38
PT
2393 /*
 2394 * Picking a ~random CPU helps in cases where we are changing affinity
 2395 * for groups of tasks (i.e. cpuset), so that load balancing is not
2396 * immediately required to distribute the tasks within their new mask.
2397 */
2398 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
714e501e 2399 if (dest_cpu >= nr_cpu_ids) {
5cc389bc
PZ
2400 ret = -EINVAL;
2401 goto out;
2402 }
2403
9cfc3e18 2404 __do_set_cpus_allowed(p, new_mask, flags);
5cc389bc 2405
6d337eab 2406 return affine_move_task(rq, p, &rf, dest_cpu, flags);
5cc389bc 2407
5cc389bc 2408out:
eb580751 2409 task_rq_unlock(rq, p, &rf);
5cc389bc
PZ
2410
2411 return ret;
2412}
25834c73
PZ
2413
2414int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
2415{
9cfc3e18 2416 return __set_cpus_allowed_ptr(p, new_mask, 0);
25834c73 2417}
5cc389bc
PZ
2418EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
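Editor's aside: set_cpus_allowed_ptr() above is the in-kernel side of what the
sched_setaffinity(2) syscall path eventually performs. As a rough, runnable
userspace analogue (plain glibc API, not code from this file), restricting the
calling thread to CPU 0 looks like:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);			/* new affinity mask: CPU 0 only */

	if (sched_setaffinity(0, sizeof(set), &set))	/* pid 0 == calling thread */
		perror("sched_setaffinity");

	printf("now running on CPU %d\n", sched_getcpu());
	return 0;
}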
2419
dd41f596 2420void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 2421{
e2912009
PZ
2422#ifdef CONFIG_SCHED_DEBUG
2423 /*
2424 * We should never call set_task_cpu() on a blocked task,
2425 * ttwu() will sort out the placement.
2426 */
077614ee 2427 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
e2336f6e 2428 !p->on_rq);
0122ec5b 2429
3ea94de1
JP
2430 /*
 2431 * A migrating fair-class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
 2432 * because schedstat_wait_{start,end} rebase the migrating task's wait_start
 2433 * time relying on p->on_rq.
2434 */
2435 WARN_ON_ONCE(p->state == TASK_RUNNING &&
2436 p->sched_class == &fair_sched_class &&
2437 (p->on_rq && !task_on_rq_migrating(p)));
2438
0122ec5b 2439#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
2440 /*
2441 * The caller should hold either p->pi_lock or rq->lock, when changing
2442 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2443 *
2444 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 2445 * see task_group().
6c6c54e1
PZ
2446 *
2447 * Furthermore, all task_rq users should acquire both locks, see
2448 * task_rq_lock().
2449 */
0122ec5b
PZ
2450 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2451 lockdep_is_held(&task_rq(p)->lock)));
2452#endif
4ff9083b
PZ
2453 /*
2454 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
2455 */
2456 WARN_ON_ONCE(!cpu_online(new_cpu));
af449901
PZ
2457
2458 WARN_ON_ONCE(is_migration_disabled(p));
e2912009
PZ
2459#endif
2460
de1d7286 2461 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 2462
0c69774e 2463 if (task_cpu(p) != new_cpu) {
0a74bef8 2464 if (p->sched_class->migrate_task_rq)
1327237a 2465 p->sched_class->migrate_task_rq(p, new_cpu);
0c69774e 2466 p->se.nr_migrations++;
d7822b1e 2467 rseq_migrate(p);
ff303e66 2468 perf_event_task_migrate(p);
0c69774e 2469 }
dd41f596
IM
2470
2471 __set_task_cpu(p, new_cpu);
c65cc870
IM
2472}
2473
0ad4e3df 2474#ifdef CONFIG_NUMA_BALANCING
ac66f547
PZ
2475static void __migrate_swap_task(struct task_struct *p, int cpu)
2476{
da0c1e65 2477 if (task_on_rq_queued(p)) {
ac66f547 2478 struct rq *src_rq, *dst_rq;
8a8c69c3 2479 struct rq_flags srf, drf;
ac66f547
PZ
2480
2481 src_rq = task_rq(p);
2482 dst_rq = cpu_rq(cpu);
2483
8a8c69c3
PZ
2484 rq_pin_lock(src_rq, &srf);
2485 rq_pin_lock(dst_rq, &drf);
2486
ac66f547
PZ
2487 deactivate_task(src_rq, p, 0);
2488 set_task_cpu(p, cpu);
2489 activate_task(dst_rq, p, 0);
2490 check_preempt_curr(dst_rq, p, 0);
8a8c69c3
PZ
2491
2492 rq_unpin_lock(dst_rq, &drf);
2493 rq_unpin_lock(src_rq, &srf);
2494
ac66f547
PZ
2495 } else {
2496 /*
2497 * Task isn't running anymore; make it appear like we migrated
2498 * it before it went to sleep. This means on wakeup we make the
d1ccc66d 2499 * previous CPU our target instead of where it really is.
ac66f547
PZ
2500 */
2501 p->wake_cpu = cpu;
2502 }
2503}
2504
2505struct migration_swap_arg {
2506 struct task_struct *src_task, *dst_task;
2507 int src_cpu, dst_cpu;
2508};
2509
2510static int migrate_swap_stop(void *data)
2511{
2512 struct migration_swap_arg *arg = data;
2513 struct rq *src_rq, *dst_rq;
2514 int ret = -EAGAIN;
2515
62694cd5
PZ
2516 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
2517 return -EAGAIN;
2518
ac66f547
PZ
2519 src_rq = cpu_rq(arg->src_cpu);
2520 dst_rq = cpu_rq(arg->dst_cpu);
2521
74602315
PZ
2522 double_raw_lock(&arg->src_task->pi_lock,
2523 &arg->dst_task->pi_lock);
ac66f547 2524 double_rq_lock(src_rq, dst_rq);
62694cd5 2525
ac66f547
PZ
2526 if (task_cpu(arg->dst_task) != arg->dst_cpu)
2527 goto unlock;
2528
2529 if (task_cpu(arg->src_task) != arg->src_cpu)
2530 goto unlock;
2531
3bd37062 2532 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
ac66f547
PZ
2533 goto unlock;
2534
3bd37062 2535 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
ac66f547
PZ
2536 goto unlock;
2537
2538 __migrate_swap_task(arg->src_task, arg->dst_cpu);
2539 __migrate_swap_task(arg->dst_task, arg->src_cpu);
2540
2541 ret = 0;
2542
2543unlock:
2544 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
2545 raw_spin_unlock(&arg->dst_task->pi_lock);
2546 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
2547
2548 return ret;
2549}
2550
2551/*
2552 * Cross migrate two tasks
2553 */
0ad4e3df
SD
2554int migrate_swap(struct task_struct *cur, struct task_struct *p,
2555 int target_cpu, int curr_cpu)
ac66f547
PZ
2556{
2557 struct migration_swap_arg arg;
2558 int ret = -EINVAL;
2559
ac66f547
PZ
2560 arg = (struct migration_swap_arg){
2561 .src_task = cur,
0ad4e3df 2562 .src_cpu = curr_cpu,
ac66f547 2563 .dst_task = p,
0ad4e3df 2564 .dst_cpu = target_cpu,
ac66f547
PZ
2565 };
2566
2567 if (arg.src_cpu == arg.dst_cpu)
2568 goto out;
2569
6acce3ef
PZ
2570 /*
2571 * These three tests are all lockless; this is OK since all of them
2572 * will be re-checked with proper locks held further down the line.
2573 */
ac66f547
PZ
2574 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
2575 goto out;
2576
3bd37062 2577 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
ac66f547
PZ
2578 goto out;
2579
3bd37062 2580 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
ac66f547
PZ
2581 goto out;
2582
286549dc 2583 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
2584 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
2585
2586out:
ac66f547
PZ
2587 return ret;
2588}
0ad4e3df 2589#endif /* CONFIG_NUMA_BALANCING */
ac66f547 2590
1da177e4
LT
2591/*
2592 * wait_task_inactive - wait for a thread to unschedule.
2593 *
85ba2d86
RM
2594 * If @match_state is nonzero, it's the @p->state value just checked and
2595 * not expected to change. If it changes, i.e. @p might have woken up,
2596 * then return zero. When we succeed in waiting for @p to be off its CPU,
2597 * we return a positive number (its total switch count). If a second call
2598 * a short while later returns the same number, the caller can be sure that
2599 * @p has remained unscheduled the whole time.
2600 *
1da177e4
LT
2601 * The caller must ensure that the task *will* unschedule sometime soon,
2602 * else this function might spin for a *long* time. This function can't
2603 * be called with interrupts off, or it may introduce deadlock with
2604 * smp_call_function() if an IPI is sent by the same process we are
2605 * waiting to become inactive.
2606 */
85ba2d86 2607unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4 2608{
da0c1e65 2609 int running, queued;
eb580751 2610 struct rq_flags rf;
85ba2d86 2611 unsigned long ncsw;
70b97a7f 2612 struct rq *rq;
1da177e4 2613
3a5c359a
AK
2614 for (;;) {
2615 /*
2616 * We do the initial early heuristics without holding
2617 * any task-queue locks at all. We'll only try to get
2618 * the runqueue lock when things look like they will
2619 * work out!
2620 */
2621 rq = task_rq(p);
fa490cfd 2622
3a5c359a
AK
2623 /*
2624 * If the task is actively running on another CPU
2625 * still, just relax and busy-wait without holding
2626 * any locks.
2627 *
 2628 * NOTE! Since we don't hold any locks, it's not
 2629 * even certain that "rq" stays the right runqueue!
2630 * But we don't care, since "task_running()" will
2631 * return false if the runqueue has changed and p
2632 * is actually now running somewhere else!
2633 */
85ba2d86
RM
2634 while (task_running(rq, p)) {
2635 if (match_state && unlikely(p->state != match_state))
2636 return 0;
3a5c359a 2637 cpu_relax();
85ba2d86 2638 }
fa490cfd 2639
3a5c359a
AK
2640 /*
2641 * Ok, time to look more closely! We need the rq
2642 * lock now, to be *sure*. If we're wrong, we'll
2643 * just go back and repeat.
2644 */
eb580751 2645 rq = task_rq_lock(p, &rf);
27a9da65 2646 trace_sched_wait_task(p);
3a5c359a 2647 running = task_running(rq, p);
da0c1e65 2648 queued = task_on_rq_queued(p);
85ba2d86 2649 ncsw = 0;
f31e11d8 2650 if (!match_state || p->state == match_state)
93dcf55f 2651 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
eb580751 2652 task_rq_unlock(rq, p, &rf);
fa490cfd 2653
85ba2d86
RM
2654 /*
2655 * If it changed from the expected state, bail out now.
2656 */
2657 if (unlikely(!ncsw))
2658 break;
2659
3a5c359a
AK
2660 /*
2661 * Was it really running after all now that we
2662 * checked with the proper locks actually held?
2663 *
2664 * Oops. Go back and try again..
2665 */
2666 if (unlikely(running)) {
2667 cpu_relax();
2668 continue;
2669 }
fa490cfd 2670
3a5c359a
AK
2671 /*
2672 * It's not enough that it's not actively running,
2673 * it must be off the runqueue _entirely_, and not
2674 * preempted!
2675 *
80dd99b3 2676 * So if it was still runnable (but just not actively
3a5c359a
AK
2677 * running right now), it's preempted, and we should
2678 * yield - it could be a while.
2679 */
da0c1e65 2680 if (unlikely(queued)) {
8b0e1953 2681 ktime_t to = NSEC_PER_SEC / HZ;
8eb90c30
TG
2682
2683 set_current_state(TASK_UNINTERRUPTIBLE);
2684 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
2685 continue;
2686 }
fa490cfd 2687
3a5c359a
AK
2688 /*
2689 * Ahh, all good. It wasn't running, and it wasn't
2690 * runnable, which means that it will never become
2691 * running in the future either. We're all done!
2692 */
2693 break;
2694 }
85ba2d86
RM
2695
2696 return ncsw;
1da177e4
LT
2697}
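/*
 * Editor's sketch (illustration, not code from this file): using the returned
 * switch count as a cookie across two calls, in the spirit of the ptrace
 * attach path. The variable names are made up and the snippet is not compiled.
 */
#if 0	/* illustration only */
	unsigned long ncsw, ncsw2;

	ncsw = wait_task_inactive(child, __TASK_TRACED);
	if (!ncsw)
		return -ESRCH;		/* state changed; it may have woken up */

	/* ... work that must not race with @child getting back on a CPU ... */

	ncsw2 = wait_task_inactive(child, __TASK_TRACED);
	if (ncsw2 != ncsw)
		return -ESRCH;		/* it ran in between; start over */
#endif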
2698
2699/***
2700 * kick_process - kick a running thread to enter/exit the kernel
2701 * @p: the to-be-kicked thread
2702 *
2703 * Cause a process which is running on another CPU to enter
2704 * kernel-mode, without any delay. (to get signals handled.)
2705 *
25985edc 2706 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
2707 * because all it wants to ensure is that the remote task enters
2708 * the kernel. If the IPI races and the task has been migrated
2709 * to another CPU then no harm is done and the purpose has been
2710 * achieved as well.
2711 */
36c8b586 2712void kick_process(struct task_struct *p)
1da177e4
LT
2713{
2714 int cpu;
2715
2716 preempt_disable();
2717 cpu = task_cpu(p);
2718 if ((cpu != smp_processor_id()) && task_curr(p))
2719 smp_send_reschedule(cpu);
2720 preempt_enable();
2721}
b43e3521 2722EXPORT_SYMBOL_GPL(kick_process);
1da177e4 2723
30da688e 2724/*
3bd37062 2725 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
e9d867a6
PZI
2726 *
2727 * A few notes on cpu_active vs cpu_online:
2728 *
2729 * - cpu_active must be a subset of cpu_online
2730 *
97fb7a0a 2731 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
e9d867a6 2732 * see __set_cpus_allowed_ptr(). At this point the newly online
d1ccc66d 2733 * CPU isn't yet part of the sched domains, and balancing will not
e9d867a6
PZI
2734 * see it.
2735 *
d1ccc66d 2736 * - on CPU-down we clear cpu_active() to mask the sched domains and
e9d867a6 2737 * avoid the load balancer to place new tasks on the to be removed
d1ccc66d 2738 * CPU. Existing tasks will remain running there and will be taken
e9d867a6
PZI
2739 * off.
2740 *
2741 * This means that fallback selection must not select !active CPUs.
2742 * And can assume that any active CPU must be online. Conversely
2743 * select_task_rq() below may allow selection of !active CPUs in order
2744 * to satisfy the above rules.
30da688e 2745 */
5da9a0fb
PZ
2746static int select_fallback_rq(int cpu, struct task_struct *p)
2747{
aa00d89c
TC
2748 int nid = cpu_to_node(cpu);
2749 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
2750 enum { cpuset, possible, fail } state = cpuset;
2751 int dest_cpu;
5da9a0fb 2752
aa00d89c 2753 /*
d1ccc66d
IM
2754 * If the node that the CPU is on has been offlined, cpu_to_node()
2755 * will return -1. There is no CPU on the node, and we should
 2756 * select a CPU on another node.
aa00d89c
TC
2757 */
2758 if (nid != -1) {
2759 nodemask = cpumask_of_node(nid);
2760
2761 /* Look for allowed, online CPU in same node. */
2762 for_each_cpu(dest_cpu, nodemask) {
aa00d89c
TC
2763 if (!cpu_active(dest_cpu))
2764 continue;
3bd37062 2765 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
aa00d89c
TC
2766 return dest_cpu;
2767 }
2baab4e9 2768 }
5da9a0fb 2769
2baab4e9
PZ
2770 for (;;) {
2771 /* Any allowed, online CPU? */
3bd37062 2772 for_each_cpu(dest_cpu, p->cpus_ptr) {
175f0e25 2773 if (!is_cpu_allowed(p, dest_cpu))
2baab4e9 2774 continue;
175f0e25 2775
2baab4e9
PZ
2776 goto out;
2777 }
5da9a0fb 2778
e73e85f0 2779 /* No more Mr. Nice Guy. */
2baab4e9
PZ
2780 switch (state) {
2781 case cpuset:
e73e85f0
ON
2782 if (IS_ENABLED(CONFIG_CPUSETS)) {
2783 cpuset_cpus_allowed_fallback(p);
2784 state = possible;
2785 break;
2786 }
df561f66 2787 fallthrough;
2baab4e9 2788 case possible:
af449901
PZ
2789 /*
2790 * XXX When called from select_task_rq() we only
2791 * hold p->pi_lock and again violate locking order.
2792 *
2793 * More yuck to audit.
2794 */
2baab4e9
PZ
2795 do_set_cpus_allowed(p, cpu_possible_mask);
2796 state = fail;
2797 break;
2798
2799 case fail:
2800 BUG();
2801 break;
2802 }
2803 }
2804
2805out:
2806 if (state != cpuset) {
2807 /*
2808 * Don't tell them about moving exiting tasks or
2809 * kernel threads (both mm NULL), since they never
 2810 * leave the kernel.
2811 */
2812 if (p->mm && printk_ratelimit()) {
aac74dc4 2813 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
2814 task_pid_nr(p), p->comm, cpu);
2815 }
5da9a0fb
PZ
2816 }
2817
2818 return dest_cpu;
2819}
2820
e2912009 2821/*
3bd37062 2822 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
e2912009 2823 */
970b13ba 2824static inline
3aef1551 2825int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
970b13ba 2826{
cbce1a68
PZ
2827 lockdep_assert_held(&p->pi_lock);
2828
af449901 2829 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3aef1551 2830 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
e9d867a6 2831 else
3bd37062 2832 cpu = cpumask_any(p->cpus_ptr);
e2912009
PZ
2833
2834 /*
2835 * In order not to call set_task_cpu() on a blocking task we need
3bd37062 2836 * to rely on ttwu() to place the task on a valid ->cpus_ptr
d1ccc66d 2837 * CPU.
e2912009
PZ
2838 *
2839 * Since this is common to all placement strategies, this lives here.
2840 *
2841 * [ this allows ->select_task() to simply return task_cpu(p) and
2842 * not worry about this generic constraint ]
2843 */
7af443ee 2844 if (unlikely(!is_cpu_allowed(p, cpu)))
5da9a0fb 2845 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
2846
2847 return cpu;
970b13ba 2848}
09a40af5 2849
f5832c19
NP
2850void sched_set_stop_task(int cpu, struct task_struct *stop)
2851{
ded467dc 2852 static struct lock_class_key stop_pi_lock;
f5832c19
NP
2853 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2854 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2855
2856 if (stop) {
2857 /*
 2858 * Make it appear like a SCHED_FIFO task; it's something
2859 * userspace knows about and won't get confused about.
2860 *
2861 * Also, it will make PI more or less work without too
2862 * much confusion -- but then, stop work should not
2863 * rely on PI working anyway.
2864 */
2865 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2866
2867 stop->sched_class = &stop_sched_class;
ded467dc
PZ
2868
2869 /*
2870 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
2871 * adjust the effective priority of a task. As a result,
2872 * rt_mutex_setprio() can trigger (RT) balancing operations,
2873 * which can then trigger wakeups of the stop thread to push
2874 * around the current task.
2875 *
2876 * The stop task itself will never be part of the PI-chain, it
2877 * never blocks, therefore that ->pi_lock recursion is safe.
2878 * Tell lockdep about this by placing the stop->pi_lock in its
2879 * own class.
2880 */
2881 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
f5832c19
NP
2882 }
2883
2884 cpu_rq(cpu)->stop = stop;
2885
2886 if (old_stop) {
2887 /*
2888 * Reset it back to a normal scheduling class so that
2889 * it can die in pieces.
2890 */
2891 old_stop->sched_class = &rt_sched_class;
2892 }
2893}
2894
74d862b6 2895#else /* CONFIG_SMP */
25834c73
PZ
2896
2897static inline int __set_cpus_allowed_ptr(struct task_struct *p,
9cfc3e18
PZ
2898 const struct cpumask *new_mask,
2899 u32 flags)
25834c73
PZ
2900{
2901 return set_cpus_allowed_ptr(p, new_mask);
2902}
2903
af449901
PZ
2904static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
2905
3015ef4b
TG
2906static inline bool rq_has_pinned_tasks(struct rq *rq)
2907{
2908 return false;
2909}
2910
74d862b6 2911#endif /* !CONFIG_SMP */
970b13ba 2912
d7c01d27 2913static void
b84cb5df 2914ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 2915{
4fa8d299 2916 struct rq *rq;
b84cb5df 2917
4fa8d299
JP
2918 if (!schedstat_enabled())
2919 return;
2920
2921 rq = this_rq();
d7c01d27 2922
4fa8d299
JP
2923#ifdef CONFIG_SMP
2924 if (cpu == rq->cpu) {
b85c8b71
PZ
2925 __schedstat_inc(rq->ttwu_local);
2926 __schedstat_inc(p->se.statistics.nr_wakeups_local);
d7c01d27
PZ
2927 } else {
2928 struct sched_domain *sd;
2929
b85c8b71 2930 __schedstat_inc(p->se.statistics.nr_wakeups_remote);
057f3fad 2931 rcu_read_lock();
4fa8d299 2932 for_each_domain(rq->cpu, sd) {
d7c01d27 2933 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
b85c8b71 2934 __schedstat_inc(sd->ttwu_wake_remote);
d7c01d27
PZ
2935 break;
2936 }
2937 }
057f3fad 2938 rcu_read_unlock();
d7c01d27 2939 }
f339b9dc
PZ
2940
2941 if (wake_flags & WF_MIGRATED)
b85c8b71 2942 __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
d7c01d27
PZ
2943#endif /* CONFIG_SMP */
2944
b85c8b71
PZ
2945 __schedstat_inc(rq->ttwu_count);
2946 __schedstat_inc(p->se.statistics.nr_wakeups);
d7c01d27
PZ
2947
2948 if (wake_flags & WF_SYNC)
b85c8b71 2949 __schedstat_inc(p->se.statistics.nr_wakeups_sync);
d7c01d27
PZ
2950}
2951
23f41eeb
PZ
2952/*
2953 * Mark the task runnable and perform wakeup-preemption.
2954 */
e7904a28 2955static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2956 struct rq_flags *rf)
9ed3811a 2957{
9ed3811a 2958 check_preempt_curr(rq, p, wake_flags);
9ed3811a 2959 p->state = TASK_RUNNING;
fbd705a0
PZ
2960 trace_sched_wakeup(p);
2961
9ed3811a 2962#ifdef CONFIG_SMP
4c9a4bc8
PZ
2963 if (p->sched_class->task_woken) {
2964 /*
b19a888c 2965 * Our task @p is fully woken up and running; so it's safe to
cbce1a68 2966 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 2967 */
d8ac8971 2968 rq_unpin_lock(rq, rf);
9ed3811a 2969 p->sched_class->task_woken(rq, p);
d8ac8971 2970 rq_repin_lock(rq, rf);
4c9a4bc8 2971 }
9ed3811a 2972
e69c6341 2973 if (rq->idle_stamp) {
78becc27 2974 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 2975 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 2976
abfafa54
JL
2977 update_avg(&rq->avg_idle, delta);
2978
2979 if (rq->avg_idle > max)
9ed3811a 2980 rq->avg_idle = max;
abfafa54 2981
9ed3811a
TH
2982 rq->idle_stamp = 0;
2983 }
2984#endif
2985}
2986
c05fbafb 2987static void
e7904a28 2988ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2989 struct rq_flags *rf)
c05fbafb 2990{
77558e4d 2991 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
b5179ac7 2992
cbce1a68
PZ
2993 lockdep_assert_held(&rq->lock);
2994
c05fbafb
PZ
2995 if (p->sched_contributes_to_load)
2996 rq->nr_uninterruptible--;
b5179ac7 2997
dbfb089d 2998#ifdef CONFIG_SMP
b5179ac7 2999 if (wake_flags & WF_MIGRATED)
59efa0ba 3000 en_flags |= ENQUEUE_MIGRATED;
ec618b84 3001 else
c05fbafb 3002#endif
ec618b84
PZ
3003 if (p->in_iowait) {
3004 delayacct_blkio_end(p);
3005 atomic_dec(&task_rq(p)->nr_iowait);
3006 }
c05fbafb 3007
1b174a2c 3008 activate_task(rq, p, en_flags);
d8ac8971 3009 ttwu_do_wakeup(rq, p, wake_flags, rf);
c05fbafb
PZ
3010}
3011
3012/*
58877d34
PZ
3013 * Consider @p being inside a wait loop:
3014 *
3015 * for (;;) {
3016 * set_current_state(TASK_UNINTERRUPTIBLE);
3017 *
3018 * if (CONDITION)
3019 * break;
3020 *
3021 * schedule();
3022 * }
3023 * __set_current_state(TASK_RUNNING);
3024 *
3025 * between set_current_state() and schedule(). In this case @p is still
3026 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3027 * an atomic manner.
3028 *
3029 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3030 * then schedule() must still happen and p->state can be changed to
3031 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3032 * need to do a full wakeup with enqueue.
3033 *
3034 * Returns: %true when the wakeup is done,
3035 * %false otherwise.
c05fbafb 3036 */
58877d34 3037static int ttwu_runnable(struct task_struct *p, int wake_flags)
c05fbafb 3038{
eb580751 3039 struct rq_flags rf;
c05fbafb
PZ
3040 struct rq *rq;
3041 int ret = 0;
3042
eb580751 3043 rq = __task_rq_lock(p, &rf);
da0c1e65 3044 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
3045 /* check_preempt_curr() may use rq clock */
3046 update_rq_clock(rq);
d8ac8971 3047 ttwu_do_wakeup(rq, p, wake_flags, &rf);
c05fbafb
PZ
3048 ret = 1;
3049 }
eb580751 3050 __task_rq_unlock(rq, &rf);
c05fbafb
PZ
3051
3052 return ret;
3053}
3054
317f3941 3055#ifdef CONFIG_SMP
a1488664 3056void sched_ttwu_pending(void *arg)
317f3941 3057{
a1488664 3058 struct llist_node *llist = arg;
317f3941 3059 struct rq *rq = this_rq();
73215849 3060 struct task_struct *p, *t;
d8ac8971 3061 struct rq_flags rf;
317f3941 3062
e3baac47
PZ
3063 if (!llist)
3064 return;
3065
126c2092
PZ
3066 /*
 3067 * rq::ttwu_pending is a racy indication of outstanding wakeups.
 3068 * Races such that false negatives are possible, since they
 3069 * are shorter-lived than false positives would be.
3070 */
3071 WRITE_ONCE(rq->ttwu_pending, 0);
3072
8a8c69c3 3073 rq_lock_irqsave(rq, &rf);
77558e4d 3074 update_rq_clock(rq);
317f3941 3075
8c4890d1 3076 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
b6e13e85
PZ
3077 if (WARN_ON_ONCE(p->on_cpu))
3078 smp_cond_load_acquire(&p->on_cpu, !VAL);
3079
3080 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3081 set_task_cpu(p, cpu_of(rq));
3082
73215849 3083 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
b6e13e85 3084 }
317f3941 3085
8a8c69c3 3086 rq_unlock_irqrestore(rq, &rf);
317f3941
PZ
3087}
3088
b2a02fc4 3089void send_call_function_single_ipi(int cpu)
317f3941 3090{
b2a02fc4 3091 struct rq *rq = cpu_rq(cpu);
ca38062e 3092
b2a02fc4
PZ
3093 if (!set_nr_if_polling(rq->idle))
3094 arch_send_call_function_single_ipi(cpu);
3095 else
3096 trace_sched_wake_idle_without_ipi(cpu);
317f3941
PZ
3097}
3098
2ebb1771
MG
3099/*
 3100 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
 3101 * necessary. The wakee CPU on receipt of the IPI will queue the task
 3102 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3103 * of the wakeup instead of the waker.
3104 */
3105static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
317f3941 3106{
e3baac47
PZ
3107 struct rq *rq = cpu_rq(cpu);
3108
b7e7ade3
PZ
3109 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3110
126c2092 3111 WRITE_ONCE(rq->ttwu_pending, 1);
8c4890d1 3112 __smp_call_single_queue(cpu, &p->wake_entry.llist);
317f3941 3113}
d6aa8f85 3114
f6be8af1
CL
3115void wake_up_if_idle(int cpu)
3116{
3117 struct rq *rq = cpu_rq(cpu);
8a8c69c3 3118 struct rq_flags rf;
f6be8af1 3119
fd7de1e8
AL
3120 rcu_read_lock();
3121
3122 if (!is_idle_task(rcu_dereference(rq->curr)))
3123 goto out;
f6be8af1
CL
3124
3125 if (set_nr_if_polling(rq->idle)) {
3126 trace_sched_wake_idle_without_ipi(cpu);
3127 } else {
8a8c69c3 3128 rq_lock_irqsave(rq, &rf);
f6be8af1
CL
3129 if (is_idle_task(rq->curr))
3130 smp_send_reschedule(cpu);
d1ccc66d 3131 /* Else CPU is not idle, do nothing here: */
8a8c69c3 3132 rq_unlock_irqrestore(rq, &rf);
f6be8af1 3133 }
fd7de1e8
AL
3134
3135out:
3136 rcu_read_unlock();
f6be8af1
CL
3137}
3138
39be3501 3139bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
3140{
3141 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3142}
c6e7bd7a 3143
2ebb1771
MG
3144static inline bool ttwu_queue_cond(int cpu, int wake_flags)
3145{
5ba2ffba
PZ
3146 /*
3147 * Do not complicate things with the async wake_list while the CPU is
3148 * in hotplug state.
3149 */
3150 if (!cpu_active(cpu))
3151 return false;
3152
2ebb1771
MG
3153 /*
3154 * If the CPU does not share cache, then queue the task on the
 3155 * remote rq's wakelist to avoid accessing remote data.
3156 */
3157 if (!cpus_share_cache(smp_processor_id(), cpu))
3158 return true;
3159
3160 /*
3161 * If the task is descheduling and the only running task on the
3162 * CPU then use the wakelist to offload the task activation to
3163 * the soon-to-be-idle CPU as the current CPU is likely busy.
3164 * nr_running is checked to avoid unnecessary task stacking.
3165 */
739f70b4 3166 if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
2ebb1771
MG
3167 return true;
3168
3169 return false;
3170}
3171
3172static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
c6e7bd7a 3173{
2ebb1771 3174 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
b6e13e85
PZ
3175 if (WARN_ON_ONCE(cpu == smp_processor_id()))
3176 return false;
3177
c6e7bd7a 3178 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
2ebb1771 3179 __ttwu_queue_wakelist(p, cpu, wake_flags);
c6e7bd7a
PZ
3180 return true;
3181 }
3182
3183 return false;
3184}
58877d34
PZ
3185
3186#else /* !CONFIG_SMP */
3187
3188static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3189{
3190 return false;
3191}
3192
d6aa8f85 3193#endif /* CONFIG_SMP */
317f3941 3194
b5179ac7 3195static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
c05fbafb
PZ
3196{
3197 struct rq *rq = cpu_rq(cpu);
d8ac8971 3198 struct rq_flags rf;
c05fbafb 3199
2ebb1771 3200 if (ttwu_queue_wakelist(p, cpu, wake_flags))
317f3941 3201 return;
317f3941 3202
8a8c69c3 3203 rq_lock(rq, &rf);
77558e4d 3204 update_rq_clock(rq);
d8ac8971 3205 ttwu_do_activate(rq, p, wake_flags, &rf);
8a8c69c3 3206 rq_unlock(rq, &rf);
9ed3811a
TH
3207}
3208
8643cda5
PZ
3209/*
3210 * Notes on Program-Order guarantees on SMP systems.
3211 *
3212 * MIGRATION
3213 *
3214 * The basic program-order guarantee on SMP systems is that when a task [t]
d1ccc66d
IM
3215 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3216 * execution on its new CPU [c1].
8643cda5
PZ
3217 *
3218 * For migration (of runnable tasks) this is provided by the following means:
3219 *
3220 * A) UNLOCK of the rq(c0)->lock scheduling out task t
3221 * B) migration for t is required to synchronize *both* rq(c0)->lock and
3222 * rq(c1)->lock (if not at the same time, then in that order).
3223 * C) LOCK of the rq(c1)->lock scheduling in task
3224 *
7696f991 3225 * Release/acquire chaining guarantees that B happens after A and C after B.
d1ccc66d 3226 * Note: the CPU doing B need not be c0 or c1
8643cda5
PZ
3227 *
3228 * Example:
3229 *
3230 * CPU0 CPU1 CPU2
3231 *
3232 * LOCK rq(0)->lock
3233 * sched-out X
3234 * sched-in Y
3235 * UNLOCK rq(0)->lock
3236 *
3237 * LOCK rq(0)->lock // orders against CPU0
3238 * dequeue X
3239 * UNLOCK rq(0)->lock
3240 *
3241 * LOCK rq(1)->lock
3242 * enqueue X
3243 * UNLOCK rq(1)->lock
3244 *
3245 * LOCK rq(1)->lock // orders against CPU2
3246 * sched-out Z
3247 * sched-in X
3248 * UNLOCK rq(1)->lock
3249 *
3250 *
3251 * BLOCKING -- aka. SLEEP + WAKEUP
3252 *
3253 * For blocking we (obviously) need to provide the same guarantee as for
3254 * migration. However the means are completely different as there is no lock
3255 * chain to provide order. Instead we do:
3256 *
58877d34
PZ
3257 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
3258 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
8643cda5
PZ
3259 *
3260 * Example:
3261 *
3262 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
3263 *
3264 * LOCK rq(0)->lock LOCK X->pi_lock
3265 * dequeue X
3266 * sched-out X
3267 * smp_store_release(X->on_cpu, 0);
3268 *
1f03e8d2 3269 * smp_cond_load_acquire(&X->on_cpu, !VAL);
8643cda5
PZ
3270 * X->state = WAKING
3271 * set_task_cpu(X,2)
3272 *
3273 * LOCK rq(2)->lock
3274 * enqueue X
3275 * X->state = RUNNING
3276 * UNLOCK rq(2)->lock
3277 *
3278 * LOCK rq(2)->lock // orders against CPU1
3279 * sched-out Z
3280 * sched-in X
3281 * UNLOCK rq(2)->lock
3282 *
3283 * UNLOCK X->pi_lock
3284 * UNLOCK rq(0)->lock
3285 *
3286 *
7696f991
AP
3287 * However, for wakeups there is a second guarantee we must provide, namely we
3288 * must ensure that CONDITION=1 done by the caller can not be reordered with
3289 * accesses to the task state; see try_to_wake_up() and set_current_state().
8643cda5
PZ
3290 */
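Editor's aside: a minimal, runnable userspace analogue of the BLOCKING case
above, with C11 atomics standing in for the kernel's smp_store_release() and
smp_cond_load_acquire(); all names below are invented for illustration:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int on_cpu = 1;	/* "task" still running on its old CPU */
static int task_state;		/* ordinary data published by the release store */

static void *sched_out(void *arg)	/* plays finish_task() on CPU0 */
{
	task_state = 42;					/* prior activity ... */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);	/* ... published here */
	return NULL;
}

static void *waker(void *arg)		/* plays try_to_wake_up() */
{
	/* equivalent of smp_cond_load_acquire(&X->on_cpu, !VAL) */
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;
	printf("saw task_state=%d\n", task_state);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sched_out, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}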
3291
9ed3811a 3292/**
1da177e4 3293 * try_to_wake_up - wake up a thread
9ed3811a 3294 * @p: the thread to be awakened
1da177e4 3295 * @state: the mask of task states that can be woken
9ed3811a 3296 * @wake_flags: wake modifier flags (WF_*)
1da177e4 3297 *
58877d34
PZ
3298 * Conceptually does:
3299 *
3300 * If (@state & @p->state) @p->state = TASK_RUNNING.
1da177e4 3301 *
a2250238
PZ
3302 * If the task was not queued/runnable, also place it back on a runqueue.
3303 *
58877d34
PZ
3304 * This function is atomic against schedule() which would dequeue the task.
3305 *
3306 * It issues a full memory barrier before accessing @p->state, see the comment
3307 * with set_current_state().
a2250238 3308 *
58877d34 3309 * Uses p->pi_lock to serialize against concurrent wake-ups.
a2250238 3310 *
58877d34
PZ
3311 * Relies on p->pi_lock stabilizing:
3312 * - p->sched_class
3313 * - p->cpus_ptr
3314 * - p->sched_task_group
3315 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
3316 *
3317 * Tries really hard to only take one task_rq(p)->lock for performance.
3318 * Takes rq->lock in:
3319 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
3320 * - ttwu_queue() -- new rq, for enqueue of the task;
3321 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
3322 *
3323 * As a consequence we race really badly with just about everything. See the
3324 * many memory barriers and their comments for details.
7696f991 3325 *
a2250238
PZ
3326 * Return: %true if @p->state changes (an actual wakeup was done),
3327 * %false otherwise.
1da177e4 3328 */
e4a52bcb
PZ
3329static int
3330try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 3331{
1da177e4 3332 unsigned long flags;
c05fbafb 3333 int cpu, success = 0;
2398f2c6 3334
e3d85487 3335 preempt_disable();
aacedf26
PZ
3336 if (p == current) {
3337 /*
3338 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
3339 * == smp_processor_id()'. Together this means we can special
58877d34 3340 * case the whole 'p->on_rq && ttwu_runnable()' case below
aacedf26
PZ
3341 * without taking any locks.
3342 *
3343 * In particular:
3344 * - we rely on Program-Order guarantees for all the ordering,
3345 * - we're serialized against set_special_state() by virtue of
3346 * it disabling IRQs (this allows not taking ->pi_lock).
3347 */
3348 if (!(p->state & state))
e3d85487 3349 goto out;
aacedf26
PZ
3350
3351 success = 1;
aacedf26
PZ
3352 trace_sched_waking(p);
3353 p->state = TASK_RUNNING;
3354 trace_sched_wakeup(p);
3355 goto out;
3356 }
3357
e0acd0a6
ON
3358 /*
3359 * If we are going to wake up a thread waiting for CONDITION we
3360 * need to ensure that CONDITION=1 done by the caller can not be
58877d34
PZ
3361 * reordered with p->state check below. This pairs with smp_store_mb()
3362 * in set_current_state() that the waiting thread does.
e0acd0a6 3363 */
013fdb80 3364 raw_spin_lock_irqsave(&p->pi_lock, flags);
d89e588c 3365 smp_mb__after_spinlock();
e9c84311 3366 if (!(p->state & state))
aacedf26 3367 goto unlock;
1da177e4 3368
fbd705a0
PZ
3369 trace_sched_waking(p);
3370
d1ccc66d
IM
3371 /* We're going to change ->state: */
3372 success = 1;
1da177e4 3373
135e8c92
BS
3374 /*
3375 * Ensure we load p->on_rq _after_ p->state, otherwise it would
3376 * be possible to, falsely, observe p->on_rq == 0 and get stuck
3377 * in smp_cond_load_acquire() below.
3378 *
3d85b270
AP
3379 * sched_ttwu_pending() try_to_wake_up()
3380 * STORE p->on_rq = 1 LOAD p->state
3381 * UNLOCK rq->lock
3382 *
3383 * __schedule() (switch to task 'p')
3384 * LOCK rq->lock smp_rmb();
3385 * smp_mb__after_spinlock();
3386 * UNLOCK rq->lock
135e8c92
BS
3387 *
3388 * [task p]
3d85b270 3389 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
135e8c92 3390 *
3d85b270
AP
3391 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3392 * __schedule(). See the comment for smp_mb__after_spinlock().
2beaf328
PM
3393 *
 3394 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
135e8c92
BS
3395 */
3396 smp_rmb();
58877d34 3397 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
aacedf26 3398 goto unlock;
1da177e4 3399
1da177e4 3400#ifdef CONFIG_SMP
ecf7d01c
PZ
3401 /*
3402 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
3403 * possible to, falsely, observe p->on_cpu == 0.
3404 *
3405 * One must be running (->on_cpu == 1) in order to remove oneself
3406 * from the runqueue.
3407 *
3d85b270
AP
3408 * __schedule() (switch to task 'p') try_to_wake_up()
3409 * STORE p->on_cpu = 1 LOAD p->on_rq
3410 * UNLOCK rq->lock
3411 *
3412 * __schedule() (put 'p' to sleep)
3413 * LOCK rq->lock smp_rmb();
3414 * smp_mb__after_spinlock();
3415 * STORE p->on_rq = 0 LOAD p->on_cpu
ecf7d01c 3416 *
3d85b270
AP
3417 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3418 * __schedule(). See the comment for smp_mb__after_spinlock().
dbfb089d
PZ
3419 *
3420 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
3421 * schedule()'s deactivate_task() has 'happened' and p will no longer
 3422 * care about its own p->state. See the comment in __schedule().
ecf7d01c 3423 */
dbfb089d
PZ
3424 smp_acquire__after_ctrl_dep();
3425
3426 /*
3427 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
3428 * == 0), which means we need to do an enqueue, change p->state to
3429 * TASK_WAKING such that we can unlock p->pi_lock before doing the
3430 * enqueue, such as ttwu_queue_wakelist().
3431 */
3432 p->state = TASK_WAKING;
ecf7d01c 3433
c6e7bd7a
PZ
3434 /*
3435 * If the owning (remote) CPU is still in the middle of schedule() with
 3436 * this task as prev, consider queueing p on the remote CPU's wake_list
3437 * which potentially sends an IPI instead of spinning on p->on_cpu to
3438 * let the waker make forward progress. This is safe because IRQs are
3439 * disabled and the IPI will deliver after on_cpu is cleared.
b6e13e85
PZ
3440 *
3441 * Ensure we load task_cpu(p) after p->on_cpu:
3442 *
3443 * set_task_cpu(p, cpu);
3444 * STORE p->cpu = @cpu
3445 * __schedule() (switch to task 'p')
3446 * LOCK rq->lock
3447 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
3448 * STORE p->on_cpu = 1 LOAD p->cpu
3449 *
3450 * to ensure we observe the correct CPU on which the task is currently
3451 * scheduling.
c6e7bd7a 3452 */
b6e13e85 3453 if (smp_load_acquire(&p->on_cpu) &&
739f70b4 3454 ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
c6e7bd7a
PZ
3455 goto unlock;
3456
e9c84311 3457 /*
d1ccc66d 3458 * If the owning (remote) CPU is still in the middle of schedule() with
b19a888c 3459 * this task as prev, wait until it's done referencing the task.
b75a2253 3460 *
31cb1bc0 3461 * Pairs with the smp_store_release() in finish_task().
b75a2253
PZ
3462 *
3463 * This ensures that tasks getting woken will be fully ordered against
3464 * their previous state and preserve Program Order.
0970d299 3465 */
1f03e8d2 3466 smp_cond_load_acquire(&p->on_cpu, !VAL);
1da177e4 3467
3aef1551 3468 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
f339b9dc 3469 if (task_cpu(p) != cpu) {
ec618b84
PZ
3470 if (p->in_iowait) {
3471 delayacct_blkio_end(p);
3472 atomic_dec(&task_rq(p)->nr_iowait);
3473 }
3474
f339b9dc 3475 wake_flags |= WF_MIGRATED;
eb414681 3476 psi_ttwu_dequeue(p);
e4a52bcb 3477 set_task_cpu(p, cpu);
f339b9dc 3478 }
b6e13e85
PZ
3479#else
3480 cpu = task_cpu(p);
1da177e4 3481#endif /* CONFIG_SMP */
1da177e4 3482
b5179ac7 3483 ttwu_queue(p, cpu, wake_flags);
aacedf26 3484unlock:
013fdb80 3485 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
aacedf26
PZ
3486out:
3487 if (success)
b6e13e85 3488 ttwu_stat(p, task_cpu(p), wake_flags);
e3d85487 3489 preempt_enable();
1da177e4
LT
3490
3491 return success;
3492}
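
[Editorial aside] The ordering comments above all pair against the usual wait/wake idiom on the sleeping side. A minimal sketch of that pairing, not taken from this file: my_flag and waiter_task are hypothetical names, and the wake side stands in for any wake_up*() caller.

static int my_flag;                        /* hypothetical CONDITION */
static struct task_struct *waiter_task;    /* hypothetical; set when the waiter starts */

static void waiter(void)
{
	for (;;) {
		/*
		 * set_current_state() is an smp_store_mb(): the ->state store
		 * cannot be reordered past the CONDITION load below.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(my_flag))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void waker(void)
{
	WRITE_ONCE(my_flag, 1);        /* CONDITION = 1 ... */
	wake_up_process(waiter_task);  /* ... ordered against the ->state check in ttwu */
}
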
3493
2beaf328
PM
3494/**
3495 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
1b7af295 3496 * @p: Process for which the function is to be invoked, can be @current.
2beaf328
PM
3497 * @func: Function to invoke.
3498 * @arg: Argument to function.
3499 *
3500 * If the specified task can be quickly locked into a definite state
3501 * (either sleeping or on a given runqueue), arrange to keep it in that
3502 * state while invoking @func(@arg). This function can use ->on_rq and
3503 * task_curr() to work out what the state is, if required. Given that
3504 * @func can be invoked with a runqueue lock held, it had better be quite
3505 * lightweight.
3506 *
3507 * Returns:
3508 * @false if the task slipped out from under the locks.
3509 * @true if the task was locked onto a runqueue or is sleeping.
3510 * However, @func can override this by returning @false.
3511 */
3512bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
3513{
2beaf328 3514 struct rq_flags rf;
1b7af295 3515 bool ret = false;
2beaf328
PM
3516 struct rq *rq;
3517
1b7af295 3518 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2beaf328
PM
3519 if (p->on_rq) {
3520 rq = __task_rq_lock(p, &rf);
3521 if (task_rq(p) == rq)
3522 ret = func(p, arg);
3523 rq_unlock(rq, &rf);
3524 } else {
3525 switch (p->state) {
3526 case TASK_RUNNING:
3527 case TASK_WAKING:
3528 break;
3529 default:
3530 smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
3531 if (!p->on_rq)
3532 ret = func(p, arg);
3533 }
3534 }
1b7af295 3535 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2beaf328
PM
3536 return ret;
3537}
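
[Editorial aside] As a rough illustration of the calling convention (not from this file), a caller can probe a task with a lightweight callback along these lines; check_blocked and task_is_blocked are hypothetical names.

static bool check_blocked(struct task_struct *t, void *arg)
{
	/* Runs with p->pi_lock and possibly the rq lock held: keep it cheap. */
	*(bool *)arg = !t->on_rq && t->state == TASK_UNINTERRUPTIBLE;
	return true;	/* report that the task was successfully examined */
}

static bool task_is_blocked(struct task_struct *p)
{
	bool blocked = false;

	if (!try_invoke_on_locked_down_task(p, check_blocked, &blocked))
		return false;	/* task slipped out from under the locks */
	return blocked;
}
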
3538
50fa610a
DH
3539/**
3540 * wake_up_process - Wake up a specific process
3541 * @p: The process to be woken up.
3542 *
3543 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
3544 * processes.
3545 *
3546 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a 3547 *
7696f991 3548 * This function executes a full memory barrier before accessing the task state.
50fa610a 3549 */
7ad5b3a5 3550int wake_up_process(struct task_struct *p)
1da177e4 3551{
9067ac85 3552 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 3553}
1da177e4
LT
3554EXPORT_SYMBOL(wake_up_process);
3555
7ad5b3a5 3556int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
3557{
3558 return try_to_wake_up(p, state, 0);
3559}
3560
1da177e4
LT
3561/*
3562 * Perform scheduler related setup for a newly forked process p.
3563 * p is forked by current.
dd41f596
IM
3564 *
3565 * __sched_fork() is basic setup used by init_idle() too:
3566 */
5e1576ed 3567static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 3568{
fd2f4419
PZ
3569 p->on_rq = 0;
3570
3571 p->se.on_rq = 0;
dd41f596
IM
3572 p->se.exec_start = 0;
3573 p->se.sum_exec_runtime = 0;
f6cf891c 3574 p->se.prev_sum_exec_runtime = 0;
6c594c21 3575 p->se.nr_migrations = 0;
da7a735e 3576 p->se.vruntime = 0;
fd2f4419 3577 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d 3578
ad936d86
BP
3579#ifdef CONFIG_FAIR_GROUP_SCHED
3580 p->se.cfs_rq = NULL;
3581#endif
3582
6cfb0d5d 3583#ifdef CONFIG_SCHEDSTATS
cb251765 3584 /* Even if schedstat is disabled, there should not be garbage */
41acab88 3585 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 3586#endif
476d139c 3587
aab03e05 3588 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 3589 init_dl_task_timer(&p->dl);
209a0cbd 3590 init_dl_inactive_task_timer(&p->dl);
a5e7be3b 3591 __dl_clear_params(p);
aab03e05 3592
fa717060 3593 INIT_LIST_HEAD(&p->rt.run_list);
ff77e468
PZ
3594 p->rt.timeout = 0;
3595 p->rt.time_slice = sched_rr_timeslice;
3596 p->rt.on_rq = 0;
3597 p->rt.on_list = 0;
476d139c 3598
e107be36
AK
3599#ifdef CONFIG_PREEMPT_NOTIFIERS
3600 INIT_HLIST_HEAD(&p->preempt_notifiers);
3601#endif
cbee9f88 3602
5e1f0f09
MG
3603#ifdef CONFIG_COMPACTION
3604 p->capture_control = NULL;
3605#endif
13784475 3606 init_numa_balancing(clone_flags, p);
a1488664 3607#ifdef CONFIG_SMP
8c4890d1 3608 p->wake_entry.u_flags = CSD_TYPE_TTWU;
6d337eab 3609 p->migration_pending = NULL;
a1488664 3610#endif
dd41f596
IM
3611}
3612
2a595721
SD
3613DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
3614
1a687c2e 3615#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 3616
1a687c2e
MG
3617void set_numabalancing_state(bool enabled)
3618{
3619 if (enabled)
2a595721 3620 static_branch_enable(&sched_numa_balancing);
1a687c2e 3621 else
2a595721 3622 static_branch_disable(&sched_numa_balancing);
1a687c2e 3623}
54a43d54
AK
3624
3625#ifdef CONFIG_PROC_SYSCTL
3626int sysctl_numa_balancing(struct ctl_table *table, int write,
32927393 3627 void *buffer, size_t *lenp, loff_t *ppos)
54a43d54
AK
3628{
3629 struct ctl_table t;
3630 int err;
2a595721 3631 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
3632
3633 if (write && !capable(CAP_SYS_ADMIN))
3634 return -EPERM;
3635
3636 t = *table;
3637 t.data = &state;
3638 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3639 if (err < 0)
3640 return err;
3641 if (write)
3642 set_numabalancing_state(state);
3643 return err;
3644}
3645#endif
3646#endif
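
[Editorial aside] The static key above can also be flipped at run time through the kernel.numa_balancing sysctl handled by sysctl_numa_balancing(); a small userspace sketch, assuming the usual /proc/sys path and CAP_SYS_ADMIN as required by the handler above.

#include <fcntl.h>
#include <unistd.h>

static int set_numa_balancing(int enable)
{
	int fd = open("/proc/sys/kernel/numa_balancing", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
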
dd41f596 3647
4698f88c
JP
3648#ifdef CONFIG_SCHEDSTATS
3649
cb251765 3650DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4698f88c 3651static bool __initdata __sched_schedstats = false;
cb251765 3652
cb251765
MG
3653static void set_schedstats(bool enabled)
3654{
3655 if (enabled)
3656 static_branch_enable(&sched_schedstats);
3657 else
3658 static_branch_disable(&sched_schedstats);
3659}
3660
3661void force_schedstat_enabled(void)
3662{
3663 if (!schedstat_enabled()) {
3664 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
3665 static_branch_enable(&sched_schedstats);
3666 }
3667}
3668
3669static int __init setup_schedstats(char *str)
3670{
3671 int ret = 0;
3672 if (!str)
3673 goto out;
3674
4698f88c
JP
3675 /*
3676 * This code is called before jump labels have been set up, so we can't
3677 * change the static branch directly just yet. Instead set a temporary
3678 * variable so init_schedstats() can do it later.
3679 */
cb251765 3680 if (!strcmp(str, "enable")) {
4698f88c 3681 __sched_schedstats = true;
cb251765
MG
3682 ret = 1;
3683 } else if (!strcmp(str, "disable")) {
4698f88c 3684 __sched_schedstats = false;
cb251765
MG
3685 ret = 1;
3686 }
3687out:
3688 if (!ret)
3689 pr_warn("Unable to parse schedstats=\n");
3690
3691 return ret;
3692}
3693__setup("schedstats=", setup_schedstats);
3694
4698f88c
JP
3695static void __init init_schedstats(void)
3696{
3697 set_schedstats(__sched_schedstats);
3698}
3699
cb251765 3700#ifdef CONFIG_PROC_SYSCTL
32927393
CH
3701int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
3702 size_t *lenp, loff_t *ppos)
cb251765
MG
3703{
3704 struct ctl_table t;
3705 int err;
3706 int state = static_branch_likely(&sched_schedstats);
3707
3708 if (write && !capable(CAP_SYS_ADMIN))
3709 return -EPERM;
3710
3711 t = *table;
3712 t.data = &state;
3713 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3714 if (err < 0)
3715 return err;
3716 if (write)
3717 set_schedstats(state);
3718 return err;
3719}
4698f88c
JP
3720#endif /* CONFIG_PROC_SYSCTL */
3721#else /* !CONFIG_SCHEDSTATS */
3722static inline void init_schedstats(void) {}
3723#endif /* CONFIG_SCHEDSTATS */
dd41f596
IM
3724
3725/*
3726 * fork()/clone()-time setup:
3727 */
aab03e05 3728int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 3729{
0122ec5b 3730 unsigned long flags;
dd41f596 3731
5e1576ed 3732 __sched_fork(clone_flags, p);
06b83b5f 3733 /*
7dc603c9 3734 * We mark the process as NEW here. This guarantees that
06b83b5f
PZ
3735 * nobody will actually run it, and a signal or other external
3736 * event cannot wake it up and insert it on the runqueue either.
3737 */
7dc603c9 3738 p->state = TASK_NEW;
dd41f596 3739
c350a04e
MG
3740 /*
3741 * Make sure we do not leak PI boosting priority to the child.
3742 */
3743 p->prio = current->normal_prio;
3744
e8f14172
PB
3745 uclamp_fork(p);
3746
b9dc29e7
MG
3747 /*
3748 * Revert to default priority/policy on fork if requested.
3749 */
3750 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 3751 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 3752 p->policy = SCHED_NORMAL;
6c697bdf 3753 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
3754 p->rt_priority = 0;
3755 } else if (PRIO_TO_NICE(p->static_prio) < 0)
3756 p->static_prio = NICE_TO_PRIO(0);
3757
3758 p->prio = p->normal_prio = __normal_prio(p);
9059393e 3759 set_load_weight(p, false);
6c697bdf 3760
b9dc29e7
MG
3761 /*
3762 * We don't need the reset flag anymore after the fork. It has
3763 * fulfilled its duty:
3764 */
3765 p->sched_reset_on_fork = 0;
3766 }
ca94c442 3767
af0fffd9 3768 if (dl_prio(p->prio))
aab03e05 3769 return -EAGAIN;
af0fffd9 3770 else if (rt_prio(p->prio))
aab03e05 3771 p->sched_class = &rt_sched_class;
af0fffd9 3772 else
2ddbf952 3773 p->sched_class = &fair_sched_class;
b29739f9 3774
7dc603c9 3775 init_entity_runnable_average(&p->se);
cd29fe6f 3776
86951599
PZ
3777 /*
3778 * The child is not yet in the pid-hash so no cgroup attach races,
3779 * and the cgroup is pinned to this child because cgroup_fork()
3780 * is run before sched_fork().
3781 *
3782 * Silence PROVE_RCU.
3783 */
0122ec5b 3784 raw_spin_lock_irqsave(&p->pi_lock, flags);
ce3614da 3785 rseq_migrate(p);
e210bffd 3786 /*
d1ccc66d 3787 * We're setting the CPU for the first time, we don't migrate,
e210bffd
PZ
3788 * so use __set_task_cpu().
3789 */
af0fffd9 3790 __set_task_cpu(p, smp_processor_id());
e210bffd
PZ
3791 if (p->sched_class->task_fork)
3792 p->sched_class->task_fork(p);
0122ec5b 3793 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 3794
f6db8347 3795#ifdef CONFIG_SCHED_INFO
dd41f596 3796 if (likely(sched_info_on()))
52f17b6c 3797 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 3798#endif
3ca7a440
PZ
3799#if defined(CONFIG_SMP)
3800 p->on_cpu = 0;
4866cde0 3801#endif
01028747 3802 init_task_preempt_count(p);
806c09a7 3803#ifdef CONFIG_SMP
917b627d 3804 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 3805 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 3806#endif
aab03e05 3807 return 0;
1da177e4
LT
3808}
3809
13685c4a
QY
3810void sched_post_fork(struct task_struct *p)
3811{
3812 uclamp_post_fork(p);
3813}
3814
332ac17e
DF
3815unsigned long to_ratio(u64 period, u64 runtime)
3816{
3817 if (runtime == RUNTIME_INF)
c52f14d3 3818 return BW_UNIT;
332ac17e
DF
3819
3820 /*
3821 * Doing this here saves a lot of checks in all
3822 * the calling paths, and returning zero seems
3823 * safe for them anyway.
3824 */
3825 if (period == 0)
3826 return 0;
3827
c52f14d3 3828 return div64_u64(runtime << BW_SHIFT, period);
332ac17e
DF
3829}
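
[Editorial aside] A worked example of the fixed-point conversion, assuming the BW_SHIFT of 20 (so BW_UNIT == 1048576) defined in sched.h:

/*
 *   to_ratio(1000000, 950000)
 *     = (950000 << 20) / 1000000
 *     = 996147
 *    ~= 0.95 * BW_UNIT
 *
 * i.e. a period of 1000000 with a runtime of 950000 maps to roughly 95%
 * of the available bandwidth.
 */
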
3830
1da177e4
LT
3831/*
3832 * wake_up_new_task - wake up a newly created task for the first time.
3833 *
3834 * This function will do some initial scheduler statistics housekeeping
3835 * that must be done for every newly created context, then puts the task
3836 * on the runqueue and wakes it.
3837 */
3e51e3ed 3838void wake_up_new_task(struct task_struct *p)
1da177e4 3839{
eb580751 3840 struct rq_flags rf;
dd41f596 3841 struct rq *rq;
fabf318e 3842
eb580751 3843 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
7dc603c9 3844 p->state = TASK_RUNNING;
fabf318e
PZ
3845#ifdef CONFIG_SMP
3846 /*
3847 * Fork balancing, do it here and not earlier because:
3bd37062 3848 * - cpus_ptr can change in the fork path
d1ccc66d 3849 * - any previously selected CPU might disappear through hotplug
e210bffd
PZ
3850 *
3851 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
3852 * as we're not fully set-up yet.
fabf318e 3853 */
32e839dd 3854 p->recent_used_cpu = task_cpu(p);
ce3614da 3855 rseq_migrate(p);
3aef1551 3856 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
0017d735 3857#endif
b7fa30c9 3858 rq = __task_rq_lock(p, &rf);
4126bad6 3859 update_rq_clock(rq);
d0fe0b9c 3860 post_init_entity_util_avg(p);
0017d735 3861
7a57f32a 3862 activate_task(rq, p, ENQUEUE_NOCLOCK);
fbd705a0 3863 trace_sched_wakeup_new(p);
a7558e01 3864 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 3865#ifdef CONFIG_SMP
0aaafaab
PZ
3866 if (p->sched_class->task_woken) {
3867 /*
b19a888c 3868 * Nothing relies on rq->lock after this, so it's fine to
0aaafaab
PZ
3869 * drop it.
3870 */
d8ac8971 3871 rq_unpin_lock(rq, &rf);
efbbd05a 3872 p->sched_class->task_woken(rq, p);
d8ac8971 3873 rq_repin_lock(rq, &rf);
0aaafaab 3874 }
9a897c5a 3875#endif
eb580751 3876 task_rq_unlock(rq, p, &rf);
1da177e4
LT
3877}
3878
e107be36
AK
3879#ifdef CONFIG_PREEMPT_NOTIFIERS
3880
b7203428 3881static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
1cde2930 3882
2ecd9d29
PZ
3883void preempt_notifier_inc(void)
3884{
b7203428 3885 static_branch_inc(&preempt_notifier_key);
2ecd9d29
PZ
3886}
3887EXPORT_SYMBOL_GPL(preempt_notifier_inc);
3888
3889void preempt_notifier_dec(void)
3890{
b7203428 3891 static_branch_dec(&preempt_notifier_key);
2ecd9d29
PZ
3892}
3893EXPORT_SYMBOL_GPL(preempt_notifier_dec);
3894
e107be36 3895/**
80dd99b3 3896 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 3897 * @notifier: notifier struct to register
e107be36
AK
3898 */
3899void preempt_notifier_register(struct preempt_notifier *notifier)
3900{
b7203428 3901 if (!static_branch_unlikely(&preempt_notifier_key))
2ecd9d29
PZ
3902 WARN(1, "registering preempt_notifier while notifiers disabled\n");
3903
e107be36
AK
3904 hlist_add_head(&notifier->link, &current->preempt_notifiers);
3905}
3906EXPORT_SYMBOL_GPL(preempt_notifier_register);
3907
3908/**
3909 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 3910 * @notifier: notifier struct to unregister
e107be36 3911 *
d84525a8 3912 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
3913 */
3914void preempt_notifier_unregister(struct preempt_notifier *notifier)
3915{
3916 hlist_del(&notifier->link);
3917}
3918EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
3919
1cde2930 3920static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3921{
3922 struct preempt_notifier *notifier;
e107be36 3923
b67bfe0d 3924 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3925 notifier->ops->sched_in(notifier, raw_smp_processor_id());
3926}
3927
1cde2930
PZ
3928static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3929{
b7203428 3930 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3931 __fire_sched_in_preempt_notifiers(curr);
3932}
3933
e107be36 3934static void
1cde2930
PZ
3935__fire_sched_out_preempt_notifiers(struct task_struct *curr,
3936 struct task_struct *next)
e107be36
AK
3937{
3938 struct preempt_notifier *notifier;
e107be36 3939
b67bfe0d 3940 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3941 notifier->ops->sched_out(notifier, next);
3942}
3943
1cde2930
PZ
3944static __always_inline void
3945fire_sched_out_preempt_notifiers(struct task_struct *curr,
3946 struct task_struct *next)
3947{
b7203428 3948 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3949 __fire_sched_out_preempt_notifiers(curr, next);
3950}
3951
6d6bc0ad 3952#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 3953
1cde2930 3954static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3955{
3956}
3957
1cde2930 3958static inline void
e107be36
AK
3959fire_sched_out_preempt_notifiers(struct task_struct *curr,
3960 struct task_struct *next)
3961{
3962}
3963
6d6bc0ad 3964#endif /* CONFIG_PREEMPT_NOTIFIERS */
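
[Editorial aside] For reference, a user of this API (KVM is the in-tree one) typically hooks the current task roughly as in the sketch below; the my_* names are hypothetical.

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current is being scheduled back in on @cpu */
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* current is being scheduled out in favour of @next */
}

static struct preempt_notifier_ops my_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_notifier;

static void my_attach(void)
{
	preempt_notifier_inc();				/* enable the static key */
	preempt_notifier_init(&my_notifier, &my_ops);
	preempt_disable();
	preempt_notifier_register(&my_notifier);	/* hooks *current* */
	preempt_enable();
}
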
e107be36 3965
31cb1bc0 3966static inline void prepare_task(struct task_struct *next)
3967{
3968#ifdef CONFIG_SMP
3969 /*
3970 * Claim the task as running; we do this before switching to it
3971 * such that any running task will have this set.
58877d34
PZ
3972 *
3973 * See the ttwu() WF_ON_CPU case and its ordering comment.
31cb1bc0 3974 */
58877d34 3975 WRITE_ONCE(next->on_cpu, 1);
31cb1bc0 3976#endif
3977}
3978
3979static inline void finish_task(struct task_struct *prev)
3980{
3981#ifdef CONFIG_SMP
3982 /*
58877d34
PZ
3983 * This must be the very last reference to @prev from this CPU. After
3984 * p->on_cpu is cleared, the task can be moved to a different CPU. We
3985 * must ensure this doesn't happen until the switch is completely
31cb1bc0 3986 * finished.
3987 *
3988 * In particular, the load of prev->state in finish_task_switch() must
3989 * happen before this.
3990 *
3991 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3992 */
3993 smp_store_release(&prev->on_cpu, 0);
3994#endif
3995}
3996
565790d2
PZ
3997#ifdef CONFIG_SMP
3998
3999static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
4000{
4001 void (*func)(struct rq *rq);
4002 struct callback_head *next;
4003
4004 lockdep_assert_held(&rq->lock);
4005
4006 while (head) {
4007 func = (void (*)(struct rq *))head->func;
4008 next = head->next;
4009 head->next = NULL;
4010 head = next;
4011
4012 func(rq);
4013 }
4014}
4015
ae792702
PZ
4016static void balance_push(struct rq *rq);
4017
4018struct callback_head balance_push_callback = {
4019 .next = NULL,
4020 .func = (void (*)(struct callback_head *))balance_push,
4021};
4022
565790d2
PZ
4023static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4024{
4025 struct callback_head *head = rq->balance_callback;
4026
4027 lockdep_assert_held(&rq->lock);
ae792702 4028 if (head)
565790d2
PZ
4029 rq->balance_callback = NULL;
4030
4031 return head;
4032}
4033
4034static void __balance_callbacks(struct rq *rq)
4035{
4036 do_balance_callbacks(rq, splice_balance_callbacks(rq));
4037}
4038
4039static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4040{
4041 unsigned long flags;
4042
4043 if (unlikely(head)) {
4044 raw_spin_lock_irqsave(&rq->lock, flags);
4045 do_balance_callbacks(rq, head);
4046 raw_spin_unlock_irqrestore(&rq->lock, flags);
4047 }
4048}
4049
4050#else
4051
4052static inline void __balance_callbacks(struct rq *rq)
4053{
4054}
4055
4056static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4057{
4058 return NULL;
4059}
4060
4061static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4062{
4063}
4064
4065#endif
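
[Editorial aside] Sched classes queue deferred work onto rq->balance_callback with the queue_balance_callback() helper in sched.h while holding rq->lock; the helpers above then run it once it is safe to do balancing. A rough sketch modelled on what the rt and deadline classes do; the my_* names are hypothetical.

static void my_push_tasks(struct rq *rq)
{
	/* Invoked via do_balance_callbacks() with rq->lock held. */
}

static DEFINE_PER_CPU(struct callback_head, my_push_head);

static void my_defer_push(struct rq *rq)
{
	/* Called with rq->lock held; defer the push instead of doing it now. */
	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu), my_push_tasks);
}
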
4066
269d5992
PZ
4067static inline void
4068prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
31cb1bc0 4069{
269d5992
PZ
4070 /*
4071 * The runqueue lock will be released by the next
4072 * task (which is an invalid locking op but in the case
4073 * of the scheduler it's an obvious special-case), so we
4074 * do an early lockdep release here:
4075 */
4076 rq_unpin_lock(rq, rf);
5facae4f 4077 spin_release(&rq->lock.dep_map, _THIS_IP_);
31cb1bc0 4078#ifdef CONFIG_DEBUG_SPINLOCK
4079 /* this is a valid case when another task releases the spinlock */
269d5992 4080 rq->lock.owner = next;
31cb1bc0 4081#endif
269d5992
PZ
4082}
4083
4084static inline void finish_lock_switch(struct rq *rq)
4085{
31cb1bc0 4086 /*
4087 * If we are tracking spinlock dependencies then we have to
4088 * fix up the runqueue lock - which gets 'carried over' from
4089 * prev into current:
4090 */
4091 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
ae792702 4092 __balance_callbacks(rq);
31cb1bc0 4093 raw_spin_unlock_irq(&rq->lock);
4094}
4095
325ea10c
IM
4096/*
4097 * NOP if the arch has not defined these:
4098 */
4099
4100#ifndef prepare_arch_switch
4101# define prepare_arch_switch(next) do { } while (0)
4102#endif
4103
4104#ifndef finish_arch_post_lock_switch
4105# define finish_arch_post_lock_switch() do { } while (0)
4106#endif
4107
5fbda3ec
TG
4108static inline void kmap_local_sched_out(void)
4109{
4110#ifdef CONFIG_KMAP_LOCAL
4111 if (unlikely(current->kmap_ctrl.idx))
4112 __kmap_local_sched_out();
4113#endif
4114}
4115
4116static inline void kmap_local_sched_in(void)
4117{
4118#ifdef CONFIG_KMAP_LOCAL
4119 if (unlikely(current->kmap_ctrl.idx))
4120 __kmap_local_sched_in();
4121#endif
4122}
4123
4866cde0
NP
4124/**
4125 * prepare_task_switch - prepare to switch tasks
4126 * @rq: the runqueue preparing to switch
421cee29 4127 * @prev: the current task that is being switched out
4866cde0
NP
4128 * @next: the task we are going to switch to.
4129 *
4130 * This is called with the rq lock held and interrupts off. It must
4131 * be paired with a subsequent finish_task_switch after the context
4132 * switch.
4133 *
4134 * prepare_task_switch sets up locking and calls architecture specific
4135 * hooks.
4136 */
e107be36
AK
4137static inline void
4138prepare_task_switch(struct rq *rq, struct task_struct *prev,
4139 struct task_struct *next)
4866cde0 4140{
0ed557aa 4141 kcov_prepare_switch(prev);
43148951 4142 sched_info_switch(rq, prev, next);
fe4b04fa 4143 perf_event_task_sched_out(prev, next);
d7822b1e 4144 rseq_preempt(prev);
e107be36 4145 fire_sched_out_preempt_notifiers(prev, next);
5fbda3ec 4146 kmap_local_sched_out();
31cb1bc0 4147 prepare_task(next);
4866cde0
NP
4148 prepare_arch_switch(next);
4149}
4150
1da177e4
LT
4151/**
4152 * finish_task_switch - clean up after a task-switch
4153 * @prev: the thread we just switched away from.
4154 *
4866cde0
NP
4155 * finish_task_switch must be called after the context switch, paired
4156 * with a prepare_task_switch call before the context switch.
4157 * finish_task_switch will reconcile locking set up by prepare_task_switch,
4158 * and do any other architecture-specific cleanup actions.
1da177e4
LT
4159 *
4160 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 4161 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
4162 * with the lock held can cause deadlocks; see schedule() for
4163 * details.)
dfa50b60
ON
4164 *
4165 * The context switch has flipped the stack from under us and restored the
4166 * local variables which were saved when this task called schedule() in the
4167 * past. prev == current is still correct but we need to recalculate this_rq
4168 * because prev may have moved to another CPU.
1da177e4 4169 */
dfa50b60 4170static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
4171 __releases(rq->lock)
4172{
dfa50b60 4173 struct rq *rq = this_rq();
1da177e4 4174 struct mm_struct *mm = rq->prev_mm;
55a101f8 4175 long prev_state;
1da177e4 4176
609ca066
PZ
4177 /*
4178 * The previous task will have left us with a preempt_count of 2
4179 * because it left us after:
4180 *
4181 * schedule()
4182 * preempt_disable(); // 1
4183 * __schedule()
4184 * raw_spin_lock_irq(&rq->lock) // 2
4185 *
4186 * Also, see FORK_PREEMPT_COUNT.
4187 */
e2bf1c4b
PZ
4188 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
4189 "corrupted preempt_count: %s/%d/0x%x\n",
4190 current->comm, current->pid, preempt_count()))
4191 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 4192
1da177e4
LT
4193 rq->prev_mm = NULL;
4194
4195 /*
4196 * A task struct has one reference for the use as "current".
c394cc9f 4197 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
4198 * schedule one last time. The schedule call will never return, and
4199 * the scheduled task must drop that reference.
95913d97
PZ
4200 *
4201 * We must observe prev->state before clearing prev->on_cpu (in
31cb1bc0 4202 * finish_task), otherwise a concurrent wakeup can get prev
95913d97
PZ
4203 * running on another CPU and we could race with its RUNNING -> DEAD
4204 * transition, resulting in a double drop.
1da177e4 4205 */
55a101f8 4206 prev_state = prev->state;
bf9fae9f 4207 vtime_task_switch(prev);
a8d757ef 4208 perf_event_task_sched_in(prev, current);
31cb1bc0 4209 finish_task(prev);
4210 finish_lock_switch(rq);
01f23e16 4211 finish_arch_post_lock_switch();
0ed557aa 4212 kcov_finish_switch(current);
5fbda3ec
TG
4213 /*
4214 * kmap_local_sched_out() is invoked with rq::lock held and
4215 * interrupts disabled. There is no requirement for that, but the
4216 * sched out code does not have an interrupt enabled section.
4217 * Restoring the maps on sched in does not require interrupts being
4218 * disabled either.
4219 */
4220 kmap_local_sched_in();
e8fa1362 4221
e107be36 4222 fire_sched_in_preempt_notifiers(current);
306e0604 4223 /*
70216e18
MD
4224 * When switching through a kernel thread, the loop in
4225 * membarrier_{private,global}_expedited() may have observed that
4226 * kernel thread and not issued an IPI. It is therefore possible to
4227 * schedule between user->kernel->user threads without passing through
4228 * switch_mm(). Membarrier requires a barrier after storing to
4229 * rq->curr, before returning to userspace, so provide them here:
4230 *
4231 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
4232 * provided by mmdrop(),
4233 * - a sync_core for SYNC_CORE.
306e0604 4234 */
70216e18
MD
4235 if (mm) {
4236 membarrier_mm_sync_core_before_usermode(mm);
1da177e4 4237 mmdrop(mm);
70216e18 4238 }
1cef1150
PZ
4239 if (unlikely(prev_state == TASK_DEAD)) {
4240 if (prev->sched_class->task_dead)
4241 prev->sched_class->task_dead(prev);
68f24b08 4242
1cef1150
PZ
4243 /*
4244 * Remove function-return probe instances associated with this
4245 * task and put them back on the free list.
4246 */
4247 kprobe_flush_task(prev);
4248
4249 /* Task is done with its stack. */
4250 put_task_stack(prev);
4251
0ff7b2cf 4252 put_task_struct_rcu_user(prev);
c6fd91f0 4253 }
99e5ada9 4254
de734f89 4255 tick_nohz_task_switch();
dfa50b60 4256 return rq;
1da177e4
LT
4257}
4258
4259/**
4260 * schedule_tail - first thing a freshly forked thread must call.
4261 * @prev: the thread we just switched away from.
4262 */
722a9f92 4263asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
4264 __releases(rq->lock)
4265{
1a43a14a 4266 struct rq *rq;
da19ab51 4267
609ca066
PZ
4268 /*
4269 * New tasks start with FORK_PREEMPT_COUNT, see there and
4270 * finish_task_switch() for details.
4271 *
4272 * finish_task_switch() will drop rq->lock() and lower preempt_count
4273 * and the preempt_enable() will end up enabling preemption (on
4274 * PREEMPT_COUNT kernels).
4275 */
4276
dfa50b60 4277 rq = finish_task_switch(prev);
1a43a14a 4278 preempt_enable();
70b97a7f 4279
1da177e4 4280 if (current->set_child_tid)
b488893a 4281 put_user(task_pid_vnr(current), current->set_child_tid);
088fe47c
EB
4282
4283 calculate_sigpending();
1da177e4
LT
4284}
4285
4286/*
dfa50b60 4287 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 4288 */
04936948 4289static __always_inline struct rq *
70b97a7f 4290context_switch(struct rq *rq, struct task_struct *prev,
d8ac8971 4291 struct task_struct *next, struct rq_flags *rf)
1da177e4 4292{
e107be36 4293 prepare_task_switch(rq, prev, next);
fe4b04fa 4294
9226d125
ZA
4295 /*
4296 * For paravirt, this is coupled with an exit in switch_to to
4297 * combine the page table reload and the switch backend into
4298 * one hypercall.
4299 */
224101ed 4300 arch_start_context_switch(prev);
9226d125 4301
306e0604 4302 /*
139d025c
PZ
4303 * kernel -> kernel lazy + transfer active
4304 * user -> kernel lazy + mmgrab() active
4305 *
4306 * kernel -> user switch + mmdrop() active
4307 * user -> user switch
306e0604 4308 */
139d025c
PZ
4309 if (!next->mm) { // to kernel
4310 enter_lazy_tlb(prev->active_mm, next);
4311
4312 next->active_mm = prev->active_mm;
4313 if (prev->mm) // from user
4314 mmgrab(prev->active_mm);
4315 else
4316 prev->active_mm = NULL;
4317 } else { // to user
227a4aad 4318 membarrier_switch_mm(rq, prev->active_mm, next->mm);
139d025c
PZ
4319 /*
4320 * sys_membarrier() requires an smp_mb() between setting
227a4aad 4321 * rq->curr / membarrier_switch_mm() and returning to userspace.
139d025c
PZ
4322 *
4323 * The below provides this either through switch_mm(), or in
4324 * case 'prev->active_mm == next->mm' through
4325 * finish_task_switch()'s mmdrop().
4326 */
139d025c 4327 switch_mm_irqs_off(prev->active_mm, next->mm, next);
1da177e4 4328
139d025c
PZ
4329 if (!prev->mm) { // from kernel
4330 /* will mmdrop() in finish_task_switch(). */
4331 rq->prev_mm = prev->active_mm;
4332 prev->active_mm = NULL;
4333 }
1da177e4 4334 }
92509b73 4335
cb42c9a3 4336 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
92509b73 4337
269d5992 4338 prepare_lock_switch(rq, next, rf);
1da177e4
LT
4339
4340 /* Here we just switch the register state and the stack. */
4341 switch_to(prev, next, prev);
dd41f596 4342 barrier();
dfa50b60
ON
4343
4344 return finish_task_switch(prev);
1da177e4
LT
4345}
4346
4347/*
1c3e8264 4348 * nr_running and nr_context_switches:
1da177e4
LT
4349 *
4350 * externally visible scheduler statistics: current number of runnable
1c3e8264 4351 * threads, total number of context switches performed since bootup.
1da177e4
LT
4352 */
4353unsigned long nr_running(void)
4354{
4355 unsigned long i, sum = 0;
4356
4357 for_each_online_cpu(i)
4358 sum += cpu_rq(i)->nr_running;
4359
4360 return sum;
f711f609 4361}
1da177e4 4362
2ee507c4 4363/*
d1ccc66d 4364 * Check if only the current task is running on the CPU.
00cc1633
DD
4365 *
4366 * Caution: this function does not check that the caller has disabled
4367 * preemption, thus the result might have a time-of-check-to-time-of-use
4368 * race. The caller is responsible for using it correctly, for example:
4369 *
dfcb245e 4370 * - from a non-preemptible section (of course)
00cc1633
DD
4371 *
4372 * - from a thread that is bound to a single CPU
4373 *
4374 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
4375 */
4376bool single_task_running(void)
4377{
00cc1633 4378 return raw_rq()->nr_running == 1;
2ee507c4
TC
4379}
4380EXPORT_SYMBOL(single_task_running);
4381
1da177e4 4382unsigned long long nr_context_switches(void)
46cb4b7c 4383{
cc94abfc
SR
4384 int i;
4385 unsigned long long sum = 0;
46cb4b7c 4386
0a945022 4387 for_each_possible_cpu(i)
1da177e4 4388 sum += cpu_rq(i)->nr_switches;
46cb4b7c 4389
1da177e4
LT
4390 return sum;
4391}
483b4ee6 4392
145d952a
DL
4393/*
4394 * Consumers of these two interfaces, like for example the cpuidle menu
4395 * governor, are using nonsensical data: they prefer shallow idle state selection
4396 * for a CPU that has IO-wait pending, even though that CPU might not end up
4397 * running the task when it does become runnable.
4398 */
4399
4400unsigned long nr_iowait_cpu(int cpu)
4401{
4402 return atomic_read(&cpu_rq(cpu)->nr_iowait);
4403}
4404
e33a9bba 4405/*
b19a888c 4406 * IO-wait accounting, and how it's mostly bollocks (on SMP).
e33a9bba
TH
4407 *
4408 * The idea behind IO-wait accounting is to account the idle time that we could
4409 * have spent running if it were not for IO. That is, if we were to improve the
4410 * storage performance, we'd have a proportional reduction in IO-wait time.
4411 *
4412 * This all works nicely on UP, where, when a task blocks on IO, we account
4413 * idle time as IO-wait, because if the storage were faster, it could've been
4414 * running and we'd not be idle.
4415 *
4416 * This has been extended to SMP, by doing the same for each CPU. This however
4417 * is broken.
4418 *
4419 * Imagine for instance the case where two tasks block on one CPU, only the one
4420 * CPU will have IO-wait accounted, while the other has regular idle. Even
4421 * though, if the storage were faster, both could've run at the same time,
4422 * utilising both CPUs.
4423 *
4424 * This means, that when looking globally, the current IO-wait accounting on
4425 * SMP is a lower bound, by reason of under accounting.
4426 *
4427 * Worse, since the numbers are provided per CPU, they are sometimes
4428 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
4429 * associated with any one particular CPU, it can wake to another CPU than it
4430 * blocked on. This means the per CPU IO-wait number is meaningless.
4431 *
4432 * Task CPU affinities can make all that even more 'interesting'.
4433 */
4434
1da177e4
LT
4435unsigned long nr_iowait(void)
4436{
4437 unsigned long i, sum = 0;
483b4ee6 4438
0a945022 4439 for_each_possible_cpu(i)
145d952a 4440 sum += nr_iowait_cpu(i);
46cb4b7c 4441
1da177e4
LT
4442 return sum;
4443}
483b4ee6 4444
dd41f596 4445#ifdef CONFIG_SMP
8a0be9ef 4446
46cb4b7c 4447/*
38022906
PZ
4448 * sched_exec - execve() is a valuable balancing opportunity, because at
4449 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 4450 */
38022906 4451void sched_exec(void)
46cb4b7c 4452{
38022906 4453 struct task_struct *p = current;
1da177e4 4454 unsigned long flags;
0017d735 4455 int dest_cpu;
46cb4b7c 4456
8f42ced9 4457 raw_spin_lock_irqsave(&p->pi_lock, flags);
3aef1551 4458 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
0017d735
PZ
4459 if (dest_cpu == smp_processor_id())
4460 goto unlock;
38022906 4461
8f42ced9 4462 if (likely(cpu_active(dest_cpu))) {
969c7921 4463 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 4464
8f42ced9
PZ
4465 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4466 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
4467 return;
4468 }
0017d735 4469unlock:
8f42ced9 4470 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 4471}
dd41f596 4472
1da177e4
LT
4473#endif
4474
1da177e4 4475DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 4476DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
4477
4478EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 4479EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 4480
6075620b
GG
4481/*
4482 * The function fair_sched_class.update_curr accesses the struct curr
4483 * and its field curr->exec_start; when called from task_sched_runtime(),
4484 * we observe a high rate of cache misses in practice.
4485 * Prefetching this data results in improved performance.
4486 */
4487static inline void prefetch_curr_exec_start(struct task_struct *p)
4488{
4489#ifdef CONFIG_FAIR_GROUP_SCHED
4490 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
4491#else
4492 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
4493#endif
4494 prefetch(curr);
4495 prefetch(&curr->exec_start);
4496}
4497
c5f8d995
HS
4498/*
4499 * Return accounted runtime for the task.
4500 * In case the task is currently running, return the runtime plus current's
4501 * pending runtime that has not been accounted yet.
4502 */
4503unsigned long long task_sched_runtime(struct task_struct *p)
4504{
eb580751 4505 struct rq_flags rf;
c5f8d995 4506 struct rq *rq;
6e998916 4507 u64 ns;
c5f8d995 4508
911b2898
PZ
4509#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
4510 /*
97fb7a0a 4511 * 64-bit doesn't need locks to atomically read a 64-bit value.
911b2898
PZ
4512 * So we have an optimization chance when the task's delta_exec is 0.
4513 * Reading ->on_cpu is racy, but this is ok.
4514 *
d1ccc66d
IM
4515 * If we race with it leaving CPU, we'll take a lock. So we're correct.
4516 * If we race with it entering CPU, unaccounted time is 0. This is
911b2898 4517 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
4518 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
4519 * been accounted, so we're correct here as well.
911b2898 4520 */
da0c1e65 4521 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
4522 return p->se.sum_exec_runtime;
4523#endif
4524
eb580751 4525 rq = task_rq_lock(p, &rf);
6e998916
SG
4526 /*
4527 * Must be ->curr _and_ ->on_rq. If dequeued, we would
4528 * project cycles that may never be accounted to this
4529 * thread, breaking clock_gettime().
4530 */
4531 if (task_current(rq, p) && task_on_rq_queued(p)) {
6075620b 4532 prefetch_curr_exec_start(p);
6e998916
SG
4533 update_rq_clock(rq);
4534 p->sched_class->update_curr(rq);
4535 }
4536 ns = p->se.sum_exec_runtime;
eb580751 4537 task_rq_unlock(rq, p, &rf);
c5f8d995
HS
4538
4539 return ns;
4540}
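
[Editorial aside] From userspace this accounting is most visibly reached through the per-thread CPU clock (the clock_gettime() the comment above worries about breaking); a minimal sketch:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
		return 1;
	printf("this thread has run for %lld.%09ld s\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
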
48f24c4d 4541
7835b98b
CL
4542/*
4543 * This function gets called by the timer code, with HZ frequency.
4544 * We call it with interrupts disabled.
7835b98b
CL
4545 */
4546void scheduler_tick(void)
4547{
7835b98b
CL
4548 int cpu = smp_processor_id();
4549 struct rq *rq = cpu_rq(cpu);
dd41f596 4550 struct task_struct *curr = rq->curr;
8a8c69c3 4551 struct rq_flags rf;
b4eccf5f 4552 unsigned long thermal_pressure;
3e51f33f 4553
1567c3e3 4554 arch_scale_freq_tick();
3e51f33f 4555 sched_clock_tick();
dd41f596 4556
8a8c69c3
PZ
4557 rq_lock(rq, &rf);
4558
3e51f33f 4559 update_rq_clock(rq);
b4eccf5f 4560 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
05289b90 4561 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
fa85ae24 4562 curr->sched_class->task_tick(rq, curr, 0);
3289bdb4 4563 calc_global_load_tick(rq);
eb414681 4564 psi_task_tick(rq);
8a8c69c3
PZ
4565
4566 rq_unlock(rq, &rf);
7835b98b 4567
e9d2b064 4568 perf_event_task_tick();
e220d2dc 4569
e418e1c2 4570#ifdef CONFIG_SMP
6eb57e0d 4571 rq->idle_balance = idle_cpu(cpu);
7caff66f 4572 trigger_load_balance(rq);
e418e1c2 4573#endif
1da177e4
LT
4574}
4575
265f22a9 4576#ifdef CONFIG_NO_HZ_FULL
d84b3131
FW
4577
4578struct tick_work {
4579 int cpu;
b55bd585 4580 atomic_t state;
d84b3131
FW
4581 struct delayed_work work;
4582};
b55bd585
PM
4583/* Values for ->state, see diagram below. */
4584#define TICK_SCHED_REMOTE_OFFLINE 0
4585#define TICK_SCHED_REMOTE_OFFLINING 1
4586#define TICK_SCHED_REMOTE_RUNNING 2
4587
4588/*
4589 * State diagram for ->state:
4590 *
4591 *
4592 * TICK_SCHED_REMOTE_OFFLINE
4593 * | ^
4594 * | |
4595 * | | sched_tick_remote()
4596 * | |
4597 * | |
4598 * +--TICK_SCHED_REMOTE_OFFLINING
4599 * | ^
4600 * | |
4601 * sched_tick_start() | | sched_tick_stop()
4602 * | |
4603 * V |
4604 * TICK_SCHED_REMOTE_RUNNING
4605 *
4606 *
4607 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
4608 * and sched_tick_start() are happy to leave the state in RUNNING.
4609 */
d84b3131
FW
4610
4611static struct tick_work __percpu *tick_work_cpu;
4612
4613static void sched_tick_remote(struct work_struct *work)
4614{
4615 struct delayed_work *dwork = to_delayed_work(work);
4616 struct tick_work *twork = container_of(dwork, struct tick_work, work);
4617 int cpu = twork->cpu;
4618 struct rq *rq = cpu_rq(cpu);
d9c0ffca 4619 struct task_struct *curr;
d84b3131 4620 struct rq_flags rf;
d9c0ffca 4621 u64 delta;
b55bd585 4622 int os;
d84b3131
FW
4623
4624 /*
4625 * Handle the tick only if it appears the remote CPU is running in full
4626 * dynticks mode. The check is racy by nature, but missing a tick or
4627 * having one too many is no big deal because the scheduler tick updates
4628 * statistics and checks timeslices in a time-independent way, regardless
4629 * of when exactly it is running.
4630 */
488603b8 4631 if (!tick_nohz_tick_stopped_cpu(cpu))
d9c0ffca 4632 goto out_requeue;
d84b3131 4633
d9c0ffca
FW
4634 rq_lock_irq(rq, &rf);
4635 curr = rq->curr;
488603b8 4636 if (cpu_is_offline(cpu))
d9c0ffca 4637 goto out_unlock;
d84b3131 4638
d9c0ffca 4639 update_rq_clock(rq);
d9c0ffca 4640
488603b8
SW
4641 if (!is_idle_task(curr)) {
4642 /*
4643 * Make sure the next tick runs within a reasonable
4644 * amount of time.
4645 */
4646 delta = rq_clock_task(rq) - curr->se.exec_start;
4647 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
4648 }
d9c0ffca
FW
4649 curr->sched_class->task_tick(rq, curr, 0);
4650
ebc0f83c 4651 calc_load_nohz_remote(rq);
d9c0ffca
FW
4652out_unlock:
4653 rq_unlock_irq(rq, &rf);
d9c0ffca 4654out_requeue:
ebc0f83c 4655
d84b3131
FW
4656 /*
4657 * Run the remote tick once per second (1Hz). This arbitrary
4658 * frequency is large enough to avoid overload but short enough
b55bd585
PM
4659 * to keep scheduler internal stats reasonably up to date. But
4660 * first update state to reflect hotplug activity if required.
d84b3131 4661 */
b55bd585
PM
4662 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
4663 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
4664 if (os == TICK_SCHED_REMOTE_RUNNING)
4665 queue_delayed_work(system_unbound_wq, dwork, HZ);
d84b3131
FW
4666}
4667
4668static void sched_tick_start(int cpu)
4669{
b55bd585 4670 int os;
d84b3131
FW
4671 struct tick_work *twork;
4672
4673 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4674 return;
4675
4676 WARN_ON_ONCE(!tick_work_cpu);
4677
4678 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
4679 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
4680 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
4681 if (os == TICK_SCHED_REMOTE_OFFLINE) {
4682 twork->cpu = cpu;
4683 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
4684 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
4685 }
d84b3131
FW
4686}
4687
4688#ifdef CONFIG_HOTPLUG_CPU
4689static void sched_tick_stop(int cpu)
4690{
4691 struct tick_work *twork;
b55bd585 4692 int os;
d84b3131
FW
4693
4694 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4695 return;
4696
4697 WARN_ON_ONCE(!tick_work_cpu);
4698
4699 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
4700 /* There cannot be competing actions, but don't rely on stop-machine. */
4701 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
4702 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
4703 /* Don't cancel, as this would mess up the state machine. */
d84b3131
FW
4704}
4705#endif /* CONFIG_HOTPLUG_CPU */
4706
4707int __init sched_tick_offload_init(void)
4708{
4709 tick_work_cpu = alloc_percpu(struct tick_work);
4710 BUG_ON(!tick_work_cpu);
d84b3131
FW
4711 return 0;
4712}
4713
4714#else /* !CONFIG_NO_HZ_FULL */
4715static inline void sched_tick_start(int cpu) { }
4716static inline void sched_tick_stop(int cpu) { }
265f22a9 4717#endif
1da177e4 4718
c1a280b6 4719#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
c3bc8fd6 4720 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
47252cfb
SR
4721/*
4722 * If the value passed in is equal to the current preempt count
4723 * then we just disabled preemption. Start timing the latency.
4724 */
4725static inline void preempt_latency_start(int val)
4726{
4727 if (preempt_count() == val) {
4728 unsigned long ip = get_lock_parent_ip();
4729#ifdef CONFIG_DEBUG_PREEMPT
4730 current->preempt_disable_ip = ip;
4731#endif
4732 trace_preempt_off(CALLER_ADDR0, ip);
4733 }
4734}
7e49fcce 4735
edafe3a5 4736void preempt_count_add(int val)
1da177e4 4737{
6cd8a4bb 4738#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4739 /*
4740 * Underflow?
4741 */
9a11b49a
IM
4742 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4743 return;
6cd8a4bb 4744#endif
bdb43806 4745 __preempt_count_add(val);
6cd8a4bb 4746#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4747 /*
4748 * Spinlock count overflowing soon?
4749 */
33859f7f
MOS
4750 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4751 PREEMPT_MASK - 10);
6cd8a4bb 4752#endif
47252cfb 4753 preempt_latency_start(val);
1da177e4 4754}
bdb43806 4755EXPORT_SYMBOL(preempt_count_add);
edafe3a5 4756NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 4757
47252cfb
SR
4758/*
4759 * If the value passed in equals the current preempt count
4760 * then we just enabled preemption. Stop timing the latency.
4761 */
4762static inline void preempt_latency_stop(int val)
4763{
4764 if (preempt_count() == val)
4765 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
4766}
4767
edafe3a5 4768void preempt_count_sub(int val)
1da177e4 4769{
6cd8a4bb 4770#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4771 /*
4772 * Underflow?
4773 */
01e3eb82 4774 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 4775 return;
1da177e4
LT
4776 /*
4777 * Is the spinlock portion underflowing?
4778 */
9a11b49a
IM
4779 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4780 !(preempt_count() & PREEMPT_MASK)))
4781 return;
6cd8a4bb 4782#endif
9a11b49a 4783
47252cfb 4784 preempt_latency_stop(val);
bdb43806 4785 __preempt_count_sub(val);
1da177e4 4786}
bdb43806 4787EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 4788NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4 4789
47252cfb
SR
4790#else
4791static inline void preempt_latency_start(int val) { }
4792static inline void preempt_latency_stop(int val) { }
1da177e4
LT
4793#endif
4794
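
[Editorial aside] In practice these counters move through the preempt_disable()/preempt_enable() wrappers, which call preempt_count_add()/preempt_count_sub() when CONFIG_DEBUG_PREEMPT or the preempt tracer is enabled; a tiny sketch with a hypothetical per-CPU counter:

static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical */

static void my_percpu_update(void)
{
	preempt_disable();		/* bumps the preempt count */
	__this_cpu_inc(my_counter);	/* safe: cannot be preempted or migrated here */
	preempt_enable();		/* drops the count, may reschedule */
}
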
59ddbcb2
IM
4795static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
4796{
4797#ifdef CONFIG_DEBUG_PREEMPT
4798 return p->preempt_disable_ip;
4799#else
4800 return 0;
4801#endif
4802}
4803
1da177e4 4804/*
dd41f596 4805 * Print scheduling while atomic bug:
1da177e4 4806 */
dd41f596 4807static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 4808{
d1c6d149
VN
4809 /* Save this before calling printk(), since that will clobber it */
4810 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
4811
664dfa65
DJ
4812 if (oops_in_progress)
4813 return;
4814
3df0fc5b
PZ
4815 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4816 prev->comm, prev->pid, preempt_count());
838225b4 4817
dd41f596 4818 debug_show_held_locks(prev);
e21f5b15 4819 print_modules();
dd41f596
IM
4820 if (irqs_disabled())
4821 print_irqtrace_events(prev);
d1c6d149
VN
4822 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
4823 && in_atomic_preempt_off()) {
8f47b187 4824 pr_err("Preemption disabled at:");
2062a4e8 4825 print_ip_sym(KERN_ERR, preempt_disable_ip);
8f47b187 4826 }
748c7201
DBO
4827 if (panic_on_warn)
4828 panic("scheduling while atomic\n");
4829
6135fc1e 4830 dump_stack();
373d4d09 4831 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 4832}
1da177e4 4833
dd41f596
IM
4834/*
4835 * Various schedule()-time debugging checks and statistics:
4836 */
312364f3 4837static inline void schedule_debug(struct task_struct *prev, bool preempt)
dd41f596 4838{
0d9e2632 4839#ifdef CONFIG_SCHED_STACK_END_CHECK
29d64551
JH
4840 if (task_stack_end_corrupted(prev))
4841 panic("corrupted stack end detected inside scheduler\n");
88485be5
WD
4842
4843 if (task_scs_end_corrupted(prev))
4844 panic("corrupted shadow stack detected inside scheduler\n");
0d9e2632 4845#endif
b99def8b 4846
312364f3
DV
4847#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4848 if (!preempt && prev->state && prev->non_block_count) {
4849 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
4850 prev->comm, prev->pid, prev->non_block_count);
4851 dump_stack();
4852 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4853 }
4854#endif
4855
1dc0fffc 4856 if (unlikely(in_atomic_preempt_off())) {
dd41f596 4857 __schedule_bug(prev);
1dc0fffc
PZ
4858 preempt_count_set(PREEMPT_DISABLED);
4859 }
b3fbab05 4860 rcu_sleep_check();
9f68b5b7 4861 SCHED_WARN_ON(ct_state() == CONTEXT_USER);
dd41f596 4862
1da177e4
LT
4863 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4864
ae92882e 4865 schedstat_inc(this_rq()->sched_count);
dd41f596
IM
4866}
4867
457d1f46
CY
4868static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
4869 struct rq_flags *rf)
4870{
4871#ifdef CONFIG_SMP
4872 const struct sched_class *class;
4873 /*
4874 * We must do the balancing pass before put_prev_task(), such
4875 * that when we release the rq->lock the task is in the same
4876 * state as before we took rq->lock.
4877 *
4878 * We can terminate the balance pass as soon as we know there is
4879 * a runnable task of @class priority or higher.
4880 */
4881 for_class_range(class, prev->sched_class, &idle_sched_class) {
4882 if (class->balance(rq, prev, rf))
4883 break;
4884 }
4885#endif
4886
4887 put_prev_task(rq, prev);
4888}
4889
dd41f596
IM
4890/*
4891 * Pick up the highest-prio task:
4892 */
4893static inline struct task_struct *
d8ac8971 4894pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
dd41f596 4895{
49ee5768 4896 const struct sched_class *class;
dd41f596 4897 struct task_struct *p;
1da177e4
LT
4898
4899 /*
0ba87bb2
PZ
4900 * Optimization: we know that if all tasks are in the fair class we can
4901 * call that function directly, but only if the @prev task wasn't of a
b19a888c 4902 * higher scheduling class, because otherwise those lose the
0ba87bb2 4903 * opportunity to pull in more work from other CPUs.
1da177e4 4904 */
aa93cd53 4905 if (likely(prev->sched_class <= &fair_sched_class &&
0ba87bb2
PZ
4906 rq->nr_running == rq->cfs.h_nr_running)) {
4907
5d7d6056 4908 p = pick_next_task_fair(rq, prev, rf);
6ccdc84b 4909 if (unlikely(p == RETRY_TASK))
67692435 4910 goto restart;
6ccdc84b 4911
d1ccc66d 4912 /* Assumes fair_sched_class->next == idle_sched_class */
5d7d6056 4913 if (!p) {
f488e105 4914 put_prev_task(rq, prev);
98c2f700 4915 p = pick_next_task_idle(rq);
f488e105 4916 }
6ccdc84b
PZ
4917
4918 return p;
1da177e4
LT
4919 }
4920
67692435 4921restart:
457d1f46 4922 put_prev_task_balance(rq, prev, rf);
67692435 4923
34f971f6 4924 for_each_class(class) {
98c2f700 4925 p = class->pick_next_task(rq);
67692435 4926 if (p)
dd41f596 4927 return p;
dd41f596 4928 }
34f971f6 4929
d1ccc66d
IM
4930 /* The idle class should always have a runnable task: */
4931 BUG();
dd41f596 4932}
1da177e4 4933
dd41f596 4934/*
c259e01a 4935 * __schedule() is the main scheduler function.
edde96ea
PE
4936 *
4937 * The main means of driving the scheduler and thus entering this function are:
4938 *
4939 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4940 *
4941 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4942 * paths. For example, see arch/x86/entry_64.S.
4943 *
4944 * To drive preemption between tasks, the scheduler sets the flag in timer
4945 * interrupt handler scheduler_tick().
4946 *
4947 * 3. Wakeups don't really cause entry into schedule(). They add a
4948 * task to the run-queue and that's it.
4949 *
4950 * Now, if the new task added to the run-queue preempts the current
4951 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4952 * called on the nearest possible occasion:
4953 *
c1a280b6 4954 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
edde96ea
PE
4955 *
4956 * - in syscall or exception context, at the next outermost
4957 * preempt_enable(). (this might be as soon as the wake_up()'s
4958 * spin_unlock()!)
4959 *
4960 * - in IRQ context, return from interrupt-handler to
4961 * preemptible context
4962 *
c1a280b6 4963 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
edde96ea
PE
4964 * then at the next:
4965 *
4966 * - cond_resched() call
4967 * - explicit schedule() call
4968 * - return from syscall or exception to user-space
4969 * - return from interrupt-handler to user-space
bfd9b2b5 4970 *
b30f0e3f 4971 * WARNING: must be called with preemption disabled!
dd41f596 4972 */
499d7955 4973static void __sched notrace __schedule(bool preempt)
dd41f596
IM
4974{
4975 struct task_struct *prev, *next;
67ca7bde 4976 unsigned long *switch_count;
dbfb089d 4977 unsigned long prev_state;
d8ac8971 4978 struct rq_flags rf;
dd41f596 4979 struct rq *rq;
31656519 4980 int cpu;
dd41f596 4981
dd41f596
IM
4982 cpu = smp_processor_id();
4983 rq = cpu_rq(cpu);
dd41f596 4984 prev = rq->curr;
dd41f596 4985
312364f3 4986 schedule_debug(prev, preempt);
1da177e4 4987
e0ee463c 4988 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
f333fdc9 4989 hrtick_clear(rq);
8f4d37ec 4990
46a5d164 4991 local_irq_disable();
bcbfdd01 4992 rcu_note_context_switch(preempt);
46a5d164 4993
e0acd0a6
ON
4994 /*
4995 * Make sure that signal_pending_state()->signal_pending() below
4996 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
dbfb089d
PZ
4997 * done by the caller to avoid the race with signal_wake_up():
4998 *
4999 * __set_current_state(@state) signal_wake_up()
5000 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
5001 * wake_up_state(p, state)
5002 * LOCK rq->lock LOCK p->pi_state
5003 * smp_mb__after_spinlock() smp_mb__after_spinlock()
5004 * if (signal_pending_state()) if (p->state & @state)
306e0604 5005 *
dbfb089d 5006 * Also, the membarrier system call requires a full memory barrier
306e0604 5007 * after coming from user-space, before storing to rq->curr.
e0acd0a6 5008 */
8a8c69c3 5009 rq_lock(rq, &rf);
d89e588c 5010 smp_mb__after_spinlock();
1da177e4 5011
d1ccc66d
IM
5012 /* Promote REQ to ACT */
5013 rq->clock_update_flags <<= 1;
bce4dc80 5014 update_rq_clock(rq);
9edfbfed 5015
246d86b5 5016 switch_count = &prev->nivcsw;
d136122f 5017
dbfb089d 5018 /*
d136122f
PZ
5019 * We must load prev->state once (task_struct::state is volatile), such
5020 * that:
5021 *
5022 * - we form a control dependency vs deactivate_task() below.
5023 * - ptrace_{,un}freeze_traced() can change ->state underneath us.
dbfb089d 5024 */
d136122f
PZ
5025 prev_state = prev->state;
5026 if (!preempt && prev_state) {
dbfb089d 5027 if (signal_pending_state(prev_state, prev)) {
1da177e4 5028 prev->state = TASK_RUNNING;
21aa9af0 5029 } else {
dbfb089d
PZ
5030 prev->sched_contributes_to_load =
5031 (prev_state & TASK_UNINTERRUPTIBLE) &&
5032 !(prev_state & TASK_NOLOAD) &&
5033 !(prev->flags & PF_FROZEN);
5034
5035 if (prev->sched_contributes_to_load)
5036 rq->nr_uninterruptible++;
5037
5038 /*
5039 * __schedule() ttwu()
d136122f
PZ
5040 * prev_state = prev->state; if (p->on_rq && ...)
5041 * if (prev_state) goto out;
5042 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
5043 * p->state = TASK_WAKING
5044 *
5045 * Where __schedule() and ttwu() have matching control dependencies.
dbfb089d
PZ
5046 *
5047 * After this, schedule() must not care about p->state any more.
5048 */
bce4dc80 5049 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
2acca55e 5050
e33a9bba
TH
5051 if (prev->in_iowait) {
5052 atomic_inc(&rq->nr_iowait);
5053 delayacct_blkio_start();
5054 }
21aa9af0 5055 }
dd41f596 5056 switch_count = &prev->nvcsw;
1da177e4
LT
5057 }
5058
d8ac8971 5059 next = pick_next_task(rq, prev, &rf);
f26f9aff 5060 clear_tsk_need_resched(prev);
f27dde8d 5061 clear_preempt_need_resched();
1da177e4 5062
1da177e4 5063 if (likely(prev != next)) {
1da177e4 5064 rq->nr_switches++;
5311a98f
EB
5065 /*
5066 * RCU users of rcu_dereference(rq->curr) may not see
5067 * changes to task_struct made by pick_next_task().
5068 */
5069 RCU_INIT_POINTER(rq->curr, next);
22e4ebb9
MD
5070 /*
5071 * The membarrier system call requires each architecture
5072 * to have a full memory barrier after updating
306e0604
MD
5073 * rq->curr, before returning to user-space.
5074 *
5075 * Here are the schemes providing that barrier on the
5076 * various architectures:
5077 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
5078 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
5079 * - finish_lock_switch() for weakly-ordered
5080 * architectures where spin_unlock is a full barrier,
5081 * - switch_to() for arm64 (weakly-ordered, spin_unlock
5082 * is a RELEASE barrier),
22e4ebb9 5083 */
1da177e4
LT
5084 ++*switch_count;
5085
af449901 5086 migrate_disable_switch(rq, prev);
b05e75d6
JW
5087 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
5088
c73464b1 5089 trace_sched_switch(preempt, prev, next);
d1ccc66d
IM
5090
5091 /* Also unlocks the rq: */
5092 rq = context_switch(rq, prev, next, &rf);
cbce1a68 5093 } else {
cb42c9a3 5094 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
1da177e4 5095
565790d2
PZ
5096 rq_unpin_lock(rq, &rf);
5097 __balance_callbacks(rq);
5098 raw_spin_unlock_irq(&rq->lock);
5099 }
1da177e4 5100}
c259e01a 5101
9af6528e
PZ
5102void __noreturn do_task_dead(void)
5103{
d1ccc66d 5104 /* Causes final put_task_struct in finish_task_switch(): */
b5bf9a90 5105 set_special_state(TASK_DEAD);
d1ccc66d
IM
5106
5107 /* Tell freezer to ignore us: */
5108 current->flags |= PF_NOFREEZE;
5109
9af6528e
PZ
5110 __schedule(false);
5111 BUG();
d1ccc66d
IM
5112
5113 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
9af6528e 5114 for (;;)
d1ccc66d 5115 cpu_relax();
9af6528e
PZ
5116}
5117
9c40cef2
TG
5118static inline void sched_submit_work(struct task_struct *tsk)
5119{
c1cecf88
SAS
5120 unsigned int task_flags;
5121
b0fdc013 5122 if (!tsk->state)
9c40cef2 5123 return;
6d25be57 5124
c1cecf88 5125 task_flags = tsk->flags;
6d25be57
TG
5126 /*
5127 * If a worker went to sleep, notify and ask workqueue whether
5128 * it wants to wake up a task to maintain concurrency.
5129 * As this function is called inside the schedule() context,
5130 * we disable preemption to avoid it calling schedule() again
62849a96
SAS
5131 * in the possible wakeup of a kworker and because wq_worker_sleeping()
5132 * requires it.
6d25be57 5133 */
c1cecf88 5134 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6d25be57 5135 preempt_disable();
c1cecf88 5136 if (task_flags & PF_WQ_WORKER)
771b53d0
JA
5137 wq_worker_sleeping(tsk);
5138 else
5139 io_wq_worker_sleeping(tsk);
6d25be57
TG
5140 preempt_enable_no_resched();
5141 }
5142
b0fdc013
SAS
5143 if (tsk_is_pi_blocked(tsk))
5144 return;
5145
9c40cef2
TG
5146 /*
5147 * If we are going to sleep and we have plugged IO queued,
5148 * make sure to submit it to avoid deadlocks.
5149 */
5150 if (blk_needs_flush_plug(tsk))
5151 blk_schedule_flush_plug(tsk);
5152}
5153
6d25be57
TG
5154static void sched_update_worker(struct task_struct *tsk)
5155{
771b53d0
JA
5156 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
5157 if (tsk->flags & PF_WQ_WORKER)
5158 wq_worker_running(tsk);
5159 else
5160 io_wq_worker_running(tsk);
5161 }
6d25be57
TG
5162}
5163
722a9f92 5164asmlinkage __visible void __sched schedule(void)
c259e01a 5165{
9c40cef2
TG
5166 struct task_struct *tsk = current;
5167
5168 sched_submit_work(tsk);
bfd9b2b5 5169 do {
b30f0e3f 5170 preempt_disable();
fc13aeba 5171 __schedule(false);
b30f0e3f 5172 sched_preempt_enable_no_resched();
bfd9b2b5 5173 } while (need_resched());
6d25be57 5174 sched_update_worker(tsk);
c259e01a 5175}
1da177e4
LT
5176EXPORT_SYMBOL(schedule);
5177
8663effb
SRV
5178/*
5179 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
5180 * state (have scheduled out non-voluntarily) by making sure that all
5181 * tasks have either left the run queue or have gone into user space.
5182 * As idle tasks do not do either, they must not ever be preempted
5183 * (schedule out non-voluntarily).
5184 *
5185 * schedule_idle() is similar to schedule_preempt_disabled() except that it
5186 * never enables preemption because it does not call sched_submit_work().
5187 */
5188void __sched schedule_idle(void)
5189{
5190 /*
5191 * As this skips calling sched_submit_work(), which the idle task does
5192 * regardless because that function is a nop when the task is in a
5193 * TASK_RUNNING state, make sure this isn't used someplace that the
5194 * current task can be in any other state. Note, idle is always in the
5195 * TASK_RUNNING state.
5196 */
5197 WARN_ON_ONCE(current->state);
5198 do {
5199 __schedule(false);
5200 } while (need_resched());
5201}
5202
6775de49 5203#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
722a9f92 5204asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
5205{
5206 /*
5207 * If we come here after a random call to set_need_resched(),
5208 * or we have been woken up remotely but the IPI has not yet arrived,
5209 * we haven't yet exited the RCU idle mode. Do it here manually until
5210 * we find a better solution.
7cc78f8f
AL
5211 *
5212 * NB: There are buggy callers of this function. Ideally we
c467ea76 5213 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 5214 * too frequently to make sense yet.
20ab65e3 5215 */
7cc78f8f 5216 enum ctx_state prev_state = exception_enter();
20ab65e3 5217 schedule();
7cc78f8f 5218 exception_exit(prev_state);
20ab65e3
FW
5219}
5220#endif
5221
c5491ea7
TG
5222/**
5223 * schedule_preempt_disabled - called with preemption disabled
5224 *
5225 * Returns with preemption disabled. Note: preempt_count must be 1
5226 */
5227void __sched schedule_preempt_disabled(void)
5228{
ba74c144 5229 sched_preempt_enable_no_resched();
c5491ea7
TG
5230 schedule();
5231 preempt_disable();
5232}
5233
06b1f808 5234static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
5235{
5236 do {
47252cfb
SR
5237 /*
5238 * Because the function tracer can trace preempt_count_sub()
5239 * and it also uses preempt_enable/disable_notrace(), if
5240 * NEED_RESCHED is set, the preempt_enable_notrace() called
5241 * by the function tracer will call this function again and
5242 * cause infinite recursion.
5243 *
5244 * Preemption must be disabled here before the function
5245 * tracer can trace. Break up preempt_disable() into two
5246 * calls. One to disable preemption without fear of being
5247 * traced. The other to still record the preemption latency,
5248 * which can also be traced by the function tracer.
5249 */
499d7955 5250 preempt_disable_notrace();
47252cfb 5251 preempt_latency_start(1);
fc13aeba 5252 __schedule(true);
47252cfb 5253 preempt_latency_stop(1);
499d7955 5254 preempt_enable_no_resched_notrace();
a18b5d01
FW
5255
5256 /*
5257 * Check again in case we missed a preemption opportunity
5258 * between schedule and now.
5259 */
a18b5d01
FW
5260 } while (need_resched());
5261}
5262
c1a280b6 5263#ifdef CONFIG_PREEMPTION
1da177e4 5264/*
a49b4f40
VS
5265 * This is the entry point to schedule() from in-kernel preemption
5266 * off of preempt_enable.
1da177e4 5267 */
722a9f92 5268asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 5269{
1da177e4
LT
5270 /*
5271 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 5272 * we do not want to preempt the current task. Just return..
1da177e4 5273 */
fbb00b56 5274 if (likely(!preemptible()))
1da177e4
LT
5275 return;
5276
a18b5d01 5277 preempt_schedule_common();
1da177e4 5278}
376e2424 5279NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 5280EXPORT_SYMBOL(preempt_schedule);
009f60e2 5281
2c9a98d3
PZI
5282#ifdef CONFIG_PREEMPT_DYNAMIC
5283DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
ef72661e 5284EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
2c9a98d3
PZI
5285#endif
5286
5287
009f60e2 5288/**
4eaca0a8 5289 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
5290 *
5291 * The tracing infrastructure uses preempt_enable_notrace to prevent
5292 * recursion and tracing preempt enabling caused by the tracing
5293 * infrastructure itself. But as tracing can happen in areas coming
5294 * from userspace or just about to enter userspace, a preempt enable
5295 * can occur before user_exit() is called. This will cause the scheduler
5296 * to be called when the system is still in usermode.
5297 *
5298 * To prevent this, the preempt_enable_notrace will use this function
5299 * instead of preempt_schedule() to exit user context if needed before
5300 * calling the scheduler.
5301 */
4eaca0a8 5302asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
5303{
5304 enum ctx_state prev_ctx;
5305
5306 if (likely(!preemptible()))
5307 return;
5308
5309 do {
47252cfb
SR
5310 /*
5311 * Because the function tracer can trace preempt_count_sub()
5312 * and it also uses preempt_enable/disable_notrace(), if
5313 * NEED_RESCHED is set, the preempt_enable_notrace() called
5314 * by the function tracer will call this function again and
5315 * cause infinite recursion.
5316 *
5317 * Preemption must be disabled here before the function
5318 * tracer can trace. Break up preempt_disable() into two
5319 * calls. One to disable preemption without fear of being
5320 * traced. The other to still record the preemption latency,
5321 * which can also be traced by the function tracer.
5322 */
3d8f74dd 5323 preempt_disable_notrace();
47252cfb 5324 preempt_latency_start(1);
009f60e2
ON
5325 /*
5326 * Needs preempt disabled in case user_exit() is traced
5327 * and the tracer calls preempt_enable_notrace() causing
5328 * an infinite recursion.
5329 */
5330 prev_ctx = exception_enter();
fc13aeba 5331 __schedule(true);
009f60e2
ON
5332 exception_exit(prev_ctx);
5333
47252cfb 5334 preempt_latency_stop(1);
3d8f74dd 5335 preempt_enable_no_resched_notrace();
009f60e2
ON
5336 } while (need_resched());
5337}
4eaca0a8 5338EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 5339
2c9a98d3
PZI
5340#ifdef CONFIG_PREEMPT_DYNAMIC
5341DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
ef72661e 5342EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
2c9a98d3
PZI
5343#endif
5344
c1a280b6 5345#endif /* CONFIG_PREEMPTION */
1da177e4 5346
826bfeb3
PZI
5347#ifdef CONFIG_PREEMPT_DYNAMIC
5348
5349#include <linux/entry-common.h>
5350
5351/*
5352 * SC:cond_resched
5353 * SC:might_resched
5354 * SC:preempt_schedule
5355 * SC:preempt_schedule_notrace
5356 * SC:irqentry_exit_cond_resched
5357 *
5358 *
5359 * NONE:
5360 * cond_resched <- __cond_resched
5361 * might_resched <- RET0
5362 * preempt_schedule <- NOP
5363 * preempt_schedule_notrace <- NOP
5364 * irqentry_exit_cond_resched <- NOP
5365 *
5366 * VOLUNTARY:
5367 * cond_resched <- __cond_resched
5368 * might_resched <- __cond_resched
5369 * preempt_schedule <- NOP
5370 * preempt_schedule_notrace <- NOP
5371 * irqentry_exit_cond_resched <- NOP
5372 *
5373 * FULL:
5374 * cond_resched <- RET0
5375 * might_resched <- RET0
5376 * preempt_schedule <- preempt_schedule
5377 * preempt_schedule_notrace <- preempt_schedule_notrace
5378 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
5379 */
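/*
 * A minimal usage sketch (assuming a CONFIG_PREEMPT_DYNAMIC=y kernel and,
 * for the runtime knob, CONFIG_SCHED_DEBUG=y with debugfs mounted under
 * /sys/kernel/debug):
 *
 *	preempt=none | preempt=voluntary | preempt=full	  (boot command line)
 *
 *	echo full > /sys/kernel/debug/sched_preempt	  (switch at runtime)
 *	cat /sys/kernel/debug/sched_preempt		  (current mode in parens)
 *
 * Both paths end up in sched_dynamic_update(), which repoints the five
 * static calls above to the targets listed for the selected mode.
 */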
e59e10f8
PZ
5380
5381enum {
5382 preempt_dynamic_none = 0,
5383 preempt_dynamic_voluntary,
5384 preempt_dynamic_full,
5385};
5386
5387static int preempt_dynamic_mode = preempt_dynamic_full;
5388
5389static int sched_dynamic_mode(const char *str)
826bfeb3 5390{
e59e10f8
PZ
5391 if (!strcmp(str, "none"))
5392 return preempt_dynamic_none;
5393
5394 if (!strcmp(str, "voluntary"))
5395 return preempt_dynamic_voluntary;
5396
5397 if (!strcmp(str, "full"))
5398 return preempt_dynamic_full;
5399
5400 return -EINVAL;
5401}
5402
5403static void sched_dynamic_update(int mode)
5404{
5405 /*
5406 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
5407 * the ZERO state, which is invalid.
5408 */
5409 static_call_update(cond_resched, __cond_resched);
5410 static_call_update(might_resched, __cond_resched);
5411 static_call_update(preempt_schedule, __preempt_schedule_func);
5412 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5413 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
5414
5415 switch (mode) {
5416 case preempt_dynamic_none:
826bfeb3
PZI
5417 static_call_update(cond_resched, __cond_resched);
5418 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
5419 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
5420 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
5421 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
e59e10f8
PZ
5422 pr_info("Dynamic Preempt: none\n");
5423 break;
5424
5425 case preempt_dynamic_voluntary:
826bfeb3
PZI
5426 static_call_update(cond_resched, __cond_resched);
5427 static_call_update(might_resched, __cond_resched);
5428 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
5429 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
5430 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
e59e10f8
PZ
5431 pr_info("Dynamic Preempt: voluntary\n");
5432 break;
5433
5434 case preempt_dynamic_full:
826bfeb3
PZI
5435 static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
5436 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
5437 static_call_update(preempt_schedule, __preempt_schedule_func);
5438 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5439 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
e59e10f8
PZ
5440 pr_info("Dynamic Preempt: full\n");
5441 break;
5442 }
5443
5444 preempt_dynamic_mode = mode;
5445}
5446
5447static int __init setup_preempt_mode(char *str)
5448{
5449 int mode = sched_dynamic_mode(str);
5450 if (mode < 0) {
5451 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
826bfeb3
PZI
5452 return 0;
5453 }
e59e10f8
PZ
5454
5455 sched_dynamic_update(mode);
826bfeb3
PZI
5456 return 1;
5457}
5458__setup("preempt=", setup_preempt_mode);
5459
e59e10f8
PZ
5460#ifdef CONFIG_SCHED_DEBUG
5461
5462static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
5463 size_t cnt, loff_t *ppos)
5464{
5465 char buf[16];
5466 int mode;
5467
5468 if (cnt > 15)
5469 cnt = 15;
5470
5471 if (copy_from_user(&buf, ubuf, cnt))
5472 return -EFAULT;
5473
5474 buf[cnt] = 0;
5475 mode = sched_dynamic_mode(strstrip(buf));
5476 if (mode < 0)
5477 return mode;
5478
5479 sched_dynamic_update(mode);
5480
5481 *ppos += cnt;
5482
5483 return cnt;
5484}
5485
5486static int sched_dynamic_show(struct seq_file *m, void *v)
5487{
5488 static const char * preempt_modes[] = {
5489 "none", "voluntary", "full"
5490 };
5491 int i;
5492
5493 for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
5494 if (preempt_dynamic_mode == i)
5495 seq_puts(m, "(");
5496 seq_puts(m, preempt_modes[i]);
5497 if (preempt_dynamic_mode == i)
5498 seq_puts(m, ")");
5499
5500 seq_puts(m, " ");
5501 }
5502
5503 seq_puts(m, "\n");
5504 return 0;
5505}
5506
5507static int sched_dynamic_open(struct inode *inode, struct file *filp)
5508{
5509 return single_open(filp, sched_dynamic_show, NULL);
5510}
5511
5512static const struct file_operations sched_dynamic_fops = {
5513 .open = sched_dynamic_open,
5514 .write = sched_dynamic_write,
5515 .read = seq_read,
5516 .llseek = seq_lseek,
5517 .release = single_release,
5518};
5519
5520static __init int sched_init_debug_dynamic(void)
5521{
5522 debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
5523 return 0;
5524}
5525late_initcall(sched_init_debug_dynamic);
5526
5527#endif /* CONFIG_SCHED_DEBUG */
826bfeb3
PZI
5528#endif /* CONFIG_PREEMPT_DYNAMIC */
5529
5530
1da177e4 5531/*
a49b4f40 5532 * This is the entry point to schedule() from kernel preemption
1da177e4
LT
5533 * off of irq context.
5534 * Note that this is called and returns with irqs disabled. This will
5535 * protect us against recursive calling from irq.
5536 */
722a9f92 5537asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 5538{
b22366cd 5539 enum ctx_state prev_state;
6478d880 5540
2ed6e34f 5541 /* Catch callers which need to be fixed */
f27dde8d 5542 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 5543
b22366cd
FW
5544 prev_state = exception_enter();
5545
3a5c359a 5546 do {
3d8f74dd 5547 preempt_disable();
3a5c359a 5548 local_irq_enable();
fc13aeba 5549 __schedule(true);
3a5c359a 5550 local_irq_disable();
3d8f74dd 5551 sched_preempt_enable_no_resched();
5ed0cec0 5552 } while (need_resched());
b22366cd
FW
5553
5554 exception_exit(prev_state);
1da177e4
LT
5555}
5556
ac6424b9 5557int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
95cdf3b7 5558 void *key)
1da177e4 5559{
062d3f95 5560 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
63859d4f 5561 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 5562}
1da177e4
LT
5563EXPORT_SYMBOL(default_wake_function);
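/*
 * Illustrative sketch of where default_wake_function() ends up being used;
 * the wait queue head and condition below are made-up names. The common
 * wait_event*() helpers queue an entry whose ->func is (or wraps)
 * default_wake_function(), so wake_up() ultimately lands in
 * try_to_wake_up():
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	wait_event_interruptible(my_wq, my_cond);	// sleeper
 *
 *	my_cond = true;					// waker
 *	wake_up(&my_wq);
 */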
5564
b29739f9
IM
5565#ifdef CONFIG_RT_MUTEXES
5566
acd58620
PZ
5567static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
5568{
5569 if (pi_task)
5570 prio = min(prio, pi_task->prio);
5571
5572 return prio;
5573}
5574
5575static inline int rt_effective_prio(struct task_struct *p, int prio)
5576{
5577 struct task_struct *pi_task = rt_mutex_get_top_task(p);
5578
5579 return __rt_effective_prio(pi_task, prio);
5580}
5581
b29739f9
IM
5582/*
5583 * rt_mutex_setprio - set the current priority of a task
acd58620
PZ
5584 * @p: task to boost
5585 * @pi_task: donor task
b29739f9
IM
5586 *
5587 * This function changes the 'effective' priority of a task. It does
5588 * not touch ->normal_prio like __setscheduler().
5589 *
c365c292
TG
5590 * Used by the rt_mutex code to implement priority inheritance
5591 * logic. Call site only calls if the priority of the task changed.
b29739f9 5592 */
acd58620 5593void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
b29739f9 5594{
acd58620 5595 int prio, oldprio, queued, running, queue_flag =
7a57f32a 5596 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
83ab0aa0 5597 const struct sched_class *prev_class;
eb580751
PZ
5598 struct rq_flags rf;
5599 struct rq *rq;
b29739f9 5600
acd58620
PZ
5601 /* XXX used to be waiter->prio, not waiter->task->prio */
5602 prio = __rt_effective_prio(pi_task, p->normal_prio);
5603
5604 /*
5605 * If nothing changed; bail early.
5606 */
5607 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
5608 return;
b29739f9 5609
eb580751 5610 rq = __task_rq_lock(p, &rf);
80f5c1b8 5611 update_rq_clock(rq);
acd58620
PZ
5612 /*
5613 * Set under pi_lock && rq->lock, such that the value can be used under
5614 * either lock.
5615 *
5616 * Note that there is loads of trickery needed to make this pointer cache work
5617 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
5618 * ensure a task is de-boosted (pi_task is set to NULL) before the
5619 * task is allowed to run again (and can exit). This ensures the pointer
b19a888c 5620 * points to a blocked task -- which guarantees the task is present.
acd58620
PZ
5621 */
5622 p->pi_top_task = pi_task;
5623
5624 /*
5625 * For FIFO/RR we only need to set prio, if that matches we're done.
5626 */
5627 if (prio == p->prio && !dl_prio(prio))
5628 goto out_unlock;
b29739f9 5629
1c4dd99b
TG
5630 /*
5631 * Idle task boosting is a no-no in general. There is one
5632 * exception, when PREEMPT_RT and NOHZ is active:
5633 *
5634 * The idle task calls get_next_timer_interrupt() and holds
5635 * the timer wheel base->lock on the CPU and another CPU wants
5636 * to access the timer (probably to cancel it). We can safely
5637 * ignore the boosting request, as the idle CPU runs this code
5638 * with interrupts disabled and will complete the lock
5639 * protected section without being interrupted. So there is no
5640 * real need to boost.
5641 */
5642 if (unlikely(p == rq->idle)) {
5643 WARN_ON(p != rq->curr);
5644 WARN_ON(p->pi_blocked_on);
5645 goto out_unlock;
5646 }
5647
b91473ff 5648 trace_sched_pi_setprio(p, pi_task);
d5f9f942 5649 oldprio = p->prio;
ff77e468
PZ
5650
5651 if (oldprio == prio)
5652 queue_flag &= ~DEQUEUE_MOVE;
5653
83ab0aa0 5654 prev_class = p->sched_class;
da0c1e65 5655 queued = task_on_rq_queued(p);
051a1d1a 5656 running = task_current(rq, p);
da0c1e65 5657 if (queued)
ff77e468 5658 dequeue_task(rq, p, queue_flag);
0e1f3483 5659 if (running)
f3cd1c4e 5660 put_prev_task(rq, p);
dd41f596 5661
2d3d891d
DF
5662 /*
5663 * Boosting conditions are:
5664 * 1. -rt task is running and holds mutex A
5665 * --> -dl task blocks on mutex A
5666 *
5667 * 2. -dl task is running and holds mutex A
5668 * --> -dl task blocks on mutex A and could preempt the
5669 * running task
5670 */
5671 if (dl_prio(prio)) {
466af29b 5672 if (!dl_prio(p->normal_prio) ||
740797ce
JL
5673 (pi_task && dl_prio(pi_task->prio) &&
5674 dl_entity_preempt(&pi_task->dl, &p->dl))) {
2279f540 5675 p->dl.pi_se = pi_task->dl.pi_se;
ff77e468 5676 queue_flag |= ENQUEUE_REPLENISH;
2279f540
JL
5677 } else {
5678 p->dl.pi_se = &p->dl;
5679 }
aab03e05 5680 p->sched_class = &dl_sched_class;
2d3d891d
DF
5681 } else if (rt_prio(prio)) {
5682 if (dl_prio(oldprio))
2279f540 5683 p->dl.pi_se = &p->dl;
2d3d891d 5684 if (oldprio < prio)
ff77e468 5685 queue_flag |= ENQUEUE_HEAD;
dd41f596 5686 p->sched_class = &rt_sched_class;
2d3d891d
DF
5687 } else {
5688 if (dl_prio(oldprio))
2279f540 5689 p->dl.pi_se = &p->dl;
746db944
BS
5690 if (rt_prio(oldprio))
5691 p->rt.timeout = 0;
dd41f596 5692 p->sched_class = &fair_sched_class;
2d3d891d 5693 }
dd41f596 5694
b29739f9
IM
5695 p->prio = prio;
5696
da0c1e65 5697 if (queued)
ff77e468 5698 enqueue_task(rq, p, queue_flag);
a399d233 5699 if (running)
03b7fad1 5700 set_next_task(rq, p);
cb469845 5701
da7a735e 5702 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 5703out_unlock:
d1ccc66d
IM
5704 /* Avoid rq from going away on us: */
5705 preempt_disable();
4c9a4bc8 5706
565790d2
PZ
5707 rq_unpin_lock(rq, &rf);
5708 __balance_callbacks(rq);
5709 raw_spin_unlock(&rq->lock);
5710
4c9a4bc8 5711 preempt_enable();
b29739f9 5712}
acd58620
PZ
5713#else
5714static inline int rt_effective_prio(struct task_struct *p, int prio)
5715{
5716 return prio;
5717}
b29739f9 5718#endif
d50dde5a 5719
36c8b586 5720void set_user_nice(struct task_struct *p, long nice)
1da177e4 5721{
49bd21ef 5722 bool queued, running;
53a23364 5723 int old_prio;
eb580751 5724 struct rq_flags rf;
70b97a7f 5725 struct rq *rq;
1da177e4 5726
75e45d51 5727 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
5728 return;
5729 /*
5730 * We have to be careful, if called from sys_setpriority(),
5731 * the task might be in the middle of scheduling on another CPU.
5732 */
eb580751 5733 rq = task_rq_lock(p, &rf);
2fb8d367
PZ
5734 update_rq_clock(rq);
5735
1da177e4
LT
5736 /*
5737 * The RT priorities are set via sched_setscheduler(), but we still
5738 * allow the 'normal' nice value to be set - but as expected
b19a888c 5739 * it won't have any effect on scheduling as long as the task is
aab03e05 5740 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 5741 */
aab03e05 5742 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
5743 p->static_prio = NICE_TO_PRIO(nice);
5744 goto out_unlock;
5745 }
da0c1e65 5746 queued = task_on_rq_queued(p);
49bd21ef 5747 running = task_current(rq, p);
da0c1e65 5748 if (queued)
7a57f32a 5749 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
49bd21ef
PZ
5750 if (running)
5751 put_prev_task(rq, p);
1da177e4 5752
1da177e4 5753 p->static_prio = NICE_TO_PRIO(nice);
9059393e 5754 set_load_weight(p, true);
b29739f9
IM
5755 old_prio = p->prio;
5756 p->prio = effective_prio(p);
1da177e4 5757
5443a0be 5758 if (queued)
7134b3e9 5759 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
49bd21ef 5760 if (running)
03b7fad1 5761 set_next_task(rq, p);
5443a0be
FW
5762
5763 /*
5764 * If the task increased its priority or is running and
5765 * lowered its priority, then reschedule its CPU:
5766 */
5767 p->sched_class->prio_changed(rq, p, old_prio);
5768
1da177e4 5769out_unlock:
eb580751 5770 task_rq_unlock(rq, p, &rf);
1da177e4 5771}
1da177e4
LT
5772EXPORT_SYMBOL(set_user_nice);
5773
e43379f1
MM
5774/*
5775 * can_nice - check if a task can reduce its nice value
5776 * @p: task
5777 * @nice: nice value
5778 */
36c8b586 5779int can_nice(const struct task_struct *p, const int nice)
e43379f1 5780{
d1ccc66d 5781 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7aa2c016 5782 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 5783
78d7d407 5784 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
5785 capable(CAP_SYS_NICE));
5786}
5787
1da177e4
LT
5788#ifdef __ARCH_WANT_SYS_NICE
5789
5790/*
5791 * sys_nice - change the priority of the current process.
5792 * @increment: priority increment
5793 *
5794 * sys_setpriority is a more generic, but much slower function that
5795 * does similar things.
5796 */
5add95d4 5797SYSCALL_DEFINE1(nice, int, increment)
1da177e4 5798{
48f24c4d 5799 long nice, retval;
1da177e4
LT
5800
5801 /*
5802 * Setpriority might change our priority at the same moment.
5803 * We don't have to worry. Conceptually one call occurs first
5804 * and we have a single winner.
5805 */
a9467fa3 5806 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 5807 nice = task_nice(current) + increment;
1da177e4 5808
a9467fa3 5809 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
5810 if (increment < 0 && !can_nice(current, nice))
5811 return -EPERM;
5812
1da177e4
LT
5813 retval = security_task_setnice(current, nice);
5814 if (retval)
5815 return retval;
5816
5817 set_user_nice(current, nice);
5818 return 0;
5819}
5820
5821#endif
5822
5823/**
5824 * task_prio - return the priority value of a given task.
5825 * @p: the task in question.
5826 *
e69f6186 5827 * Return: The priority value as seen by users in /proc.
c541bb78
DE
5828 *
5829 * sched policy return value kernel prio user prio/nice
5830 *
5831 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
5832 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
5833 * deadline -101 -1 0
1da177e4 5834 */
36c8b586 5835int task_prio(const struct task_struct *p)
1da177e4
LT
5836{
5837 return p->prio - MAX_RT_PRIO;
5838}
5839
1da177e4 5840/**
d1ccc66d 5841 * idle_cpu - is a given CPU idle currently?
1da177e4 5842 * @cpu: the processor in question.
e69f6186
YB
5843 *
5844 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
5845 */
5846int idle_cpu(int cpu)
5847{
908a3283
TG
5848 struct rq *rq = cpu_rq(cpu);
5849
5850 if (rq->curr != rq->idle)
5851 return 0;
5852
5853 if (rq->nr_running)
5854 return 0;
5855
5856#ifdef CONFIG_SMP
126c2092 5857 if (rq->ttwu_pending)
908a3283
TG
5858 return 0;
5859#endif
5860
5861 return 1;
1da177e4
LT
5862}
5863
943d355d
RJ
5864/**
5865 * available_idle_cpu - is a given CPU idle for enqueuing work.
5866 * @cpu: the CPU in question.
5867 *
5868 * Return: 1 if the CPU is currently idle. 0 otherwise.
5869 */
5870int available_idle_cpu(int cpu)
5871{
5872 if (!idle_cpu(cpu))
5873 return 0;
5874
247f2f6f
RJ
5875 if (vcpu_is_preempted(cpu))
5876 return 0;
5877
908a3283 5878 return 1;
1da177e4
LT
5879}
5880
1da177e4 5881/**
d1ccc66d 5882 * idle_task - return the idle task for a given CPU.
1da177e4 5883 * @cpu: the processor in question.
e69f6186 5884 *
d1ccc66d 5885 * Return: The idle task for the CPU @cpu.
1da177e4 5886 */
36c8b586 5887struct task_struct *idle_task(int cpu)
1da177e4
LT
5888{
5889 return cpu_rq(cpu)->idle;
5890}
5891
7d6a905f
VK
5892#ifdef CONFIG_SMP
5893/*
5894 * This function computes an effective utilization for the given CPU, to be
5895 * used for frequency selection given the linear relation: f = u * f_max.
5896 *
5897 * The scheduler tracks the following metrics:
5898 *
5899 * cpu_util_{cfs,rt,dl,irq}()
5900 * cpu_bw_dl()
5901 *
5902 * Where the cfs,rt and dl util numbers are tracked with the same metric and
5903 * synchronized windows and are thus directly comparable.
5904 *
5905 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
5906 * which excludes things like IRQ and steal-time. The latter are then accrued
5907 * in the irq utilization.
5908 *
5909 * The DL bandwidth number otoh is not a measured metric but a value computed
5910 * based on the task model parameters and gives the minimal utilization
5911 * required to meet deadlines.
5912 */
a5418be9
VK
5913unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
5914 unsigned long max, enum cpu_util_type type,
7d6a905f
VK
5915 struct task_struct *p)
5916{
5917 unsigned long dl_util, util, irq;
5918 struct rq *rq = cpu_rq(cpu);
5919
5920 if (!uclamp_is_used() &&
5921 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
5922 return max;
5923 }
5924
5925 /*
5926 * Early check to see if IRQ/steal time saturates the CPU; this can happen
5927 * because of inaccuracies in how we track these -- see
5928 * update_irq_load_avg().
5929 */
5930 irq = cpu_util_irq(rq);
5931 if (unlikely(irq >= max))
5932 return max;
5933
5934 /*
5935 * Because the time spent on RT/DL tasks is visible as 'lost' time to
5936 * CFS tasks and we use the same metric to track the effective
5937 * utilization (PELT windows are synchronized) we can directly add them
5938 * to obtain the CPU's actual utilization.
5939 *
5940 * CFS and RT utilization can be boosted or capped, depending on
5941 * utilization clamp constraints requested by currently RUNNABLE
5942 * tasks.
5943 * When there are no CFS RUNNABLE tasks, clamps are released and
5944 * frequency will be gracefully reduced with the utilization decay.
5945 */
5946 util = util_cfs + cpu_util_rt(rq);
5947 if (type == FREQUENCY_UTIL)
5948 util = uclamp_rq_util_with(rq, util, p);
5949
5950 dl_util = cpu_util_dl(rq);
5951
5952 /*
5953 * For frequency selection we do not make cpu_util_dl() a permanent part
5954 * of this sum because we want to use cpu_bw_dl() later on, but we need
5955 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
5956 * that we select f_max when there is no idle time.
5957 *
5958 * NOTE: numerical errors or stop class might cause us to not quite hit
5959 * saturation when we should -- something for later.
5960 */
5961 if (util + dl_util >= max)
5962 return max;
5963
5964 /*
5965 * OTOH, for energy computation we need the estimated running time, so
5966 * include util_dl and ignore dl_bw.
5967 */
5968 if (type == ENERGY_UTIL)
5969 util += dl_util;
5970
5971 /*
5972 * There is still idle time; further improve the number by using the
5973 * irq metric. Because IRQ/steal time is hidden from the task clock we
5974 * need to scale the task numbers:
5975 *
5976 *
5977 * U' = irq + ((max - irq) / max) * U
5978 *
5979 */
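	/*
	 * Numerical example (made-up values): with max = 1024, irq = 256 and
	 * util = 512 this yields U' = 256 + (768/1024) * 512 = 640.
	 */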
5980 util = scale_irq_capacity(util, irq, max);
5981 util += irq;
5982
5983 /*
5984 * Bandwidth required by DEADLINE must always be granted while, for
5985 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
5986 * to gracefully reduce the frequency when no tasks show up for longer
5987 * periods of time.
5988 *
5989 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
5990 * bw_dl as requested freq. However, cpufreq is not yet ready for such
5991 * an interface. So, we only do the latter for now.
5992 */
5993 if (type == FREQUENCY_UTIL)
5994 util += cpu_bw_dl(rq);
5995
5996 return min(max, util);
5997}
a5418be9
VK
5998
5999unsigned long sched_cpu_util(int cpu, unsigned long max)
6000{
6001 return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
6002 ENERGY_UTIL, NULL);
6003}
7d6a905f
VK
6004#endif /* CONFIG_SMP */
6005
1da177e4
LT
6006/**
6007 * find_process_by_pid - find a process with a matching PID value.
6008 * @pid: the pid in question.
e69f6186
YB
6009 *
6010 * The task of @pid, if found. %NULL otherwise.
1da177e4 6011 */
a9957449 6012static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 6013{
228ebcbe 6014 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
6015}
6016
c13db6b1
SR
6017/*
6018 * sched_setparam() passes in -1 for its policy, to let the functions
6019 * it calls know not to change it.
6020 */
6021#define SETPARAM_POLICY -1
6022
c365c292
TG
6023static void __setscheduler_params(struct task_struct *p,
6024 const struct sched_attr *attr)
1da177e4 6025{
d50dde5a
DF
6026 int policy = attr->sched_policy;
6027
c13db6b1 6028 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
6029 policy = p->policy;
6030
1da177e4 6031 p->policy = policy;
d50dde5a 6032
aab03e05
DF
6033 if (dl_policy(policy))
6034 __setparam_dl(p, attr);
39fd8fd2 6035 else if (fair_policy(policy))
d50dde5a
DF
6036 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
6037
39fd8fd2
PZ
6038 /*
6039 * __sched_setscheduler() ensures attr->sched_priority == 0 when
6040 * !rt_policy. Always setting this ensures that things like
6041 * getparam()/getattr() don't report silly values for !rt tasks.
6042 */
6043 p->rt_priority = attr->sched_priority;
383afd09 6044 p->normal_prio = normal_prio(p);
9059393e 6045 set_load_weight(p, true);
c365c292 6046}
39fd8fd2 6047
c365c292
TG
6048/* Actually do priority change: must hold pi & rq lock. */
6049static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 6050 const struct sched_attr *attr, bool keep_boost)
c365c292 6051{
a509a7cd
PB
6052 /*
6053 * If params can't change scheduling class changes aren't allowed
6054 * either.
6055 */
6056 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
6057 return;
6058
c365c292 6059 __setscheduler_params(p, attr);
d50dde5a 6060
383afd09 6061 /*
0782e63b
TG
6062 * Keep a potential priority boosting if called from
6063 * sched_setscheduler().
383afd09 6064 */
acd58620 6065 p->prio = normal_prio(p);
0782e63b 6066 if (keep_boost)
acd58620 6067 p->prio = rt_effective_prio(p, p->prio);
383afd09 6068
aab03e05
DF
6069 if (dl_prio(p->prio))
6070 p->sched_class = &dl_sched_class;
6071 else if (rt_prio(p->prio))
ffd44db5
PZ
6072 p->sched_class = &rt_sched_class;
6073 else
6074 p->sched_class = &fair_sched_class;
1da177e4 6075}
aab03e05 6076
c69e8d9c 6077/*
d1ccc66d 6078 * Check the target process has a UID that matches the current process's:
c69e8d9c
DH
6079 */
6080static bool check_same_owner(struct task_struct *p)
6081{
6082 const struct cred *cred = current_cred(), *pcred;
6083 bool match;
6084
6085 rcu_read_lock();
6086 pcred = __task_cred(p);
9c806aa0
EB
6087 match = (uid_eq(cred->euid, pcred->euid) ||
6088 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
6089 rcu_read_unlock();
6090 return match;
6091}
6092
d50dde5a
DF
6093static int __sched_setscheduler(struct task_struct *p,
6094 const struct sched_attr *attr,
dbc7f069 6095 bool user, bool pi)
1da177e4 6096{
383afd09
SR
6097 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
6098 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 6099 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 6100 int new_effective_prio, policy = attr->sched_policy;
83ab0aa0 6101 const struct sched_class *prev_class;
565790d2 6102 struct callback_head *head;
eb580751 6103 struct rq_flags rf;
ca94c442 6104 int reset_on_fork;
7a57f32a 6105 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
eb580751 6106 struct rq *rq;
1da177e4 6107
896bbb25
SRV
6108 /* The pi code expects interrupts enabled */
6109 BUG_ON(pi && in_interrupt());
1da177e4 6110recheck:
d1ccc66d 6111 /* Double check policy once rq lock held: */
ca94c442
LP
6112 if (policy < 0) {
6113 reset_on_fork = p->sched_reset_on_fork;
1da177e4 6114 policy = oldpolicy = p->policy;
ca94c442 6115 } else {
7479f3c9 6116 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 6117
20f9cd2a 6118 if (!valid_policy(policy))
ca94c442
LP
6119 return -EINVAL;
6120 }
6121
794a56eb 6122 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7479f3c9
PZ
6123 return -EINVAL;
6124
1da177e4
LT
6125 /*
6126 * Valid priorities for SCHED_FIFO and SCHED_RR are
ae18ad28 6127 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
dd41f596 6128 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 6129 */
ae18ad28 6130 if (attr->sched_priority > MAX_RT_PRIO-1)
1da177e4 6131 return -EINVAL;
aab03e05
DF
6132 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
6133 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
6134 return -EINVAL;
6135
37e4ab3f
OC
6136 /*
6137 * Allow unprivileged RT tasks to decrease priority:
6138 */
961ccddd 6139 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 6140 if (fair_policy(policy)) {
d0ea0268 6141 if (attr->sched_nice < task_nice(p) &&
eaad4513 6142 !can_nice(p, attr->sched_nice))
d50dde5a
DF
6143 return -EPERM;
6144 }
6145
e05606d3 6146 if (rt_policy(policy)) {
a44702e8
ON
6147 unsigned long rlim_rtprio =
6148 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909 6149
d1ccc66d 6150 /* Can't set/change the rt policy: */
8dc3e909
ON
6151 if (policy != p->policy && !rlim_rtprio)
6152 return -EPERM;
6153
d1ccc66d 6154 /* Can't increase priority: */
d50dde5a
DF
6155 if (attr->sched_priority > p->rt_priority &&
6156 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
6157 return -EPERM;
6158 }
c02aa73b 6159
d44753b8
JL
6160 /*
6161 * Can't set/change SCHED_DEADLINE policy at all for now
6162 * (safest behavior); in the future we would like to allow
6163 * unprivileged DL tasks to increase their relative deadline
6164 * or reduce their runtime (both ways reducing utilization)
6165 */
6166 if (dl_policy(policy))
6167 return -EPERM;
6168
dd41f596 6169 /*
c02aa73b
DH
6170 * Treat SCHED_IDLE as nice 20. Only allow a switch to
6171 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 6172 */
1da1843f 6173 if (task_has_idle_policy(p) && !idle_policy(policy)) {
d0ea0268 6174 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
6175 return -EPERM;
6176 }
5fe1d75f 6177
d1ccc66d 6178 /* Can't change other user's priorities: */
c69e8d9c 6179 if (!check_same_owner(p))
37e4ab3f 6180 return -EPERM;
ca94c442 6181
d1ccc66d 6182 /* Normal users shall not reset the sched_reset_on_fork flag: */
ca94c442
LP
6183 if (p->sched_reset_on_fork && !reset_on_fork)
6184 return -EPERM;
37e4ab3f 6185 }
1da177e4 6186
725aad24 6187 if (user) {
794a56eb
JL
6188 if (attr->sched_flags & SCHED_FLAG_SUGOV)
6189 return -EINVAL;
6190
b0ae1981 6191 retval = security_task_setscheduler(p);
725aad24
JF
6192 if (retval)
6193 return retval;
6194 }
6195
a509a7cd
PB
6196 /* Update task specific "requested" clamps */
6197 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
6198 retval = uclamp_validate(p, attr);
6199 if (retval)
6200 return retval;
6201 }
6202
710da3c8
JL
6203 if (pi)
6204 cpuset_read_lock();
6205
b29739f9 6206 /*
d1ccc66d 6207 * Make sure no PI-waiters arrive (or leave) while we are
b29739f9 6208 * changing the priority of the task:
0122ec5b 6209 *
25985edc 6210 * To be able to change p->policy safely, the appropriate
1da177e4
LT
6211 * runqueue lock must be held.
6212 */
eb580751 6213 rq = task_rq_lock(p, &rf);
80f5c1b8 6214 update_rq_clock(rq);
dc61b1d6 6215
34f971f6 6216 /*
d1ccc66d 6217 * Changing the policy of the stop threads is a very bad idea:
34f971f6
PZ
6218 */
6219 if (p == rq->stop) {
4b211f2b
MP
6220 retval = -EINVAL;
6221 goto unlock;
34f971f6
PZ
6222 }
6223
a51e9198 6224 /*
d6b1e911
TG
6225 * If not changing anything there's no need to proceed further,
6226 * but store a possible modification of reset_on_fork.
a51e9198 6227 */
d50dde5a 6228 if (unlikely(policy == p->policy)) {
d0ea0268 6229 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
6230 goto change;
6231 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
6232 goto change;
75381608 6233 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 6234 goto change;
a509a7cd
PB
6235 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
6236 goto change;
d50dde5a 6237
d6b1e911 6238 p->sched_reset_on_fork = reset_on_fork;
4b211f2b
MP
6239 retval = 0;
6240 goto unlock;
a51e9198 6241 }
d50dde5a 6242change:
a51e9198 6243
dc61b1d6 6244 if (user) {
332ac17e 6245#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
6246 /*
6247 * Do not allow realtime tasks into groups that have no runtime
6248 * assigned.
6249 */
6250 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
6251 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
6252 !task_group_is_autogroup(task_group(p))) {
4b211f2b
MP
6253 retval = -EPERM;
6254 goto unlock;
dc61b1d6 6255 }
dc61b1d6 6256#endif
332ac17e 6257#ifdef CONFIG_SMP
794a56eb
JL
6258 if (dl_bandwidth_enabled() && dl_policy(policy) &&
6259 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
332ac17e 6260 cpumask_t *span = rq->rd->span;
332ac17e
DF
6261
6262 /*
6263 * Don't allow tasks with an affinity mask smaller than
6264 * the entire root_domain to become SCHED_DEADLINE. We
6265 * will also fail if there's no bandwidth available.
6266 */
3bd37062 6267 if (!cpumask_subset(span, p->cpus_ptr) ||
e4099a5e 6268 rq->rd->dl_bw.bw == 0) {
4b211f2b
MP
6269 retval = -EPERM;
6270 goto unlock;
332ac17e
DF
6271 }
6272 }
6273#endif
6274 }
dc61b1d6 6275
d1ccc66d 6276 /* Re-check policy now with rq lock held: */
1da177e4
LT
6277 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6278 policy = oldpolicy = -1;
eb580751 6279 task_rq_unlock(rq, p, &rf);
710da3c8
JL
6280 if (pi)
6281 cpuset_read_unlock();
1da177e4
LT
6282 goto recheck;
6283 }
332ac17e
DF
6284
6285 /*
6286 * If setscheduling to SCHED_DEADLINE (or changing the parameters
6287 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
6288 * is available.
6289 */
06a76fe0 6290 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
4b211f2b
MP
6291 retval = -EBUSY;
6292 goto unlock;
332ac17e
DF
6293 }
6294
c365c292
TG
6295 p->sched_reset_on_fork = reset_on_fork;
6296 oldprio = p->prio;
6297
dbc7f069
PZ
6298 if (pi) {
6299 /*
6300 * Take priority boosted tasks into account. If the new
6301 * effective priority is unchanged, we just store the new
6302 * normal parameters and do not touch the scheduler class and
6303 * the runqueue. This will be done when the task deboosts
6304 * itself.
6305 */
acd58620 6306 new_effective_prio = rt_effective_prio(p, newprio);
ff77e468
PZ
6307 if (new_effective_prio == oldprio)
6308 queue_flags &= ~DEQUEUE_MOVE;
c365c292
TG
6309 }
6310
da0c1e65 6311 queued = task_on_rq_queued(p);
051a1d1a 6312 running = task_current(rq, p);
da0c1e65 6313 if (queued)
ff77e468 6314 dequeue_task(rq, p, queue_flags);
0e1f3483 6315 if (running)
f3cd1c4e 6316 put_prev_task(rq, p);
f6b53205 6317
83ab0aa0 6318 prev_class = p->sched_class;
a509a7cd 6319
dbc7f069 6320 __setscheduler(rq, p, attr, pi);
a509a7cd 6321 __setscheduler_uclamp(p, attr);
f6b53205 6322
da0c1e65 6323 if (queued) {
81a44c54
TG
6324 /*
6325 * We enqueue to tail when the priority of a task is
6326 * increased (user space view).
6327 */
ff77e468
PZ
6328 if (oldprio < p->prio)
6329 queue_flags |= ENQUEUE_HEAD;
1de64443 6330
ff77e468 6331 enqueue_task(rq, p, queue_flags);
81a44c54 6332 }
a399d233 6333 if (running)
03b7fad1 6334 set_next_task(rq, p);
cb469845 6335
da7a735e 6336 check_class_changed(rq, p, prev_class, oldprio);
d1ccc66d
IM
6337
6338 /* Avoid rq from going away on us: */
6339 preempt_disable();
565790d2 6340 head = splice_balance_callbacks(rq);
eb580751 6341 task_rq_unlock(rq, p, &rf);
b29739f9 6342
710da3c8
JL
6343 if (pi) {
6344 cpuset_read_unlock();
dbc7f069 6345 rt_mutex_adjust_pi(p);
710da3c8 6346 }
95e02ca9 6347
d1ccc66d 6348 /* Run balance callbacks after we've adjusted the PI chain: */
565790d2 6349 balance_callbacks(rq, head);
4c9a4bc8 6350 preempt_enable();
95e02ca9 6351
1da177e4 6352 return 0;
4b211f2b
MP
6353
6354unlock:
6355 task_rq_unlock(rq, p, &rf);
710da3c8
JL
6356 if (pi)
6357 cpuset_read_unlock();
4b211f2b 6358 return retval;
1da177e4 6359}
961ccddd 6360
7479f3c9
PZ
6361static int _sched_setscheduler(struct task_struct *p, int policy,
6362 const struct sched_param *param, bool check)
6363{
6364 struct sched_attr attr = {
6365 .sched_policy = policy,
6366 .sched_priority = param->sched_priority,
6367 .sched_nice = PRIO_TO_NICE(p->static_prio),
6368 };
6369
c13db6b1
SR
6370 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
6371 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
6372 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
6373 policy &= ~SCHED_RESET_ON_FORK;
6374 attr.sched_policy = policy;
6375 }
6376
dbc7f069 6377 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 6378}
961ccddd
RR
6379/**
6380 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
6381 * @p: the task in question.
6382 * @policy: new policy.
6383 * @param: structure containing the new RT priority.
6384 *
7318d4cc
PZ
6385 * Use sched_set_fifo(), read its comment.
6386 *
e69f6186
YB
6387 * Return: 0 on success. An error code otherwise.
6388 *
961ccddd
RR
6389 * NOTE that the task may already be dead.
6390 */
6391int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 6392 const struct sched_param *param)
961ccddd 6393{
7479f3c9 6394 return _sched_setscheduler(p, policy, param, true);
961ccddd 6395}
1da177e4 6396
d50dde5a
DF
6397int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
6398{
dbc7f069 6399 return __sched_setscheduler(p, attr, true, true);
d50dde5a 6400}
d50dde5a 6401
794a56eb
JL
6402int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
6403{
6404 return __sched_setscheduler(p, attr, false, true);
6405}
6406
961ccddd
RR
6407/**
6408 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
6409 * @p: the task in question.
6410 * @policy: new policy.
6411 * @param: structure containing the new RT priority.
6412 *
6413 * Just like sched_setscheduler, only don't bother checking if the
6414 * current context has permission. For example, this is needed in
6415 * stop_machine(): we create temporary high priority worker threads,
6416 * but our caller might not have that capability.
e69f6186
YB
6417 *
6418 * Return: 0 on success. An error code otherwise.
961ccddd
RR
6419 */
6420int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 6421 const struct sched_param *param)
961ccddd 6422{
7479f3c9 6423 return _sched_setscheduler(p, policy, param, false);
961ccddd
RR
6424}
6425
7318d4cc
PZ
6426/*
6427 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
6428 * incapable of resource management, which is the one thing an OS really should
6429 * be doing.
6430 *
6431 * This is of course the reason it is limited to privileged users only.
6432 *
6433 * Worse still, it is fundamentally impossible to compose static priority
6434 * workloads. You cannot take two correctly working static prio workloads
6435 * and smash them together and still expect them to work.
6436 *
6437 * For this reason 'all' FIFO tasks the kernel creates are basically at:
6438 *
6439 * MAX_RT_PRIO / 2
6440 *
6441 * The administrator _MUST_ configure the system, the kernel simply doesn't
6442 * know enough information to make a sensible choice.
6443 */
8b700983 6444void sched_set_fifo(struct task_struct *p)
7318d4cc
PZ
6445{
6446 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
8b700983 6447 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7318d4cc
PZ
6448}
6449EXPORT_SYMBOL_GPL(sched_set_fifo);
6450
6451/*
6452 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
6453 */
8b700983 6454void sched_set_fifo_low(struct task_struct *p)
7318d4cc
PZ
6455{
6456 struct sched_param sp = { .sched_priority = 1 };
8b700983 6457 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7318d4cc
PZ
6458}
6459EXPORT_SYMBOL_GPL(sched_set_fifo_low);
6460
8b700983 6461void sched_set_normal(struct task_struct *p, int nice)
7318d4cc
PZ
6462{
6463 struct sched_attr attr = {
6464 .sched_policy = SCHED_NORMAL,
6465 .sched_nice = nice,
6466 };
8b700983 6467 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
7318d4cc
PZ
6468}
6469EXPORT_SYMBOL_GPL(sched_set_normal);
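/*
 * A minimal in-kernel usage sketch; the thread function and name are made-up
 * examples. A driver that needs its kthread ahead of all SCHED_NORMAL work,
 * but has no basis for choosing a specific FIFO priority, would do:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_thread_fn, NULL, "my-rt-worker");
 *	if (!IS_ERR(tsk)) {
 *		sched_set_fifo(tsk);		// FIFO at MAX_RT_PRIO / 2
 *		wake_up_process(tsk);
 *	}
 *
 * sched_set_fifo_low() is the variant for "just above SCHED_NORMAL".
 */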
961ccddd 6470
95cdf3b7
IM
6471static int
6472do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 6473{
1da177e4
LT
6474 struct sched_param lparam;
6475 struct task_struct *p;
36c8b586 6476 int retval;
1da177e4
LT
6477
6478 if (!param || pid < 0)
6479 return -EINVAL;
6480 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
6481 return -EFAULT;
5fe1d75f
ON
6482
6483 rcu_read_lock();
6484 retval = -ESRCH;
1da177e4 6485 p = find_process_by_pid(pid);
710da3c8
JL
6486 if (likely(p))
6487 get_task_struct(p);
5fe1d75f 6488 rcu_read_unlock();
36c8b586 6489
710da3c8
JL
6490 if (likely(p)) {
6491 retval = sched_setscheduler(p, policy, &lparam);
6492 put_task_struct(p);
6493 }
6494
1da177e4
LT
6495 return retval;
6496}
6497
d50dde5a
DF
6498/*
6499 * Mimics kernel/events/core.c perf_copy_attr().
6500 */
d1ccc66d 6501static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
d50dde5a
DF
6502{
6503 u32 size;
6504 int ret;
6505
d1ccc66d 6506 /* Zero the full structure, so that a short copy will be nice: */
d50dde5a
DF
6507 memset(attr, 0, sizeof(*attr));
6508
6509 ret = get_user(size, &uattr->size);
6510 if (ret)
6511 return ret;
6512
d1ccc66d
IM
6513 /* ABI compatibility quirk: */
6514 if (!size)
d50dde5a 6515 size = SCHED_ATTR_SIZE_VER0;
dff3a85f 6516 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
d50dde5a
DF
6517 goto err_size;
6518
dff3a85f
AS
6519 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
6520 if (ret) {
6521 if (ret == -E2BIG)
6522 goto err_size;
6523 return ret;
d50dde5a
DF
6524 }
6525
a509a7cd
PB
6526 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
6527 size < SCHED_ATTR_SIZE_VER1)
6528 return -EINVAL;
6529
d50dde5a 6530 /*
d1ccc66d 6531 * XXX: Do we want to be lenient like existing syscalls; or do we want
d50dde5a
DF
6532 * to be strict and return an error on out-of-bounds values?
6533 */
75e45d51 6534 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 6535
e78c7bca 6536 return 0;
d50dde5a
DF
6537
6538err_size:
6539 put_user(sizeof(*attr), &uattr->size);
e78c7bca 6540 return -E2BIG;
d50dde5a
DF
6541}
6542
1da177e4
LT
6543/**
6544 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
6545 * @pid: the pid in question.
6546 * @policy: new policy.
6547 * @param: structure containing the new RT priority.
e69f6186
YB
6548 *
6549 * Return: 0 on success. An error code otherwise.
1da177e4 6550 */
d1ccc66d 6551SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
1da177e4 6552{
c21761f1
JB
6553 if (policy < 0)
6554 return -EINVAL;
6555
1da177e4
LT
6556 return do_sched_setscheduler(pid, policy, param);
6557}
6558
6559/**
6560 * sys_sched_setparam - set/change the RT priority of a thread
6561 * @pid: the pid in question.
6562 * @param: structure containing the new RT priority.
e69f6186
YB
6563 *
6564 * Return: 0 on success. An error code otherwise.
1da177e4 6565 */
5add95d4 6566SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 6567{
c13db6b1 6568 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
6569}
6570
d50dde5a
DF
6571/**
6572 * sys_sched_setattr - same as above, but with extended sched_attr
6573 * @pid: the pid in question.
5778fccf 6574 * @uattr: structure containing the extended parameters.
db66d756 6575 * @flags: for future extension.
d50dde5a 6576 */
6d35ab48
PZ
6577SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
6578 unsigned int, flags)
d50dde5a
DF
6579{
6580 struct sched_attr attr;
6581 struct task_struct *p;
6582 int retval;
6583
6d35ab48 6584 if (!uattr || pid < 0 || flags)
d50dde5a
DF
6585 return -EINVAL;
6586
143cf23d
MK
6587 retval = sched_copy_attr(uattr, &attr);
6588 if (retval)
6589 return retval;
d50dde5a 6590
b14ed2c2 6591 if ((int)attr.sched_policy < 0)
dbdb2275 6592 return -EINVAL;
1d6362fa
PB
6593 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
6594 attr.sched_policy = SETPARAM_POLICY;
d50dde5a
DF
6595
6596 rcu_read_lock();
6597 retval = -ESRCH;
6598 p = find_process_by_pid(pid);
a509a7cd
PB
6599 if (likely(p))
6600 get_task_struct(p);
d50dde5a
DF
6601 rcu_read_unlock();
6602
a509a7cd
PB
6603 if (likely(p)) {
6604 retval = sched_setattr(p, &attr);
6605 put_task_struct(p);
6606 }
6607
d50dde5a
DF
6608 return retval;
6609}
6610
1da177e4
LT
6611/**
6612 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
6613 * @pid: the pid in question.
e69f6186
YB
6614 *
6615 * Return: On success, the policy of the thread. Otherwise, a negative error
6616 * code.
1da177e4 6617 */
5add95d4 6618SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 6619{
36c8b586 6620 struct task_struct *p;
3a5c359a 6621 int retval;
1da177e4
LT
6622
6623 if (pid < 0)
3a5c359a 6624 return -EINVAL;
1da177e4
LT
6625
6626 retval = -ESRCH;
5fe85be0 6627 rcu_read_lock();
1da177e4
LT
6628 p = find_process_by_pid(pid);
6629 if (p) {
6630 retval = security_task_getscheduler(p);
6631 if (!retval)
ca94c442
LP
6632 retval = p->policy
6633 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 6634 }
5fe85be0 6635 rcu_read_unlock();
1da177e4
LT
6636 return retval;
6637}
6638
6639/**
ca94c442 6640 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
6641 * @pid: the pid in question.
6642 * @param: structure containing the RT priority.
e69f6186
YB
6643 *
6644 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
6645 * code.
1da177e4 6646 */
5add95d4 6647SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 6648{
ce5f7f82 6649 struct sched_param lp = { .sched_priority = 0 };
36c8b586 6650 struct task_struct *p;
3a5c359a 6651 int retval;
1da177e4
LT
6652
6653 if (!param || pid < 0)
3a5c359a 6654 return -EINVAL;
1da177e4 6655
5fe85be0 6656 rcu_read_lock();
1da177e4
LT
6657 p = find_process_by_pid(pid);
6658 retval = -ESRCH;
6659 if (!p)
6660 goto out_unlock;
6661
6662 retval = security_task_getscheduler(p);
6663 if (retval)
6664 goto out_unlock;
6665
ce5f7f82
PZ
6666 if (task_has_rt_policy(p))
6667 lp.sched_priority = p->rt_priority;
5fe85be0 6668 rcu_read_unlock();
1da177e4
LT
6669
6670 /*
6671 * This one might sleep, we cannot do it with a spinlock held ...
6672 */
6673 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
6674
1da177e4
LT
6675 return retval;
6676
6677out_unlock:
5fe85be0 6678 rcu_read_unlock();
1da177e4
LT
6679 return retval;
6680}
6681
1251201c
IM
6682/*
6683 * Copy the kernel size attribute structure (which might be larger
6684 * than what user-space knows about) to user-space.
6685 *
6686 * Note that all cases are valid: user-space buffer can be larger or
6687 * smaller than the kernel-space buffer. The usual case is that both
6688 * have the same size.
6689 */
6690static int
6691sched_attr_copy_to_user(struct sched_attr __user *uattr,
6692 struct sched_attr *kattr,
6693 unsigned int usize)
d50dde5a 6694{
1251201c 6695 unsigned int ksize = sizeof(*kattr);
d50dde5a 6696
96d4f267 6697 if (!access_ok(uattr, usize))
d50dde5a
DF
6698 return -EFAULT;
6699
6700 /*
1251201c
IM
6701 * sched_getattr() ABI forwards and backwards compatibility:
6702 *
6703 * If usize == ksize then we just copy everything to user-space and all is good.
6704 *
6705 * If usize < ksize then we only copy as much as user-space has space for,
6706 * this keeps ABI compatibility as well. We skip the rest.
6707 *
6708 * If usize > ksize then user-space is using a newer version of the ABI,
6709 * parts of which the kernel doesn't know about. Just ignore it - tooling can
6710 * detect the kernel's knowledge of attributes from the attr->size value
6711 * which is set to ksize in this case.
d50dde5a 6712 */
1251201c 6713 kattr->size = min(usize, ksize);
d50dde5a 6714
1251201c 6715 if (copy_to_user(uattr, kattr, kattr->size))
d50dde5a
DF
6716 return -EFAULT;
6717
22400674 6718 return 0;
d50dde5a
DF
6719}
6720
6721/**
aab03e05 6722 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 6723 * @pid: the pid in question.
5778fccf 6724 * @uattr: structure containing the extended parameters.
dff3a85f 6725 * @usize: sizeof(attr) for fwd/bwd comp.
db66d756 6726 * @flags: for future extension.
d50dde5a 6727 */
6d35ab48 6728SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1251201c 6729 unsigned int, usize, unsigned int, flags)
d50dde5a 6730{
1251201c 6731 struct sched_attr kattr = { };
d50dde5a
DF
6732 struct task_struct *p;
6733 int retval;
6734
1251201c
IM
6735 if (!uattr || pid < 0 || usize > PAGE_SIZE ||
6736 usize < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
6737 return -EINVAL;
6738
6739 rcu_read_lock();
6740 p = find_process_by_pid(pid);
6741 retval = -ESRCH;
6742 if (!p)
6743 goto out_unlock;
6744
6745 retval = security_task_getscheduler(p);
6746 if (retval)
6747 goto out_unlock;
6748
1251201c 6749 kattr.sched_policy = p->policy;
7479f3c9 6750 if (p->sched_reset_on_fork)
1251201c 6751 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05 6752 if (task_has_dl_policy(p))
1251201c 6753 __getparam_dl(p, &kattr);
aab03e05 6754 else if (task_has_rt_policy(p))
1251201c 6755 kattr.sched_priority = p->rt_priority;
d50dde5a 6756 else
1251201c 6757 kattr.sched_nice = task_nice(p);
d50dde5a 6758
a509a7cd 6759#ifdef CONFIG_UCLAMP_TASK
13685c4a
QY
6760 /*
6761 * This could race with another potential updater, but this is fine
6762 * because it'll correctly read the old or the new value. We don't need
6763 * to guarantee who wins the race as long as it doesn't return garbage.
6764 */
1251201c
IM
6765 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
6766 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
a509a7cd
PB
6767#endif
6768
d50dde5a
DF
6769 rcu_read_unlock();
6770
1251201c 6771 return sched_attr_copy_to_user(uattr, &kattr, usize);
d50dde5a
DF
6772
6773out_unlock:
6774 rcu_read_unlock();
6775 return retval;
6776}
6777
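The size handshake implemented by sched_attr_copy_to_user() is easiest to see from the calling side. Below is a minimal user-space sketch (not part of this file) that invokes the raw syscall, since glibc historically provides no wrapper; it assumes the uapi header <linux/sched/types.h> for struct sched_attr and passes flags == 0 as the checks above require.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched/types.h>		/* struct sched_attr */

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));

	/* pid 0 == current task; usize tells the kernel how big our buffer is. */
	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0)) {
		perror("sched_getattr");
		return 1;
	}

	/* attr.size is clamped to what this kernel actually knows about. */
	printf("policy=%u nice=%d kernel attr size=%u\n",
	       attr.sched_policy, attr.sched_nice, attr.size);
	return 0;
}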
96f874e2 6778long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 6779{
5a16f3d3 6780 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
6781 struct task_struct *p;
6782 int retval;
1da177e4 6783
23f5d142 6784 rcu_read_lock();
1da177e4
LT
6785
6786 p = find_process_by_pid(pid);
6787 if (!p) {
23f5d142 6788 rcu_read_unlock();
1da177e4
LT
6789 return -ESRCH;
6790 }
6791
23f5d142 6792 /* Prevent p going away */
1da177e4 6793 get_task_struct(p);
23f5d142 6794 rcu_read_unlock();
1da177e4 6795
14a40ffc
TH
6796 if (p->flags & PF_NO_SETAFFINITY) {
6797 retval = -EINVAL;
6798 goto out_put_task;
6799 }
5a16f3d3
RR
6800 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6801 retval = -ENOMEM;
6802 goto out_put_task;
6803 }
6804 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
6805 retval = -ENOMEM;
6806 goto out_free_cpus_allowed;
6807 }
1da177e4 6808 retval = -EPERM;
4c44aaaf
EB
6809 if (!check_same_owner(p)) {
6810 rcu_read_lock();
6811 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
6812 rcu_read_unlock();
16303ab2 6813 goto out_free_new_mask;
4c44aaaf
EB
6814 }
6815 rcu_read_unlock();
6816 }
1da177e4 6817
b0ae1981 6818 retval = security_task_setscheduler(p);
e7834f8f 6819 if (retval)
16303ab2 6820 goto out_free_new_mask;
e7834f8f 6821
e4099a5e
PZ
6822
6823 cpuset_cpus_allowed(p, cpus_allowed);
6824 cpumask_and(new_mask, in_mask, cpus_allowed);
6825
332ac17e
DF
6826 /*
6827 * Since bandwidth control happens on root_domain basis,
6828 * if admission test is enabled, we only admit -deadline
6829 * tasks allowed to run on all the CPUs in the task's
6830 * root_domain.
6831 */
6832#ifdef CONFIG_SMP
f1e3a093
KT
6833 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
6834 rcu_read_lock();
6835 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 6836 retval = -EBUSY;
f1e3a093 6837 rcu_read_unlock();
16303ab2 6838 goto out_free_new_mask;
332ac17e 6839 }
f1e3a093 6840 rcu_read_unlock();
332ac17e
DF
6841 }
6842#endif
49246274 6843again:
9cfc3e18 6844 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
1da177e4 6845
8707d8b8 6846 if (!retval) {
5a16f3d3
RR
6847 cpuset_cpus_allowed(p, cpus_allowed);
6848 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
6849 /*
6850 * We must have raced with a concurrent cpuset
6851 * update. Just reset the cpus_allowed to the
6852 * cpuset's cpus_allowed
6853 */
5a16f3d3 6854 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
6855 goto again;
6856 }
6857 }
16303ab2 6858out_free_new_mask:
5a16f3d3
RR
6859 free_cpumask_var(new_mask);
6860out_free_cpus_allowed:
6861 free_cpumask_var(cpus_allowed);
6862out_put_task:
1da177e4 6863 put_task_struct(p);
1da177e4
LT
6864 return retval;
6865}
6866
6867static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 6868 struct cpumask *new_mask)
1da177e4 6869{
96f874e2
RR
6870 if (len < cpumask_size())
6871 cpumask_clear(new_mask);
6872 else if (len > cpumask_size())
6873 len = cpumask_size();
6874
1da177e4
LT
6875 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6876}
6877
6878/**
d1ccc66d 6879 * sys_sched_setaffinity - set the CPU affinity of a process
1da177e4
LT
6880 * @pid: pid of the process
6881 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 6882 * @user_mask_ptr: user-space pointer to the new CPU mask
e69f6186
YB
6883 *
6884 * Return: 0 on success. An error code otherwise.
1da177e4 6885 */
5add95d4
HC
6886SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6887 unsigned long __user *, user_mask_ptr)
1da177e4 6888{
5a16f3d3 6889 cpumask_var_t new_mask;
1da177e4
LT
6890 int retval;
6891
5a16f3d3
RR
6892 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6893 return -ENOMEM;
1da177e4 6894
5a16f3d3
RR
6895 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6896 if (retval == 0)
6897 retval = sched_setaffinity(pid, new_mask);
6898 free_cpumask_var(new_mask);
6899 return retval;
1da177e4
LT
6900}
6901
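For completeness, a minimal user-space sketch of the corresponding libc wrapper, which funnels into the syscall above; as the kernel code shows, the requested mask is intersected with the task's cpuset before being applied.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	/* pid 0 == calling thread; fails if CPU 0 is not in the allowed set. */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}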
96f874e2 6902long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 6903{
36c8b586 6904 struct task_struct *p;
31605683 6905 unsigned long flags;
1da177e4 6906 int retval;
1da177e4 6907
23f5d142 6908 rcu_read_lock();
1da177e4
LT
6909
6910 retval = -ESRCH;
6911 p = find_process_by_pid(pid);
6912 if (!p)
6913 goto out_unlock;
6914
e7834f8f
DQ
6915 retval = security_task_getscheduler(p);
6916 if (retval)
6917 goto out_unlock;
6918
013fdb80 6919 raw_spin_lock_irqsave(&p->pi_lock, flags);
3bd37062 6920 cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
013fdb80 6921 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
6922
6923out_unlock:
23f5d142 6924 rcu_read_unlock();
1da177e4 6925
9531b62f 6926 return retval;
1da177e4
LT
6927}
6928
6929/**
d1ccc66d 6930 * sys_sched_getaffinity - get the CPU affinity of a process
1da177e4
LT
6931 * @pid: pid of the process
6932 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 6933 * @user_mask_ptr: user-space pointer to hold the current CPU mask
e69f6186 6934 *
599b4840
ZW
6935 * Return: size of CPU mask copied to user_mask_ptr on success. An
6936 * error code otherwise.
1da177e4 6937 */
5add95d4
HC
6938SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6939 unsigned long __user *, user_mask_ptr)
1da177e4
LT
6940{
6941 int ret;
f17c8607 6942 cpumask_var_t mask;
1da177e4 6943
84fba5ec 6944 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
6945 return -EINVAL;
6946 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
6947 return -EINVAL;
6948
f17c8607
RR
6949 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6950 return -ENOMEM;
1da177e4 6951
f17c8607
RR
6952 ret = sched_getaffinity(pid, mask);
6953 if (ret == 0) {
4de373a1 6954 unsigned int retlen = min(len, cpumask_size());
cd3d8031
KM
6955
6956 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
6957 ret = -EFAULT;
6958 else
cd3d8031 6959 ret = retlen;
f17c8607
RR
6960 }
6961 free_cpumask_var(mask);
1da177e4 6962
f17c8607 6963 return ret;
1da177e4
LT
6964}
6965
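The length checks above require len to be a multiple of sizeof(unsigned long) and large enough to hold nr_cpu_ids bits; the glibc wrapper hides the returned byte count. A minimal user-space sketch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;
	int cpu;

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf("allowed: CPU %d\n", cpu);
	return 0;
}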
7d4dd4f1 6966static void do_sched_yield(void)
1da177e4 6967{
8a8c69c3
PZ
6968 struct rq_flags rf;
6969 struct rq *rq;
6970
246b3b33 6971 rq = this_rq_lock_irq(&rf);
1da177e4 6972
ae92882e 6973 schedstat_inc(rq->yld_count);
4530d7ab 6974 current->sched_class->yield_task(rq);
1da177e4 6975
8a8c69c3 6976 preempt_disable();
345a957f 6977 rq_unlock_irq(rq, &rf);
ba74c144 6978 sched_preempt_enable_no_resched();
1da177e4
LT
6979
6980 schedule();
7d4dd4f1 6981}
1da177e4 6982
59a74b15
MCC
6983/**
6984 * sys_sched_yield - yield the current processor to other threads.
6985 *
6986 * This function yields the current CPU to other tasks. If there are no
6987 * other threads running on this CPU then this function will return.
6988 *
6989 * Return: 0.
6990 */
7d4dd4f1
DB
6991SYSCALL_DEFINE0(sched_yield)
6992{
6993 do_sched_yield();
1da177e4
LT
6994 return 0;
6995}
6996
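User space reaches do_sched_yield() through the sched_yield() wrapper. It is mainly useful for cooperative hand-off between real-time tasks of the same priority; a small illustrative sketch (the SCHED_FIFO policy setup is assumed to have been done elsewhere):

#include <sched.h>

int main(void)
{
	for (;;) {
		/* ... one bounded unit of real-time work ... */

		/*
		 * Same-priority SCHED_FIFO tasks only rotate when the running
		 * task yields or blocks; give peers on this CPU a turn.
		 */
		sched_yield();
	}
}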
b965f1dd
PZI
6997#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
6998int __sched __cond_resched(void)
1da177e4 6999{
fe32d3cd 7000 if (should_resched(0)) {
a18b5d01 7001 preempt_schedule_common();
1da177e4
LT
7002 return 1;
7003 }
b965f1dd 7004#ifndef CONFIG_PREEMPT_RCU
f79c3ad6 7005 rcu_all_qs();
b965f1dd 7006#endif
1da177e4
LT
7007 return 0;
7008}
b965f1dd
PZI
7009EXPORT_SYMBOL(__cond_resched);
7010#endif
7011
7012#ifdef CONFIG_PREEMPT_DYNAMIC
7013DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
ef72661e 7014EXPORT_STATIC_CALL_TRAMP(cond_resched);
b965f1dd
PZI
7015
7016DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
ef72661e 7017EXPORT_STATIC_CALL_TRAMP(might_resched);
35a773a0 7018#endif
1da177e4
LT
7019
7020/*
613afbf8 7021 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
7022 * call schedule, and on return reacquire the lock.
7023 *
c1a280b6 7024 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
1da177e4
LT
7025 * operations here to prevent schedule() from being called twice (once via
7026 * spin_unlock(), once by hand).
7027 */
613afbf8 7028int __cond_resched_lock(spinlock_t *lock)
1da177e4 7029{
fe32d3cd 7030 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
7031 int ret = 0;
7032
f607c668
PZ
7033 lockdep_assert_held(lock);
7034
4a81e832 7035 if (spin_needbreak(lock) || resched) {
1da177e4 7036 spin_unlock(lock);
d86ee480 7037 if (resched)
a18b5d01 7038 preempt_schedule_common();
95c354fe
NP
7039 else
7040 cpu_relax();
6df3cecb 7041 ret = 1;
1da177e4 7042 spin_lock(lock);
1da177e4 7043 }
6df3cecb 7044 return ret;
1da177e4 7045}
613afbf8 7046EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 7047
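A typical caller of __cond_resched_lock() (via the cond_resched_lock() wrapper) is a long scan performed under a spinlock. The in-kernel sketch below uses a hypothetical table structure and assumes its entries remain valid if the lock is dropped and re-taken at the preemption point.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_table {
	spinlock_t	lock;
	unsigned int	nr;
	u64		*vals;
	u64		sum;
};

static void my_table_sum(struct my_table *t)
{
	unsigned int i;

	spin_lock(&t->lock);
	for (i = 0; i < t->nr; i++) {
		t->sum += t->vals[i];
		/* Drops and re-takes t->lock if a resched or lock break is due. */
		cond_resched_lock(&t->lock);
	}
	spin_unlock(&t->lock);
}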
f3d4b4b1
BG
7048int __cond_resched_rwlock_read(rwlock_t *lock)
7049{
7050 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7051 int ret = 0;
7052
7053 lockdep_assert_held_read(lock);
7054
7055 if (rwlock_needbreak(lock) || resched) {
7056 read_unlock(lock);
7057 if (resched)
7058 preempt_schedule_common();
7059 else
7060 cpu_relax();
7061 ret = 1;
7062 read_lock(lock);
7063 }
7064 return ret;
7065}
7066EXPORT_SYMBOL(__cond_resched_rwlock_read);
7067
7068int __cond_resched_rwlock_write(rwlock_t *lock)
7069{
7070 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7071 int ret = 0;
7072
7073 lockdep_assert_held_write(lock);
7074
7075 if (rwlock_needbreak(lock) || resched) {
7076 write_unlock(lock);
7077 if (resched)
7078 preempt_schedule_common();
7079 else
7080 cpu_relax();
7081 ret = 1;
7082 write_lock(lock);
7083 }
7084 return ret;
7085}
7086EXPORT_SYMBOL(__cond_resched_rwlock_write);
7087
1da177e4
LT
7088/**
7089 * yield - yield the current processor to other threads.
7090 *
8e3fabfd
PZ
7091 * Do not ever use this function, there's a 99% chance you're doing it wrong.
7092 *
7093 * The scheduler is at all times free to pick the calling task as the most
7094 * eligible task to run; if removing the yield() call from your code breaks
b19a888c 7095 * it, it's already broken.
8e3fabfd
PZ
7096 *
7097 * Typical broken usage is:
7098 *
7099 * while (!event)
d1ccc66d 7100 * yield();
8e3fabfd
PZ
7101 *
7102 * where one assumes that yield() will let 'the other' process run that will
7103 * make event true. If the current task is a SCHED_FIFO task that will never
7104 * happen. Never use yield() as a progress guarantee!!
7105 *
7106 * If you want to use yield() to wait for something, use wait_event().
7107 * If you want to use yield() to be 'nice' for others, use cond_resched().
7108 * If you still want to use yield(), do not!
1da177e4
LT
7109 */
7110void __sched yield(void)
7111{
7112 set_current_state(TASK_RUNNING);
7d4dd4f1 7113 do_sched_yield();
1da177e4 7114}
1da177e4
LT
7115EXPORT_SYMBOL(yield);
7116
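The warning above points at wait_event() for the "wait until something happens" case. A minimal in-kernel sketch of that pattern, with hypothetical names (my_wq, my_event_ready):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_event_ready;

static void my_consumer(void)
{
	/* Sleeps until the condition is true; no CPU time is burned waiting. */
	wait_event(my_wq, my_event_ready);
}

static void my_producer(void)
{
	my_event_ready = true;
	wake_up(&my_wq);	/* wakes sleepers, which re-check the condition */
}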
d95f4122
MG
7117/**
7118 * yield_to - yield the current processor to another thread in
7119 * your thread group, or accelerate that thread toward the
7120 * processor it's on.
16addf95
RD
7121 * @p: target task
7122 * @preempt: whether task preemption is allowed or not
d95f4122
MG
7123 *
7124 * It's the caller's job to ensure that the target task struct
7125 * can't go away on us before we can do any checks.
7126 *
e69f6186 7127 * Return:
7b270f60
PZ
7128 * true (>0) if we indeed boosted the target task.
7129 * false (0) if we failed to boost the target.
7130 * -ESRCH if there's no task to yield to.
d95f4122 7131 */
fa93384f 7132int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
7133{
7134 struct task_struct *curr = current;
7135 struct rq *rq, *p_rq;
7136 unsigned long flags;
c3c18640 7137 int yielded = 0;
d95f4122
MG
7138
7139 local_irq_save(flags);
7140 rq = this_rq();
7141
7142again:
7143 p_rq = task_rq(p);
7b270f60
PZ
7144 /*
7145 * If we're the only runnable task on the rq and target rq also
7146 * has only one task, there's absolutely no point in yielding.
7147 */
7148 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
7149 yielded = -ESRCH;
7150 goto out_irq;
7151 }
7152
d95f4122 7153 double_rq_lock(rq, p_rq);
39e24d8f 7154 if (task_rq(p) != p_rq) {
d95f4122
MG
7155 double_rq_unlock(rq, p_rq);
7156 goto again;
7157 }
7158
7159 if (!curr->sched_class->yield_to_task)
7b270f60 7160 goto out_unlock;
d95f4122
MG
7161
7162 if (curr->sched_class != p->sched_class)
7b270f60 7163 goto out_unlock;
d95f4122
MG
7164
7165 if (task_running(p_rq, p) || p->state)
7b270f60 7166 goto out_unlock;
d95f4122 7167
0900acf2 7168 yielded = curr->sched_class->yield_to_task(rq, p);
6d1cafd8 7169 if (yielded) {
ae92882e 7170 schedstat_inc(rq->yld_count);
6d1cafd8
VP
7171 /*
7172 * Make p's CPU reschedule; pick_next_entity takes care of
7173 * fairness.
7174 */
7175 if (preempt && rq != p_rq)
8875125e 7176 resched_curr(p_rq);
6d1cafd8 7177 }
d95f4122 7178
7b270f60 7179out_unlock:
d95f4122 7180 double_rq_unlock(rq, p_rq);
7b270f60 7181out_irq:
d95f4122
MG
7182 local_irq_restore(flags);
7183
7b270f60 7184 if (yielded > 0)
d95f4122
MG
7185 schedule();
7186
7187 return yielded;
7188}
7189EXPORT_SYMBOL_GPL(yield_to);
7190
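A sketch of how a directed yield might be used, in the spirit of boosting a known lock holder: the caller is responsible for holding a reference on the target task, as the comment above requires. The helper name is hypothetical.

#include <linux/sched.h>

/* Returns true if @holder was actually boosted onto a CPU. */
static bool my_boost_lock_holder(struct task_struct *holder)
{
	/*
	 * yield_to(): >0 means the target was boosted, 0 means the boost
	 * failed, -ESRCH means there was nothing to yield to.
	 * @preempt == false: don't force a resched on the target's CPU.
	 */
	return yield_to(holder, false) > 0;
}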
10ab5643
TH
7191int io_schedule_prepare(void)
7192{
7193 int old_iowait = current->in_iowait;
7194
7195 current->in_iowait = 1;
7196 blk_schedule_flush_plug(current);
7197
7198 return old_iowait;
7199}
7200
7201void io_schedule_finish(int token)
7202{
7203 current->in_iowait = token;
7204}
7205
1da177e4 7206/*
41a2d6cf 7207 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 7208 * that process accounting knows that this is a task in IO wait state.
1da177e4 7209 */
1da177e4
LT
7210long __sched io_schedule_timeout(long timeout)
7211{
10ab5643 7212 int token;
1da177e4
LT
7213 long ret;
7214
10ab5643 7215 token = io_schedule_prepare();
1da177e4 7216 ret = schedule_timeout(timeout);
10ab5643 7217 io_schedule_finish(token);
9cff8ade 7218
1da177e4
LT
7219 return ret;
7220}
9cff8ade 7221EXPORT_SYMBOL(io_schedule_timeout);
1da177e4 7222
e3b929b0 7223void __sched io_schedule(void)
10ab5643
TH
7224{
7225 int token;
7226
7227 token = io_schedule_prepare();
7228 schedule();
7229 io_schedule_finish(token);
7230}
7231EXPORT_SYMBOL(io_schedule);
7232
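io_schedule_prepare()/io_schedule_finish() can bracket any blocking primitive so the time spent sleeping is accounted as iowait. A hedged in-kernel sketch with a hypothetical wrapper name:

#include <linux/sched.h>
#include <linux/mutex.h>

static void my_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();	/* mark current as in_iowait, flush block plug */
	mutex_lock(lock);		/* time spent blocked here counts as iowait */
	io_schedule_finish(token);	/* restore the previous in_iowait state */
}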
1da177e4
LT
7233/**
7234 * sys_sched_get_priority_max - return maximum RT priority.
7235 * @policy: scheduling class.
7236 *
e69f6186
YB
7237 * Return: On success, this syscall returns the maximum
7238 * rt_priority that can be used by a given scheduling class.
7239 * On failure, a negative error code is returned.
1da177e4 7240 */
5add95d4 7241SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
7242{
7243 int ret = -EINVAL;
7244
7245 switch (policy) {
7246 case SCHED_FIFO:
7247 case SCHED_RR:
ae18ad28 7248 ret = MAX_RT_PRIO-1;
1da177e4 7249 break;
aab03e05 7250 case SCHED_DEADLINE:
1da177e4 7251 case SCHED_NORMAL:
b0a9499c 7252 case SCHED_BATCH:
dd41f596 7253 case SCHED_IDLE:
1da177e4
LT
7254 ret = 0;
7255 break;
7256 }
7257 return ret;
7258}
7259
7260/**
7261 * sys_sched_get_priority_min - return minimum RT priority.
7262 * @policy: scheduling class.
7263 *
e69f6186
YB
7264 * Return: On success, this syscall returns the minimum
7265 * rt_priority that can be used by a given scheduling class.
7266 * On failure, a negative error code is returned.
1da177e4 7267 */
5add95d4 7268SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
7269{
7270 int ret = -EINVAL;
7271
7272 switch (policy) {
7273 case SCHED_FIFO:
7274 case SCHED_RR:
7275 ret = 1;
7276 break;
aab03e05 7277 case SCHED_DEADLINE:
1da177e4 7278 case SCHED_NORMAL:
b0a9499c 7279 case SCHED_BATCH:
dd41f596 7280 case SCHED_IDLE:
1da177e4
LT
7281 ret = 0;
7282 }
7283 return ret;
7284}
7285
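From user space these two syscalls are usually consulted before choosing a real-time priority. A minimal sketch (succeeding requires CAP_SYS_NICE or a suitable RLIMIT_RTPRIO):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	int lo = sched_get_priority_min(SCHED_FIFO);
	int hi = sched_get_priority_max(SCHED_FIFO);
	struct sched_param sp = { .sched_priority = lo + (hi - lo) / 2 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("SCHED_FIFO range %d..%d, running at %d\n", lo, hi, sp.sched_priority);
	return 0;
}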
abca5fc5 7286static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1da177e4 7287{
36c8b586 7288 struct task_struct *p;
a4ec24b4 7289 unsigned int time_slice;
eb580751 7290 struct rq_flags rf;
dba091b9 7291 struct rq *rq;
3a5c359a 7292 int retval;
1da177e4
LT
7293
7294 if (pid < 0)
3a5c359a 7295 return -EINVAL;
1da177e4
LT
7296
7297 retval = -ESRCH;
1a551ae7 7298 rcu_read_lock();
1da177e4
LT
7299 p = find_process_by_pid(pid);
7300 if (!p)
7301 goto out_unlock;
7302
7303 retval = security_task_getscheduler(p);
7304 if (retval)
7305 goto out_unlock;
7306
eb580751 7307 rq = task_rq_lock(p, &rf);
a57beec5
PZ
7308 time_slice = 0;
7309 if (p->sched_class->get_rr_interval)
7310 time_slice = p->sched_class->get_rr_interval(rq, p);
eb580751 7311 task_rq_unlock(rq, p, &rf);
a4ec24b4 7312
1a551ae7 7313 rcu_read_unlock();
abca5fc5
AV
7314 jiffies_to_timespec64(time_slice, t);
7315 return 0;
3a5c359a 7316
1da177e4 7317out_unlock:
1a551ae7 7318 rcu_read_unlock();
1da177e4
LT
7319 return retval;
7320}
7321
2064a5ab
RD
7322/**
7323 * sys_sched_rr_get_interval - return the default timeslice of a process.
7324 * @pid: pid of the process.
7325 * @interval: userspace pointer to the timeslice value.
7326 *
7327 * this syscall writes the default timeslice value of a given process
7328 * into the user-space timespec buffer. A value of '0' means infinity.
7329 *
7330 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
7331 * an error code.
7332 */
abca5fc5 7333SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
474b9c77 7334 struct __kernel_timespec __user *, interval)
abca5fc5
AV
7335{
7336 struct timespec64 t;
7337 int retval = sched_rr_get_interval(pid, &t);
7338
7339 if (retval == 0)
7340 retval = put_timespec64(&t, interval);
7341
7342 return retval;
7343}
7344
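A user-space sketch of the corresponding library call; per the comment above, a zero timeslice means the task is not round-robin time-sliced.

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts)) {	/* pid 0 == calling process */
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}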
474b9c77 7345#ifdef CONFIG_COMPAT_32BIT_TIME
8dabe724
AB
7346SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
7347 struct old_timespec32 __user *, interval)
abca5fc5
AV
7348{
7349 struct timespec64 t;
7350 int retval = sched_rr_get_interval(pid, &t);
7351
7352 if (retval == 0)
9afc5eee 7353 retval = put_old_timespec32(&t, interval);
abca5fc5
AV
7354 return retval;
7355}
7356#endif
7357
82a1fcb9 7358void sched_show_task(struct task_struct *p)
1da177e4 7359{
1da177e4 7360 unsigned long free = 0;
4e79752c 7361 int ppid;
c930b2c0 7362
38200502
TH
7363 if (!try_get_task_stack(p))
7364 return;
20435d84 7365
cc172ff3 7366 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
20435d84
XX
7367
7368 if (p->state == TASK_RUNNING)
cc172ff3 7369 pr_cont(" running task ");
1da177e4 7370#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 7371 free = stack_not_used(p);
1da177e4 7372#endif
a90e984c 7373 ppid = 0;
4e79752c 7374 rcu_read_lock();
a90e984c
ON
7375 if (pid_alive(p))
7376 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 7377 rcu_read_unlock();
cc172ff3
LZ
7378 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
7379 free, task_pid_nr(p), ppid,
aa47b7e0 7380 (unsigned long)task_thread_info(p)->flags);
1da177e4 7381
3d1cb205 7382 print_worker_info(KERN_INFO, p);
a8b62fd0 7383 print_stop_info(KERN_INFO, p);
9cb8f069 7384 show_stack(p, NULL, KERN_INFO);
38200502 7385 put_task_stack(p);
1da177e4 7386}
0032f4e8 7387EXPORT_SYMBOL_GPL(sched_show_task);
1da177e4 7388
5d68cc95
PZ
7389static inline bool
7390state_filter_match(unsigned long state_filter, struct task_struct *p)
7391{
7392 /* no filter, everything matches */
7393 if (!state_filter)
7394 return true;
7395
7396 /* filter, but doesn't match */
7397 if (!(p->state & state_filter))
7398 return false;
7399
7400 /*
7401 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7402 * TASK_KILLABLE).
7403 */
7404 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
7405 return false;
7406
7407 return true;
7408}
7409
7410
e59e2ae2 7411void show_state_filter(unsigned long state_filter)
1da177e4 7412{
36c8b586 7413 struct task_struct *g, *p;
1da177e4 7414
510f5acc 7415 rcu_read_lock();
5d07f420 7416 for_each_process_thread(g, p) {
1da177e4
LT
7417 /*
7418 * Reset the NMI-timeout; listing all tasks on a slow
25985edc 7419 * console might take a lot of time.
57675cb9
AR
7420 * Also, reset softlockup watchdogs on all CPUs, because
7421 * another CPU might be blocked waiting for us to process
7422 * an IPI.
1da177e4
LT
7423 */
7424 touch_nmi_watchdog();
57675cb9 7425 touch_all_softlockup_watchdogs();
5d68cc95 7426 if (state_filter_match(state_filter, p))
82a1fcb9 7427 sched_show_task(p);
5d07f420 7428 }
1da177e4 7429
dd41f596 7430#ifdef CONFIG_SCHED_DEBUG
fb90a6e9
RV
7431 if (!state_filter)
7432 sysrq_sched_debug_show();
dd41f596 7433#endif
510f5acc 7434 rcu_read_unlock();
e59e2ae2
IM
7435 /*
7436 * Only show locks if all tasks are dumped:
7437 */
93335a21 7438 if (!state_filter)
e59e2ae2 7439 debug_show_all_locks();
1da177e4
LT
7440}
7441
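show_state_filter() is what the SysRq 't' handler ends up invoking with a zero filter. Assuming SysRq is enabled and the caller is root, it can be exercised from user space as sketched below; the dump goes to the kernel log.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sysrq-trigger", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputc('t', f);		/* 't': dump all tasks and their stacks */
	fclose(f);
	return 0;
}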
f340c0d1
IM
7442/**
7443 * init_idle - set up an idle thread for a given CPU
7444 * @idle: task in question
d1ccc66d 7445 * @cpu: CPU the idle task belongs to
f340c0d1
IM
7446 *
7447 * NOTE: this function does not set the idle thread's NEED_RESCHED
7448 * flag, to make booting more robust.
7449 */
0db0628d 7450void init_idle(struct task_struct *idle, int cpu)
1da177e4 7451{
70b97a7f 7452 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
7453 unsigned long flags;
7454
ff51ff84
PZ
7455 __sched_fork(0, idle);
7456
25834c73
PZ
7457 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7458 raw_spin_lock(&rq->lock);
5cbd54ef 7459
06b83b5f 7460 idle->state = TASK_RUNNING;
dd41f596 7461 idle->se.exec_start = sched_clock();
c1de45ca 7462 idle->flags |= PF_IDLE;
dd41f596 7463
d08b9f0c 7464 scs_task_reset(idle);
e1b77c92
MR
7465 kasan_unpoison_task_stack(idle);
7466
de9b8f5d
PZ
7467#ifdef CONFIG_SMP
7468 /*
b19a888c 7469 * It's possible that init_idle() gets called multiple times on a task,
de9b8f5d
PZ
7470 * in that case do_set_cpus_allowed() will not do the right thing.
7471 *
7472 * And since this is boot we can forgo the serialization.
7473 */
9cfc3e18 7474 set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
de9b8f5d 7475#endif
6506cf6c
PZ
7476 /*
7477 * We're having a chicken-and-egg problem: even though we are
d1ccc66d 7478 * holding rq->lock, the CPU isn't yet set to this CPU, so the
6506cf6c
PZ
7479 * lockdep check in task_group() will fail.
7480 *
7481 * Similar case to sched_fork(). / Alternatively we could
7482 * use task_rq_lock() here and obtain the other rq->lock.
7483 *
7484 * Silence PROVE_RCU
7485 */
7486 rcu_read_lock();
dd41f596 7487 __set_task_cpu(idle, cpu);
6506cf6c 7488 rcu_read_unlock();
1da177e4 7489
5311a98f
EB
7490 rq->idle = idle;
7491 rcu_assign_pointer(rq->curr, idle);
da0c1e65 7492 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 7493#ifdef CONFIG_SMP
3ca7a440 7494 idle->on_cpu = 1;
4866cde0 7495#endif
25834c73
PZ
7496 raw_spin_unlock(&rq->lock);
7497 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
7498
7499 /* Set the preempt count _outside_ the spinlocks! */
01028747 7500 init_idle_preempt_count(idle, cpu);
55cd5340 7501
dd41f596
IM
7502 /*
7503 * The idle tasks have their own, simple scheduling class:
7504 */
7505 idle->sched_class = &idle_sched_class;
868baf07 7506 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 7507 vtime_init_idle(idle, cpu);
de9b8f5d 7508#ifdef CONFIG_SMP
f1c6f1a7
CE
7509 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7510#endif
19978ca6
IM
7511}
7512
e1d4eeec
NP
7513#ifdef CONFIG_SMP
7514
f82f8042
JL
7515int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7516 const struct cpumask *trial)
7517{
06a76fe0 7518 int ret = 1;
f82f8042 7519
bb2bc55a
MG
7520 if (!cpumask_weight(cur))
7521 return ret;
7522
06a76fe0 7523 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
f82f8042
JL
7524
7525 return ret;
7526}
7527
7f51412a
JL
7528int task_can_attach(struct task_struct *p,
7529 const struct cpumask *cs_cpus_allowed)
7530{
7531 int ret = 0;
7532
7533 /*
7534 * Kthreads which disallow setaffinity shouldn't be moved
d1ccc66d 7535 * to a new cpuset; we don't want to change their CPU
7f51412a
JL
7536 * affinity and isolating such threads by their set of
7537 * allowed nodes is unnecessary. Thus, cpusets are not
7538 * applicable for such threads. This prevents checking for
7539 * success of set_cpus_allowed_ptr() on all attached tasks
3bd37062 7540 * before cpus_mask may be changed.
7f51412a
JL
7541 */
7542 if (p->flags & PF_NO_SETAFFINITY) {
7543 ret = -EINVAL;
7544 goto out;
7545 }
7546
7f51412a 7547 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
06a76fe0
NP
7548 cs_cpus_allowed))
7549 ret = dl_task_can_attach(p, cs_cpus_allowed);
7f51412a 7550
7f51412a
JL
7551out:
7552 return ret;
7553}
7554
f2cb1360 7555bool sched_smp_initialized __read_mostly;
e26fbffd 7556
e6628d5b
MG
7557#ifdef CONFIG_NUMA_BALANCING
7558/* Migrate current task p to target_cpu */
7559int migrate_task_to(struct task_struct *p, int target_cpu)
7560{
7561 struct migration_arg arg = { p, target_cpu };
7562 int curr_cpu = task_cpu(p);
7563
7564 if (curr_cpu == target_cpu)
7565 return 0;
7566
3bd37062 7567 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
e6628d5b
MG
7568 return -EINVAL;
7569
7570 /* TODO: This is not properly updating schedstats */
7571
286549dc 7572 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
7573 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7574}
0ec8aa00
PZ
7575
7576/*
7577 * Requeue a task on a given node and accurately track the number of NUMA
7578 * tasks on the runqueues
7579 */
7580void sched_setnuma(struct task_struct *p, int nid)
7581{
da0c1e65 7582 bool queued, running;
eb580751
PZ
7583 struct rq_flags rf;
7584 struct rq *rq;
0ec8aa00 7585
eb580751 7586 rq = task_rq_lock(p, &rf);
da0c1e65 7587 queued = task_on_rq_queued(p);
0ec8aa00
PZ
7588 running = task_current(rq, p);
7589
da0c1e65 7590 if (queued)
1de64443 7591 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 7592 if (running)
f3cd1c4e 7593 put_prev_task(rq, p);
0ec8aa00
PZ
7594
7595 p->numa_preferred_nid = nid;
0ec8aa00 7596
da0c1e65 7597 if (queued)
7134b3e9 7598 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 7599 if (running)
03b7fad1 7600 set_next_task(rq, p);
eb580751 7601 task_rq_unlock(rq, p, &rf);
0ec8aa00 7602}
5cc389bc 7603#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 7604
1da177e4 7605#ifdef CONFIG_HOTPLUG_CPU
054b9108 7606/*
d1ccc66d 7607 * Ensure that the idle task is using init_mm right before its CPU goes
48c5ccae 7608 * offline.
054b9108 7609 */
48c5ccae 7610void idle_task_exit(void)
1da177e4 7611{
48c5ccae 7612 struct mm_struct *mm = current->active_mm;
e76bd8d9 7613
48c5ccae 7614 BUG_ON(cpu_online(smp_processor_id()));
bf2c59fc 7615 BUG_ON(current != this_rq()->idle);
e76bd8d9 7616
a53efe5f 7617 if (mm != &init_mm) {
252d2a41 7618 switch_mm(mm, &init_mm, current);
a53efe5f
MS
7619 finish_arch_post_lock_switch();
7620 }
bf2c59fc
PZ
7621
7622 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
1da177e4
LT
7623}
7624
2558aacf 7625static int __balance_push_cpu_stop(void *arg)
1da177e4 7626{
2558aacf
PZ
7627 struct task_struct *p = arg;
7628 struct rq *rq = this_rq();
7629 struct rq_flags rf;
7630 int cpu;
1da177e4 7631
2558aacf
PZ
7632 raw_spin_lock_irq(&p->pi_lock);
7633 rq_lock(rq, &rf);
3f1d2a31 7634
2558aacf
PZ
7635 update_rq_clock(rq);
7636
7637 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7638 cpu = select_fallback_rq(rq->cpu, p);
7639 rq = __migrate_task(rq, &rf, p, cpu);
10e7071b 7640 }
3f1d2a31 7641
2558aacf
PZ
7642 rq_unlock(rq, &rf);
7643 raw_spin_unlock_irq(&p->pi_lock);
7644
7645 put_task_struct(p);
7646
7647 return 0;
10e7071b 7648}
3f1d2a31 7649
2558aacf
PZ
7650static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7651
48f24c4d 7652/*
2558aacf 7653 * Ensure we only run per-cpu kthreads once the CPU goes !active.
1da177e4 7654 */
2558aacf 7655static void balance_push(struct rq *rq)
1da177e4 7656{
2558aacf
PZ
7657 struct task_struct *push_task = rq->curr;
7658
7659 lockdep_assert_held(&rq->lock);
7660 SCHED_WARN_ON(rq->cpu != smp_processor_id());
ae792702
PZ
7661 /*
7662 * Ensure the thing is persistent until balance_push_set(.on = false);
7663 */
7664 rq->balance_callback = &balance_push_callback;
1da177e4
LT
7665
7666 /*
2558aacf
PZ
7667 * Both the cpu-hotplug and stop task are in this case and are
7668 * required to complete the hotplug process.
5ba2ffba
PZ
7669 *
7670 * XXX: the idle task does not match kthread_is_per_cpu() due to
7671 * histerical raisins.
1da177e4 7672 */
5ba2ffba
PZ
7673 if (rq->idle == push_task ||
7674 ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
7675 is_migration_disabled(push_task)) {
7676
f2469a1f
TG
7677 /*
7678 * If this is the idle task on the outgoing CPU try to wake
7679 * up the hotplug control thread which might wait for the
7680 * last task to vanish. The rcuwait_active() check is
7681 * accurate here because the waiter is pinned on this CPU
7682 * and can't obviously be running in parallel.
3015ef4b
TG
7683 *
7684 * On RT kernels this also has to check whether there are
7685 * pinned and scheduled out tasks on the runqueue. They
7686 * need to leave the migrate disabled section first.
f2469a1f 7687 */
3015ef4b
TG
7688 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7689 rcuwait_active(&rq->hotplug_wait)) {
f2469a1f
TG
7690 raw_spin_unlock(&rq->lock);
7691 rcuwait_wake_up(&rq->hotplug_wait);
7692 raw_spin_lock(&rq->lock);
7693 }
2558aacf 7694 return;
f2469a1f 7695 }
48f24c4d 7696
2558aacf 7697 get_task_struct(push_task);
77bd3970 7698 /*
2558aacf
PZ
7699 * Temporarily drop rq->lock such that we can wake-up the stop task.
7700 * Both preemption and IRQs are still disabled.
77bd3970 7701 */
2558aacf
PZ
7702 raw_spin_unlock(&rq->lock);
7703 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7704 this_cpu_ptr(&push_work));
7705 /*
7706 * At this point need_resched() is true and we'll take the loop in
7707 * schedule(). The next pick is obviously going to be the stop task
5ba2ffba 7708 * which kthread_is_per_cpu() and will push this task away.
2558aacf
PZ
7709 */
7710 raw_spin_lock(&rq->lock);
7711}
77bd3970 7712
2558aacf
PZ
7713static void balance_push_set(int cpu, bool on)
7714{
7715 struct rq *rq = cpu_rq(cpu);
7716 struct rq_flags rf;
48c5ccae 7717
2558aacf 7718 rq_lock_irqsave(rq, &rf);
975707f2 7719 rq->balance_push = on;
22f667c9
PZ
7720 if (on) {
7721 WARN_ON_ONCE(rq->balance_callback);
ae792702 7722 rq->balance_callback = &balance_push_callback;
22f667c9 7723 } else if (rq->balance_callback == &balance_push_callback) {
ae792702 7724 rq->balance_callback = NULL;
22f667c9 7725 }
2558aacf
PZ
7726 rq_unlock_irqrestore(rq, &rf);
7727}
e692ab53 7728
f2469a1f
TG
7729/*
7730 * Invoked from a CPUs hotplug control thread after the CPU has been marked
7731 * inactive. All tasks which are not per CPU kernel threads are either
7732 * pushed off this CPU now via balance_push() or placed on a different CPU
7733 * during wakeup. Wait until the CPU is quiescent.
7734 */
7735static void balance_hotplug_wait(void)
7736{
7737 struct rq *rq = this_rq();
5473e0cc 7738
3015ef4b
TG
7739 rcuwait_wait_event(&rq->hotplug_wait,
7740 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
f2469a1f
TG
7741 TASK_UNINTERRUPTIBLE);
7742}
5473e0cc 7743
2558aacf 7744#else
dce48a84 7745
2558aacf
PZ
7746static inline void balance_push(struct rq *rq)
7747{
dce48a84 7748}
dce48a84 7749
2558aacf
PZ
7750static inline void balance_push_set(int cpu, bool on)
7751{
7752}
7753
f2469a1f
TG
7754static inline void balance_hotplug_wait(void)
7755{
dce48a84 7756}
f2469a1f 7757
1da177e4
LT
7758#endif /* CONFIG_HOTPLUG_CPU */
7759
f2cb1360 7760void set_rq_online(struct rq *rq)
1f11eb6a
GH
7761{
7762 if (!rq->online) {
7763 const struct sched_class *class;
7764
c6c4927b 7765 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
7766 rq->online = 1;
7767
7768 for_each_class(class) {
7769 if (class->rq_online)
7770 class->rq_online(rq);
7771 }
7772 }
7773}
7774
f2cb1360 7775void set_rq_offline(struct rq *rq)
1f11eb6a
GH
7776{
7777 if (rq->online) {
7778 const struct sched_class *class;
7779
7780 for_each_class(class) {
7781 if (class->rq_offline)
7782 class->rq_offline(rq);
7783 }
7784
c6c4927b 7785 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
7786 rq->online = 0;
7787 }
7788}
7789
d1ccc66d
IM
7790/*
7791 * used to mark begin/end of suspend/resume:
7792 */
7793static int num_cpus_frozen;
d35be8ba 7794
1da177e4 7795/*
3a101d05
TH
7796 * Update cpusets according to cpu_active mask. If cpusets are
7797 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7798 * around partition_sched_domains().
d35be8ba
SB
7799 *
7800 * If we come here as part of a suspend/resume, don't touch cpusets because we
7801 * want to restore it back to its original state upon resume anyway.
1da177e4 7802 */
40190a78 7803static void cpuset_cpu_active(void)
e761b772 7804{
40190a78 7805 if (cpuhp_tasks_frozen) {
d35be8ba
SB
7806 /*
7807 * num_cpus_frozen tracks how many CPUs are involved in suspend
7808 * resume sequence. As long as this is not the last online
7809 * operation in the resume sequence, just build a single sched
7810 * domain, ignoring cpusets.
7811 */
50e76632
PZ
7812 partition_sched_domains(1, NULL, NULL);
7813 if (--num_cpus_frozen)
135fb3e1 7814 return;
d35be8ba
SB
7815 /*
7816 * This is the last CPU online operation. So fall through and
7817 * restore the original sched domains by considering the
7818 * cpuset configurations.
7819 */
50e76632 7820 cpuset_force_rebuild();
3a101d05 7821 }
30e03acd 7822 cpuset_update_active_cpus();
3a101d05 7823}
e761b772 7824
40190a78 7825static int cpuset_cpu_inactive(unsigned int cpu)
3a101d05 7826{
40190a78 7827 if (!cpuhp_tasks_frozen) {
06a76fe0 7828 if (dl_cpu_busy(cpu))
135fb3e1 7829 return -EBUSY;
30e03acd 7830 cpuset_update_active_cpus();
135fb3e1 7831 } else {
d35be8ba
SB
7832 num_cpus_frozen++;
7833 partition_sched_domains(1, NULL, NULL);
e761b772 7834 }
135fb3e1 7835 return 0;
e761b772 7836}
e761b772 7837
40190a78 7838int sched_cpu_activate(unsigned int cpu)
135fb3e1 7839{
7d976699 7840 struct rq *rq = cpu_rq(cpu);
8a8c69c3 7841 struct rq_flags rf;
7d976699 7842
22f667c9
PZ
7843 /*
7844 * Make sure that when the hotplug state machine does a roll-back
7845 * we clear balance_push. Ideally that would happen earlier...
7846 */
2558aacf
PZ
7847 balance_push_set(cpu, false);
7848
ba2591a5
PZ
7849#ifdef CONFIG_SCHED_SMT
7850 /*
c5511d03 7851 * When going up, increment the number of cores with SMT present.
ba2591a5 7852 */
c5511d03
PZI
7853 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7854 static_branch_inc_cpuslocked(&sched_smt_present);
ba2591a5 7855#endif
40190a78 7856 set_cpu_active(cpu, true);
135fb3e1 7857
40190a78 7858 if (sched_smp_initialized) {
135fb3e1 7859 sched_domains_numa_masks_set(cpu);
40190a78 7860 cpuset_cpu_active();
e761b772 7861 }
7d976699
TG
7862
7863 /*
7864 * Put the rq online, if not already. This happens:
7865 *
7866 * 1) In the early boot process, because we build the real domains
d1ccc66d 7867 * after all CPUs have been brought up.
7d976699
TG
7868 *
7869 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7870 * domains.
7871 */
8a8c69c3 7872 rq_lock_irqsave(rq, &rf);
7d976699
TG
7873 if (rq->rd) {
7874 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7875 set_rq_online(rq);
7876 }
8a8c69c3 7877 rq_unlock_irqrestore(rq, &rf);
7d976699 7878
40190a78 7879 return 0;
135fb3e1
TG
7880}
7881
40190a78 7882int sched_cpu_deactivate(unsigned int cpu)
135fb3e1 7883{
120455c5
PZ
7884 struct rq *rq = cpu_rq(cpu);
7885 struct rq_flags rf;
135fb3e1
TG
7886 int ret;
7887
e0b257c3
AMB
7888 /*
7889 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
7890 * load balancing when not active
7891 */
7892 nohz_balance_exit_idle(rq);
7893
40190a78 7894 set_cpu_active(cpu, false);
741ba80f
PZ
7895
7896 /*
7897 * From this point forward, this CPU will refuse to run any task that
7898 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
7899 * push those tasks away until this gets cleared, see
7900 * sched_cpu_dying().
7901 */
975707f2
PZ
7902 balance_push_set(cpu, true);
7903
b2454caa 7904 /*
975707f2
PZ
7905 * We've cleared cpu_active_mask / set balance_push, wait for all
7906 * preempt-disabled and RCU users of this state to go away such that
7907 * all new such users will observe it.
b2454caa 7908 *
5ba2ffba
PZ
7909 * Specifically, we rely on ttwu to no longer target this CPU, see
7910 * ttwu_queue_cond() and is_cpu_allowed().
7911 *
b2454caa
PZ
7912 * Do sync before park smpboot threads to take care the rcu boost case.
7913 */
309ba859 7914 synchronize_rcu();
40190a78 7915
120455c5
PZ
7916 rq_lock_irqsave(rq, &rf);
7917 if (rq->rd) {
7918 update_rq_clock(rq);
7919 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7920 set_rq_offline(rq);
7921 }
7922 rq_unlock_irqrestore(rq, &rf);
7923
c5511d03
PZI
7924#ifdef CONFIG_SCHED_SMT
7925 /*
7926 * When going down, decrement the number of cores with SMT present.
7927 */
7928 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7929 static_branch_dec_cpuslocked(&sched_smt_present);
7930#endif
7931
40190a78
TG
7932 if (!sched_smp_initialized)
7933 return 0;
7934
7935 ret = cpuset_cpu_inactive(cpu);
7936 if (ret) {
2558aacf 7937 balance_push_set(cpu, false);
40190a78
TG
7938 set_cpu_active(cpu, true);
7939 return ret;
135fb3e1 7940 }
40190a78
TG
7941 sched_domains_numa_masks_clear(cpu);
7942 return 0;
135fb3e1
TG
7943}
7944
94baf7a5
TG
7945static void sched_rq_cpu_starting(unsigned int cpu)
7946{
7947 struct rq *rq = cpu_rq(cpu);
7948
7949 rq->calc_load_update = calc_load_update;
94baf7a5
TG
7950 update_max_interval();
7951}
7952
135fb3e1
TG
7953int sched_cpu_starting(unsigned int cpu)
7954{
94baf7a5 7955 sched_rq_cpu_starting(cpu);
d84b3131 7956 sched_tick_start(cpu);
135fb3e1 7957 return 0;
e761b772 7958}
e761b772 7959
f2785ddb 7960#ifdef CONFIG_HOTPLUG_CPU
1cf12e08
TG
7961
7962/*
7963 * Invoked immediately before the stopper thread is invoked to bring the
7964 * CPU down completely. At this point all per CPU kthreads except the
7965 * hotplug thread (current) and the stopper thread (inactive) have been
7966 * either parked or have been unbound from the outgoing CPU. Ensure that
7967 * any of those which might be on the way out are gone.
7968 *
7969 * If after this point a bound task is being woken on this CPU then the
7970 * responsible hotplug callback has failed to do its job.
7971 * sched_cpu_dying() will catch it with the appropriate fireworks.
7972 */
7973int sched_cpu_wait_empty(unsigned int cpu)
7974{
7975 balance_hotplug_wait();
7976 return 0;
7977}
7978
7979/*
7980 * Since this CPU is going 'away' for a while, fold any nr_active delta we
7981 * might have. Called from the CPU stopper task after ensuring that the
7982 * stopper is the last running task on the CPU, so nr_active count is
7983 * stable. We need to take the teardown thread which is calling this into
7984 * account, so we hand in adjust = 1 to the load calculation.
7985 *
7986 * Also see the comment "Global load-average calculations".
7987 */
7988static void calc_load_migrate(struct rq *rq)
7989{
7990 long delta = calc_load_fold_active(rq, 1);
7991
7992 if (delta)
7993 atomic_long_add(delta, &calc_load_tasks);
7994}
7995
36c6e17b
VS
7996static void dump_rq_tasks(struct rq *rq, const char *loglvl)
7997{
7998 struct task_struct *g, *p;
7999 int cpu = cpu_of(rq);
8000
8001 lockdep_assert_held(&rq->lock);
8002
8003 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8004 for_each_process_thread(g, p) {
8005 if (task_cpu(p) != cpu)
8006 continue;
8007
8008 if (!task_on_rq_queued(p))
8009 continue;
8010
8011 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8012 }
8013}
8014
f2785ddb
TG
8015int sched_cpu_dying(unsigned int cpu)
8016{
8017 struct rq *rq = cpu_rq(cpu);
8a8c69c3 8018 struct rq_flags rf;
f2785ddb
TG
8019
8020 /* Handle pending wakeups and then migrate everything off */
d84b3131 8021 sched_tick_stop(cpu);
8a8c69c3
PZ
8022
8023 rq_lock_irqsave(rq, &rf);
36c6e17b
VS
8024 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8025 WARN(true, "Dying CPU not properly vacated!");
8026 dump_rq_tasks(rq, KERN_WARNING);
8027 }
8a8c69c3
PZ
8028 rq_unlock_irqrestore(rq, &rf);
8029
22f667c9
PZ
8030 /*
8031 * Now that the CPU is offline, make sure we're welcome
8032 * to new tasks once we come back up.
8033 */
8034 balance_push_set(cpu, false);
8035
f2785ddb
TG
8036 calc_load_migrate(rq);
8037 update_max_interval();
e5ef27d0 8038 hrtick_clear(rq);
f2785ddb
TG
8039 return 0;
8040}
8041#endif
8042
1da177e4
LT
8043void __init sched_init_smp(void)
8044{
cb83b629
PZ
8045 sched_init_numa();
8046
6acce3ef
PZ
8047 /*
8048 * There's no userspace yet to cause hotplug operations; hence all the
d1ccc66d 8049 * CPU masks are stable and all blatant races in the below code cannot
b5a4e2bb 8050 * happen.
6acce3ef 8051 */
712555ee 8052 mutex_lock(&sched_domains_mutex);
8d5dc512 8053 sched_init_domains(cpu_active_mask);
712555ee 8054 mutex_unlock(&sched_domains_mutex);
e761b772 8055
5c1e1767 8056 /* Move init over to a non-isolated CPU */
edb93821 8057 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
5c1e1767 8058 BUG();
19978ca6 8059 sched_init_granularity();
4212823f 8060
0e3900e6 8061 init_sched_rt_class();
1baca4ce 8062 init_sched_dl_class();
1b568f0a 8063
e26fbffd 8064 sched_smp_initialized = true;
1da177e4 8065}
e26fbffd
TG
8066
8067static int __init migration_init(void)
8068{
77a5352b 8069 sched_cpu_starting(smp_processor_id());
e26fbffd 8070 return 0;
1da177e4 8071}
e26fbffd
TG
8072early_initcall(migration_init);
8073
1da177e4
LT
8074#else
8075void __init sched_init_smp(void)
8076{
19978ca6 8077 sched_init_granularity();
1da177e4
LT
8078}
8079#endif /* CONFIG_SMP */
8080
8081int in_sched_functions(unsigned long addr)
8082{
1da177e4
LT
8083 return in_lock_functions(addr) ||
8084 (addr >= (unsigned long)__sched_text_start
8085 && addr < (unsigned long)__sched_text_end);
8086}
8087
029632fb 8088#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
8089/*
8090 * Default task group.
8091 * Every task in system belongs to this group at bootup.
8092 */
029632fb 8093struct task_group root_task_group;
35cf4e50 8094LIST_HEAD(task_groups);
b0367629
WL
8095
8096/* Cacheline aligned slab cache for task_group */
8097static struct kmem_cache *task_group_cache __read_mostly;
052f1dc7 8098#endif
6f505b16 8099
e6252c3e 8100DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
10e2f1ac 8101DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
6f505b16 8102
1da177e4
LT
8103void __init sched_init(void)
8104{
a1dc0446 8105 unsigned long ptr = 0;
55627e3c 8106 int i;
434d53b0 8107
c3a340f7
SRV
8108 /* Make sure the linker didn't screw up */
8109 BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
8110 &fair_sched_class + 1 != &rt_sched_class ||
8111 &rt_sched_class + 1 != &dl_sched_class);
8112#ifdef CONFIG_SMP
8113 BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
8114#endif
8115
5822a454 8116 wait_bit_init();
9dcb8b68 8117
434d53b0 8118#ifdef CONFIG_FAIR_GROUP_SCHED
a1dc0446 8119 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0
MT
8120#endif
8121#ifdef CONFIG_RT_GROUP_SCHED
a1dc0446 8122 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0 8123#endif
a1dc0446
QC
8124 if (ptr) {
8125 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
434d53b0
MT
8126
8127#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 8128 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
8129 ptr += nr_cpu_ids * sizeof(void **);
8130
07e06b01 8131 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 8132 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 8133
b1d1779e
WY
8134 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8135 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6d6bc0ad 8136#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 8137#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 8138 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
8139 ptr += nr_cpu_ids * sizeof(void **);
8140
07e06b01 8141 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
8142 ptr += nr_cpu_ids * sizeof(void **);
8143
6d6bc0ad 8144#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 8145 }
df7c8e84 8146#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
8147 for_each_possible_cpu(i) {
8148 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
8149 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
10e2f1ac
PZ
8150 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
8151 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 8152 }
b74e6278 8153#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 8154
d1ccc66d
IM
8155 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
8156 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
332ac17e 8157
57d885fe
GH
8158#ifdef CONFIG_SMP
8159 init_defrootdomain();
8160#endif
8161
d0b27fa7 8162#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 8163 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 8164 global_rt_period(), global_rt_runtime());
6d6bc0ad 8165#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8166
7c941438 8167#ifdef CONFIG_CGROUP_SCHED
b0367629
WL
8168 task_group_cache = KMEM_CACHE(task_group, 0);
8169
07e06b01
YZ
8170 list_add(&root_task_group.list, &task_groups);
8171 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 8172 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 8173 autogroup_init(&init_task);
7c941438 8174#endif /* CONFIG_CGROUP_SCHED */
6f505b16 8175
0a945022 8176 for_each_possible_cpu(i) {
70b97a7f 8177 struct rq *rq;
1da177e4
LT
8178
8179 rq = cpu_rq(i);
05fa785c 8180 raw_spin_lock_init(&rq->lock);
7897986b 8181 rq->nr_running = 0;
dce48a84
TG
8182 rq->calc_load_active = 0;
8183 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 8184 init_cfs_rq(&rq->cfs);
07c54f7a
AV
8185 init_rt_rq(&rq->rt);
8186 init_dl_rq(&rq->dl);
dd41f596 8187#ifdef CONFIG_FAIR_GROUP_SCHED
6f505b16 8188 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
9c2791f9 8189 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
354d60c2 8190 /*
d1ccc66d 8191 * How much CPU bandwidth does root_task_group get?
354d60c2
DG
8192 *
8193 * In case of task-groups formed through the cgroup filesystem, it
d1ccc66d
IM
8194 * gets 100% of the CPU resources in the system. This overall
8195 * system CPU resource is divided among the tasks of
07e06b01 8196 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
8197 * based on each entity's (task or task-group's) weight
8198 * (se->load.weight).
8199 *
07e06b01 8200 * In other words, if root_task_group has 10 tasks of weight
354d60c2 8201 * 1024) and two child groups A0 and A1 (of weight 1024 each),
d1ccc66d 8202 * then A0's share of the CPU resource is:
354d60c2 8203 *
0d905bca 8204 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 8205 *
07e06b01
YZ
8206 * We achieve this by letting root_task_group's tasks sit
8207 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 8208 */
07e06b01 8209 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
8210#endif /* CONFIG_FAIR_GROUP_SCHED */
8211
8212 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 8213#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 8214 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 8215#endif
1da177e4 8216#ifdef CONFIG_SMP
41c7ce9a 8217 rq->sd = NULL;
57d885fe 8218 rq->rd = NULL;
ca6d75e6 8219 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 8220 rq->balance_callback = NULL;
1da177e4 8221 rq->active_balance = 0;
dd41f596 8222 rq->next_balance = jiffies;
1da177e4 8223 rq->push_cpu = 0;
0a2966b4 8224 rq->cpu = i;
1f11eb6a 8225 rq->online = 0;
eae0c9df
MG
8226 rq->idle_stamp = 0;
8227 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 8228 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
8229
8230 INIT_LIST_HEAD(&rq->cfs_tasks);
8231
dc938520 8232 rq_attach_root(rq, &def_root_domain);
3451d024 8233#ifdef CONFIG_NO_HZ_COMMON
e022e0d3 8234 rq->last_blocked_load_update_tick = jiffies;
a22e47a4 8235 atomic_set(&rq->nohz_flags, 0);
90b5363a 8236
545b8c8d 8237 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
83cd4fe2 8238#endif
f2469a1f
TG
8239#ifdef CONFIG_HOTPLUG_CPU
8240 rcuwait_init(&rq->hotplug_wait);
83cd4fe2 8241#endif
9fd81dd5 8242#endif /* CONFIG_SMP */
77a021be 8243 hrtick_rq_init(rq);
1da177e4 8244 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
8245 }
8246
9059393e 8247 set_load_weight(&init_task, false);
b50f60ce 8248
1da177e4
LT
8249 /*
8250 * The boot idle thread does lazy MMU switching as well:
8251 */
f1f10076 8252 mmgrab(&init_mm);
1da177e4
LT
8253 enter_lazy_tlb(&init_mm, current);
8254
8255 /*
8256 * Make us the idle thread. Technically, schedule() should not be
8257 * called from this thread, however somewhere below it might be,
8258 * but because we are the idle thread, we just pick up running again
8259 * when this runqueue becomes "idle".
8260 */
8261 init_idle(current, smp_processor_id());
dce48a84
TG
8262
8263 calc_load_update = jiffies + LOAD_FREQ;
8264
bf4d83f6 8265#ifdef CONFIG_SMP
29d5e047 8266 idle_thread_set_boot_cpu();
029632fb
PZ
8267#endif
8268 init_sched_fair_class();
6a7b3dc3 8269
4698f88c
JP
8270 init_schedstats();
8271
eb414681
JW
8272 psi_init();
8273
69842cba
PB
8274 init_uclamp();
8275
6892b75e 8276 scheduler_running = 1;
1da177e4
LT
8277}
8278
d902db1e 8279#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
8280static inline int preempt_count_equals(int preempt_offset)
8281{
da7142e2 8282 int nested = preempt_count() + rcu_preempt_depth();
e4aafea2 8283
4ba8216c 8284 return (nested == preempt_offset);
e4aafea2
FW
8285}
8286
d894837f 8287void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 8288{
8eb23b9f
PZ
8289 /*
8290 * Blocking primitives will set (and therefore destroy) current->state,
8291 * since we will exit with TASK_RUNNING make sure we enter with it,
8292 * otherwise we will destroy state.
8293 */
00845eb9 8294 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
8295 "do not call blocking ops when !TASK_RUNNING; "
8296 "state=%lx set at [<%p>] %pS\n",
8297 current->state,
8298 (void *)current->task_state_change,
00845eb9 8299 (void *)current->task_state_change);
8eb23b9f 8300
3427445a
PZ
8301 ___might_sleep(file, line, preempt_offset);
8302}
8303EXPORT_SYMBOL(__might_sleep);
8304
8305void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 8306{
d1ccc66d
IM
8307 /* Ratelimiting timestamp: */
8308 static unsigned long prev_jiffy;
8309
d1c6d149 8310 unsigned long preempt_disable_ip;
1da177e4 8311
d1ccc66d
IM
8312 /* WARN_ON_ONCE() by default, no rate limit required: */
8313 rcu_sleep_check();
8314
db273be2 8315 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
312364f3 8316 !is_idle_task(current) && !current->non_block_count) ||
1c3c5eab
TG
8317 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8318 oops_in_progress)
aef745fc 8319 return;
1c3c5eab 8320
aef745fc
IM
8321 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8322 return;
8323 prev_jiffy = jiffies;
8324
d1ccc66d 8325 /* Save this before calling printk(), since that will clobber it: */
d1c6d149
VN
8326 preempt_disable_ip = get_preempt_disable_ip(current);
8327
3df0fc5b
PZ
8328 printk(KERN_ERR
8329 "BUG: sleeping function called from invalid context at %s:%d\n",
8330 file, line);
8331 printk(KERN_ERR
312364f3
DV
8332 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8333 in_atomic(), irqs_disabled(), current->non_block_count,
3df0fc5b 8334 current->pid, current->comm);
aef745fc 8335
a8b686b3
ES
8336 if (task_stack_end_corrupted(current))
8337 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
8338
aef745fc
IM
8339 debug_show_held_locks(current);
8340 if (irqs_disabled())
8341 print_irqtrace_events(current);
d1c6d149
VN
8342 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
8343 && !preempt_count_equals(preempt_offset)) {
8f47b187 8344 pr_err("Preemption disabled at:");
2062a4e8 8345 print_ip_sym(KERN_ERR, preempt_disable_ip);
8f47b187 8346 }
aef745fc 8347 dump_stack();
f0b22e39 8348 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
1da177e4 8349}
3427445a 8350EXPORT_SYMBOL(___might_sleep);
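Code outside this file normally reaches ___might_sleep() through the might_sleep() annotation, placed at the top of any function that may block so that calls from atomic context are caught under CONFIG_DEBUG_ATOMIC_SLEEP. A small sketch with a hypothetical helper:

#include <linux/kernel.h>
#include <linux/slab.h>

static void *my_alloc_buffer(size_t len)
{
	might_sleep();		/* document and check: GFP_KERNEL may block on reclaim */
	return kzalloc(len, GFP_KERNEL);
}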
568f1967
PZ
8351
8352void __cant_sleep(const char *file, int line, int preempt_offset)
8353{
8354 static unsigned long prev_jiffy;
8355
8356 if (irqs_disabled())
8357 return;
8358
8359 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8360 return;
8361
8362 if (preempt_count() > preempt_offset)
8363 return;
8364
8365 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8366 return;
8367 prev_jiffy = jiffies;
8368
8369 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8370 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8371 in_atomic(), irqs_disabled(),
8372 current->pid, current->comm);
8373
8374 debug_show_held_locks(current);
8375 dump_stack();
8376 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8377}
8378EXPORT_SYMBOL_GPL(__cant_sleep);
74d862b6
TG
8379
8380#ifdef CONFIG_SMP
8381void __cant_migrate(const char *file, int line)
8382{
8383 static unsigned long prev_jiffy;
8384
8385 if (irqs_disabled())
8386 return;
8387
8388 if (is_migration_disabled(current))
8389 return;
8390
8391 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8392 return;
8393
8394 if (preempt_count() > 0)
8395 return;
8396
8397 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8398 return;
8399 prev_jiffy = jiffies;
8400
8401 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8402 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8403 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8404 current->pid, current->comm);
8405
8406 debug_show_held_locks(current);
8407 dump_stack();
8408 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8409}
8410EXPORT_SYMBOL_GPL(__cant_migrate);
8411#endif
1da177e4
LT
8412#endif
8413
8414#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 8415void normalize_rt_tasks(void)
3a5e4dc1 8416{
dbc7f069 8417 struct task_struct *g, *p;
d50dde5a
DF
8418 struct sched_attr attr = {
8419 .sched_policy = SCHED_NORMAL,
8420 };
1da177e4 8421
3472eaa1 8422 read_lock(&tasklist_lock);
5d07f420 8423 for_each_process_thread(g, p) {
178be793
IM
8424 /*
8425 * Only normalize user tasks:
8426 */
3472eaa1 8427 if (p->flags & PF_KTHREAD)
178be793
IM
8428 continue;
8429
4fa8d299
JP
8430 p->se.exec_start = 0;
8431 schedstat_set(p->se.statistics.wait_start, 0);
8432 schedstat_set(p->se.statistics.sleep_start, 0);
8433 schedstat_set(p->se.statistics.block_start, 0);
dd41f596 8434
aab03e05 8435 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
8436 /*
8437 * Renice negative nice level userspace
8438 * tasks back to 0:
8439 */
3472eaa1 8440 if (task_nice(p) < 0)
dd41f596 8441 set_user_nice(p, 0);
1da177e4 8442 continue;
dd41f596 8443 }
1da177e4 8444
dbc7f069 8445 __sched_setscheduler(p, &attr, false, false);
5d07f420 8446 }
3472eaa1 8447 read_unlock(&tasklist_lock);
1da177e4
LT
8448}
8449
8450#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 8451
67fc4e0c 8452#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 8453/*
67fc4e0c 8454 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
8455 *
8456 * They can only be called when the whole system has been
8457 * stopped - every CPU needs to be quiescent, and no scheduling
8458 * activity can take place. Using them for anything else would
8459 * be a serious bug, and as a result, they aren't even visible
8460 * under any other configuration.
8461 */
8462
8463/**
d1ccc66d 8464 * curr_task - return the current task for a given CPU.
1df5c10a
LT
8465 * @cpu: the processor in question.
8466 *
8467 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
8468 *
8469 * Return: The current task for @cpu.
1df5c10a 8470 */
36c8b586 8471struct task_struct *curr_task(int cpu)
1df5c10a
LT
8472{
8473 return cpu_curr(cpu);
8474}
8475
67fc4e0c
JW
8476#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8477
8478#ifdef CONFIG_IA64
1df5c10a 8479/**
5feeb783 8480 * ia64_set_curr_task - set the current task for a given CPU.
1df5c10a
LT
8481 * @cpu: the processor in question.
8482 * @p: the task pointer to set.
8483 *
8484 * Description: This function must only be used when non-maskable interrupts
41a2d6cf 8485 * are serviced on a separate stack. It allows the architecture to switch the
d1ccc66d 8486 * notion of the current task on a CPU in a non-blocking manner. This function
1df5c10a
LT
8487 * must be called with all CPUs synchronized and interrupts disabled; the
8488 * caller must save the original value of the current task (see
8489 * curr_task() above) and restore that value before reenabling interrupts and
8490 * re-starting the system.
8491 *
8492 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8493 */
a458ae2e 8494void ia64_set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
8495{
8496 cpu_curr(cpu) = p;
8497}
8498
8499#endif
29f59db3 8500
7c941438 8501#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
8502/* task_group_lock serializes the addition/removal of task groups */
8503static DEFINE_SPINLOCK(task_group_lock);
8504
2480c093
PB
8505static inline void alloc_uclamp_sched_group(struct task_group *tg,
8506 struct task_group *parent)
8507{
8508#ifdef CONFIG_UCLAMP_TASK_GROUP
0413d7f3 8509 enum uclamp_id clamp_id;
2480c093
PB
8510
8511 for_each_clamp_id(clamp_id) {
8512 uclamp_se_set(&tg->uclamp_req[clamp_id],
8513 uclamp_none(clamp_id), false);
0b60ba2d 8514 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
2480c093
PB
8515 }
8516#endif
8517}
8518
2f5177f0 8519static void sched_free_group(struct task_group *tg)
bccbe08a
PZ
8520{
8521 free_fair_sched_group(tg);
8522 free_rt_sched_group(tg);
e9aa1dd1 8523 autogroup_free(tg);
b0367629 8524 kmem_cache_free(task_group_cache, tg);
bccbe08a
PZ
8525}
8526
8527/* allocate runqueue etc for a new task group */
ec7dc8ac 8528struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
8529{
8530 struct task_group *tg;
bccbe08a 8531
b0367629 8532 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
bccbe08a
PZ
8533 if (!tg)
8534 return ERR_PTR(-ENOMEM);
8535
ec7dc8ac 8536 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
8537 goto err;
8538
ec7dc8ac 8539 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
8540 goto err;
8541
2480c093
PB
8542 alloc_uclamp_sched_group(tg, parent);
8543
ace783b9
LZ
8544 return tg;
8545
8546err:
2f5177f0 8547 sched_free_group(tg);
ace783b9
LZ
8548 return ERR_PTR(-ENOMEM);
8549}
8550
8551void sched_online_group(struct task_group *tg, struct task_group *parent)
8552{
8553 unsigned long flags;
8554
8ed36996 8555 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 8556 list_add_rcu(&tg->list, &task_groups);
f473aa5e 8557
d1ccc66d
IM
8558 /* Root should already exist: */
8559 WARN_ON(!parent);
f473aa5e
PZ
8560
8561 tg->parent = parent;
f473aa5e 8562 INIT_LIST_HEAD(&tg->children);
09f2724a 8563 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 8564 spin_unlock_irqrestore(&task_group_lock, flags);
8663e24d
PZ
8565
8566 online_fair_sched_group(tg);
29f59db3
SV
8567}
8568
9b5b7751 8569/* rcu callback to free various structures associated with a task group */
2f5177f0 8570static void sched_free_group_rcu(struct rcu_head *rhp)
29f59db3 8571{
d1ccc66d 8572 /* Now it should be safe to free those cfs_rqs: */
2f5177f0 8573 sched_free_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
8574}
8575
4cf86d77 8576void sched_destroy_group(struct task_group *tg)
ace783b9 8577{
d1ccc66d 8578 /* Wait for possible concurrent references to cfs_rqs to complete: */
2f5177f0 8579 call_rcu(&tg->rcu, sched_free_group_rcu);
ace783b9
LZ
8580}
8581
8582void sched_offline_group(struct task_group *tg)
29f59db3 8583{
8ed36996 8584 unsigned long flags;
29f59db3 8585
d1ccc66d 8586 /* End participation in shares distribution: */
6fe1f348 8587 unregister_fair_sched_group(tg);
3d4b47b4
PZ
8588
8589 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 8590 list_del_rcu(&tg->list);
f473aa5e 8591 list_del_rcu(&tg->siblings);
8ed36996 8592 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
8593}
8594
ea86cb4b 8595static void sched_change_group(struct task_struct *tsk, int type)
29f59db3 8596{
8323f26c 8597 struct task_group *tg;
29f59db3 8598
f7b8a47d
KT
8599 /*
 8600 * All callers are synchronized by task_rq_lock(); we do not use RCU,
8601 * which is pointless here. Thus, we pass "true" to task_css_check()
8602 * to prevent lockdep warnings.
8603 */
8604 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
8605 struct task_group, css);
8606 tg = autogroup_task_group(tsk, tg);
8607 tsk->sched_task_group = tg;
8608
810b3817 8609#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
8610 if (tsk->sched_class->task_change_group)
8611 tsk->sched_class->task_change_group(tsk, type);
b2b5ce02 8612 else
810b3817 8613#endif
b2b5ce02 8614 set_task_rq(tsk, task_cpu(tsk));
ea86cb4b
VG
8615}
8616
8617/*
8618 * Change task's runqueue when it moves between groups.
8619 *
8620 * The caller of this function should have put the task in its new group by
8621 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8622 * its new group.
8623 */
8624void sched_move_task(struct task_struct *tsk)
8625{
7a57f32a
PZ
8626 int queued, running, queue_flags =
8627 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
ea86cb4b
VG
8628 struct rq_flags rf;
8629 struct rq *rq;
8630
8631 rq = task_rq_lock(tsk, &rf);
1b1d6225 8632 update_rq_clock(rq);
ea86cb4b
VG
8633
8634 running = task_current(rq, tsk);
8635 queued = task_on_rq_queued(tsk);
8636
8637 if (queued)
7a57f32a 8638 dequeue_task(rq, tsk, queue_flags);
bb3bac2c 8639 if (running)
ea86cb4b
VG
8640 put_prev_task(rq, tsk);
8641
8642 sched_change_group(tsk, TASK_MOVE_GROUP);
810b3817 8643
da0c1e65 8644 if (queued)
7a57f32a 8645 enqueue_task(rq, tsk, queue_flags);
2a4b03ff 8646 if (running) {
03b7fad1 8647 set_next_task(rq, tsk);
2a4b03ff
VG
8648 /*
8649 * After changing group, the running task may have joined a
8650 * throttled one but it's still the running task. Trigger a
8651 * resched to make sure that task can still run.
8652 */
8653 resched_curr(rq);
8654 }
29f59db3 8655
eb580751 8656 task_rq_unlock(rq, tsk, &rf);
29f59db3 8657}
68318b8e 8658
a7c6d554 8659static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 8660{
a7c6d554 8661 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
8662}
8663
eb95419b
TH
8664static struct cgroup_subsys_state *
8665cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 8666{
eb95419b
TH
8667 struct task_group *parent = css_tg(parent_css);
8668 struct task_group *tg;
68318b8e 8669
eb95419b 8670 if (!parent) {
68318b8e 8671 /* This is early initialization for the top cgroup */
07e06b01 8672 return &root_task_group.css;
68318b8e
SV
8673 }
8674
ec7dc8ac 8675 tg = sched_create_group(parent);
68318b8e
SV
8676 if (IS_ERR(tg))
8677 return ERR_PTR(-ENOMEM);
8678
68318b8e
SV
8679 return &tg->css;
8680}
8681
96b77745
KK
8682/* Expose task group only after completing cgroup initialization */
8683static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8684{
8685 struct task_group *tg = css_tg(css);
8686 struct task_group *parent = css_tg(css->parent);
8687
8688 if (parent)
8689 sched_online_group(tg, parent);
7226017a
QY
8690
8691#ifdef CONFIG_UCLAMP_TASK_GROUP
8692 /* Propagate the effective uclamp value for the new group */
8693 cpu_util_update_eff(css);
8694#endif
8695
96b77745
KK
8696 return 0;
8697}
8698
2f5177f0 8699static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
ace783b9 8700{
eb95419b 8701 struct task_group *tg = css_tg(css);
ace783b9 8702
2f5177f0 8703 sched_offline_group(tg);
ace783b9
LZ
8704}
8705
eb95419b 8706static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 8707{
eb95419b 8708 struct task_group *tg = css_tg(css);
68318b8e 8709
2f5177f0
PZ
8710 /*
8711 * Relies on the RCU grace period between css_released() and this.
8712 */
8713 sched_free_group(tg);
ace783b9
LZ
8714}
8715
ea86cb4b
VG
8716/*
8717 * This is called before wake_up_new_task(), therefore we really only
 8718 * have to set its group bits; all the other stuff does not apply.
8719 */
b53202e6 8720static void cpu_cgroup_fork(struct task_struct *task)
eeb61e53 8721{
ea86cb4b
VG
8722 struct rq_flags rf;
8723 struct rq *rq;
8724
8725 rq = task_rq_lock(task, &rf);
8726
80f5c1b8 8727 update_rq_clock(rq);
ea86cb4b
VG
8728 sched_change_group(task, TASK_SET_GROUP);
8729
8730 task_rq_unlock(rq, task, &rf);
eeb61e53
KT
8731}
8732
1f7dd3e5 8733static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
68318b8e 8734{
bb9d97b6 8735 struct task_struct *task;
1f7dd3e5 8736 struct cgroup_subsys_state *css;
7dc603c9 8737 int ret = 0;
bb9d97b6 8738
1f7dd3e5 8739 cgroup_taskset_for_each(task, css, tset) {
b68aa230 8740#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 8741 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 8742 return -EINVAL;
b68aa230 8743#endif
7dc603c9 8744 /*
b19a888c 8745 * Serialize against wake_up_new_task() such that if it's
7dc603c9
PZ
8746 * running, we're sure to observe its full state.
8747 */
8748 raw_spin_lock_irq(&task->pi_lock);
8749 /*
8750 * Avoid calling sched_move_task() before wake_up_new_task()
8751 * has happened. This would lead to problems with PELT, due to
8752 * move wanting to detach+attach while we're not attached yet.
8753 */
8754 if (task->state == TASK_NEW)
8755 ret = -EINVAL;
8756 raw_spin_unlock_irq(&task->pi_lock);
8757
8758 if (ret)
8759 break;
bb9d97b6 8760 }
7dc603c9 8761 return ret;
be367d09 8762}
68318b8e 8763
1f7dd3e5 8764static void cpu_cgroup_attach(struct cgroup_taskset *tset)
68318b8e 8765{
bb9d97b6 8766 struct task_struct *task;
1f7dd3e5 8767 struct cgroup_subsys_state *css;
bb9d97b6 8768
1f7dd3e5 8769 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 8770 sched_move_task(task);
68318b8e
SV
8771}
8772
2480c093 8773#ifdef CONFIG_UCLAMP_TASK_GROUP
0b60ba2d
PB
8774static void cpu_util_update_eff(struct cgroup_subsys_state *css)
8775{
8776 struct cgroup_subsys_state *top_css = css;
8777 struct uclamp_se *uc_parent = NULL;
8778 struct uclamp_se *uc_se = NULL;
8779 unsigned int eff[UCLAMP_CNT];
0413d7f3 8780 enum uclamp_id clamp_id;
0b60ba2d
PB
8781 unsigned int clamps;
8782
8783 css_for_each_descendant_pre(css, top_css) {
8784 uc_parent = css_tg(css)->parent
8785 ? css_tg(css)->parent->uclamp : NULL;
8786
8787 for_each_clamp_id(clamp_id) {
 8788 /* Assume effective clamps match requested clamps */
8789 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
8790 /* Cap effective clamps with parent's effective clamps */
8791 if (uc_parent &&
8792 eff[clamp_id] > uc_parent[clamp_id].value) {
8793 eff[clamp_id] = uc_parent[clamp_id].value;
8794 }
8795 }
8796 /* Ensure protection is always capped by limit */
8797 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
8798
8799 /* Propagate most restrictive effective clamps */
8800 clamps = 0x0;
8801 uc_se = css_tg(css)->uclamp;
8802 for_each_clamp_id(clamp_id) {
8803 if (eff[clamp_id] == uc_se[clamp_id].value)
8804 continue;
8805 uc_se[clamp_id].value = eff[clamp_id];
8806 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
8807 clamps |= (0x1 << clamp_id);
8808 }
babbe170 8809 if (!clamps) {
0b60ba2d 8810 css = css_rightmost_descendant(css);
babbe170
PB
8811 continue;
8812 }
8813
 8814 /* Immediately update descendants' RUNNABLE tasks */
8815 uclamp_update_active_tasks(css, clamps);
0b60ba2d
PB
8816 }
8817}
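
To make the propagation above concrete, here is a minimal standalone sketch (plain userspace C, hypothetical names, two fixed levels instead of the css descendant walk): each effective clamp is the group's request capped by the parent's effective value, and the min clamp is additionally capped by the max clamp.

#include <stdio.h>

struct clamp { unsigned int min, max; };

/* Effective clamps: request capped by the parent's effective clamps,
 * then protection (min) capped by the limit (max). */
static struct clamp effective(struct clamp req, struct clamp parent_eff)
{
	struct clamp eff = req;

	if (eff.min > parent_eff.min)
		eff.min = parent_eff.min;
	if (eff.max > parent_eff.max)
		eff.max = parent_eff.max;
	if (eff.min > eff.max)
		eff.min = eff.max;
	return eff;
}

int main(void)
{
	struct clamp parent_eff = { 200, 600 };		/* already computed */
	struct clamp child_req  = { 300, 800 };		/* what the child asked for */
	struct clamp child_eff  = effective(child_req, parent_eff);

	printf("child effective: min=%u max=%u\n", child_eff.min, child_eff.max);
	/* prints: min=200 max=600 */
	return 0;
}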
2480c093
PB
8818
8819/*
 8820 * Integer 10^N for a given exponent N, obtained by casting the literal "1eN"
 8821 * C expression to an integer. Since there is no way to convert a macro argument (N) into a
8822 * character constant, use two levels of macros.
8823 */
8824#define _POW10(exp) ((unsigned int)1e##exp)
8825#define POW10(exp) _POW10(exp)
8826
8827struct uclamp_request {
8828#define UCLAMP_PERCENT_SHIFT 2
8829#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
8830 s64 percent;
8831 u64 util;
8832 int ret;
8833};
8834
8835static inline struct uclamp_request
8836capacity_from_percent(char *buf)
8837{
8838 struct uclamp_request req = {
8839 .percent = UCLAMP_PERCENT_SCALE,
8840 .util = SCHED_CAPACITY_SCALE,
8841 .ret = 0,
8842 };
8843
8844 buf = strim(buf);
8845 if (strcmp(buf, "max")) {
8846 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
8847 &req.percent);
8848 if (req.ret)
8849 return req;
b562d140 8850 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
2480c093
PB
8851 req.ret = -ERANGE;
8852 return req;
8853 }
8854
8855 req.util = req.percent << SCHED_CAPACITY_SHIFT;
8856 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
8857 }
8858
8859 return req;
8860}
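
Worked example of the fixed-point conversion above, as a standalone userspace sketch with hypothetical names; it assumes UCLAMP_PERCENT_SHIFT == 2 and SCHED_CAPACITY_SHIFT == 10 as defined in this file. A request of "50" is stored as 5000 and maps to a capacity of 512 out of 1024.

#include <stdio.h>
#include <stdint.h>

#define PCT_SCALE	(100 * 100)	/* 100 * 10^UCLAMP_PERCENT_SHIFT */
#define CAP_SHIFT	10		/* SCHED_CAPACITY_SHIFT */

/* Convert a percentage stored with two fractional digits (e.g. 5000 for
 * "50.00") into a capacity value in [0..1024], rounding to nearest. */
static uint64_t pct_to_util(uint64_t pct)
{
	return ((pct << CAP_SHIFT) + PCT_SCALE / 2) / PCT_SCALE;
}

int main(void)
{
	printf("50.00%% -> %llu\n", (unsigned long long)pct_to_util(5000)); /* 512 */
	printf("20.00%% -> %llu\n", (unsigned long long)pct_to_util(2000)); /* 205 */
	return 0;
}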
8861
8862static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
8863 size_t nbytes, loff_t off,
8864 enum uclamp_id clamp_id)
8865{
8866 struct uclamp_request req;
8867 struct task_group *tg;
8868
8869 req = capacity_from_percent(buf);
8870 if (req.ret)
8871 return req.ret;
8872
46609ce2
QY
8873 static_branch_enable(&sched_uclamp_used);
8874
2480c093
PB
8875 mutex_lock(&uclamp_mutex);
8876 rcu_read_lock();
8877
8878 tg = css_tg(of_css(of));
8879 if (tg->uclamp_req[clamp_id].value != req.util)
8880 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
8881
8882 /*
 8883 * Because the conversion rounding is not recoverable, we keep track of the
 8884 * exact requested value.
8885 */
8886 tg->uclamp_pct[clamp_id] = req.percent;
8887
0b60ba2d
PB
8888 /* Update effective clamps to track the most restrictive value */
8889 cpu_util_update_eff(of_css(of));
8890
2480c093
PB
8891 rcu_read_unlock();
8892 mutex_unlock(&uclamp_mutex);
8893
8894 return nbytes;
8895}
8896
8897static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
8898 char *buf, size_t nbytes,
8899 loff_t off)
8900{
8901 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
8902}
8903
8904static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
8905 char *buf, size_t nbytes,
8906 loff_t off)
8907{
8908 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
8909}
8910
8911static inline void cpu_uclamp_print(struct seq_file *sf,
8912 enum uclamp_id clamp_id)
8913{
8914 struct task_group *tg;
8915 u64 util_clamp;
8916 u64 percent;
8917 u32 rem;
8918
8919 rcu_read_lock();
8920 tg = css_tg(seq_css(sf));
8921 util_clamp = tg->uclamp_req[clamp_id].value;
8922 rcu_read_unlock();
8923
8924 if (util_clamp == SCHED_CAPACITY_SCALE) {
8925 seq_puts(sf, "max\n");
8926 return;
8927 }
8928
8929 percent = tg->uclamp_pct[clamp_id];
8930 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
8931 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
8932}
8933
8934static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
8935{
8936 cpu_uclamp_print(sf, UCLAMP_MIN);
8937 return 0;
8938}
8939
8940static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
8941{
8942 cpu_uclamp_print(sf, UCLAMP_MAX);
8943 return 0;
8944}
8945#endif /* CONFIG_UCLAMP_TASK_GROUP */
8946
052f1dc7 8947#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8948static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8949 struct cftype *cftype, u64 shareval)
68318b8e 8950{
5b61d50a
KK
8951 if (shareval > scale_load_down(ULONG_MAX))
8952 shareval = MAX_SHARES;
182446d0 8953 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8954}
8955
182446d0
TH
8956static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8957 struct cftype *cft)
68318b8e 8958{
182446d0 8959 struct task_group *tg = css_tg(css);
68318b8e 8960
c8b28116 8961 return (u64) scale_load_down(tg->shares);
68318b8e 8962}
ab84d31e
PT
8963
8964#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8965static DEFINE_MUTEX(cfs_constraints_mutex);
8966
ab84d31e 8967const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
b1546edc 8968static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
d505b8af
HC
8969/* More than 203 days if BW_SHIFT equals 20. */
8970static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
ab84d31e 8971
a790de99
PT
8972static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8973
ab84d31e
PT
8974static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8975{
56f570e5 8976 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8977 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8978
8979 if (tg == &root_task_group)
8980 return -EINVAL;
8981
8982 /*
 8983 * Ensure we have at least some amount of bandwidth every period. This is
8984 * to prevent reaching a state of large arrears when throttled via
8985 * entity_tick() resulting in prolonged exit starvation.
8986 */
8987 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8988 return -EINVAL;
8989
8990 /*
 8991 * Likewise, bound things on the other side by preventing insane quota
8992 * periods. This also allows us to normalize in computing quota
8993 * feasibility.
8994 */
8995 if (period > max_cfs_quota_period)
8996 return -EINVAL;
8997
d505b8af
HC
8998 /*
 8999 * Bound quota to defend against overflow during the bandwidth shift.
9000 */
9001 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9002 return -EINVAL;
9003
0e59bdae
KT
9004 /*
9005 * Prevent race between setting of cfs_rq->runtime_enabled and
9006 * unthrottle_offline_cfs_rqs().
9007 */
9008 get_online_cpus();
a790de99
PT
9009 mutex_lock(&cfs_constraints_mutex);
9010 ret = __cfs_schedulable(tg, period, quota);
9011 if (ret)
9012 goto out_unlock;
9013
58088ad0 9014 runtime_enabled = quota != RUNTIME_INF;
56f570e5 9015 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
9016 /*
9017 * If we need to toggle cfs_bandwidth_used, off->on must occur
9018 * before making related changes, and on->off must occur afterwards
9019 */
9020 if (runtime_enabled && !runtime_was_enabled)
9021 cfs_bandwidth_usage_inc();
ab84d31e
PT
9022 raw_spin_lock_irq(&cfs_b->lock);
9023 cfs_b->period = ns_to_ktime(period);
9024 cfs_b->quota = quota;
58088ad0 9025
a9cf55b2 9026 __refill_cfs_bandwidth_runtime(cfs_b);
d1ccc66d
IM
9027
9028 /* Restart the period timer (if active) to handle new period expiry: */
77a4d1a1
PZ
9029 if (runtime_enabled)
9030 start_cfs_bandwidth(cfs_b);
d1ccc66d 9031
ab84d31e
PT
9032 raw_spin_unlock_irq(&cfs_b->lock);
9033
0e59bdae 9034 for_each_online_cpu(i) {
ab84d31e 9035 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 9036 struct rq *rq = cfs_rq->rq;
8a8c69c3 9037 struct rq_flags rf;
ab84d31e 9038
8a8c69c3 9039 rq_lock_irq(rq, &rf);
58088ad0 9040 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 9041 cfs_rq->runtime_remaining = 0;
671fd9da 9042
029632fb 9043 if (cfs_rq->throttled)
671fd9da 9044 unthrottle_cfs_rq(cfs_rq);
8a8c69c3 9045 rq_unlock_irq(rq, &rf);
ab84d31e 9046 }
1ee14e6c
BS
9047 if (runtime_was_enabled && !runtime_enabled)
9048 cfs_bandwidth_usage_dec();
a790de99
PT
9049out_unlock:
9050 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 9051 put_online_cpus();
ab84d31e 9052
a790de99 9053 return ret;
ab84d31e
PT
9054}
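
A rough standalone restatement of the bounds enforced above (userspace C sketch, hypothetical helper; the max-runtime bound is omitted): both quota and period must be at least 1ms, the period may not exceed 1s, and an unlimited quota (shown here as -1) skips the quota check.

#include <stdio.h>

#define NSEC_PER_MSEC	1000000ULL
#define NSEC_PER_SEC	1000000000ULL

static int cfs_bounds_ok(long long quota_ns, unsigned long long period_ns)
{
	if (period_ns < NSEC_PER_MSEC || period_ns > NSEC_PER_SEC)
		return 0;				/* period outside [1ms, 1s] */
	if (quota_ns != -1 && (unsigned long long)quota_ns < NSEC_PER_MSEC)
		return 0;				/* finite quota below 1ms */
	return 1;
}

int main(void)
{
	printf("%d\n", cfs_bounds_ok(50 * NSEC_PER_MSEC, 100 * NSEC_PER_MSEC)); /* 1 */
	printf("%d\n", cfs_bounds_ok(-1, 2 * NSEC_PER_SEC));                    /* 0 */
	return 0;
}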
9055
b1546edc 9056static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
ab84d31e
PT
9057{
9058 u64 quota, period;
9059
029632fb 9060 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
9061 if (cfs_quota_us < 0)
9062 quota = RUNTIME_INF;
1a8b4540 9063 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
ab84d31e 9064 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
1a8b4540
KK
9065 else
9066 return -EINVAL;
ab84d31e
PT
9067
9068 return tg_set_cfs_bandwidth(tg, period, quota);
9069}
9070
b1546edc 9071static long tg_get_cfs_quota(struct task_group *tg)
ab84d31e
PT
9072{
9073 u64 quota_us;
9074
029632fb 9075 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
9076 return -1;
9077
029632fb 9078 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
9079 do_div(quota_us, NSEC_PER_USEC);
9080
9081 return quota_us;
9082}
9083
b1546edc 9084static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
ab84d31e
PT
9085{
9086 u64 quota, period;
9087
1a8b4540
KK
9088 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9089 return -EINVAL;
9090
ab84d31e 9091 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 9092 quota = tg->cfs_bandwidth.quota;
ab84d31e 9093
ab84d31e
PT
9094 return tg_set_cfs_bandwidth(tg, period, quota);
9095}
9096
b1546edc 9097static long tg_get_cfs_period(struct task_group *tg)
ab84d31e
PT
9098{
9099 u64 cfs_period_us;
9100
029632fb 9101 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
9102 do_div(cfs_period_us, NSEC_PER_USEC);
9103
9104 return cfs_period_us;
9105}
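
In user-visible terms, the cgroup-v1 knobs handled by these helpers take microseconds: a negative cpu.cfs_quota_us means unlimited, and otherwise the group may consume roughly quota/period CPUs' worth of time per period. A small illustrative sketch (userspace C, hypothetical name):

#include <stdio.h>

/* CPU share implied by cfs_quota_us/cfs_period_us; < 0 means unlimited. */
static double cfs_cpus(long quota_us, long period_us)
{
	return quota_us < 0 ? -1.0 : (double)quota_us / (double)period_us;
}

int main(void)
{
	printf("%.2f CPUs\n", cfs_cpus(50000, 100000));		/* 0.50 */
	printf("%.2f CPUs\n", cfs_cpus(200000, 100000));	/* 2.00 */
	printf("%.2f (unlimited)\n", cfs_cpus(-1, 100000));	/* -1.00 */
	return 0;
}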
9106
182446d0
TH
9107static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9108 struct cftype *cft)
ab84d31e 9109{
182446d0 9110 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
9111}
9112
182446d0
TH
9113static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9114 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 9115{
182446d0 9116 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
9117}
9118
182446d0
TH
9119static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9120 struct cftype *cft)
ab84d31e 9121{
182446d0 9122 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
9123}
9124
182446d0
TH
9125static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9126 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 9127{
182446d0 9128 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
9129}
9130
a790de99
PT
9131struct cfs_schedulable_data {
9132 struct task_group *tg;
9133 u64 period, quota;
9134};
9135
9136/*
9137 * normalize group quota/period to be quota/max_period
9138 * note: units are usecs
9139 */
9140static u64 normalize_cfs_quota(struct task_group *tg,
9141 struct cfs_schedulable_data *d)
9142{
9143 u64 quota, period;
9144
9145 if (tg == d->tg) {
9146 period = d->period;
9147 quota = d->quota;
9148 } else {
9149 period = tg_get_cfs_period(tg);
9150 quota = tg_get_cfs_quota(tg);
9151 }
9152
9153 /* note: these should typically be equivalent */
9154 if (quota == RUNTIME_INF || quota == -1)
9155 return RUNTIME_INF;
9156
9157 return to_ratio(period, quota);
9158}
9159
9160static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9161{
9162 struct cfs_schedulable_data *d = data;
029632fb 9163 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
9164 s64 quota = 0, parent_quota = -1;
9165
9166 if (!tg->parent) {
9167 quota = RUNTIME_INF;
9168 } else {
029632fb 9169 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
9170
9171 quota = normalize_cfs_quota(tg, d);
9c58c79a 9172 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
9173
9174 /*
c53593e5
TH
9175 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9176 * always take the min. On cgroup1, only inherit when no
d1ccc66d 9177 * limit is set:
a790de99 9178 */
c53593e5
TH
9179 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9180 quota = min(quota, parent_quota);
9181 } else {
9182 if (quota == RUNTIME_INF)
9183 quota = parent_quota;
9184 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9185 return -EINVAL;
9186 }
a790de99 9187 }
9c58c79a 9188 cfs_b->hierarchical_quota = quota;
a790de99
PT
9189
9190 return 0;
9191}
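
A condensed sketch of the hierarchy rule above (userspace C, hypothetical names; finite quotas only for the cgroup2 case): on cgroup2 a child's quota is effectively clamped to its parent's, while on cgroup1 a finite child quota larger than a finite parent quota is rejected and an unlimited child simply inherits the parent's limit for bookkeeping.

#include <stdio.h>

#define QUOTA_INF	(-1LL)

/* cgroup2: always take the min of child and parent (finite values here). */
static long long eff_quota_v2(long long child, long long parent)
{
	if (parent == QUOTA_INF)
		return child;
	return child < parent ? child : parent;
}

/* cgroup1: reject a child that asks for more than its parent allows. */
static int quota_ok_v1(long long child, long long parent)
{
	if (child == QUOTA_INF)
		return 1;	/* inherits the parent's limit */
	return parent == QUOTA_INF || child <= parent;
}

int main(void)
{
	printf("v2 effective: %lld\n", eff_quota_v2(150000, 100000)); /* 100000 */
	printf("v1 allowed:   %d\n", quota_ok_v1(150000, 100000));    /* 0 (-EINVAL) */
	return 0;
}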
9192
9193static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9194{
8277434e 9195 int ret;
a790de99
PT
9196 struct cfs_schedulable_data data = {
9197 .tg = tg,
9198 .period = period,
9199 .quota = quota,
9200 };
9201
9202 if (quota != RUNTIME_INF) {
9203 do_div(data.period, NSEC_PER_USEC);
9204 do_div(data.quota, NSEC_PER_USEC);
9205 }
9206
8277434e
PT
9207 rcu_read_lock();
9208 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9209 rcu_read_unlock();
9210
9211 return ret;
a790de99 9212}
e8da1b18 9213
a1f7164c 9214static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
e8da1b18 9215{
2da8ca82 9216 struct task_group *tg = css_tg(seq_css(sf));
029632fb 9217 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 9218
44ffc75b
TH
9219 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9220 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9221 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18 9222
3d6c50c2
YW
9223 if (schedstat_enabled() && tg != &root_task_group) {
9224 u64 ws = 0;
9225 int i;
9226
9227 for_each_possible_cpu(i)
9228 ws += schedstat_val(tg->se[i]->statistics.wait_sum);
9229
9230 seq_printf(sf, "wait_sum %llu\n", ws);
9231 }
9232
e8da1b18
NR
9233 return 0;
9234}
ab84d31e 9235#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 9236#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 9237
052f1dc7 9238#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
9239static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9240 struct cftype *cft, s64 val)
6f505b16 9241{
182446d0 9242 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
9243}
9244
182446d0
TH
9245static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9246 struct cftype *cft)
6f505b16 9247{
182446d0 9248 return sched_group_rt_runtime(css_tg(css));
6f505b16 9249}
d0b27fa7 9250
182446d0
TH
9251static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9252 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 9253{
182446d0 9254 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
9255}
9256
182446d0
TH
9257static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9258 struct cftype *cft)
d0b27fa7 9259{
182446d0 9260 return sched_group_rt_period(css_tg(css));
d0b27fa7 9261}
6d6bc0ad 9262#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 9263
a1f7164c 9264static struct cftype cpu_legacy_files[] = {
052f1dc7 9265#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
9266 {
9267 .name = "shares",
f4c753b7
PM
9268 .read_u64 = cpu_shares_read_u64,
9269 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 9270 },
052f1dc7 9271#endif
ab84d31e
PT
9272#ifdef CONFIG_CFS_BANDWIDTH
9273 {
9274 .name = "cfs_quota_us",
9275 .read_s64 = cpu_cfs_quota_read_s64,
9276 .write_s64 = cpu_cfs_quota_write_s64,
9277 },
9278 {
9279 .name = "cfs_period_us",
9280 .read_u64 = cpu_cfs_period_read_u64,
9281 .write_u64 = cpu_cfs_period_write_u64,
9282 },
e8da1b18
NR
9283 {
9284 .name = "stat",
a1f7164c 9285 .seq_show = cpu_cfs_stat_show,
e8da1b18 9286 },
ab84d31e 9287#endif
052f1dc7 9288#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 9289 {
9f0c1e56 9290 .name = "rt_runtime_us",
06ecb27c
PM
9291 .read_s64 = cpu_rt_runtime_read,
9292 .write_s64 = cpu_rt_runtime_write,
6f505b16 9293 },
d0b27fa7
PZ
9294 {
9295 .name = "rt_period_us",
f4c753b7
PM
9296 .read_u64 = cpu_rt_period_read_uint,
9297 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 9298 },
2480c093
PB
9299#endif
9300#ifdef CONFIG_UCLAMP_TASK_GROUP
9301 {
9302 .name = "uclamp.min",
9303 .flags = CFTYPE_NOT_ON_ROOT,
9304 .seq_show = cpu_uclamp_min_show,
9305 .write = cpu_uclamp_min_write,
9306 },
9307 {
9308 .name = "uclamp.max",
9309 .flags = CFTYPE_NOT_ON_ROOT,
9310 .seq_show = cpu_uclamp_max_show,
9311 .write = cpu_uclamp_max_write,
9312 },
052f1dc7 9313#endif
d1ccc66d 9314 { } /* Terminate */
68318b8e
SV
9315};
9316
d41bf8c9
TH
9317static int cpu_extra_stat_show(struct seq_file *sf,
9318 struct cgroup_subsys_state *css)
0d593634 9319{
0d593634
TH
9320#ifdef CONFIG_CFS_BANDWIDTH
9321 {
d41bf8c9 9322 struct task_group *tg = css_tg(css);
0d593634
TH
9323 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9324 u64 throttled_usec;
9325
9326 throttled_usec = cfs_b->throttled_time;
9327 do_div(throttled_usec, NSEC_PER_USEC);
9328
9329 seq_printf(sf, "nr_periods %d\n"
9330 "nr_throttled %d\n"
9331 "throttled_usec %llu\n",
9332 cfs_b->nr_periods, cfs_b->nr_throttled,
9333 throttled_usec);
9334 }
9335#endif
9336 return 0;
9337}
9338
9339#ifdef CONFIG_FAIR_GROUP_SCHED
9340static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9341 struct cftype *cft)
9342{
9343 struct task_group *tg = css_tg(css);
9344 u64 weight = scale_load_down(tg->shares);
9345
9346 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
9347}
9348
9349static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9350 struct cftype *cft, u64 weight)
9351{
9352 /*
9353 * cgroup weight knobs should use the common MIN, DFL and MAX
9354 * values which are 1, 100 and 10000 respectively. While it loses
9355 * a bit of range on both ends, it maps pretty well onto the shares
 9356 * value used by the scheduler, and the round-trip conversions preserve
9357 * the original value over the entire range.
9358 */
9359 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
9360 return -ERANGE;
9361
9362 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
9363
9364 return sched_group_set_shares(css_tg(css), scale_load(weight));
9365}
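
For reference, a standalone sketch of the weight/shares mapping described in the comment above (userspace C, hypothetical names): cgroup v2 weights in [1, 10000] with a default of 100 are scaled onto the shares range whose default is 1024, and the rounded round trip preserves the original value.

#include <stdio.h>

#define WEIGHT_DFL	100ULL		/* CGROUP_WEIGHT_DFL */
#define SHARES_DFL	1024ULL		/* default shares value */

static unsigned long long weight_to_shares(unsigned long long w)
{
	return (w * SHARES_DFL + WEIGHT_DFL / 2) / WEIGHT_DFL;
}

static unsigned long long shares_to_weight(unsigned long long s)
{
	return (s * WEIGHT_DFL + SHARES_DFL / 2) / SHARES_DFL;
}

int main(void)
{
	printf("weight 100   -> %llu shares\n", weight_to_shares(100));	  /* 1024 */
	printf("weight 1     -> %llu shares\n", weight_to_shares(1));	  /* 10 */
	printf("10 shares    -> weight %llu\n", shares_to_weight(10));	  /* 1 */
	printf("weight 10000 -> %llu shares\n", weight_to_shares(10000)); /* 102400 */
	return 0;
}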
9366
9367static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9368 struct cftype *cft)
9369{
9370 unsigned long weight = scale_load_down(css_tg(css)->shares);
9371 int last_delta = INT_MAX;
9372 int prio, delta;
9373
9374 /* find the closest nice value to the current weight */
9375 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9376 delta = abs(sched_prio_to_weight[prio] - weight);
9377 if (delta >= last_delta)
9378 break;
9379 last_delta = delta;
9380 }
9381
9382 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9383}
9384
9385static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9386 struct cftype *cft, s64 nice)
9387{
9388 unsigned long weight;
7281c8de 9389 int idx;
0d593634
TH
9390
9391 if (nice < MIN_NICE || nice > MAX_NICE)
9392 return -ERANGE;
9393
7281c8de
PZ
9394 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9395 idx = array_index_nospec(idx, 40);
9396 weight = sched_prio_to_weight[idx];
9397
0d593634
TH
9398 return sched_group_set_shares(css_tg(css), scale_load(weight));
9399}
9400#endif
9401
9402static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9403 long period, long quota)
9404{
9405 if (quota < 0)
9406 seq_puts(sf, "max");
9407 else
9408 seq_printf(sf, "%ld", quota);
9409
9410 seq_printf(sf, " %ld\n", period);
9411}
9412
9413/* caller should put the current value in *@periodp before calling */
9414static int __maybe_unused cpu_period_quota_parse(char *buf,
9415 u64 *periodp, u64 *quotap)
9416{
9417 char tok[21]; /* U64_MAX */
9418
4c47acd8 9419 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
0d593634
TH
9420 return -EINVAL;
9421
9422 *periodp *= NSEC_PER_USEC;
9423
9424 if (sscanf(tok, "%llu", quotap))
9425 *quotap *= NSEC_PER_USEC;
9426 else if (!strcmp(tok, "max"))
9427 *quotap = RUNTIME_INF;
9428 else
9429 return -EINVAL;
9430
9431 return 0;
9432}
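
This is the parser behind the cgroup v2 "cpu.max" interface: the file accepts either "<quota_us> [<period_us>]" or "max [<period_us>]", and an omitted period keeps the previously configured one. A minimal userspace sketch of the same syntax (hypothetical names):

#include <stdio.h>
#include <string.h>

static int parse_cpu_max(const char *buf, unsigned long long *period_us,
			 long long *quota_us)
{
	char tok[21];

	if (sscanf(buf, "%20s %llu", tok, period_us) < 1)
		return -1;
	if (!strcmp(tok, "max"))
		*quota_us = -1;				/* unlimited */
	else if (sscanf(tok, "%lld", quota_us) != 1)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long period = 100000;	/* current value, kept if omitted */
	long long quota;

	parse_cpu_max("50000 100000", &period, &quota);
	printf("quota=%lld period=%llu\n", quota, period);	/* 50000 100000 */
	parse_cpu_max("max", &period, &quota);
	printf("quota=%lld period=%llu\n", quota, period);	/* -1 100000 */
	return 0;
}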
9433
9434#ifdef CONFIG_CFS_BANDWIDTH
9435static int cpu_max_show(struct seq_file *sf, void *v)
9436{
9437 struct task_group *tg = css_tg(seq_css(sf));
9438
9439 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9440 return 0;
9441}
9442
9443static ssize_t cpu_max_write(struct kernfs_open_file *of,
9444 char *buf, size_t nbytes, loff_t off)
9445{
9446 struct task_group *tg = css_tg(of_css(of));
9447 u64 period = tg_get_cfs_period(tg);
9448 u64 quota;
9449 int ret;
9450
9451 ret = cpu_period_quota_parse(buf, &period, &quota);
9452 if (!ret)
9453 ret = tg_set_cfs_bandwidth(tg, period, quota);
9454 return ret ?: nbytes;
9455}
9456#endif
9457
9458static struct cftype cpu_files[] = {
0d593634
TH
9459#ifdef CONFIG_FAIR_GROUP_SCHED
9460 {
9461 .name = "weight",
9462 .flags = CFTYPE_NOT_ON_ROOT,
9463 .read_u64 = cpu_weight_read_u64,
9464 .write_u64 = cpu_weight_write_u64,
9465 },
9466 {
9467 .name = "weight.nice",
9468 .flags = CFTYPE_NOT_ON_ROOT,
9469 .read_s64 = cpu_weight_nice_read_s64,
9470 .write_s64 = cpu_weight_nice_write_s64,
9471 },
9472#endif
9473#ifdef CONFIG_CFS_BANDWIDTH
9474 {
9475 .name = "max",
9476 .flags = CFTYPE_NOT_ON_ROOT,
9477 .seq_show = cpu_max_show,
9478 .write = cpu_max_write,
9479 },
2480c093
PB
9480#endif
9481#ifdef CONFIG_UCLAMP_TASK_GROUP
9482 {
9483 .name = "uclamp.min",
9484 .flags = CFTYPE_NOT_ON_ROOT,
9485 .seq_show = cpu_uclamp_min_show,
9486 .write = cpu_uclamp_min_write,
9487 },
9488 {
9489 .name = "uclamp.max",
9490 .flags = CFTYPE_NOT_ON_ROOT,
9491 .seq_show = cpu_uclamp_max_show,
9492 .write = cpu_uclamp_max_write,
9493 },
0d593634
TH
9494#endif
9495 { } /* terminate */
9496};
9497
073219e9 9498struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748 9499 .css_alloc = cpu_cgroup_css_alloc,
96b77745 9500 .css_online = cpu_cgroup_css_online,
2f5177f0 9501 .css_released = cpu_cgroup_css_released,
92fb9748 9502 .css_free = cpu_cgroup_css_free,
d41bf8c9 9503 .css_extra_stat_show = cpu_extra_stat_show,
eeb61e53 9504 .fork = cpu_cgroup_fork,
bb9d97b6
TH
9505 .can_attach = cpu_cgroup_can_attach,
9506 .attach = cpu_cgroup_attach,
a1f7164c 9507 .legacy_cftypes = cpu_legacy_files,
0d593634 9508 .dfl_cftypes = cpu_files,
b38e42e9 9509 .early_init = true,
0d593634 9510 .threaded = true,
68318b8e
SV
9511};
9512
052f1dc7 9513#endif /* CONFIG_CGROUP_SCHED */
d842de87 9514
b637a328
PM
9515void dump_cpu_task(int cpu)
9516{
9517 pr_info("Task dump for CPU %d:\n", cpu);
9518 sched_show_task(cpu_curr(cpu));
9519}
ed82b8a1
AK
9520
9521/*
9522 * Nice levels are multiplicative, with a gentle 10% change for every
9523 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9524 * nice 1, it will get ~10% less CPU time than another CPU-bound task
9525 * that remained on nice 0.
9526 *
9527 * The "10% effect" is relative and cumulative: from _any_ nice level,
9528 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9529 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9530 * If a task goes up by ~10% and another task goes down by ~10% then
9531 * the relative distance between them is ~25%.)
9532 */
9533const int sched_prio_to_weight[40] = {
9534 /* -20 */ 88761, 71755, 56483, 46273, 36291,
9535 /* -15 */ 29154, 23254, 18705, 14949, 11916,
9536 /* -10 */ 9548, 7620, 6100, 4904, 3906,
9537 /* -5 */ 3121, 2501, 1991, 1586, 1277,
9538 /* 0 */ 1024, 820, 655, 526, 423,
9539 /* 5 */ 335, 272, 215, 172, 137,
9540 /* 10 */ 110, 87, 70, 56, 45,
9541 /* 15 */ 36, 29, 23, 18, 15,
9542};
9543
9544/*
9545 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
9546 *
9547 * In cases where the weight does not change often, we can use the
9548 * precalculated inverse to speed up arithmetics by turning divisions
9549 * into multiplications:
9550 */
9551const u32 sched_prio_to_wmult[40] = {
9552 /* -20 */ 48388, 59856, 76040, 92818, 118348,
9553 /* -15 */ 147320, 184698, 229616, 287308, 360437,
9554 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
9555 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
9556 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
9557 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
9558 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
9559 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
9560};
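
A quick standalone check of the two properties described above (userspace C, illustrative only): adjacent entries of sched_prio_to_weight differ by roughly a factor of 1.25, and each sched_prio_to_wmult entry is approximately 2^32 divided by the corresponding weight.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int w[3]    = { 1277, 1024, 820 };	/* weights for nice -1, 0, +1 */
	const uint32_t m0 = 4194304;			/* wmult entry for nice 0 */

	printf("nice -1 / nice 0 weight ratio: %.3f\n", (double)w[0] / w[1]); /* ~1.247 */
	printf("nice 0 / nice +1 weight ratio: %.3f\n", (double)w[1] / w[2]); /* ~1.249 */
	printf("2^32 / 1024 = %llu (table: %u)\n",
	       (1ULL << 32) / w[1], m0);				      /* 4194304 */
	return 0;
}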
14a7405b 9561
9d246053
PA
9562void call_trace_sched_update_nr_running(struct rq *rq, int count)
9563{
9564 trace_sched_update_nr_running_tp(rq, count);
9565}