]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/sched/core.c
sched/hotplug: Ensure only per-cpu kthreads run during hotplug
[mirror_ubuntu-jammy-kernel.git] / kernel / sched / core.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
391e43da 3 * kernel/sched/core.c
1da177e4 4 *
d1ccc66d 5 * Core kernel scheduler code and related syscalls
1da177e4
LT
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
1da177e4 8 */
9d246053
PA
9#define CREATE_TRACE_POINTS
10#include <trace/events/sched.h>
11#undef CREATE_TRACE_POINTS
12
325ea10c 13#include "sched.h"
1da177e4 14
7281c8de 15#include <linux/nospec.h>
85f1abe0 16
0ed557aa 17#include <linux/kcov.h>
d08b9f0c 18#include <linux/scs.h>
0ed557aa 19
96f951ed 20#include <asm/switch_to.h>
5517d86b 21#include <asm/tlb.h>
1da177e4 22
ea138446 23#include "../workqueue_internal.h"
771b53d0 24#include "../../fs/io-wq.h"
29d5e047 25#include "../smpboot.h"
6e0534f2 26
91c27493 27#include "pelt.h"
1f8db415 28#include "smp.h"
91c27493 29
a056a5be
QY
30/*
31 * Export tracepoints that act as a bare tracehook (ie: have no trace event
32 * associated with them) to allow external modules to probe them.
33 */
34EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
35EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
36EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
37EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
38EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
51cf18c9 39EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
a056a5be 40EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
4581bea8
VD
41EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
42EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
9d246053 43EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
a056a5be 44
029632fb 45DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
dc61b1d6 46
a73f863a 47#ifdef CONFIG_SCHED_DEBUG
bf5c91ba
IM
48/*
49 * Debugging: various feature bits
765cc3a4
PB
50 *
51 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
52 * sysctl_sched_features, defined in sched.h, to allow constants propagation
53 * at compile time and compiler optimization based on features default.
bf5c91ba 54 */
f00b45c1
PZ
55#define SCHED_FEAT(name, enabled) \
56 (1UL << __SCHED_FEAT_##name) * enabled |
bf5c91ba 57const_debug unsigned int sysctl_sched_features =
391e43da 58#include "features.h"
f00b45c1 59 0;
f00b45c1 60#undef SCHED_FEAT
765cc3a4 61#endif
f00b45c1 62
b82d9fdd
PZ
63/*
64 * Number of tasks to iterate in a single balance run.
65 * Limited because this is done with IRQs disabled.
66 */
67const_debug unsigned int sysctl_sched_nr_migrate = 32;
68
fa85ae24 69/*
d1ccc66d 70 * period over which we measure -rt task CPU usage in us.
fa85ae24
PZ
71 * default: 1s
72 */
9f0c1e56 73unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 74
029632fb 75__read_mostly int scheduler_running;
6892b75e 76
9f0c1e56
PZ
77/*
78 * part of the period that we allow rt tasks to run in us.
79 * default: 0.95s
80 */
81int sysctl_sched_rt_runtime = 950000;
fa85ae24 82
58877d34
PZ
83
84/*
85 * Serialization rules:
86 *
87 * Lock order:
88 *
89 * p->pi_lock
90 * rq->lock
91 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
92 *
93 * rq1->lock
94 * rq2->lock where: rq1 < rq2
95 *
96 * Regular state:
97 *
98 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
99 * local CPU's rq->lock, it optionally removes the task from the runqueue and
100 * always looks at the local rq data structures to find the most elegible task
101 * to run next.
102 *
103 * Task enqueue is also under rq->lock, possibly taken from another CPU.
104 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
105 * the local CPU to avoid bouncing the runqueue state around [ see
106 * ttwu_queue_wakelist() ]
107 *
108 * Task wakeup, specifically wakeups that involve migration, are horribly
109 * complicated to avoid having to take two rq->locks.
110 *
111 * Special state:
112 *
113 * System-calls and anything external will use task_rq_lock() which acquires
114 * both p->pi_lock and rq->lock. As a consequence the state they change is
115 * stable while holding either lock:
116 *
117 * - sched_setaffinity()/
118 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
119 * - set_user_nice(): p->se.load, p->*prio
120 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
121 * p->se.load, p->rt_priority,
122 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
123 * - sched_setnuma(): p->numa_preferred_nid
124 * - sched_move_task()/
125 * cpu_cgroup_fork(): p->sched_task_group
126 * - uclamp_update_active() p->uclamp*
127 *
128 * p->state <- TASK_*:
129 *
130 * is changed locklessly using set_current_state(), __set_current_state() or
131 * set_special_state(), see their respective comments, or by
132 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
133 * concurrent self.
134 *
135 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
136 *
137 * is set by activate_task() and cleared by deactivate_task(), under
138 * rq->lock. Non-zero indicates the task is runnable, the special
139 * ON_RQ_MIGRATING state is used for migration without holding both
140 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
141 *
142 * p->on_cpu <- { 0, 1 }:
143 *
144 * is set by prepare_task() and cleared by finish_task() such that it will be
145 * set before p is scheduled-in and cleared after p is scheduled-out, both
146 * under rq->lock. Non-zero indicates the task is running on its CPU.
147 *
148 * [ The astute reader will observe that it is possible for two tasks on one
149 * CPU to have ->on_cpu = 1 at the same time. ]
150 *
151 * task_cpu(p): is changed by set_task_cpu(), the rules are:
152 *
153 * - Don't call set_task_cpu() on a blocked task:
154 *
155 * We don't care what CPU we're not running on, this simplifies hotplug,
156 * the CPU assignment of blocked tasks isn't required to be valid.
157 *
158 * - for try_to_wake_up(), called under p->pi_lock:
159 *
160 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
161 *
162 * - for migration called under rq->lock:
163 * [ see task_on_rq_migrating() in task_rq_lock() ]
164 *
165 * o move_queued_task()
166 * o detach_task()
167 *
168 * - for migration called under double_rq_lock():
169 *
170 * o __migrate_swap_task()
171 * o push_rt_task() / pull_rt_task()
172 * o push_dl_task() / pull_dl_task()
173 * o dl_task_offline_migration()
174 *
175 */
176
3e71a462
PZ
177/*
178 * __task_rq_lock - lock the rq @p resides on.
179 */
eb580751 180struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
181 __acquires(rq->lock)
182{
183 struct rq *rq;
184
185 lockdep_assert_held(&p->pi_lock);
186
187 for (;;) {
188 rq = task_rq(p);
189 raw_spin_lock(&rq->lock);
190 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 191 rq_pin_lock(rq, rf);
3e71a462
PZ
192 return rq;
193 }
194 raw_spin_unlock(&rq->lock);
195
196 while (unlikely(task_on_rq_migrating(p)))
197 cpu_relax();
198 }
199}
200
201/*
202 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
203 */
eb580751 204struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
3e71a462
PZ
205 __acquires(p->pi_lock)
206 __acquires(rq->lock)
207{
208 struct rq *rq;
209
210 for (;;) {
eb580751 211 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
3e71a462
PZ
212 rq = task_rq(p);
213 raw_spin_lock(&rq->lock);
214 /*
215 * move_queued_task() task_rq_lock()
216 *
217 * ACQUIRE (rq->lock)
218 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
219 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
220 * [S] ->cpu = new_cpu [L] task_rq()
221 * [L] ->on_rq
222 * RELEASE (rq->lock)
223 *
c546951d 224 * If we observe the old CPU in task_rq_lock(), the acquire of
3e71a462
PZ
225 * the old rq->lock will fully serialize against the stores.
226 *
c546951d
AP
227 * If we observe the new CPU in task_rq_lock(), the address
228 * dependency headed by '[L] rq = task_rq()' and the acquire
229 * will pair with the WMB to ensure we then also see migrating.
3e71a462
PZ
230 */
231 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
d8ac8971 232 rq_pin_lock(rq, rf);
3e71a462
PZ
233 return rq;
234 }
235 raw_spin_unlock(&rq->lock);
eb580751 236 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
3e71a462
PZ
237
238 while (unlikely(task_on_rq_migrating(p)))
239 cpu_relax();
240 }
241}
242
535b9552
IM
243/*
244 * RQ-clock updating methods:
245 */
246
247static void update_rq_clock_task(struct rq *rq, s64 delta)
248{
249/*
250 * In theory, the compile should just see 0 here, and optimize out the call
251 * to sched_rt_avg_update. But I don't trust it...
252 */
11d4afd4
VG
253 s64 __maybe_unused steal = 0, irq_delta = 0;
254
535b9552
IM
255#ifdef CONFIG_IRQ_TIME_ACCOUNTING
256 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
257
258 /*
259 * Since irq_time is only updated on {soft,}irq_exit, we might run into
260 * this case when a previous update_rq_clock() happened inside a
261 * {soft,}irq region.
262 *
263 * When this happens, we stop ->clock_task and only update the
264 * prev_irq_time stamp to account for the part that fit, so that a next
265 * update will consume the rest. This ensures ->clock_task is
266 * monotonic.
267 *
268 * It does however cause some slight miss-attribution of {soft,}irq
269 * time, a more accurate solution would be to update the irq_time using
270 * the current rq->clock timestamp, except that would require using
271 * atomic ops.
272 */
273 if (irq_delta > delta)
274 irq_delta = delta;
275
276 rq->prev_irq_time += irq_delta;
277 delta -= irq_delta;
278#endif
279#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
280 if (static_key_false((&paravirt_steal_rq_enabled))) {
281 steal = paravirt_steal_clock(cpu_of(rq));
282 steal -= rq->prev_steal_time_rq;
283
284 if (unlikely(steal > delta))
285 steal = delta;
286
287 rq->prev_steal_time_rq += steal;
288 delta -= steal;
289 }
290#endif
291
292 rq->clock_task += delta;
293
11d4afd4 294#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
535b9552 295 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
91c27493 296 update_irq_load_avg(rq, irq_delta + steal);
535b9552 297#endif
23127296 298 update_rq_clock_pelt(rq, delta);
535b9552
IM
299}
300
301void update_rq_clock(struct rq *rq)
302{
303 s64 delta;
304
305 lockdep_assert_held(&rq->lock);
306
307 if (rq->clock_update_flags & RQCF_ACT_SKIP)
308 return;
309
310#ifdef CONFIG_SCHED_DEBUG
26ae58d2
PZ
311 if (sched_feat(WARN_DOUBLE_CLOCK))
312 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
535b9552
IM
313 rq->clock_update_flags |= RQCF_UPDATED;
314#endif
26ae58d2 315
535b9552
IM
316 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
317 if (delta < 0)
318 return;
319 rq->clock += delta;
320 update_rq_clock_task(rq, delta);
321}
322
90b5363a
PZI
323static inline void
324rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func)
325{
326 csd->flags = 0;
327 csd->func = func;
328 csd->info = rq;
329}
535b9552 330
8f4d37ec
PZ
331#ifdef CONFIG_SCHED_HRTICK
332/*
333 * Use HR-timers to deliver accurate preemption points.
8f4d37ec 334 */
8f4d37ec 335
8f4d37ec
PZ
336static void hrtick_clear(struct rq *rq)
337{
338 if (hrtimer_active(&rq->hrtick_timer))
339 hrtimer_cancel(&rq->hrtick_timer);
340}
341
8f4d37ec
PZ
342/*
343 * High-resolution timer tick.
344 * Runs from hardirq context with interrupts disabled.
345 */
346static enum hrtimer_restart hrtick(struct hrtimer *timer)
347{
348 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
8a8c69c3 349 struct rq_flags rf;
8f4d37ec
PZ
350
351 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
352
8a8c69c3 353 rq_lock(rq, &rf);
3e51f33f 354 update_rq_clock(rq);
8f4d37ec 355 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
8a8c69c3 356 rq_unlock(rq, &rf);
8f4d37ec
PZ
357
358 return HRTIMER_NORESTART;
359}
360
95e904c7 361#ifdef CONFIG_SMP
971ee28c 362
4961b6e1 363static void __hrtick_restart(struct rq *rq)
971ee28c
PZ
364{
365 struct hrtimer *timer = &rq->hrtick_timer;
971ee28c 366
d5096aa6 367 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
971ee28c
PZ
368}
369
31656519
PZ
370/*
371 * called from hardirq (IPI) context
372 */
373static void __hrtick_start(void *arg)
b328ca18 374{
31656519 375 struct rq *rq = arg;
8a8c69c3 376 struct rq_flags rf;
b328ca18 377
8a8c69c3 378 rq_lock(rq, &rf);
971ee28c 379 __hrtick_restart(rq);
8a8c69c3 380 rq_unlock(rq, &rf);
b328ca18
PZ
381}
382
31656519
PZ
383/*
384 * Called to set the hrtick timer state.
385 *
386 * called with rq->lock held and irqs disabled
387 */
029632fb 388void hrtick_start(struct rq *rq, u64 delay)
b328ca18 389{
31656519 390 struct hrtimer *timer = &rq->hrtick_timer;
177ef2a6 391 ktime_t time;
392 s64 delta;
393
394 /*
395 * Don't schedule slices shorter than 10000ns, that just
396 * doesn't make sense and can cause timer DoS.
397 */
398 delta = max_t(s64, delay, 10000LL);
399 time = ktime_add_ns(timer->base->get_time(), delta);
b328ca18 400
cc584b21 401 hrtimer_set_expires(timer, time);
31656519 402
fd3eafda 403 if (rq == this_rq())
971ee28c 404 __hrtick_restart(rq);
fd3eafda 405 else
c46fff2a 406 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
b328ca18
PZ
407}
408
31656519
PZ
409#else
410/*
411 * Called to set the hrtick timer state.
412 *
413 * called with rq->lock held and irqs disabled
414 */
029632fb 415void hrtick_start(struct rq *rq, u64 delay)
31656519 416{
86893335
WL
417 /*
418 * Don't schedule slices shorter than 10000ns, that just
419 * doesn't make sense. Rely on vruntime for fairness.
420 */
421 delay = max_t(u64, delay, 10000LL);
4961b6e1 422 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
d5096aa6 423 HRTIMER_MODE_REL_PINNED_HARD);
31656519 424}
90b5363a 425
31656519 426#endif /* CONFIG_SMP */
8f4d37ec 427
77a021be 428static void hrtick_rq_init(struct rq *rq)
8f4d37ec 429{
31656519 430#ifdef CONFIG_SMP
90b5363a 431 rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start);
31656519 432#endif
d5096aa6 433 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
31656519 434 rq->hrtick_timer.function = hrtick;
8f4d37ec 435}
006c75f1 436#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
437static inline void hrtick_clear(struct rq *rq)
438{
439}
440
77a021be 441static inline void hrtick_rq_init(struct rq *rq)
8f4d37ec
PZ
442{
443}
006c75f1 444#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 445
5529578a
FW
446/*
447 * cmpxchg based fetch_or, macro so it works for different integer types
448 */
449#define fetch_or(ptr, mask) \
450 ({ \
451 typeof(ptr) _ptr = (ptr); \
452 typeof(mask) _mask = (mask); \
453 typeof(*_ptr) _old, _val = *_ptr; \
454 \
455 for (;;) { \
456 _old = cmpxchg(_ptr, _val, _val | _mask); \
457 if (_old == _val) \
458 break; \
459 _val = _old; \
460 } \
461 _old; \
462})
463
e3baac47 464#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
fd99f91a
PZ
465/*
466 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
467 * this avoids any races wrt polling state changes and thereby avoids
468 * spurious IPIs.
469 */
470static bool set_nr_and_not_polling(struct task_struct *p)
471{
472 struct thread_info *ti = task_thread_info(p);
473 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
474}
e3baac47
PZ
475
476/*
477 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
478 *
479 * If this returns true, then the idle task promises to call
480 * sched_ttwu_pending() and reschedule soon.
481 */
482static bool set_nr_if_polling(struct task_struct *p)
483{
484 struct thread_info *ti = task_thread_info(p);
316c1608 485 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
e3baac47
PZ
486
487 for (;;) {
488 if (!(val & _TIF_POLLING_NRFLAG))
489 return false;
490 if (val & _TIF_NEED_RESCHED)
491 return true;
492 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
493 if (old == val)
494 break;
495 val = old;
496 }
497 return true;
498}
499
fd99f91a
PZ
500#else
501static bool set_nr_and_not_polling(struct task_struct *p)
502{
503 set_tsk_need_resched(p);
504 return true;
505}
e3baac47
PZ
506
507#ifdef CONFIG_SMP
508static bool set_nr_if_polling(struct task_struct *p)
509{
510 return false;
511}
512#endif
fd99f91a
PZ
513#endif
514
07879c6a 515static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
76751049
PZ
516{
517 struct wake_q_node *node = &task->wake_q;
518
519 /*
520 * Atomically grab the task, if ->wake_q is !nil already it means
521 * its already queued (either by us or someone else) and will get the
522 * wakeup due to that.
523 *
4c4e3731
PZ
524 * In order to ensure that a pending wakeup will observe our pending
525 * state, even in the failed case, an explicit smp_mb() must be used.
76751049 526 */
4c4e3731 527 smp_mb__before_atomic();
87ff19cb 528 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
07879c6a 529 return false;
76751049
PZ
530
531 /*
532 * The head is context local, there can be no concurrency.
533 */
534 *head->lastp = node;
535 head->lastp = &node->next;
07879c6a
DB
536 return true;
537}
538
539/**
540 * wake_q_add() - queue a wakeup for 'later' waking.
541 * @head: the wake_q_head to add @task to
542 * @task: the task to queue for 'later' wakeup
543 *
544 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
545 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
546 * instantly.
547 *
548 * This function must be used as-if it were wake_up_process(); IOW the task
549 * must be ready to be woken at this location.
550 */
551void wake_q_add(struct wake_q_head *head, struct task_struct *task)
552{
553 if (__wake_q_add(head, task))
554 get_task_struct(task);
555}
556
557/**
558 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
559 * @head: the wake_q_head to add @task to
560 * @task: the task to queue for 'later' wakeup
561 *
562 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
563 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
564 * instantly.
565 *
566 * This function must be used as-if it were wake_up_process(); IOW the task
567 * must be ready to be woken at this location.
568 *
569 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
570 * that already hold reference to @task can call the 'safe' version and trust
571 * wake_q to do the right thing depending whether or not the @task is already
572 * queued for wakeup.
573 */
574void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
575{
576 if (!__wake_q_add(head, task))
577 put_task_struct(task);
76751049
PZ
578}
579
580void wake_up_q(struct wake_q_head *head)
581{
582 struct wake_q_node *node = head->first;
583
584 while (node != WAKE_Q_TAIL) {
585 struct task_struct *task;
586
587 task = container_of(node, struct task_struct, wake_q);
588 BUG_ON(!task);
d1ccc66d 589 /* Task can safely be re-inserted now: */
76751049
PZ
590 node = node->next;
591 task->wake_q.next = NULL;
592
593 /*
7696f991
AP
594 * wake_up_process() executes a full barrier, which pairs with
595 * the queueing in wake_q_add() so as not to miss wakeups.
76751049
PZ
596 */
597 wake_up_process(task);
598 put_task_struct(task);
599 }
600}
601
c24d20db 602/*
8875125e 603 * resched_curr - mark rq's current task 'to be rescheduled now'.
c24d20db
IM
604 *
605 * On UP this means the setting of the need_resched flag, on SMP it
606 * might also involve a cross-CPU call to trigger the scheduler on
607 * the target CPU.
608 */
8875125e 609void resched_curr(struct rq *rq)
c24d20db 610{
8875125e 611 struct task_struct *curr = rq->curr;
c24d20db
IM
612 int cpu;
613
8875125e 614 lockdep_assert_held(&rq->lock);
c24d20db 615
8875125e 616 if (test_tsk_need_resched(curr))
c24d20db
IM
617 return;
618
8875125e 619 cpu = cpu_of(rq);
fd99f91a 620
f27dde8d 621 if (cpu == smp_processor_id()) {
8875125e 622 set_tsk_need_resched(curr);
f27dde8d 623 set_preempt_need_resched();
c24d20db 624 return;
f27dde8d 625 }
c24d20db 626
8875125e 627 if (set_nr_and_not_polling(curr))
c24d20db 628 smp_send_reschedule(cpu);
dfc68f29
AL
629 else
630 trace_sched_wake_idle_without_ipi(cpu);
c24d20db
IM
631}
632
029632fb 633void resched_cpu(int cpu)
c24d20db
IM
634{
635 struct rq *rq = cpu_rq(cpu);
636 unsigned long flags;
637
7c2102e5 638 raw_spin_lock_irqsave(&rq->lock, flags);
a0982dfa
PM
639 if (cpu_online(cpu) || cpu == smp_processor_id())
640 resched_curr(rq);
05fa785c 641 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 642}
06d8308c 643
b021fe3e 644#ifdef CONFIG_SMP
3451d024 645#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2 646/*
d1ccc66d
IM
647 * In the semi idle case, use the nearest busy CPU for migrating timers
648 * from an idle CPU. This is good for power-savings.
83cd4fe2
VP
649 *
650 * We don't do similar optimization for completely idle system, as
d1ccc66d
IM
651 * selecting an idle CPU will add more delays to the timers than intended
652 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
83cd4fe2 653 */
bc7a34b8 654int get_nohz_timer_target(void)
83cd4fe2 655{
e938b9c9 656 int i, cpu = smp_processor_id(), default_cpu = -1;
83cd4fe2
VP
657 struct sched_domain *sd;
658
e938b9c9
WL
659 if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
660 if (!idle_cpu(cpu))
661 return cpu;
662 default_cpu = cpu;
663 }
6201b4d6 664
057f3fad 665 rcu_read_lock();
83cd4fe2 666 for_each_domain(cpu, sd) {
e938b9c9
WL
667 for_each_cpu_and(i, sched_domain_span(sd),
668 housekeeping_cpumask(HK_FLAG_TIMER)) {
44496922
WL
669 if (cpu == i)
670 continue;
671
e938b9c9 672 if (!idle_cpu(i)) {
057f3fad
PZ
673 cpu = i;
674 goto unlock;
675 }
676 }
83cd4fe2 677 }
9642d18e 678
e938b9c9
WL
679 if (default_cpu == -1)
680 default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
681 cpu = default_cpu;
057f3fad
PZ
682unlock:
683 rcu_read_unlock();
83cd4fe2
VP
684 return cpu;
685}
d1ccc66d 686
06d8308c
TG
687/*
688 * When add_timer_on() enqueues a timer into the timer wheel of an
689 * idle CPU then this timer might expire before the next timer event
690 * which is scheduled to wake up that CPU. In case of a completely
691 * idle system the next event might even be infinite time into the
692 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
693 * leaves the inner idle loop so the newly added timer is taken into
694 * account when the CPU goes back to idle and evaluates the timer
695 * wheel for the next timer event.
696 */
1c20091e 697static void wake_up_idle_cpu(int cpu)
06d8308c
TG
698{
699 struct rq *rq = cpu_rq(cpu);
700
701 if (cpu == smp_processor_id())
702 return;
703
67b9ca70 704 if (set_nr_and_not_polling(rq->idle))
06d8308c 705 smp_send_reschedule(cpu);
dfc68f29
AL
706 else
707 trace_sched_wake_idle_without_ipi(cpu);
45bf76df
IM
708}
709
c5bfece2 710static bool wake_up_full_nohz_cpu(int cpu)
1c20091e 711{
53c5fa16
FW
712 /*
713 * We just need the target to call irq_exit() and re-evaluate
714 * the next tick. The nohz full kick at least implies that.
715 * If needed we can still optimize that later with an
716 * empty IRQ.
717 */
379d9ecb
PM
718 if (cpu_is_offline(cpu))
719 return true; /* Don't try to wake offline CPUs. */
c5bfece2 720 if (tick_nohz_full_cpu(cpu)) {
1c20091e
FW
721 if (cpu != smp_processor_id() ||
722 tick_nohz_tick_stopped())
53c5fa16 723 tick_nohz_full_kick_cpu(cpu);
1c20091e
FW
724 return true;
725 }
726
727 return false;
728}
729
379d9ecb
PM
730/*
731 * Wake up the specified CPU. If the CPU is going offline, it is the
732 * caller's responsibility to deal with the lost wakeup, for example,
733 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
734 */
1c20091e
FW
735void wake_up_nohz_cpu(int cpu)
736{
c5bfece2 737 if (!wake_up_full_nohz_cpu(cpu))
1c20091e
FW
738 wake_up_idle_cpu(cpu);
739}
740
19a1f5ec 741static void nohz_csd_func(void *info)
45bf76df 742{
19a1f5ec
PZ
743 struct rq *rq = info;
744 int cpu = cpu_of(rq);
745 unsigned int flags;
873b4c65
VG
746
747 /*
19a1f5ec 748 * Release the rq::nohz_csd.
873b4c65 749 */
19a1f5ec
PZ
750 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
751 WARN_ON(!(flags & NOHZ_KICK_MASK));
45bf76df 752
19a1f5ec
PZ
753 rq->idle_balance = idle_cpu(cpu);
754 if (rq->idle_balance && !need_resched()) {
755 rq->nohz_idle_balance = flags;
90b5363a
PZI
756 raise_softirq_irqoff(SCHED_SOFTIRQ);
757 }
2069dd75
PZ
758}
759
3451d024 760#endif /* CONFIG_NO_HZ_COMMON */
d842de87 761
ce831b38 762#ifdef CONFIG_NO_HZ_FULL
76d92ac3 763bool sched_can_stop_tick(struct rq *rq)
ce831b38 764{
76d92ac3
FW
765 int fifo_nr_running;
766
767 /* Deadline tasks, even if single, need the tick */
768 if (rq->dl.dl_nr_running)
769 return false;
770
1e78cdbd 771 /*
2548d546
PZ
772 * If there are more than one RR tasks, we need the tick to effect the
773 * actual RR behaviour.
1e78cdbd 774 */
76d92ac3
FW
775 if (rq->rt.rr_nr_running) {
776 if (rq->rt.rr_nr_running == 1)
777 return true;
778 else
779 return false;
1e78cdbd
RR
780 }
781
2548d546
PZ
782 /*
783 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
784 * forced preemption between FIFO tasks.
785 */
786 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
787 if (fifo_nr_running)
788 return true;
789
790 /*
791 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
792 * if there's more than one we need the tick for involuntary
793 * preemption.
794 */
795 if (rq->nr_running > 1)
541b8264 796 return false;
ce831b38 797
541b8264 798 return true;
ce831b38
FW
799}
800#endif /* CONFIG_NO_HZ_FULL */
6d6bc0ad 801#endif /* CONFIG_SMP */
18d95a28 802
a790de99
PT
803#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
804 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
c09595f6 805/*
8277434e
PT
806 * Iterate task_group tree rooted at *from, calling @down when first entering a
807 * node and @up when leaving it for the final time.
808 *
809 * Caller must hold rcu_lock or sufficient equivalent.
c09595f6 810 */
029632fb 811int walk_tg_tree_from(struct task_group *from,
8277434e 812 tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
813{
814 struct task_group *parent, *child;
eb755805 815 int ret;
c09595f6 816
8277434e
PT
817 parent = from;
818
c09595f6 819down:
eb755805
PZ
820 ret = (*down)(parent, data);
821 if (ret)
8277434e 822 goto out;
c09595f6
PZ
823 list_for_each_entry_rcu(child, &parent->children, siblings) {
824 parent = child;
825 goto down;
826
827up:
828 continue;
829 }
eb755805 830 ret = (*up)(parent, data);
8277434e
PT
831 if (ret || parent == from)
832 goto out;
c09595f6
PZ
833
834 child = parent;
835 parent = parent->parent;
836 if (parent)
837 goto up;
8277434e 838out:
eb755805 839 return ret;
c09595f6
PZ
840}
841
029632fb 842int tg_nop(struct task_group *tg, void *data)
eb755805 843{
e2b245f8 844 return 0;
eb755805 845}
18d95a28
PZ
846#endif
847
9059393e 848static void set_load_weight(struct task_struct *p, bool update_load)
45bf76df 849{
f05998d4
NR
850 int prio = p->static_prio - MAX_RT_PRIO;
851 struct load_weight *load = &p->se.load;
852
dd41f596
IM
853 /*
854 * SCHED_IDLE tasks get minimal weight:
855 */
1da1843f 856 if (task_has_idle_policy(p)) {
c8b28116 857 load->weight = scale_load(WEIGHT_IDLEPRIO);
f05998d4 858 load->inv_weight = WMULT_IDLEPRIO;
dd41f596
IM
859 return;
860 }
71f8bd46 861
9059393e
VG
862 /*
863 * SCHED_OTHER tasks have to update their load when changing their
864 * weight
865 */
866 if (update_load && p->sched_class == &fair_sched_class) {
867 reweight_task(p, prio);
868 } else {
869 load->weight = scale_load(sched_prio_to_weight[prio]);
870 load->inv_weight = sched_prio_to_wmult[prio];
871 }
71f8bd46
IM
872}
873
69842cba 874#ifdef CONFIG_UCLAMP_TASK
2480c093
PB
875/*
876 * Serializes updates of utilization clamp values
877 *
878 * The (slow-path) user-space triggers utilization clamp value updates which
879 * can require updates on (fast-path) scheduler's data structures used to
880 * support enqueue/dequeue operations.
881 * While the per-CPU rq lock protects fast-path update operations, user-space
882 * requests are serialized using a mutex to reduce the risk of conflicting
883 * updates or API abuses.
884 */
885static DEFINE_MUTEX(uclamp_mutex);
886
e8f14172
PB
887/* Max allowed minimum utilization */
888unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
889
890/* Max allowed maximum utilization */
891unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
892
13685c4a
QY
893/*
894 * By default RT tasks run at the maximum performance point/capacity of the
895 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
896 * SCHED_CAPACITY_SCALE.
897 *
898 * This knob allows admins to change the default behavior when uclamp is being
899 * used. In battery powered devices, particularly, running at the maximum
900 * capacity and frequency will increase energy consumption and shorten the
901 * battery life.
902 *
903 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
904 *
905 * This knob will not override the system default sched_util_clamp_min defined
906 * above.
907 */
908unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
909
e8f14172
PB
910/* All clamps are required to be less or equal than these values */
911static struct uclamp_se uclamp_default[UCLAMP_CNT];
69842cba 912
46609ce2
QY
913/*
914 * This static key is used to reduce the uclamp overhead in the fast path. It
915 * primarily disables the call to uclamp_rq_{inc, dec}() in
916 * enqueue/dequeue_task().
917 *
918 * This allows users to continue to enable uclamp in their kernel config with
919 * minimum uclamp overhead in the fast path.
920 *
921 * As soon as userspace modifies any of the uclamp knobs, the static key is
922 * enabled, since we have an actual users that make use of uclamp
923 * functionality.
924 *
925 * The knobs that would enable this static key are:
926 *
927 * * A task modifying its uclamp value with sched_setattr().
928 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
929 * * An admin modifying the cgroup cpu.uclamp.{min, max}
930 */
931DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
932
69842cba
PB
933/* Integer rounded range for each bucket */
934#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
935
936#define for_each_clamp_id(clamp_id) \
937 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
938
939static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
940{
941 return clamp_value / UCLAMP_BUCKET_DELTA;
942}
943
7763baac 944static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
69842cba
PB
945{
946 if (clamp_id == UCLAMP_MIN)
947 return 0;
948 return SCHED_CAPACITY_SCALE;
949}
950
a509a7cd
PB
951static inline void uclamp_se_set(struct uclamp_se *uc_se,
952 unsigned int value, bool user_defined)
69842cba
PB
953{
954 uc_se->value = value;
955 uc_se->bucket_id = uclamp_bucket_id(value);
a509a7cd 956 uc_se->user_defined = user_defined;
69842cba
PB
957}
958
e496187d 959static inline unsigned int
0413d7f3 960uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
961 unsigned int clamp_value)
962{
963 /*
964 * Avoid blocked utilization pushing up the frequency when we go
965 * idle (which drops the max-clamp) by retaining the last known
966 * max-clamp.
967 */
968 if (clamp_id == UCLAMP_MAX) {
969 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
970 return clamp_value;
971 }
972
973 return uclamp_none(UCLAMP_MIN);
974}
975
0413d7f3 976static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
977 unsigned int clamp_value)
978{
979 /* Reset max-clamp retention only on idle exit */
980 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
981 return;
982
983 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
984}
985
69842cba 986static inline
7763baac 987unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
0413d7f3 988 unsigned int clamp_value)
69842cba
PB
989{
990 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
991 int bucket_id = UCLAMP_BUCKETS - 1;
992
993 /*
994 * Since both min and max clamps are max aggregated, find the
995 * top most bucket with tasks in.
996 */
997 for ( ; bucket_id >= 0; bucket_id--) {
998 if (!bucket[bucket_id].tasks)
999 continue;
1000 return bucket[bucket_id].value;
1001 }
1002
1003 /* No tasks -- default clamp values */
e496187d 1004 return uclamp_idle_value(rq, clamp_id, clamp_value);
69842cba
PB
1005}
1006
13685c4a
QY
1007static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1008{
1009 unsigned int default_util_min;
1010 struct uclamp_se *uc_se;
1011
1012 lockdep_assert_held(&p->pi_lock);
1013
1014 uc_se = &p->uclamp_req[UCLAMP_MIN];
1015
1016 /* Only sync if user didn't override the default */
1017 if (uc_se->user_defined)
1018 return;
1019
1020 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1021 uclamp_se_set(uc_se, default_util_min, false);
1022}
1023
1024static void uclamp_update_util_min_rt_default(struct task_struct *p)
1025{
1026 struct rq_flags rf;
1027 struct rq *rq;
1028
1029 if (!rt_task(p))
1030 return;
1031
1032 /* Protect updates to p->uclamp_* */
1033 rq = task_rq_lock(p, &rf);
1034 __uclamp_update_util_min_rt_default(p);
1035 task_rq_unlock(rq, p, &rf);
1036}
1037
1038static void uclamp_sync_util_min_rt_default(void)
1039{
1040 struct task_struct *g, *p;
1041
1042 /*
1043 * copy_process() sysctl_uclamp
1044 * uclamp_min_rt = X;
1045 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1046 * // link thread smp_mb__after_spinlock()
1047 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1048 * sched_post_fork() for_each_process_thread()
1049 * __uclamp_sync_rt() __uclamp_sync_rt()
1050 *
1051 * Ensures that either sched_post_fork() will observe the new
1052 * uclamp_min_rt or for_each_process_thread() will observe the new
1053 * task.
1054 */
1055 read_lock(&tasklist_lock);
1056 smp_mb__after_spinlock();
1057 read_unlock(&tasklist_lock);
1058
1059 rcu_read_lock();
1060 for_each_process_thread(g, p)
1061 uclamp_update_util_min_rt_default(p);
1062 rcu_read_unlock();
1063}
1064
3eac870a 1065static inline struct uclamp_se
0413d7f3 1066uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
3eac870a
PB
1067{
1068 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1069#ifdef CONFIG_UCLAMP_TASK_GROUP
1070 struct uclamp_se uc_max;
1071
1072 /*
1073 * Tasks in autogroups or root task group will be
1074 * restricted by system defaults.
1075 */
1076 if (task_group_is_autogroup(task_group(p)))
1077 return uc_req;
1078 if (task_group(p) == &root_task_group)
1079 return uc_req;
1080
1081 uc_max = task_group(p)->uclamp[clamp_id];
1082 if (uc_req.value > uc_max.value || !uc_req.user_defined)
1083 return uc_max;
1084#endif
1085
1086 return uc_req;
1087}
1088
e8f14172
PB
1089/*
1090 * The effective clamp bucket index of a task depends on, by increasing
1091 * priority:
1092 * - the task specific clamp value, when explicitly requested from userspace
3eac870a
PB
1093 * - the task group effective clamp value, for tasks not either in the root
1094 * group or in an autogroup
e8f14172
PB
1095 * - the system default clamp value, defined by the sysadmin
1096 */
1097static inline struct uclamp_se
0413d7f3 1098uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
e8f14172 1099{
3eac870a 1100 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
e8f14172
PB
1101 struct uclamp_se uc_max = uclamp_default[clamp_id];
1102
1103 /* System default restrictions always apply */
1104 if (unlikely(uc_req.value > uc_max.value))
1105 return uc_max;
1106
1107 return uc_req;
1108}
1109
686516b5 1110unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
9d20ad7d
PB
1111{
1112 struct uclamp_se uc_eff;
1113
1114 /* Task currently refcounted: use back-annotated (effective) value */
1115 if (p->uclamp[clamp_id].active)
686516b5 1116 return (unsigned long)p->uclamp[clamp_id].value;
9d20ad7d
PB
1117
1118 uc_eff = uclamp_eff_get(p, clamp_id);
1119
686516b5 1120 return (unsigned long)uc_eff.value;
9d20ad7d
PB
1121}
1122
69842cba
PB
1123/*
1124 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1125 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1126 * updates the rq's clamp value if required.
60daf9c1
PB
1127 *
1128 * Tasks can have a task-specific value requested from user-space, track
1129 * within each bucket the maximum value for tasks refcounted in it.
1130 * This "local max aggregation" allows to track the exact "requested" value
1131 * for each bucket when all its RUNNABLE tasks require the same clamp.
69842cba
PB
1132 */
1133static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
0413d7f3 1134 enum uclamp_id clamp_id)
69842cba
PB
1135{
1136 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1137 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1138 struct uclamp_bucket *bucket;
1139
1140 lockdep_assert_held(&rq->lock);
1141
e8f14172
PB
1142 /* Update task effective clamp */
1143 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1144
69842cba
PB
1145 bucket = &uc_rq->bucket[uc_se->bucket_id];
1146 bucket->tasks++;
e8f14172 1147 uc_se->active = true;
69842cba 1148
e496187d
PB
1149 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1150
60daf9c1
PB
1151 /*
1152 * Local max aggregation: rq buckets always track the max
1153 * "requested" clamp value of its RUNNABLE tasks.
1154 */
1155 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1156 bucket->value = uc_se->value;
1157
69842cba 1158 if (uc_se->value > READ_ONCE(uc_rq->value))
60daf9c1 1159 WRITE_ONCE(uc_rq->value, uc_se->value);
69842cba
PB
1160}
1161
1162/*
1163 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1164 * is released. If this is the last task reference counting the rq's max
1165 * active clamp value, then the rq's clamp value is updated.
1166 *
1167 * Both refcounted tasks and rq's cached clamp values are expected to be
1168 * always valid. If it's detected they are not, as defensive programming,
1169 * enforce the expected state and warn.
1170 */
1171static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
0413d7f3 1172 enum uclamp_id clamp_id)
69842cba
PB
1173{
1174 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1175 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1176 struct uclamp_bucket *bucket;
e496187d 1177 unsigned int bkt_clamp;
69842cba
PB
1178 unsigned int rq_clamp;
1179
1180 lockdep_assert_held(&rq->lock);
1181
46609ce2
QY
1182 /*
1183 * If sched_uclamp_used was enabled after task @p was enqueued,
1184 * we could end up with unbalanced call to uclamp_rq_dec_id().
1185 *
1186 * In this case the uc_se->active flag should be false since no uclamp
1187 * accounting was performed at enqueue time and we can just return
1188 * here.
1189 *
1190 * Need to be careful of the following enqeueue/dequeue ordering
1191 * problem too
1192 *
1193 * enqueue(taskA)
1194 * // sched_uclamp_used gets enabled
1195 * enqueue(taskB)
1196 * dequeue(taskA)
1197 * // Must not decrement bukcet->tasks here
1198 * dequeue(taskB)
1199 *
1200 * where we could end up with stale data in uc_se and
1201 * bucket[uc_se->bucket_id].
1202 *
1203 * The following check here eliminates the possibility of such race.
1204 */
1205 if (unlikely(!uc_se->active))
1206 return;
1207
69842cba 1208 bucket = &uc_rq->bucket[uc_se->bucket_id];
46609ce2 1209
69842cba
PB
1210 SCHED_WARN_ON(!bucket->tasks);
1211 if (likely(bucket->tasks))
1212 bucket->tasks--;
46609ce2 1213
e8f14172 1214 uc_se->active = false;
69842cba 1215
60daf9c1
PB
1216 /*
1217 * Keep "local max aggregation" simple and accept to (possibly)
1218 * overboost some RUNNABLE tasks in the same bucket.
1219 * The rq clamp bucket value is reset to its base value whenever
1220 * there are no more RUNNABLE tasks refcounting it.
1221 */
69842cba
PB
1222 if (likely(bucket->tasks))
1223 return;
1224
1225 rq_clamp = READ_ONCE(uc_rq->value);
1226 /*
1227 * Defensive programming: this should never happen. If it happens,
1228 * e.g. due to future modification, warn and fixup the expected value.
1229 */
1230 SCHED_WARN_ON(bucket->value > rq_clamp);
e496187d
PB
1231 if (bucket->value >= rq_clamp) {
1232 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1233 WRITE_ONCE(uc_rq->value, bkt_clamp);
1234 }
69842cba
PB
1235}
1236
1237static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1238{
0413d7f3 1239 enum uclamp_id clamp_id;
69842cba 1240
46609ce2
QY
1241 /*
1242 * Avoid any overhead until uclamp is actually used by the userspace.
1243 *
1244 * The condition is constructed such that a NOP is generated when
1245 * sched_uclamp_used is disabled.
1246 */
1247 if (!static_branch_unlikely(&sched_uclamp_used))
1248 return;
1249
69842cba
PB
1250 if (unlikely(!p->sched_class->uclamp_enabled))
1251 return;
1252
1253 for_each_clamp_id(clamp_id)
1254 uclamp_rq_inc_id(rq, p, clamp_id);
e496187d
PB
1255
1256 /* Reset clamp idle holding when there is one RUNNABLE task */
1257 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1258 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
69842cba
PB
1259}
1260
1261static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1262{
0413d7f3 1263 enum uclamp_id clamp_id;
69842cba 1264
46609ce2
QY
1265 /*
1266 * Avoid any overhead until uclamp is actually used by the userspace.
1267 *
1268 * The condition is constructed such that a NOP is generated when
1269 * sched_uclamp_used is disabled.
1270 */
1271 if (!static_branch_unlikely(&sched_uclamp_used))
1272 return;
1273
69842cba
PB
1274 if (unlikely(!p->sched_class->uclamp_enabled))
1275 return;
1276
1277 for_each_clamp_id(clamp_id)
1278 uclamp_rq_dec_id(rq, p, clamp_id);
1279}
1280
babbe170 1281static inline void
0413d7f3 1282uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
babbe170
PB
1283{
1284 struct rq_flags rf;
1285 struct rq *rq;
1286
1287 /*
1288 * Lock the task and the rq where the task is (or was) queued.
1289 *
1290 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1291 * price to pay to safely serialize util_{min,max} updates with
1292 * enqueues, dequeues and migration operations.
1293 * This is the same locking schema used by __set_cpus_allowed_ptr().
1294 */
1295 rq = task_rq_lock(p, &rf);
1296
1297 /*
1298 * Setting the clamp bucket is serialized by task_rq_lock().
1299 * If the task is not yet RUNNABLE and its task_struct is not
1300 * affecting a valid clamp bucket, the next time it's enqueued,
1301 * it will already see the updated clamp bucket value.
1302 */
6e1ff077 1303 if (p->uclamp[clamp_id].active) {
babbe170
PB
1304 uclamp_rq_dec_id(rq, p, clamp_id);
1305 uclamp_rq_inc_id(rq, p, clamp_id);
1306 }
1307
1308 task_rq_unlock(rq, p, &rf);
1309}
1310
e3b8b6a0 1311#ifdef CONFIG_UCLAMP_TASK_GROUP
babbe170
PB
1312static inline void
1313uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1314 unsigned int clamps)
1315{
0413d7f3 1316 enum uclamp_id clamp_id;
babbe170
PB
1317 struct css_task_iter it;
1318 struct task_struct *p;
babbe170
PB
1319
1320 css_task_iter_start(css, 0, &it);
1321 while ((p = css_task_iter_next(&it))) {
1322 for_each_clamp_id(clamp_id) {
1323 if ((0x1 << clamp_id) & clamps)
1324 uclamp_update_active(p, clamp_id);
1325 }
1326 }
1327 css_task_iter_end(&it);
1328}
1329
7274a5c1
PB
1330static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1331static void uclamp_update_root_tg(void)
1332{
1333 struct task_group *tg = &root_task_group;
1334
1335 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1336 sysctl_sched_uclamp_util_min, false);
1337 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1338 sysctl_sched_uclamp_util_max, false);
1339
1340 rcu_read_lock();
1341 cpu_util_update_eff(&root_task_group.css);
1342 rcu_read_unlock();
1343}
1344#else
1345static void uclamp_update_root_tg(void) { }
1346#endif
1347
e8f14172 1348int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
32927393 1349 void *buffer, size_t *lenp, loff_t *ppos)
e8f14172 1350{
7274a5c1 1351 bool update_root_tg = false;
13685c4a 1352 int old_min, old_max, old_min_rt;
e8f14172
PB
1353 int result;
1354
2480c093 1355 mutex_lock(&uclamp_mutex);
e8f14172
PB
1356 old_min = sysctl_sched_uclamp_util_min;
1357 old_max = sysctl_sched_uclamp_util_max;
13685c4a 1358 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
e8f14172
PB
1359
1360 result = proc_dointvec(table, write, buffer, lenp, ppos);
1361 if (result)
1362 goto undo;
1363 if (!write)
1364 goto done;
1365
1366 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
13685c4a
QY
1367 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1368 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1369
e8f14172
PB
1370 result = -EINVAL;
1371 goto undo;
1372 }
1373
1374 if (old_min != sysctl_sched_uclamp_util_min) {
1375 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
a509a7cd 1376 sysctl_sched_uclamp_util_min, false);
7274a5c1 1377 update_root_tg = true;
e8f14172
PB
1378 }
1379 if (old_max != sysctl_sched_uclamp_util_max) {
1380 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
a509a7cd 1381 sysctl_sched_uclamp_util_max, false);
7274a5c1 1382 update_root_tg = true;
e8f14172
PB
1383 }
1384
46609ce2
QY
1385 if (update_root_tg) {
1386 static_branch_enable(&sched_uclamp_used);
7274a5c1 1387 uclamp_update_root_tg();
46609ce2 1388 }
7274a5c1 1389
13685c4a
QY
1390 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1391 static_branch_enable(&sched_uclamp_used);
1392 uclamp_sync_util_min_rt_default();
1393 }
7274a5c1 1394
e8f14172 1395 /*
7274a5c1
PB
1396 * We update all RUNNABLE tasks only when task groups are in use.
1397 * Otherwise, keep it simple and do just a lazy update at each next
1398 * task enqueue time.
e8f14172 1399 */
7274a5c1 1400
e8f14172
PB
1401 goto done;
1402
1403undo:
1404 sysctl_sched_uclamp_util_min = old_min;
1405 sysctl_sched_uclamp_util_max = old_max;
13685c4a 1406 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
e8f14172 1407done:
2480c093 1408 mutex_unlock(&uclamp_mutex);
e8f14172
PB
1409
1410 return result;
1411}
1412
a509a7cd
PB
1413static int uclamp_validate(struct task_struct *p,
1414 const struct sched_attr *attr)
1415{
1416 unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
1417 unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;
1418
1419 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
1420 lower_bound = attr->sched_util_min;
1421 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
1422 upper_bound = attr->sched_util_max;
1423
1424 if (lower_bound > upper_bound)
1425 return -EINVAL;
1426 if (upper_bound > SCHED_CAPACITY_SCALE)
1427 return -EINVAL;
1428
e65855a5
QY
1429 /*
1430 * We have valid uclamp attributes; make sure uclamp is enabled.
1431 *
1432 * We need to do that here, because enabling static branches is a
1433 * blocking operation which obviously cannot be done while holding
1434 * scheduler locks.
1435 */
1436 static_branch_enable(&sched_uclamp_used);
1437
a509a7cd
PB
1438 return 0;
1439}
1440
1441static void __setscheduler_uclamp(struct task_struct *p,
1442 const struct sched_attr *attr)
1443{
0413d7f3 1444 enum uclamp_id clamp_id;
1a00d999
PB
1445
1446 /*
1447 * On scheduling class change, reset to default clamps for tasks
1448 * without a task-specific value.
1449 */
1450 for_each_clamp_id(clamp_id) {
1451 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1a00d999
PB
1452
1453 /* Keep using defined clamps across class changes */
1454 if (uc_se->user_defined)
1455 continue;
1456
13685c4a
QY
1457 /*
1458 * RT by default have a 100% boost value that could be modified
1459 * at runtime.
1460 */
1a00d999 1461 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
13685c4a
QY
1462 __uclamp_update_util_min_rt_default(p);
1463 else
1464 uclamp_se_set(uc_se, uclamp_none(clamp_id), false);
1a00d999 1465
1a00d999
PB
1466 }
1467
a509a7cd
PB
1468 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1469 return;
1470
1471 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1472 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1473 attr->sched_util_min, true);
1474 }
1475
1476 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1477 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1478 attr->sched_util_max, true);
1479 }
1480}
1481
e8f14172
PB
1482static void uclamp_fork(struct task_struct *p)
1483{
0413d7f3 1484 enum uclamp_id clamp_id;
e8f14172 1485
13685c4a
QY
1486 /*
1487 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1488 * as the task is still at its early fork stages.
1489 */
e8f14172
PB
1490 for_each_clamp_id(clamp_id)
1491 p->uclamp[clamp_id].active = false;
a87498ac
PB
1492
1493 if (likely(!p->sched_reset_on_fork))
1494 return;
1495
1496 for_each_clamp_id(clamp_id) {
eaf5a92e
QP
1497 uclamp_se_set(&p->uclamp_req[clamp_id],
1498 uclamp_none(clamp_id), false);
a87498ac 1499 }
e8f14172
PB
1500}
1501
13685c4a
QY
1502static void uclamp_post_fork(struct task_struct *p)
1503{
1504 uclamp_update_util_min_rt_default(p);
1505}
1506
d81ae8aa
QY
1507static void __init init_uclamp_rq(struct rq *rq)
1508{
1509 enum uclamp_id clamp_id;
1510 struct uclamp_rq *uc_rq = rq->uclamp;
1511
1512 for_each_clamp_id(clamp_id) {
1513 uc_rq[clamp_id] = (struct uclamp_rq) {
1514 .value = uclamp_none(clamp_id)
1515 };
1516 }
1517
1518 rq->uclamp_flags = 0;
1519}
1520
69842cba
PB
1521static void __init init_uclamp(void)
1522{
e8f14172 1523 struct uclamp_se uc_max = {};
0413d7f3 1524 enum uclamp_id clamp_id;
69842cba
PB
1525 int cpu;
1526
d81ae8aa
QY
1527 for_each_possible_cpu(cpu)
1528 init_uclamp_rq(cpu_rq(cpu));
69842cba 1529
69842cba 1530 for_each_clamp_id(clamp_id) {
e8f14172 1531 uclamp_se_set(&init_task.uclamp_req[clamp_id],
a509a7cd 1532 uclamp_none(clamp_id), false);
69842cba 1533 }
e8f14172
PB
1534
1535 /* System defaults allow max clamp values for both indexes */
a509a7cd 1536 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2480c093 1537 for_each_clamp_id(clamp_id) {
e8f14172 1538 uclamp_default[clamp_id] = uc_max;
2480c093
PB
1539#ifdef CONFIG_UCLAMP_TASK_GROUP
1540 root_task_group.uclamp_req[clamp_id] = uc_max;
0b60ba2d 1541 root_task_group.uclamp[clamp_id] = uc_max;
2480c093
PB
1542#endif
1543 }
69842cba
PB
1544}
1545
1546#else /* CONFIG_UCLAMP_TASK */
1547static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1548static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
a509a7cd
PB
1549static inline int uclamp_validate(struct task_struct *p,
1550 const struct sched_attr *attr)
1551{
1552 return -EOPNOTSUPP;
1553}
1554static void __setscheduler_uclamp(struct task_struct *p,
1555 const struct sched_attr *attr) { }
e8f14172 1556static inline void uclamp_fork(struct task_struct *p) { }
13685c4a 1557static inline void uclamp_post_fork(struct task_struct *p) { }
69842cba
PB
1558static inline void init_uclamp(void) { }
1559#endif /* CONFIG_UCLAMP_TASK */
1560
1de64443 1561static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 1562{
0a67d1ee
PZ
1563 if (!(flags & ENQUEUE_NOCLOCK))
1564 update_rq_clock(rq);
1565
eb414681 1566 if (!(flags & ENQUEUE_RESTORE)) {
1de64443 1567 sched_info_queued(rq, p);
eb414681
JW
1568 psi_enqueue(p, flags & ENQUEUE_WAKEUP);
1569 }
0a67d1ee 1570
69842cba 1571 uclamp_rq_inc(rq, p);
371fd7e7 1572 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
1573}
1574
1de64443 1575static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 1576{
0a67d1ee
PZ
1577 if (!(flags & DEQUEUE_NOCLOCK))
1578 update_rq_clock(rq);
1579
eb414681 1580 if (!(flags & DEQUEUE_SAVE)) {
1de64443 1581 sched_info_dequeued(rq, p);
eb414681
JW
1582 psi_dequeue(p, flags & DEQUEUE_SLEEP);
1583 }
0a67d1ee 1584
69842cba 1585 uclamp_rq_dec(rq, p);
371fd7e7 1586 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
1587}
1588
029632fb 1589void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 1590{
371fd7e7 1591 enqueue_task(rq, p, flags);
7dd77884
PZ
1592
1593 p->on_rq = TASK_ON_RQ_QUEUED;
1e3c88bd
PZ
1594}
1595
029632fb 1596void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 1597{
7dd77884
PZ
1598 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
1599
371fd7e7 1600 dequeue_task(rq, p, flags);
1e3c88bd
PZ
1601}
1602
14531189 1603/*
dd41f596 1604 * __normal_prio - return the priority that is based on the static prio
14531189 1605 */
14531189
IM
1606static inline int __normal_prio(struct task_struct *p)
1607{
dd41f596 1608 return p->static_prio;
14531189
IM
1609}
1610
b29739f9
IM
1611/*
1612 * Calculate the expected normal priority: i.e. priority
1613 * without taking RT-inheritance into account. Might be
1614 * boosted by interactivity modifiers. Changes upon fork,
1615 * setprio syscalls, and whenever the interactivity
1616 * estimator recalculates.
1617 */
36c8b586 1618static inline int normal_prio(struct task_struct *p)
b29739f9
IM
1619{
1620 int prio;
1621
aab03e05
DF
1622 if (task_has_dl_policy(p))
1623 prio = MAX_DL_PRIO-1;
1624 else if (task_has_rt_policy(p))
b29739f9
IM
1625 prio = MAX_RT_PRIO-1 - p->rt_priority;
1626 else
1627 prio = __normal_prio(p);
1628 return prio;
1629}
1630
1631/*
1632 * Calculate the current priority, i.e. the priority
1633 * taken into account by the scheduler. This value might
1634 * be boosted by RT tasks, or might be boosted by
1635 * interactivity modifiers. Will be RT if the task got
1636 * RT-boosted. If not then it returns p->normal_prio.
1637 */
36c8b586 1638static int effective_prio(struct task_struct *p)
b29739f9
IM
1639{
1640 p->normal_prio = normal_prio(p);
1641 /*
1642 * If we are RT tasks or we were boosted to RT priority,
1643 * keep the priority unchanged. Otherwise, update priority
1644 * to the normal priority:
1645 */
1646 if (!rt_prio(p->prio))
1647 return p->normal_prio;
1648 return p->prio;
1649}
1650
1da177e4
LT
1651/**
1652 * task_curr - is this task currently executing on a CPU?
1653 * @p: the task in question.
e69f6186
YB
1654 *
1655 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 1656 */
36c8b586 1657inline int task_curr(const struct task_struct *p)
1da177e4
LT
1658{
1659 return cpu_curr(task_cpu(p)) == p;
1660}
1661
67dfa1b7 1662/*
4c9a4bc8
PZ
1663 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1664 * use the balance_callback list if you want balancing.
1665 *
1666 * this means any call to check_class_changed() must be followed by a call to
1667 * balance_callback().
67dfa1b7 1668 */
cb469845
SR
1669static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1670 const struct sched_class *prev_class,
da7a735e 1671 int oldprio)
cb469845
SR
1672{
1673 if (prev_class != p->sched_class) {
1674 if (prev_class->switched_from)
da7a735e 1675 prev_class->switched_from(rq, p);
4c9a4bc8 1676
da7a735e 1677 p->sched_class->switched_to(rq, p);
2d3d891d 1678 } else if (oldprio != p->prio || dl_task(p))
da7a735e 1679 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
1680}
1681
029632fb 1682void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1e5a7405 1683{
aa93cd53 1684 if (p->sched_class == rq->curr->sched_class)
1e5a7405 1685 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
aa93cd53
KT
1686 else if (p->sched_class > rq->curr->sched_class)
1687 resched_curr(rq);
1e5a7405
PZ
1688
1689 /*
1690 * A queue event has occurred, and we're going to schedule. In
1691 * this case, we can save a useless back to back clock update.
1692 */
da0c1e65 1693 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
adcc8da8 1694 rq_clock_skip_update(rq);
1e5a7405
PZ
1695}
1696
1da177e4 1697#ifdef CONFIG_SMP
175f0e25 1698
175f0e25 1699/*
bee98539 1700 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
175f0e25
PZ
1701 * __set_cpus_allowed_ptr() and select_fallback_rq().
1702 */
1703static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1704{
3bd37062 1705 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
175f0e25
PZ
1706 return false;
1707
1708 if (is_per_cpu_kthread(p))
1709 return cpu_online(cpu);
1710
1711 return cpu_active(cpu);
1712}
1713
5cc389bc
PZ
1714/*
1715 * This is how migration works:
1716 *
1717 * 1) we invoke migration_cpu_stop() on the target CPU using
1718 * stop_one_cpu().
1719 * 2) stopper starts to run (implicitly forcing the migrated thread
1720 * off the CPU)
1721 * 3) it checks whether the migrated task is still in the wrong runqueue.
1722 * 4) if it's in the wrong runqueue then the migration thread removes
1723 * it and puts it into the right queue.
1724 * 5) stopper completes and stop_one_cpu() returns and the migration
1725 * is done.
1726 */
1727
1728/*
1729 * move_queued_task - move a queued task to new rq.
1730 *
1731 * Returns (locked) new rq. Old rq's lock is released.
1732 */
8a8c69c3
PZ
1733static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
1734 struct task_struct *p, int new_cpu)
5cc389bc 1735{
5cc389bc
PZ
1736 lockdep_assert_held(&rq->lock);
1737
58877d34 1738 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
5cc389bc 1739 set_task_cpu(p, new_cpu);
8a8c69c3 1740 rq_unlock(rq, rf);
5cc389bc
PZ
1741
1742 rq = cpu_rq(new_cpu);
1743
8a8c69c3 1744 rq_lock(rq, rf);
5cc389bc 1745 BUG_ON(task_cpu(p) != new_cpu);
58877d34 1746 activate_task(rq, p, 0);
5cc389bc
PZ
1747 check_preempt_curr(rq, p, 0);
1748
1749 return rq;
1750}
1751
1752struct migration_arg {
1753 struct task_struct *task;
1754 int dest_cpu;
1755};
1756
1757/*
d1ccc66d 1758 * Move (not current) task off this CPU, onto the destination CPU. We're doing
5cc389bc
PZ
1759 * this because either it can't run here any more (set_cpus_allowed()
1760 * away from this CPU, or CPU going down), or because we're
1761 * attempting to rebalance this task on exec (sched_exec).
1762 *
1763 * So we race with normal scheduler movements, but that's OK, as long
1764 * as the task is no longer on this CPU.
5cc389bc 1765 */
8a8c69c3
PZ
1766static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1767 struct task_struct *p, int dest_cpu)
5cc389bc 1768{
5cc389bc 1769 /* Affinity changed (again). */
175f0e25 1770 if (!is_cpu_allowed(p, dest_cpu))
5e16bbc2 1771 return rq;
5cc389bc 1772
15ff991e 1773 update_rq_clock(rq);
8a8c69c3 1774 rq = move_queued_task(rq, rf, p, dest_cpu);
5e16bbc2
PZ
1775
1776 return rq;
5cc389bc
PZ
1777}
1778
1779/*
1780 * migration_cpu_stop - this will be executed by a highprio stopper thread
1781 * and performs thread migration by bumping thread off CPU then
1782 * 'pushing' onto another runqueue.
1783 */
1784static int migration_cpu_stop(void *data)
1785{
1786 struct migration_arg *arg = data;
5e16bbc2
PZ
1787 struct task_struct *p = arg->task;
1788 struct rq *rq = this_rq();
8a8c69c3 1789 struct rq_flags rf;
5cc389bc
PZ
1790
1791 /*
d1ccc66d
IM
1792 * The original target CPU might have gone down and we might
1793 * be on another CPU but it doesn't matter.
5cc389bc
PZ
1794 */
1795 local_irq_disable();
1796 /*
1797 * We need to explicitly wake pending tasks before running
3bd37062 1798 * __migrate_task() such that we will not miss enforcing cpus_ptr
5cc389bc
PZ
1799 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1800 */
a1488664 1801 flush_smp_call_function_from_idle();
5e16bbc2
PZ
1802
1803 raw_spin_lock(&p->pi_lock);
8a8c69c3 1804 rq_lock(rq, &rf);
5e16bbc2
PZ
1805 /*
1806 * If task_rq(p) != rq, it cannot be migrated here, because we're
1807 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1808 * we're holding p->pi_lock.
1809 */
bf89a304
CC
1810 if (task_rq(p) == rq) {
1811 if (task_on_rq_queued(p))
8a8c69c3 1812 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
bf89a304
CC
1813 else
1814 p->wake_cpu = arg->dest_cpu;
1815 }
8a8c69c3 1816 rq_unlock(rq, &rf);
5e16bbc2
PZ
1817 raw_spin_unlock(&p->pi_lock);
1818
5cc389bc
PZ
1819 local_irq_enable();
1820 return 0;
1821}
1822
c5b28038
PZ
1823/*
1824 * sched_class::set_cpus_allowed must do the below, but is not required to
1825 * actually call this function.
1826 */
1827void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
5cc389bc 1828{
3bd37062 1829 cpumask_copy(&p->cpus_mask, new_mask);
5cc389bc
PZ
1830 p->nr_cpus_allowed = cpumask_weight(new_mask);
1831}
1832
c5b28038
PZ
1833void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1834{
6c37067e
PZ
1835 struct rq *rq = task_rq(p);
1836 bool queued, running;
1837
c5b28038 1838 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
1839
1840 queued = task_on_rq_queued(p);
1841 running = task_current(rq, p);
1842
1843 if (queued) {
1844 /*
1845 * Because __kthread_bind() calls this on blocked tasks without
1846 * holding rq->lock.
1847 */
1848 lockdep_assert_held(&rq->lock);
7a57f32a 1849 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
6c37067e
PZ
1850 }
1851 if (running)
1852 put_prev_task(rq, p);
1853
c5b28038 1854 p->sched_class->set_cpus_allowed(p, new_mask);
6c37067e 1855
6c37067e 1856 if (queued)
7134b3e9 1857 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 1858 if (running)
03b7fad1 1859 set_next_task(rq, p);
c5b28038
PZ
1860}
1861
5cc389bc
PZ
1862/*
1863 * Change a given task's CPU affinity. Migrate the thread to a
1864 * proper CPU and schedule it away if the CPU it's executing on
1865 * is removed from the allowed bitmask.
1866 *
1867 * NOTE: the caller must have a valid reference to the task, the
1868 * task must not exit() & deallocate itself prematurely. The
1869 * call is not atomic; no spinlocks may be held.
1870 */
25834c73
PZ
1871static int __set_cpus_allowed_ptr(struct task_struct *p,
1872 const struct cpumask *new_mask, bool check)
5cc389bc 1873{
e9d867a6 1874 const struct cpumask *cpu_valid_mask = cpu_active_mask;
5cc389bc 1875 unsigned int dest_cpu;
eb580751
PZ
1876 struct rq_flags rf;
1877 struct rq *rq;
5cc389bc
PZ
1878 int ret = 0;
1879
eb580751 1880 rq = task_rq_lock(p, &rf);
a499c3ea 1881 update_rq_clock(rq);
5cc389bc 1882
e9d867a6
PZI
1883 if (p->flags & PF_KTHREAD) {
1884 /*
1885 * Kernel threads are allowed on online && !active CPUs
1886 */
1887 cpu_valid_mask = cpu_online_mask;
1888 }
1889
25834c73
PZ
1890 /*
1891 * Must re-check here, to close a race against __kthread_bind(),
1892 * sched_setaffinity() is not guaranteed to observe the flag.
1893 */
1894 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1895 ret = -EINVAL;
1896 goto out;
1897 }
1898
fd844ba9 1899 if (cpumask_equal(&p->cpus_mask, new_mask))
5cc389bc
PZ
1900 goto out;
1901
46a87b38
PT
1902 /*
1903 * Picking a ~random cpu helps in cases where we are changing affinity
1904 * for groups of tasks (ie. cpuset), so that load balancing is not
1905 * immediately required to distribute the tasks within their new mask.
1906 */
1907 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
714e501e 1908 if (dest_cpu >= nr_cpu_ids) {
5cc389bc
PZ
1909 ret = -EINVAL;
1910 goto out;
1911 }
1912
1913 do_set_cpus_allowed(p, new_mask);
1914
e9d867a6
PZI
1915 if (p->flags & PF_KTHREAD) {
1916 /*
1917 * For kernel threads that do indeed end up on online &&
d1ccc66d 1918 * !active we want to ensure they are strict per-CPU threads.
e9d867a6
PZI
1919 */
1920 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
1921 !cpumask_intersects(new_mask, cpu_active_mask) &&
1922 p->nr_cpus_allowed != 1);
1923 }
1924
5cc389bc
PZ
1925 /* Can the task run on the task's current CPU? If so, we're done */
1926 if (cpumask_test_cpu(task_cpu(p), new_mask))
1927 goto out;
1928
5cc389bc
PZ
1929 if (task_running(rq, p) || p->state == TASK_WAKING) {
1930 struct migration_arg arg = { p, dest_cpu };
1931 /* Need help from migration thread: drop lock and wait. */
eb580751 1932 task_rq_unlock(rq, p, &rf);
5cc389bc 1933 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5cc389bc 1934 return 0;
cbce1a68
PZ
1935 } else if (task_on_rq_queued(p)) {
1936 /*
1937 * OK, since we're going to drop the lock immediately
1938 * afterwards anyway.
1939 */
8a8c69c3 1940 rq = move_queued_task(rq, &rf, p, dest_cpu);
cbce1a68 1941 }
5cc389bc 1942out:
eb580751 1943 task_rq_unlock(rq, p, &rf);
5cc389bc
PZ
1944
1945 return ret;
1946}
25834c73
PZ
1947
1948int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1949{
1950 return __set_cpus_allowed_ptr(p, new_mask, false);
1951}
5cc389bc
PZ
1952EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1953
dd41f596 1954void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 1955{
e2912009
PZ
1956#ifdef CONFIG_SCHED_DEBUG
1957 /*
1958 * We should never call set_task_cpu() on a blocked task,
1959 * ttwu() will sort out the placement.
1960 */
077614ee 1961 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
e2336f6e 1962 !p->on_rq);
0122ec5b 1963
3ea94de1
JP
1964 /*
1965 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
1966 * because schedstat_wait_{start,end} rebase migrating task's wait_start
1967 * time relying on p->on_rq.
1968 */
1969 WARN_ON_ONCE(p->state == TASK_RUNNING &&
1970 p->sched_class == &fair_sched_class &&
1971 (p->on_rq && !task_on_rq_migrating(p)));
1972
0122ec5b 1973#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
1974 /*
1975 * The caller should hold either p->pi_lock or rq->lock, when changing
1976 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1977 *
1978 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 1979 * see task_group().
6c6c54e1
PZ
1980 *
1981 * Furthermore, all task_rq users should acquire both locks, see
1982 * task_rq_lock().
1983 */
0122ec5b
PZ
1984 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1985 lockdep_is_held(&task_rq(p)->lock)));
1986#endif
4ff9083b
PZ
1987 /*
1988 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
1989 */
1990 WARN_ON_ONCE(!cpu_online(new_cpu));
e2912009
PZ
1991#endif
1992
de1d7286 1993 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 1994
0c69774e 1995 if (task_cpu(p) != new_cpu) {
0a74bef8 1996 if (p->sched_class->migrate_task_rq)
1327237a 1997 p->sched_class->migrate_task_rq(p, new_cpu);
0c69774e 1998 p->se.nr_migrations++;
d7822b1e 1999 rseq_migrate(p);
ff303e66 2000 perf_event_task_migrate(p);
0c69774e 2001 }
dd41f596
IM
2002
2003 __set_task_cpu(p, new_cpu);
c65cc870
IM
2004}
2005
0ad4e3df 2006#ifdef CONFIG_NUMA_BALANCING
ac66f547
PZ
2007static void __migrate_swap_task(struct task_struct *p, int cpu)
2008{
da0c1e65 2009 if (task_on_rq_queued(p)) {
ac66f547 2010 struct rq *src_rq, *dst_rq;
8a8c69c3 2011 struct rq_flags srf, drf;
ac66f547
PZ
2012
2013 src_rq = task_rq(p);
2014 dst_rq = cpu_rq(cpu);
2015
8a8c69c3
PZ
2016 rq_pin_lock(src_rq, &srf);
2017 rq_pin_lock(dst_rq, &drf);
2018
ac66f547
PZ
2019 deactivate_task(src_rq, p, 0);
2020 set_task_cpu(p, cpu);
2021 activate_task(dst_rq, p, 0);
2022 check_preempt_curr(dst_rq, p, 0);
8a8c69c3
PZ
2023
2024 rq_unpin_lock(dst_rq, &drf);
2025 rq_unpin_lock(src_rq, &srf);
2026
ac66f547
PZ
2027 } else {
2028 /*
2029 * Task isn't running anymore; make it appear like we migrated
2030 * it before it went to sleep. This means on wakeup we make the
d1ccc66d 2031 * previous CPU our target instead of where it really is.
ac66f547
PZ
2032 */
2033 p->wake_cpu = cpu;
2034 }
2035}
2036
2037struct migration_swap_arg {
2038 struct task_struct *src_task, *dst_task;
2039 int src_cpu, dst_cpu;
2040};
2041
2042static int migrate_swap_stop(void *data)
2043{
2044 struct migration_swap_arg *arg = data;
2045 struct rq *src_rq, *dst_rq;
2046 int ret = -EAGAIN;
2047
62694cd5
PZ
2048 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
2049 return -EAGAIN;
2050
ac66f547
PZ
2051 src_rq = cpu_rq(arg->src_cpu);
2052 dst_rq = cpu_rq(arg->dst_cpu);
2053
74602315
PZ
2054 double_raw_lock(&arg->src_task->pi_lock,
2055 &arg->dst_task->pi_lock);
ac66f547 2056 double_rq_lock(src_rq, dst_rq);
62694cd5 2057
ac66f547
PZ
2058 if (task_cpu(arg->dst_task) != arg->dst_cpu)
2059 goto unlock;
2060
2061 if (task_cpu(arg->src_task) != arg->src_cpu)
2062 goto unlock;
2063
3bd37062 2064 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
ac66f547
PZ
2065 goto unlock;
2066
3bd37062 2067 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
ac66f547
PZ
2068 goto unlock;
2069
2070 __migrate_swap_task(arg->src_task, arg->dst_cpu);
2071 __migrate_swap_task(arg->dst_task, arg->src_cpu);
2072
2073 ret = 0;
2074
2075unlock:
2076 double_rq_unlock(src_rq, dst_rq);
74602315
PZ
2077 raw_spin_unlock(&arg->dst_task->pi_lock);
2078 raw_spin_unlock(&arg->src_task->pi_lock);
ac66f547
PZ
2079
2080 return ret;
2081}
2082
2083/*
2084 * Cross migrate two tasks
2085 */
0ad4e3df
SD
2086int migrate_swap(struct task_struct *cur, struct task_struct *p,
2087 int target_cpu, int curr_cpu)
ac66f547
PZ
2088{
2089 struct migration_swap_arg arg;
2090 int ret = -EINVAL;
2091
ac66f547
PZ
2092 arg = (struct migration_swap_arg){
2093 .src_task = cur,
0ad4e3df 2094 .src_cpu = curr_cpu,
ac66f547 2095 .dst_task = p,
0ad4e3df 2096 .dst_cpu = target_cpu,
ac66f547
PZ
2097 };
2098
2099 if (arg.src_cpu == arg.dst_cpu)
2100 goto out;
2101
6acce3ef
PZ
2102 /*
2103 * These three tests are all lockless; this is OK since all of them
2104 * will be re-checked with proper locks held further down the line.
2105 */
ac66f547
PZ
2106 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
2107 goto out;
2108
3bd37062 2109 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
ac66f547
PZ
2110 goto out;
2111
3bd37062 2112 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
ac66f547
PZ
2113 goto out;
2114
286549dc 2115 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
2116 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
2117
2118out:
ac66f547
PZ
2119 return ret;
2120}
0ad4e3df 2121#endif /* CONFIG_NUMA_BALANCING */
ac66f547 2122
1da177e4
LT
2123/*
2124 * wait_task_inactive - wait for a thread to unschedule.
2125 *
85ba2d86
RM
2126 * If @match_state is nonzero, it's the @p->state value just checked and
2127 * not expected to change. If it changes, i.e. @p might have woken up,
2128 * then return zero. When we succeed in waiting for @p to be off its CPU,
2129 * we return a positive number (its total switch count). If a second call
2130 * a short while later returns the same number, the caller can be sure that
2131 * @p has remained unscheduled the whole time.
2132 *
1da177e4
LT
2133 * The caller must ensure that the task *will* unschedule sometime soon,
2134 * else this function might spin for a *long* time. This function can't
2135 * be called with interrupts off, or it may introduce deadlock with
2136 * smp_call_function() if an IPI is sent by the same process we are
2137 * waiting to become inactive.
2138 */
85ba2d86 2139unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4 2140{
da0c1e65 2141 int running, queued;
eb580751 2142 struct rq_flags rf;
85ba2d86 2143 unsigned long ncsw;
70b97a7f 2144 struct rq *rq;
1da177e4 2145
3a5c359a
AK
2146 for (;;) {
2147 /*
2148 * We do the initial early heuristics without holding
2149 * any task-queue locks at all. We'll only try to get
2150 * the runqueue lock when things look like they will
2151 * work out!
2152 */
2153 rq = task_rq(p);
fa490cfd 2154
3a5c359a
AK
2155 /*
2156 * If the task is actively running on another CPU
2157 * still, just relax and busy-wait without holding
2158 * any locks.
2159 *
2160 * NOTE! Since we don't hold any locks, it's not
2161 * even sure that "rq" stays as the right runqueue!
2162 * But we don't care, since "task_running()" will
2163 * return false if the runqueue has changed and p
2164 * is actually now running somewhere else!
2165 */
85ba2d86
RM
2166 while (task_running(rq, p)) {
2167 if (match_state && unlikely(p->state != match_state))
2168 return 0;
3a5c359a 2169 cpu_relax();
85ba2d86 2170 }
fa490cfd 2171
3a5c359a
AK
2172 /*
2173 * Ok, time to look more closely! We need the rq
2174 * lock now, to be *sure*. If we're wrong, we'll
2175 * just go back and repeat.
2176 */
eb580751 2177 rq = task_rq_lock(p, &rf);
27a9da65 2178 trace_sched_wait_task(p);
3a5c359a 2179 running = task_running(rq, p);
da0c1e65 2180 queued = task_on_rq_queued(p);
85ba2d86 2181 ncsw = 0;
f31e11d8 2182 if (!match_state || p->state == match_state)
93dcf55f 2183 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
eb580751 2184 task_rq_unlock(rq, p, &rf);
fa490cfd 2185
85ba2d86
RM
2186 /*
2187 * If it changed from the expected state, bail out now.
2188 */
2189 if (unlikely(!ncsw))
2190 break;
2191
3a5c359a
AK
2192 /*
2193 * Was it really running after all now that we
2194 * checked with the proper locks actually held?
2195 *
2196 * Oops. Go back and try again..
2197 */
2198 if (unlikely(running)) {
2199 cpu_relax();
2200 continue;
2201 }
fa490cfd 2202
3a5c359a
AK
2203 /*
2204 * It's not enough that it's not actively running,
2205 * it must be off the runqueue _entirely_, and not
2206 * preempted!
2207 *
80dd99b3 2208 * So if it was still runnable (but just not actively
3a5c359a
AK
2209 * running right now), it's preempted, and we should
2210 * yield - it could be a while.
2211 */
da0c1e65 2212 if (unlikely(queued)) {
8b0e1953 2213 ktime_t to = NSEC_PER_SEC / HZ;
8eb90c30
TG
2214
2215 set_current_state(TASK_UNINTERRUPTIBLE);
2216 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
2217 continue;
2218 }
fa490cfd 2219
3a5c359a
AK
2220 /*
2221 * Ahh, all good. It wasn't running, and it wasn't
2222 * runnable, which means that it will never become
2223 * running in the future either. We're all done!
2224 */
2225 break;
2226 }
85ba2d86
RM
2227
2228 return ncsw;
1da177e4
LT
2229}
2230
2231/***
2232 * kick_process - kick a running thread to enter/exit the kernel
2233 * @p: the to-be-kicked thread
2234 *
2235 * Cause a process which is running on another CPU to enter
2236 * kernel-mode, without any delay. (to get signals handled.)
2237 *
25985edc 2238 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
2239 * because all it wants to ensure is that the remote task enters
2240 * the kernel. If the IPI races and the task has been migrated
2241 * to another CPU then no harm is done and the purpose has been
2242 * achieved as well.
2243 */
36c8b586 2244void kick_process(struct task_struct *p)
1da177e4
LT
2245{
2246 int cpu;
2247
2248 preempt_disable();
2249 cpu = task_cpu(p);
2250 if ((cpu != smp_processor_id()) && task_curr(p))
2251 smp_send_reschedule(cpu);
2252 preempt_enable();
2253}
b43e3521 2254EXPORT_SYMBOL_GPL(kick_process);
1da177e4 2255
30da688e 2256/*
3bd37062 2257 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
e9d867a6
PZI
2258 *
2259 * A few notes on cpu_active vs cpu_online:
2260 *
2261 * - cpu_active must be a subset of cpu_online
2262 *
97fb7a0a 2263 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
e9d867a6 2264 * see __set_cpus_allowed_ptr(). At this point the newly online
d1ccc66d 2265 * CPU isn't yet part of the sched domains, and balancing will not
e9d867a6
PZI
2266 * see it.
2267 *
d1ccc66d 2268 * - on CPU-down we clear cpu_active() to mask the sched domains and
e9d867a6 2269 * avoid the load balancer to place new tasks on the to be removed
d1ccc66d 2270 * CPU. Existing tasks will remain running there and will be taken
e9d867a6
PZI
2271 * off.
2272 *
2273 * This means that fallback selection must not select !active CPUs.
2274 * And can assume that any active CPU must be online. Conversely
2275 * select_task_rq() below may allow selection of !active CPUs in order
2276 * to satisfy the above rules.
30da688e 2277 */
5da9a0fb
PZ
2278static int select_fallback_rq(int cpu, struct task_struct *p)
2279{
aa00d89c
TC
2280 int nid = cpu_to_node(cpu);
2281 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
2282 enum { cpuset, possible, fail } state = cpuset;
2283 int dest_cpu;
5da9a0fb 2284
aa00d89c 2285 /*
d1ccc66d
IM
2286 * If the node that the CPU is on has been offlined, cpu_to_node()
2287 * will return -1. There is no CPU on the node, and we should
2288 * select the CPU on the other node.
aa00d89c
TC
2289 */
2290 if (nid != -1) {
2291 nodemask = cpumask_of_node(nid);
2292
2293 /* Look for allowed, online CPU in same node. */
2294 for_each_cpu(dest_cpu, nodemask) {
aa00d89c
TC
2295 if (!cpu_active(dest_cpu))
2296 continue;
3bd37062 2297 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
aa00d89c
TC
2298 return dest_cpu;
2299 }
2baab4e9 2300 }
5da9a0fb 2301
2baab4e9
PZ
2302 for (;;) {
2303 /* Any allowed, online CPU? */
3bd37062 2304 for_each_cpu(dest_cpu, p->cpus_ptr) {
175f0e25 2305 if (!is_cpu_allowed(p, dest_cpu))
2baab4e9 2306 continue;
175f0e25 2307
2baab4e9
PZ
2308 goto out;
2309 }
5da9a0fb 2310
e73e85f0 2311 /* No more Mr. Nice Guy. */
2baab4e9
PZ
2312 switch (state) {
2313 case cpuset:
e73e85f0
ON
2314 if (IS_ENABLED(CONFIG_CPUSETS)) {
2315 cpuset_cpus_allowed_fallback(p);
2316 state = possible;
2317 break;
2318 }
df561f66 2319 fallthrough;
2baab4e9
PZ
2320 case possible:
2321 do_set_cpus_allowed(p, cpu_possible_mask);
2322 state = fail;
2323 break;
2324
2325 case fail:
2326 BUG();
2327 break;
2328 }
2329 }
2330
2331out:
2332 if (state != cpuset) {
2333 /*
2334 * Don't tell them about moving exiting tasks or
2335 * kernel threads (both mm NULL), since they never
2336 * leave kernel.
2337 */
2338 if (p->mm && printk_ratelimit()) {
aac74dc4 2339 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
2340 task_pid_nr(p), p->comm, cpu);
2341 }
5da9a0fb
PZ
2342 }
2343
2344 return dest_cpu;
2345}
2346
e2912009 2347/*
3bd37062 2348 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
e2912009 2349 */
970b13ba 2350static inline
ac66f547 2351int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
970b13ba 2352{
cbce1a68
PZ
2353 lockdep_assert_held(&p->pi_lock);
2354
4b53a341 2355 if (p->nr_cpus_allowed > 1)
6c1d9410 2356 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
e9d867a6 2357 else
3bd37062 2358 cpu = cpumask_any(p->cpus_ptr);
e2912009
PZ
2359
2360 /*
2361 * In order not to call set_task_cpu() on a blocking task we need
3bd37062 2362 * to rely on ttwu() to place the task on a valid ->cpus_ptr
d1ccc66d 2363 * CPU.
e2912009
PZ
2364 *
2365 * Since this is common to all placement strategies, this lives here.
2366 *
2367 * [ this allows ->select_task() to simply return task_cpu(p) and
2368 * not worry about this generic constraint ]
2369 */
7af443ee 2370 if (unlikely(!is_cpu_allowed(p, cpu)))
5da9a0fb 2371 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
2372
2373 return cpu;
970b13ba 2374}
09a40af5 2375
f5832c19
NP
2376void sched_set_stop_task(int cpu, struct task_struct *stop)
2377{
2378 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2379 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2380
2381 if (stop) {
2382 /*
2383 * Make it appear like a SCHED_FIFO task, its something
2384 * userspace knows about and won't get confused about.
2385 *
2386 * Also, it will make PI more or less work without too
2387 * much confusion -- but then, stop work should not
2388 * rely on PI working anyway.
2389 */
2390 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2391
2392 stop->sched_class = &stop_sched_class;
2393 }
2394
2395 cpu_rq(cpu)->stop = stop;
2396
2397 if (old_stop) {
2398 /*
2399 * Reset it back to a normal scheduling class so that
2400 * it can die in pieces.
2401 */
2402 old_stop->sched_class = &rt_sched_class;
2403 }
2404}
2405
25834c73
PZ
2406#else
2407
2408static inline int __set_cpus_allowed_ptr(struct task_struct *p,
2409 const struct cpumask *new_mask, bool check)
2410{
2411 return set_cpus_allowed_ptr(p, new_mask);
2412}
2413
5cc389bc 2414#endif /* CONFIG_SMP */
970b13ba 2415
d7c01d27 2416static void
b84cb5df 2417ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 2418{
4fa8d299 2419 struct rq *rq;
b84cb5df 2420
4fa8d299
JP
2421 if (!schedstat_enabled())
2422 return;
2423
2424 rq = this_rq();
d7c01d27 2425
4fa8d299
JP
2426#ifdef CONFIG_SMP
2427 if (cpu == rq->cpu) {
b85c8b71
PZ
2428 __schedstat_inc(rq->ttwu_local);
2429 __schedstat_inc(p->se.statistics.nr_wakeups_local);
d7c01d27
PZ
2430 } else {
2431 struct sched_domain *sd;
2432
b85c8b71 2433 __schedstat_inc(p->se.statistics.nr_wakeups_remote);
057f3fad 2434 rcu_read_lock();
4fa8d299 2435 for_each_domain(rq->cpu, sd) {
d7c01d27 2436 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
b85c8b71 2437 __schedstat_inc(sd->ttwu_wake_remote);
d7c01d27
PZ
2438 break;
2439 }
2440 }
057f3fad 2441 rcu_read_unlock();
d7c01d27 2442 }
f339b9dc
PZ
2443
2444 if (wake_flags & WF_MIGRATED)
b85c8b71 2445 __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
d7c01d27
PZ
2446#endif /* CONFIG_SMP */
2447
b85c8b71
PZ
2448 __schedstat_inc(rq->ttwu_count);
2449 __schedstat_inc(p->se.statistics.nr_wakeups);
d7c01d27
PZ
2450
2451 if (wake_flags & WF_SYNC)
b85c8b71 2452 __schedstat_inc(p->se.statistics.nr_wakeups_sync);
d7c01d27
PZ
2453}
2454
23f41eeb
PZ
2455/*
2456 * Mark the task runnable and perform wakeup-preemption.
2457 */
e7904a28 2458static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2459 struct rq_flags *rf)
9ed3811a 2460{
9ed3811a 2461 check_preempt_curr(rq, p, wake_flags);
9ed3811a 2462 p->state = TASK_RUNNING;
fbd705a0
PZ
2463 trace_sched_wakeup(p);
2464
9ed3811a 2465#ifdef CONFIG_SMP
4c9a4bc8
PZ
2466 if (p->sched_class->task_woken) {
2467 /*
cbce1a68
PZ
2468 * Our task @p is fully woken up and running; so its safe to
2469 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 2470 */
d8ac8971 2471 rq_unpin_lock(rq, rf);
9ed3811a 2472 p->sched_class->task_woken(rq, p);
d8ac8971 2473 rq_repin_lock(rq, rf);
4c9a4bc8 2474 }
9ed3811a 2475
e69c6341 2476 if (rq->idle_stamp) {
78becc27 2477 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 2478 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 2479
abfafa54
JL
2480 update_avg(&rq->avg_idle, delta);
2481
2482 if (rq->avg_idle > max)
9ed3811a 2483 rq->avg_idle = max;
abfafa54 2484
9ed3811a
TH
2485 rq->idle_stamp = 0;
2486 }
2487#endif
2488}
2489
c05fbafb 2490static void
e7904a28 2491ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
d8ac8971 2492 struct rq_flags *rf)
c05fbafb 2493{
77558e4d 2494 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
b5179ac7 2495
cbce1a68
PZ
2496 lockdep_assert_held(&rq->lock);
2497
c05fbafb
PZ
2498 if (p->sched_contributes_to_load)
2499 rq->nr_uninterruptible--;
b5179ac7 2500
dbfb089d 2501#ifdef CONFIG_SMP
b5179ac7 2502 if (wake_flags & WF_MIGRATED)
59efa0ba 2503 en_flags |= ENQUEUE_MIGRATED;
c05fbafb
PZ
2504#endif
2505
1b174a2c 2506 activate_task(rq, p, en_flags);
d8ac8971 2507 ttwu_do_wakeup(rq, p, wake_flags, rf);
c05fbafb
PZ
2508}
2509
2510/*
58877d34
PZ
2511 * Consider @p being inside a wait loop:
2512 *
2513 * for (;;) {
2514 * set_current_state(TASK_UNINTERRUPTIBLE);
2515 *
2516 * if (CONDITION)
2517 * break;
2518 *
2519 * schedule();
2520 * }
2521 * __set_current_state(TASK_RUNNING);
2522 *
2523 * between set_current_state() and schedule(). In this case @p is still
2524 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
2525 * an atomic manner.
2526 *
2527 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
2528 * then schedule() must still happen and p->state can be changed to
2529 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
2530 * need to do a full wakeup with enqueue.
2531 *
2532 * Returns: %true when the wakeup is done,
2533 * %false otherwise.
c05fbafb 2534 */
58877d34 2535static int ttwu_runnable(struct task_struct *p, int wake_flags)
c05fbafb 2536{
eb580751 2537 struct rq_flags rf;
c05fbafb
PZ
2538 struct rq *rq;
2539 int ret = 0;
2540
eb580751 2541 rq = __task_rq_lock(p, &rf);
da0c1e65 2542 if (task_on_rq_queued(p)) {
1ad4ec0d
FW
2543 /* check_preempt_curr() may use rq clock */
2544 update_rq_clock(rq);
d8ac8971 2545 ttwu_do_wakeup(rq, p, wake_flags, &rf);
c05fbafb
PZ
2546 ret = 1;
2547 }
eb580751 2548 __task_rq_unlock(rq, &rf);
c05fbafb
PZ
2549
2550 return ret;
2551}
2552
317f3941 2553#ifdef CONFIG_SMP
a1488664 2554void sched_ttwu_pending(void *arg)
317f3941 2555{
a1488664 2556 struct llist_node *llist = arg;
317f3941 2557 struct rq *rq = this_rq();
73215849 2558 struct task_struct *p, *t;
d8ac8971 2559 struct rq_flags rf;
317f3941 2560
e3baac47
PZ
2561 if (!llist)
2562 return;
2563
126c2092
PZ
2564 /*
2565 * rq::ttwu_pending racy indication of out-standing wakeups.
2566 * Races such that false-negatives are possible, since they
2567 * are shorter lived that false-positives would be.
2568 */
2569 WRITE_ONCE(rq->ttwu_pending, 0);
2570
8a8c69c3 2571 rq_lock_irqsave(rq, &rf);
77558e4d 2572 update_rq_clock(rq);
317f3941 2573
8c4890d1 2574 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
b6e13e85
PZ
2575 if (WARN_ON_ONCE(p->on_cpu))
2576 smp_cond_load_acquire(&p->on_cpu, !VAL);
2577
2578 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
2579 set_task_cpu(p, cpu_of(rq));
2580
73215849 2581 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
b6e13e85 2582 }
317f3941 2583
8a8c69c3 2584 rq_unlock_irqrestore(rq, &rf);
317f3941
PZ
2585}
2586
b2a02fc4 2587void send_call_function_single_ipi(int cpu)
317f3941 2588{
b2a02fc4 2589 struct rq *rq = cpu_rq(cpu);
ca38062e 2590
b2a02fc4
PZ
2591 if (!set_nr_if_polling(rq->idle))
2592 arch_send_call_function_single_ipi(cpu);
2593 else
2594 trace_sched_wake_idle_without_ipi(cpu);
317f3941
PZ
2595}
2596
2ebb1771
MG
2597/*
2598 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
2599 * necessary. The wakee CPU on receipt of the IPI will queue the task
2600 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
2601 * of the wakeup instead of the waker.
2602 */
2603static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
317f3941 2604{
e3baac47
PZ
2605 struct rq *rq = cpu_rq(cpu);
2606
b7e7ade3
PZ
2607 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
2608
126c2092 2609 WRITE_ONCE(rq->ttwu_pending, 1);
8c4890d1 2610 __smp_call_single_queue(cpu, &p->wake_entry.llist);
317f3941 2611}
d6aa8f85 2612
f6be8af1
CL
2613void wake_up_if_idle(int cpu)
2614{
2615 struct rq *rq = cpu_rq(cpu);
8a8c69c3 2616 struct rq_flags rf;
f6be8af1 2617
fd7de1e8
AL
2618 rcu_read_lock();
2619
2620 if (!is_idle_task(rcu_dereference(rq->curr)))
2621 goto out;
f6be8af1
CL
2622
2623 if (set_nr_if_polling(rq->idle)) {
2624 trace_sched_wake_idle_without_ipi(cpu);
2625 } else {
8a8c69c3 2626 rq_lock_irqsave(rq, &rf);
f6be8af1
CL
2627 if (is_idle_task(rq->curr))
2628 smp_send_reschedule(cpu);
d1ccc66d 2629 /* Else CPU is not idle, do nothing here: */
8a8c69c3 2630 rq_unlock_irqrestore(rq, &rf);
f6be8af1 2631 }
fd7de1e8
AL
2632
2633out:
2634 rcu_read_unlock();
f6be8af1
CL
2635}
2636
39be3501 2637bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623
PZ
2638{
2639 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
2640}
c6e7bd7a 2641
2ebb1771
MG
2642static inline bool ttwu_queue_cond(int cpu, int wake_flags)
2643{
2644 /*
2645 * If the CPU does not share cache, then queue the task on the
2646 * remote rqs wakelist to avoid accessing remote data.
2647 */
2648 if (!cpus_share_cache(smp_processor_id(), cpu))
2649 return true;
2650
2651 /*
2652 * If the task is descheduling and the only running task on the
2653 * CPU then use the wakelist to offload the task activation to
2654 * the soon-to-be-idle CPU as the current CPU is likely busy.
2655 * nr_running is checked to avoid unnecessary task stacking.
2656 */
739f70b4 2657 if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
2ebb1771
MG
2658 return true;
2659
2660 return false;
2661}
2662
2663static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
c6e7bd7a 2664{
2ebb1771 2665 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
b6e13e85
PZ
2666 if (WARN_ON_ONCE(cpu == smp_processor_id()))
2667 return false;
2668
c6e7bd7a 2669 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
2ebb1771 2670 __ttwu_queue_wakelist(p, cpu, wake_flags);
c6e7bd7a
PZ
2671 return true;
2672 }
2673
2674 return false;
2675}
58877d34
PZ
2676
2677#else /* !CONFIG_SMP */
2678
2679static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
2680{
2681 return false;
2682}
2683
d6aa8f85 2684#endif /* CONFIG_SMP */
317f3941 2685
b5179ac7 2686static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
c05fbafb
PZ
2687{
2688 struct rq *rq = cpu_rq(cpu);
d8ac8971 2689 struct rq_flags rf;
c05fbafb 2690
2ebb1771 2691 if (ttwu_queue_wakelist(p, cpu, wake_flags))
317f3941 2692 return;
317f3941 2693
8a8c69c3 2694 rq_lock(rq, &rf);
77558e4d 2695 update_rq_clock(rq);
d8ac8971 2696 ttwu_do_activate(rq, p, wake_flags, &rf);
8a8c69c3 2697 rq_unlock(rq, &rf);
9ed3811a
TH
2698}
2699
8643cda5
PZ
2700/*
2701 * Notes on Program-Order guarantees on SMP systems.
2702 *
2703 * MIGRATION
2704 *
2705 * The basic program-order guarantee on SMP systems is that when a task [t]
d1ccc66d
IM
2706 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
2707 * execution on its new CPU [c1].
8643cda5
PZ
2708 *
2709 * For migration (of runnable tasks) this is provided by the following means:
2710 *
2711 * A) UNLOCK of the rq(c0)->lock scheduling out task t
2712 * B) migration for t is required to synchronize *both* rq(c0)->lock and
2713 * rq(c1)->lock (if not at the same time, then in that order).
2714 * C) LOCK of the rq(c1)->lock scheduling in task
2715 *
7696f991 2716 * Release/acquire chaining guarantees that B happens after A and C after B.
d1ccc66d 2717 * Note: the CPU doing B need not be c0 or c1
8643cda5
PZ
2718 *
2719 * Example:
2720 *
2721 * CPU0 CPU1 CPU2
2722 *
2723 * LOCK rq(0)->lock
2724 * sched-out X
2725 * sched-in Y
2726 * UNLOCK rq(0)->lock
2727 *
2728 * LOCK rq(0)->lock // orders against CPU0
2729 * dequeue X
2730 * UNLOCK rq(0)->lock
2731 *
2732 * LOCK rq(1)->lock
2733 * enqueue X
2734 * UNLOCK rq(1)->lock
2735 *
2736 * LOCK rq(1)->lock // orders against CPU2
2737 * sched-out Z
2738 * sched-in X
2739 * UNLOCK rq(1)->lock
2740 *
2741 *
2742 * BLOCKING -- aka. SLEEP + WAKEUP
2743 *
2744 * For blocking we (obviously) need to provide the same guarantee as for
2745 * migration. However the means are completely different as there is no lock
2746 * chain to provide order. Instead we do:
2747 *
58877d34
PZ
2748 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
2749 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
8643cda5
PZ
2750 *
2751 * Example:
2752 *
2753 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
2754 *
2755 * LOCK rq(0)->lock LOCK X->pi_lock
2756 * dequeue X
2757 * sched-out X
2758 * smp_store_release(X->on_cpu, 0);
2759 *
1f03e8d2 2760 * smp_cond_load_acquire(&X->on_cpu, !VAL);
8643cda5
PZ
2761 * X->state = WAKING
2762 * set_task_cpu(X,2)
2763 *
2764 * LOCK rq(2)->lock
2765 * enqueue X
2766 * X->state = RUNNING
2767 * UNLOCK rq(2)->lock
2768 *
2769 * LOCK rq(2)->lock // orders against CPU1
2770 * sched-out Z
2771 * sched-in X
2772 * UNLOCK rq(2)->lock
2773 *
2774 * UNLOCK X->pi_lock
2775 * UNLOCK rq(0)->lock
2776 *
2777 *
7696f991
AP
2778 * However, for wakeups there is a second guarantee we must provide, namely we
2779 * must ensure that CONDITION=1 done by the caller can not be reordered with
2780 * accesses to the task state; see try_to_wake_up() and set_current_state().
8643cda5
PZ
2781 */
2782
9ed3811a 2783/**
1da177e4 2784 * try_to_wake_up - wake up a thread
9ed3811a 2785 * @p: the thread to be awakened
1da177e4 2786 * @state: the mask of task states that can be woken
9ed3811a 2787 * @wake_flags: wake modifier flags (WF_*)
1da177e4 2788 *
58877d34
PZ
2789 * Conceptually does:
2790 *
2791 * If (@state & @p->state) @p->state = TASK_RUNNING.
1da177e4 2792 *
a2250238
PZ
2793 * If the task was not queued/runnable, also place it back on a runqueue.
2794 *
58877d34
PZ
2795 * This function is atomic against schedule() which would dequeue the task.
2796 *
2797 * It issues a full memory barrier before accessing @p->state, see the comment
2798 * with set_current_state().
a2250238 2799 *
58877d34 2800 * Uses p->pi_lock to serialize against concurrent wake-ups.
a2250238 2801 *
58877d34
PZ
2802 * Relies on p->pi_lock stabilizing:
2803 * - p->sched_class
2804 * - p->cpus_ptr
2805 * - p->sched_task_group
2806 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
2807 *
2808 * Tries really hard to only take one task_rq(p)->lock for performance.
2809 * Takes rq->lock in:
2810 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
2811 * - ttwu_queue() -- new rq, for enqueue of the task;
2812 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
2813 *
2814 * As a consequence we race really badly with just about everything. See the
2815 * many memory barriers and their comments for details.
7696f991 2816 *
a2250238
PZ
2817 * Return: %true if @p->state changes (an actual wakeup was done),
2818 * %false otherwise.
1da177e4 2819 */
e4a52bcb
PZ
2820static int
2821try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 2822{
1da177e4 2823 unsigned long flags;
c05fbafb 2824 int cpu, success = 0;
2398f2c6 2825
e3d85487 2826 preempt_disable();
aacedf26
PZ
2827 if (p == current) {
2828 /*
2829 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
2830 * == smp_processor_id()'. Together this means we can special
58877d34 2831 * case the whole 'p->on_rq && ttwu_runnable()' case below
aacedf26
PZ
2832 * without taking any locks.
2833 *
2834 * In particular:
2835 * - we rely on Program-Order guarantees for all the ordering,
2836 * - we're serialized against set_special_state() by virtue of
2837 * it disabling IRQs (this allows not taking ->pi_lock).
2838 */
2839 if (!(p->state & state))
e3d85487 2840 goto out;
aacedf26
PZ
2841
2842 success = 1;
aacedf26
PZ
2843 trace_sched_waking(p);
2844 p->state = TASK_RUNNING;
2845 trace_sched_wakeup(p);
2846 goto out;
2847 }
2848
e0acd0a6
ON
2849 /*
2850 * If we are going to wake up a thread waiting for CONDITION we
2851 * need to ensure that CONDITION=1 done by the caller can not be
58877d34
PZ
2852 * reordered with p->state check below. This pairs with smp_store_mb()
2853 * in set_current_state() that the waiting thread does.
e0acd0a6 2854 */
013fdb80 2855 raw_spin_lock_irqsave(&p->pi_lock, flags);
d89e588c 2856 smp_mb__after_spinlock();
e9c84311 2857 if (!(p->state & state))
aacedf26 2858 goto unlock;
1da177e4 2859
fbd705a0
PZ
2860 trace_sched_waking(p);
2861
d1ccc66d
IM
2862 /* We're going to change ->state: */
2863 success = 1;
1da177e4 2864
135e8c92
BS
2865 /*
2866 * Ensure we load p->on_rq _after_ p->state, otherwise it would
2867 * be possible to, falsely, observe p->on_rq == 0 and get stuck
2868 * in smp_cond_load_acquire() below.
2869 *
3d85b270
AP
2870 * sched_ttwu_pending() try_to_wake_up()
2871 * STORE p->on_rq = 1 LOAD p->state
2872 * UNLOCK rq->lock
2873 *
2874 * __schedule() (switch to task 'p')
2875 * LOCK rq->lock smp_rmb();
2876 * smp_mb__after_spinlock();
2877 * UNLOCK rq->lock
135e8c92
BS
2878 *
2879 * [task p]
3d85b270 2880 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
135e8c92 2881 *
3d85b270
AP
2882 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
2883 * __schedule(). See the comment for smp_mb__after_spinlock().
2beaf328
PM
2884 *
2885 * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
135e8c92
BS
2886 */
2887 smp_rmb();
58877d34 2888 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
aacedf26 2889 goto unlock;
1da177e4 2890
c6e7bd7a
PZ
2891 if (p->in_iowait) {
2892 delayacct_blkio_end(p);
2893 atomic_dec(&task_rq(p)->nr_iowait);
2894 }
2895
1da177e4 2896#ifdef CONFIG_SMP
ecf7d01c
PZ
2897 /*
2898 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
2899 * possible to, falsely, observe p->on_cpu == 0.
2900 *
2901 * One must be running (->on_cpu == 1) in order to remove oneself
2902 * from the runqueue.
2903 *
3d85b270
AP
2904 * __schedule() (switch to task 'p') try_to_wake_up()
2905 * STORE p->on_cpu = 1 LOAD p->on_rq
2906 * UNLOCK rq->lock
2907 *
2908 * __schedule() (put 'p' to sleep)
2909 * LOCK rq->lock smp_rmb();
2910 * smp_mb__after_spinlock();
2911 * STORE p->on_rq = 0 LOAD p->on_cpu
ecf7d01c 2912 *
3d85b270
AP
2913 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
2914 * __schedule(). See the comment for smp_mb__after_spinlock().
dbfb089d
PZ
2915 *
2916 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
2917 * schedule()'s deactivate_task() has 'happened' and p will no longer
2918 * care about it's own p->state. See the comment in __schedule().
ecf7d01c 2919 */
dbfb089d
PZ
2920 smp_acquire__after_ctrl_dep();
2921
2922 /*
2923 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
2924 * == 0), which means we need to do an enqueue, change p->state to
2925 * TASK_WAKING such that we can unlock p->pi_lock before doing the
2926 * enqueue, such as ttwu_queue_wakelist().
2927 */
2928 p->state = TASK_WAKING;
ecf7d01c 2929
c6e7bd7a
PZ
2930 /*
2931 * If the owning (remote) CPU is still in the middle of schedule() with
2932 * this task as prev, considering queueing p on the remote CPUs wake_list
2933 * which potentially sends an IPI instead of spinning on p->on_cpu to
2934 * let the waker make forward progress. This is safe because IRQs are
2935 * disabled and the IPI will deliver after on_cpu is cleared.
b6e13e85
PZ
2936 *
2937 * Ensure we load task_cpu(p) after p->on_cpu:
2938 *
2939 * set_task_cpu(p, cpu);
2940 * STORE p->cpu = @cpu
2941 * __schedule() (switch to task 'p')
2942 * LOCK rq->lock
2943 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
2944 * STORE p->on_cpu = 1 LOAD p->cpu
2945 *
2946 * to ensure we observe the correct CPU on which the task is currently
2947 * scheduling.
c6e7bd7a 2948 */
b6e13e85 2949 if (smp_load_acquire(&p->on_cpu) &&
739f70b4 2950 ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
c6e7bd7a
PZ
2951 goto unlock;
2952
e9c84311 2953 /*
d1ccc66d 2954 * If the owning (remote) CPU is still in the middle of schedule() with
c05fbafb 2955 * this task as prev, wait until its done referencing the task.
b75a2253 2956 *
31cb1bc0 2957 * Pairs with the smp_store_release() in finish_task().
b75a2253
PZ
2958 *
2959 * This ensures that tasks getting woken will be fully ordered against
2960 * their previous state and preserve Program Order.
0970d299 2961 */
1f03e8d2 2962 smp_cond_load_acquire(&p->on_cpu, !VAL);
1da177e4 2963
ac66f547 2964 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
f339b9dc
PZ
2965 if (task_cpu(p) != cpu) {
2966 wake_flags |= WF_MIGRATED;
eb414681 2967 psi_ttwu_dequeue(p);
e4a52bcb 2968 set_task_cpu(p, cpu);
f339b9dc 2969 }
b6e13e85
PZ
2970#else
2971 cpu = task_cpu(p);
1da177e4 2972#endif /* CONFIG_SMP */
1da177e4 2973
b5179ac7 2974 ttwu_queue(p, cpu, wake_flags);
aacedf26 2975unlock:
013fdb80 2976 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
aacedf26
PZ
2977out:
2978 if (success)
b6e13e85 2979 ttwu_stat(p, task_cpu(p), wake_flags);
e3d85487 2980 preempt_enable();
1da177e4
LT
2981
2982 return success;
2983}
2984
2beaf328
PM
2985/**
2986 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
2987 * @p: Process for which the function is to be invoked.
2988 * @func: Function to invoke.
2989 * @arg: Argument to function.
2990 *
2991 * If the specified task can be quickly locked into a definite state
2992 * (either sleeping or on a given runqueue), arrange to keep it in that
2993 * state while invoking @func(@arg). This function can use ->on_rq and
2994 * task_curr() to work out what the state is, if required. Given that
2995 * @func can be invoked with a runqueue lock held, it had better be quite
2996 * lightweight.
2997 *
2998 * Returns:
2999 * @false if the task slipped out from under the locks.
3000 * @true if the task was locked onto a runqueue or is sleeping.
3001 * However, @func can override this by returning @false.
3002 */
3003bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
3004{
3005 bool ret = false;
3006 struct rq_flags rf;
3007 struct rq *rq;
3008
3009 lockdep_assert_irqs_enabled();
3010 raw_spin_lock_irq(&p->pi_lock);
3011 if (p->on_rq) {
3012 rq = __task_rq_lock(p, &rf);
3013 if (task_rq(p) == rq)
3014 ret = func(p, arg);
3015 rq_unlock(rq, &rf);
3016 } else {
3017 switch (p->state) {
3018 case TASK_RUNNING:
3019 case TASK_WAKING:
3020 break;
3021 default:
3022 smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
3023 if (!p->on_rq)
3024 ret = func(p, arg);
3025 }
3026 }
3027 raw_spin_unlock_irq(&p->pi_lock);
3028 return ret;
3029}
3030
50fa610a
DH
3031/**
3032 * wake_up_process - Wake up a specific process
3033 * @p: The process to be woken up.
3034 *
3035 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
3036 * processes.
3037 *
3038 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a 3039 *
7696f991 3040 * This function executes a full memory barrier before accessing the task state.
50fa610a 3041 */
7ad5b3a5 3042int wake_up_process(struct task_struct *p)
1da177e4 3043{
9067ac85 3044 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 3045}
1da177e4
LT
3046EXPORT_SYMBOL(wake_up_process);
3047
7ad5b3a5 3048int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
3049{
3050 return try_to_wake_up(p, state, 0);
3051}
3052
1da177e4
LT
3053/*
3054 * Perform scheduler related setup for a newly forked process p.
3055 * p is forked by current.
dd41f596
IM
3056 *
3057 * __sched_fork() is basic setup used by init_idle() too:
3058 */
5e1576ed 3059static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 3060{
fd2f4419
PZ
3061 p->on_rq = 0;
3062
3063 p->se.on_rq = 0;
dd41f596
IM
3064 p->se.exec_start = 0;
3065 p->se.sum_exec_runtime = 0;
f6cf891c 3066 p->se.prev_sum_exec_runtime = 0;
6c594c21 3067 p->se.nr_migrations = 0;
da7a735e 3068 p->se.vruntime = 0;
fd2f4419 3069 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d 3070
ad936d86
BP
3071#ifdef CONFIG_FAIR_GROUP_SCHED
3072 p->se.cfs_rq = NULL;
3073#endif
3074
6cfb0d5d 3075#ifdef CONFIG_SCHEDSTATS
cb251765 3076 /* Even if schedstat is disabled, there should not be garbage */
41acab88 3077 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 3078#endif
476d139c 3079
aab03e05 3080 RB_CLEAR_NODE(&p->dl.rb_node);
40767b0d 3081 init_dl_task_timer(&p->dl);
209a0cbd 3082 init_dl_inactive_task_timer(&p->dl);
a5e7be3b 3083 __dl_clear_params(p);
aab03e05 3084
fa717060 3085 INIT_LIST_HEAD(&p->rt.run_list);
ff77e468
PZ
3086 p->rt.timeout = 0;
3087 p->rt.time_slice = sched_rr_timeslice;
3088 p->rt.on_rq = 0;
3089 p->rt.on_list = 0;
476d139c 3090
e107be36
AK
3091#ifdef CONFIG_PREEMPT_NOTIFIERS
3092 INIT_HLIST_HEAD(&p->preempt_notifiers);
3093#endif
cbee9f88 3094
5e1f0f09
MG
3095#ifdef CONFIG_COMPACTION
3096 p->capture_control = NULL;
3097#endif
13784475 3098 init_numa_balancing(clone_flags, p);
a1488664 3099#ifdef CONFIG_SMP
8c4890d1 3100 p->wake_entry.u_flags = CSD_TYPE_TTWU;
a1488664 3101#endif
dd41f596
IM
3102}
3103
2a595721
SD
3104DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
3105
1a687c2e 3106#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 3107
1a687c2e
MG
3108void set_numabalancing_state(bool enabled)
3109{
3110 if (enabled)
2a595721 3111 static_branch_enable(&sched_numa_balancing);
1a687c2e 3112 else
2a595721 3113 static_branch_disable(&sched_numa_balancing);
1a687c2e 3114}
54a43d54
AK
3115
3116#ifdef CONFIG_PROC_SYSCTL
3117int sysctl_numa_balancing(struct ctl_table *table, int write,
32927393 3118 void *buffer, size_t *lenp, loff_t *ppos)
54a43d54
AK
3119{
3120 struct ctl_table t;
3121 int err;
2a595721 3122 int state = static_branch_likely(&sched_numa_balancing);
54a43d54
AK
3123
3124 if (write && !capable(CAP_SYS_ADMIN))
3125 return -EPERM;
3126
3127 t = *table;
3128 t.data = &state;
3129 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3130 if (err < 0)
3131 return err;
3132 if (write)
3133 set_numabalancing_state(state);
3134 return err;
3135}
3136#endif
3137#endif
dd41f596 3138
4698f88c
JP
3139#ifdef CONFIG_SCHEDSTATS
3140
cb251765 3141DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4698f88c 3142static bool __initdata __sched_schedstats = false;
cb251765 3143
cb251765
MG
3144static void set_schedstats(bool enabled)
3145{
3146 if (enabled)
3147 static_branch_enable(&sched_schedstats);
3148 else
3149 static_branch_disable(&sched_schedstats);
3150}
3151
3152void force_schedstat_enabled(void)
3153{
3154 if (!schedstat_enabled()) {
3155 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
3156 static_branch_enable(&sched_schedstats);
3157 }
3158}
3159
3160static int __init setup_schedstats(char *str)
3161{
3162 int ret = 0;
3163 if (!str)
3164 goto out;
3165
4698f88c
JP
3166 /*
3167 * This code is called before jump labels have been set up, so we can't
3168 * change the static branch directly just yet. Instead set a temporary
3169 * variable so init_schedstats() can do it later.
3170 */
cb251765 3171 if (!strcmp(str, "enable")) {
4698f88c 3172 __sched_schedstats = true;
cb251765
MG
3173 ret = 1;
3174 } else if (!strcmp(str, "disable")) {
4698f88c 3175 __sched_schedstats = false;
cb251765
MG
3176 ret = 1;
3177 }
3178out:
3179 if (!ret)
3180 pr_warn("Unable to parse schedstats=\n");
3181
3182 return ret;
3183}
3184__setup("schedstats=", setup_schedstats);
3185
4698f88c
JP
3186static void __init init_schedstats(void)
3187{
3188 set_schedstats(__sched_schedstats);
3189}
3190
cb251765 3191#ifdef CONFIG_PROC_SYSCTL
32927393
CH
3192int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
3193 size_t *lenp, loff_t *ppos)
cb251765
MG
3194{
3195 struct ctl_table t;
3196 int err;
3197 int state = static_branch_likely(&sched_schedstats);
3198
3199 if (write && !capable(CAP_SYS_ADMIN))
3200 return -EPERM;
3201
3202 t = *table;
3203 t.data = &state;
3204 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3205 if (err < 0)
3206 return err;
3207 if (write)
3208 set_schedstats(state);
3209 return err;
3210}
4698f88c
JP
3211#endif /* CONFIG_PROC_SYSCTL */
3212#else /* !CONFIG_SCHEDSTATS */
3213static inline void init_schedstats(void) {}
3214#endif /* CONFIG_SCHEDSTATS */
dd41f596
IM
3215
3216/*
3217 * fork()/clone()-time setup:
3218 */
aab03e05 3219int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 3220{
0122ec5b 3221 unsigned long flags;
dd41f596 3222
5e1576ed 3223 __sched_fork(clone_flags, p);
06b83b5f 3224 /*
7dc603c9 3225 * We mark the process as NEW here. This guarantees that
06b83b5f
PZ
3226 * nobody will actually run it, and a signal or other external
3227 * event cannot wake it up and insert it on the runqueue either.
3228 */
7dc603c9 3229 p->state = TASK_NEW;
dd41f596 3230
c350a04e
MG
3231 /*
3232 * Make sure we do not leak PI boosting priority to the child.
3233 */
3234 p->prio = current->normal_prio;
3235
e8f14172
PB
3236 uclamp_fork(p);
3237
b9dc29e7
MG
3238 /*
3239 * Revert to default priority/policy on fork if requested.
3240 */
3241 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 3242 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 3243 p->policy = SCHED_NORMAL;
6c697bdf 3244 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
3245 p->rt_priority = 0;
3246 } else if (PRIO_TO_NICE(p->static_prio) < 0)
3247 p->static_prio = NICE_TO_PRIO(0);
3248
3249 p->prio = p->normal_prio = __normal_prio(p);
9059393e 3250 set_load_weight(p, false);
6c697bdf 3251
b9dc29e7
MG
3252 /*
3253 * We don't need the reset flag anymore after the fork. It has
3254 * fulfilled its duty:
3255 */
3256 p->sched_reset_on_fork = 0;
3257 }
ca94c442 3258
af0fffd9 3259 if (dl_prio(p->prio))
aab03e05 3260 return -EAGAIN;
af0fffd9 3261 else if (rt_prio(p->prio))
aab03e05 3262 p->sched_class = &rt_sched_class;
af0fffd9 3263 else
2ddbf952 3264 p->sched_class = &fair_sched_class;
b29739f9 3265
7dc603c9 3266 init_entity_runnable_average(&p->se);
cd29fe6f 3267
86951599
PZ
3268 /*
3269 * The child is not yet in the pid-hash so no cgroup attach races,
3270 * and the cgroup is pinned to this child due to cgroup_fork()
3271 * is ran before sched_fork().
3272 *
3273 * Silence PROVE_RCU.
3274 */
0122ec5b 3275 raw_spin_lock_irqsave(&p->pi_lock, flags);
ce3614da 3276 rseq_migrate(p);
e210bffd 3277 /*
d1ccc66d 3278 * We're setting the CPU for the first time, we don't migrate,
e210bffd
PZ
3279 * so use __set_task_cpu().
3280 */
af0fffd9 3281 __set_task_cpu(p, smp_processor_id());
e210bffd
PZ
3282 if (p->sched_class->task_fork)
3283 p->sched_class->task_fork(p);
0122ec5b 3284 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 3285
f6db8347 3286#ifdef CONFIG_SCHED_INFO
dd41f596 3287 if (likely(sched_info_on()))
52f17b6c 3288 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 3289#endif
3ca7a440
PZ
3290#if defined(CONFIG_SMP)
3291 p->on_cpu = 0;
4866cde0 3292#endif
01028747 3293 init_task_preempt_count(p);
806c09a7 3294#ifdef CONFIG_SMP
917b627d 3295 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 3296 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 3297#endif
aab03e05 3298 return 0;
1da177e4
LT
3299}
3300
13685c4a
QY
3301void sched_post_fork(struct task_struct *p)
3302{
3303 uclamp_post_fork(p);
3304}
3305
332ac17e
DF
3306unsigned long to_ratio(u64 period, u64 runtime)
3307{
3308 if (runtime == RUNTIME_INF)
c52f14d3 3309 return BW_UNIT;
332ac17e
DF
3310
3311 /*
3312 * Doing this here saves a lot of checks in all
3313 * the calling paths, and returning zero seems
3314 * safe for them anyway.
3315 */
3316 if (period == 0)
3317 return 0;
3318
c52f14d3 3319 return div64_u64(runtime << BW_SHIFT, period);
332ac17e
DF
3320}
3321
1da177e4
LT
3322/*
3323 * wake_up_new_task - wake up a newly created task for the first time.
3324 *
3325 * This function will do some initial scheduler statistics housekeeping
3326 * that must be done for every newly created context, then puts the task
3327 * on the runqueue and wakes it.
3328 */
3e51e3ed 3329void wake_up_new_task(struct task_struct *p)
1da177e4 3330{
eb580751 3331 struct rq_flags rf;
dd41f596 3332 struct rq *rq;
fabf318e 3333
eb580751 3334 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
7dc603c9 3335 p->state = TASK_RUNNING;
fabf318e
PZ
3336#ifdef CONFIG_SMP
3337 /*
3338 * Fork balancing, do it here and not earlier because:
3bd37062 3339 * - cpus_ptr can change in the fork path
d1ccc66d 3340 * - any previously selected CPU might disappear through hotplug
e210bffd
PZ
3341 *
3342 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
3343 * as we're not fully set-up yet.
fabf318e 3344 */
32e839dd 3345 p->recent_used_cpu = task_cpu(p);
ce3614da 3346 rseq_migrate(p);
e210bffd 3347 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
0017d735 3348#endif
b7fa30c9 3349 rq = __task_rq_lock(p, &rf);
4126bad6 3350 update_rq_clock(rq);
d0fe0b9c 3351 post_init_entity_util_avg(p);
0017d735 3352
7a57f32a 3353 activate_task(rq, p, ENQUEUE_NOCLOCK);
fbd705a0 3354 trace_sched_wakeup_new(p);
a7558e01 3355 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 3356#ifdef CONFIG_SMP
0aaafaab
PZ
3357 if (p->sched_class->task_woken) {
3358 /*
3359 * Nothing relies on rq->lock after this, so its fine to
3360 * drop it.
3361 */
d8ac8971 3362 rq_unpin_lock(rq, &rf);
efbbd05a 3363 p->sched_class->task_woken(rq, p);
d8ac8971 3364 rq_repin_lock(rq, &rf);
0aaafaab 3365 }
9a897c5a 3366#endif
eb580751 3367 task_rq_unlock(rq, p, &rf);
1da177e4
LT
3368}
3369
e107be36
AK
3370#ifdef CONFIG_PREEMPT_NOTIFIERS
3371
b7203428 3372static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
1cde2930 3373
2ecd9d29
PZ
3374void preempt_notifier_inc(void)
3375{
b7203428 3376 static_branch_inc(&preempt_notifier_key);
2ecd9d29
PZ
3377}
3378EXPORT_SYMBOL_GPL(preempt_notifier_inc);
3379
3380void preempt_notifier_dec(void)
3381{
b7203428 3382 static_branch_dec(&preempt_notifier_key);
2ecd9d29
PZ
3383}
3384EXPORT_SYMBOL_GPL(preempt_notifier_dec);
3385
e107be36 3386/**
80dd99b3 3387 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 3388 * @notifier: notifier struct to register
e107be36
AK
3389 */
3390void preempt_notifier_register(struct preempt_notifier *notifier)
3391{
b7203428 3392 if (!static_branch_unlikely(&preempt_notifier_key))
2ecd9d29
PZ
3393 WARN(1, "registering preempt_notifier while notifiers disabled\n");
3394
e107be36
AK
3395 hlist_add_head(&notifier->link, &current->preempt_notifiers);
3396}
3397EXPORT_SYMBOL_GPL(preempt_notifier_register);
3398
3399/**
3400 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 3401 * @notifier: notifier struct to unregister
e107be36 3402 *
d84525a8 3403 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
3404 */
3405void preempt_notifier_unregister(struct preempt_notifier *notifier)
3406{
3407 hlist_del(&notifier->link);
3408}
3409EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
3410
1cde2930 3411static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3412{
3413 struct preempt_notifier *notifier;
e107be36 3414
b67bfe0d 3415 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3416 notifier->ops->sched_in(notifier, raw_smp_processor_id());
3417}
3418
1cde2930
PZ
3419static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3420{
b7203428 3421 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3422 __fire_sched_in_preempt_notifiers(curr);
3423}
3424
e107be36 3425static void
1cde2930
PZ
3426__fire_sched_out_preempt_notifiers(struct task_struct *curr,
3427 struct task_struct *next)
e107be36
AK
3428{
3429 struct preempt_notifier *notifier;
e107be36 3430
b67bfe0d 3431 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
3432 notifier->ops->sched_out(notifier, next);
3433}
3434
1cde2930
PZ
3435static __always_inline void
3436fire_sched_out_preempt_notifiers(struct task_struct *curr,
3437 struct task_struct *next)
3438{
b7203428 3439 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
3440 __fire_sched_out_preempt_notifiers(curr, next);
3441}
3442
6d6bc0ad 3443#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 3444
1cde2930 3445static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
3446{
3447}
3448
1cde2930 3449static inline void
e107be36
AK
3450fire_sched_out_preempt_notifiers(struct task_struct *curr,
3451 struct task_struct *next)
3452{
3453}
3454
6d6bc0ad 3455#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 3456
31cb1bc0 3457static inline void prepare_task(struct task_struct *next)
3458{
3459#ifdef CONFIG_SMP
3460 /*
3461 * Claim the task as running, we do this before switching to it
3462 * such that any running task will have this set.
58877d34
PZ
3463 *
3464 * See the ttwu() WF_ON_CPU case and its ordering comment.
31cb1bc0 3465 */
58877d34 3466 WRITE_ONCE(next->on_cpu, 1);
31cb1bc0 3467#endif
3468}
3469
3470static inline void finish_task(struct task_struct *prev)
3471{
3472#ifdef CONFIG_SMP
3473 /*
58877d34
PZ
3474 * This must be the very last reference to @prev from this CPU. After
3475 * p->on_cpu is cleared, the task can be moved to a different CPU. We
3476 * must ensure this doesn't happen until the switch is completely
31cb1bc0 3477 * finished.
3478 *
3479 * In particular, the load of prev->state in finish_task_switch() must
3480 * happen before this.
3481 *
3482 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3483 */
3484 smp_store_release(&prev->on_cpu, 0);
3485#endif
3486}
3487
565790d2
PZ
3488#ifdef CONFIG_SMP
3489
3490static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
3491{
3492 void (*func)(struct rq *rq);
3493 struct callback_head *next;
3494
3495 lockdep_assert_held(&rq->lock);
3496
3497 while (head) {
3498 func = (void (*)(struct rq *))head->func;
3499 next = head->next;
3500 head->next = NULL;
3501 head = next;
3502
3503 func(rq);
3504 }
3505}
3506
3507static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
3508{
3509 struct callback_head *head = rq->balance_callback;
3510
3511 lockdep_assert_held(&rq->lock);
2558aacf 3512 if (head) {
565790d2 3513 rq->balance_callback = NULL;
2558aacf
PZ
3514 rq->balance_flags &= ~BALANCE_WORK;
3515 }
565790d2
PZ
3516
3517 return head;
3518}
3519
3520static void __balance_callbacks(struct rq *rq)
3521{
3522 do_balance_callbacks(rq, splice_balance_callbacks(rq));
3523}
3524
3525static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
3526{
3527 unsigned long flags;
3528
3529 if (unlikely(head)) {
3530 raw_spin_lock_irqsave(&rq->lock, flags);
3531 do_balance_callbacks(rq, head);
3532 raw_spin_unlock_irqrestore(&rq->lock, flags);
3533 }
3534}
3535
2558aacf
PZ
3536static void balance_push(struct rq *rq);
3537
3538static inline void balance_switch(struct rq *rq)
3539{
3540 if (likely(!rq->balance_flags))
3541 return;
3542
3543 if (rq->balance_flags & BALANCE_PUSH) {
3544 balance_push(rq);
3545 return;
3546 }
3547
3548 __balance_callbacks(rq);
3549}
3550
565790d2
PZ
3551#else
3552
3553static inline void __balance_callbacks(struct rq *rq)
3554{
3555}
3556
3557static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
3558{
3559 return NULL;
3560}
3561
3562static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
3563{
3564}
3565
2558aacf
PZ
3566static inline void balance_switch(struct rq *rq)
3567{
3568}
3569
565790d2
PZ
3570#endif
3571
269d5992
PZ
3572static inline void
3573prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
31cb1bc0 3574{
269d5992
PZ
3575 /*
3576 * Since the runqueue lock will be released by the next
3577 * task (which is an invalid locking op but in the case
3578 * of the scheduler it's an obvious special-case), so we
3579 * do an early lockdep release here:
3580 */
3581 rq_unpin_lock(rq, rf);
5facae4f 3582 spin_release(&rq->lock.dep_map, _THIS_IP_);
31cb1bc0 3583#ifdef CONFIG_DEBUG_SPINLOCK
3584 /* this is a valid case when another task releases the spinlock */
269d5992 3585 rq->lock.owner = next;
31cb1bc0 3586#endif
269d5992
PZ
3587}
3588
3589static inline void finish_lock_switch(struct rq *rq)
3590{
31cb1bc0 3591 /*
3592 * If we are tracking spinlock dependencies then we have to
3593 * fix up the runqueue lock - which gets 'carried over' from
3594 * prev into current:
3595 */
3596 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
2558aacf 3597 balance_switch(rq);
31cb1bc0 3598 raw_spin_unlock_irq(&rq->lock);
3599}
3600
325ea10c
IM
3601/*
3602 * NOP if the arch has not defined these:
3603 */
3604
3605#ifndef prepare_arch_switch
3606# define prepare_arch_switch(next) do { } while (0)
3607#endif
3608
3609#ifndef finish_arch_post_lock_switch
3610# define finish_arch_post_lock_switch() do { } while (0)
3611#endif
3612
4866cde0
NP
3613/**
3614 * prepare_task_switch - prepare to switch tasks
3615 * @rq: the runqueue preparing to switch
421cee29 3616 * @prev: the current task that is being switched out
4866cde0
NP
3617 * @next: the task we are going to switch to.
3618 *
3619 * This is called with the rq lock held and interrupts off. It must
3620 * be paired with a subsequent finish_task_switch after the context
3621 * switch.
3622 *
3623 * prepare_task_switch sets up locking and calls architecture specific
3624 * hooks.
3625 */
e107be36
AK
3626static inline void
3627prepare_task_switch(struct rq *rq, struct task_struct *prev,
3628 struct task_struct *next)
4866cde0 3629{
0ed557aa 3630 kcov_prepare_switch(prev);
43148951 3631 sched_info_switch(rq, prev, next);
fe4b04fa 3632 perf_event_task_sched_out(prev, next);
d7822b1e 3633 rseq_preempt(prev);
e107be36 3634 fire_sched_out_preempt_notifiers(prev, next);
31cb1bc0 3635 prepare_task(next);
4866cde0
NP
3636 prepare_arch_switch(next);
3637}
3638
1da177e4
LT
3639/**
3640 * finish_task_switch - clean up after a task-switch
3641 * @prev: the thread we just switched away from.
3642 *
4866cde0
NP
3643 * finish_task_switch must be called after the context switch, paired
3644 * with a prepare_task_switch call before the context switch.
3645 * finish_task_switch will reconcile locking set up by prepare_task_switch,
3646 * and do any other architecture-specific cleanup actions.
1da177e4
LT
3647 *
3648 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 3649 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
3650 * with the lock held can cause deadlocks; see schedule() for
3651 * details.)
dfa50b60
ON
3652 *
3653 * The context switch have flipped the stack from under us and restored the
3654 * local variables which were saved when this task called schedule() in the
3655 * past. prev == current is still correct but we need to recalculate this_rq
3656 * because prev may have moved to another CPU.
1da177e4 3657 */
dfa50b60 3658static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
3659 __releases(rq->lock)
3660{
dfa50b60 3661 struct rq *rq = this_rq();
1da177e4 3662 struct mm_struct *mm = rq->prev_mm;
55a101f8 3663 long prev_state;
1da177e4 3664
609ca066
PZ
3665 /*
3666 * The previous task will have left us with a preempt_count of 2
3667 * because it left us after:
3668 *
3669 * schedule()
3670 * preempt_disable(); // 1
3671 * __schedule()
3672 * raw_spin_lock_irq(&rq->lock) // 2
3673 *
3674 * Also, see FORK_PREEMPT_COUNT.
3675 */
e2bf1c4b
PZ
3676 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
3677 "corrupted preempt_count: %s/%d/0x%x\n",
3678 current->comm, current->pid, preempt_count()))
3679 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 3680
1da177e4
LT
3681 rq->prev_mm = NULL;
3682
3683 /*
3684 * A task struct has one reference for the use as "current".
c394cc9f 3685 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
3686 * schedule one last time. The schedule call will never return, and
3687 * the scheduled task must drop that reference.
95913d97
PZ
3688 *
3689 * We must observe prev->state before clearing prev->on_cpu (in
31cb1bc0 3690 * finish_task), otherwise a concurrent wakeup can get prev
95913d97
PZ
3691 * running on another CPU and we could rave with its RUNNING -> DEAD
3692 * transition, resulting in a double drop.
1da177e4 3693 */
55a101f8 3694 prev_state = prev->state;
bf9fae9f 3695 vtime_task_switch(prev);
a8d757ef 3696 perf_event_task_sched_in(prev, current);
31cb1bc0 3697 finish_task(prev);
3698 finish_lock_switch(rq);
01f23e16 3699 finish_arch_post_lock_switch();
0ed557aa 3700 kcov_finish_switch(current);
e8fa1362 3701
e107be36 3702 fire_sched_in_preempt_notifiers(current);
306e0604 3703 /*
70216e18
MD
3704 * When switching through a kernel thread, the loop in
3705 * membarrier_{private,global}_expedited() may have observed that
3706 * kernel thread and not issued an IPI. It is therefore possible to
3707 * schedule between user->kernel->user threads without passing though
3708 * switch_mm(). Membarrier requires a barrier after storing to
3709 * rq->curr, before returning to userspace, so provide them here:
3710 *
3711 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
3712 * provided by mmdrop(),
3713 * - a sync_core for SYNC_CORE.
306e0604 3714 */
70216e18
MD
3715 if (mm) {
3716 membarrier_mm_sync_core_before_usermode(mm);
1da177e4 3717 mmdrop(mm);
70216e18 3718 }
1cef1150
PZ
3719 if (unlikely(prev_state == TASK_DEAD)) {
3720 if (prev->sched_class->task_dead)
3721 prev->sched_class->task_dead(prev);
68f24b08 3722
1cef1150
PZ
3723 /*
3724 * Remove function-return probe instances associated with this
3725 * task and put them back on the free list.
3726 */
3727 kprobe_flush_task(prev);
3728
3729 /* Task is done with its stack. */
3730 put_task_stack(prev);
3731
0ff7b2cf 3732 put_task_struct_rcu_user(prev);
c6fd91f0 3733 }
99e5ada9 3734
de734f89 3735 tick_nohz_task_switch();
dfa50b60 3736 return rq;
1da177e4
LT
3737}
3738
3739/**
3740 * schedule_tail - first thing a freshly forked thread must call.
3741 * @prev: the thread we just switched away from.
3742 */
722a9f92 3743asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
3744 __releases(rq->lock)
3745{
1a43a14a 3746 struct rq *rq;
da19ab51 3747
609ca066
PZ
3748 /*
3749 * New tasks start with FORK_PREEMPT_COUNT, see there and
3750 * finish_task_switch() for details.
3751 *
3752 * finish_task_switch() will drop rq->lock() and lower preempt_count
3753 * and the preempt_enable() will end up enabling preemption (on
3754 * PREEMPT_COUNT kernels).
3755 */
3756
dfa50b60 3757 rq = finish_task_switch(prev);
1a43a14a 3758 preempt_enable();
70b97a7f 3759
1da177e4 3760 if (current->set_child_tid)
b488893a 3761 put_user(task_pid_vnr(current), current->set_child_tid);
088fe47c
EB
3762
3763 calculate_sigpending();
1da177e4
LT
3764}
3765
3766/*
dfa50b60 3767 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 3768 */
04936948 3769static __always_inline struct rq *
70b97a7f 3770context_switch(struct rq *rq, struct task_struct *prev,
d8ac8971 3771 struct task_struct *next, struct rq_flags *rf)
1da177e4 3772{
e107be36 3773 prepare_task_switch(rq, prev, next);
fe4b04fa 3774
9226d125
ZA
3775 /*
3776 * For paravirt, this is coupled with an exit in switch_to to
3777 * combine the page table reload and the switch backend into
3778 * one hypercall.
3779 */
224101ed 3780 arch_start_context_switch(prev);
9226d125 3781
306e0604 3782 /*
139d025c
PZ
3783 * kernel -> kernel lazy + transfer active
3784 * user -> kernel lazy + mmgrab() active
3785 *
3786 * kernel -> user switch + mmdrop() active
3787 * user -> user switch
306e0604 3788 */
139d025c
PZ
3789 if (!next->mm) { // to kernel
3790 enter_lazy_tlb(prev->active_mm, next);
3791
3792 next->active_mm = prev->active_mm;
3793 if (prev->mm) // from user
3794 mmgrab(prev->active_mm);
3795 else
3796 prev->active_mm = NULL;
3797 } else { // to user
227a4aad 3798 membarrier_switch_mm(rq, prev->active_mm, next->mm);
139d025c
PZ
3799 /*
3800 * sys_membarrier() requires an smp_mb() between setting
227a4aad 3801 * rq->curr / membarrier_switch_mm() and returning to userspace.
139d025c
PZ
3802 *
3803 * The below provides this either through switch_mm(), or in
3804 * case 'prev->active_mm == next->mm' through
3805 * finish_task_switch()'s mmdrop().
3806 */
139d025c 3807 switch_mm_irqs_off(prev->active_mm, next->mm, next);
1da177e4 3808
139d025c
PZ
3809 if (!prev->mm) { // from kernel
3810 /* will mmdrop() in finish_task_switch(). */
3811 rq->prev_mm = prev->active_mm;
3812 prev->active_mm = NULL;
3813 }
1da177e4 3814 }
92509b73 3815
cb42c9a3 3816 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
92509b73 3817
269d5992 3818 prepare_lock_switch(rq, next, rf);
1da177e4
LT
3819
3820 /* Here we just switch the register state and the stack. */
3821 switch_to(prev, next, prev);
dd41f596 3822 barrier();
dfa50b60
ON
3823
3824 return finish_task_switch(prev);
1da177e4
LT
3825}
3826
3827/*
1c3e8264 3828 * nr_running and nr_context_switches:
1da177e4
LT
3829 *
3830 * externally visible scheduler statistics: current number of runnable
1c3e8264 3831 * threads, total number of context switches performed since bootup.
1da177e4
LT
3832 */
3833unsigned long nr_running(void)
3834{
3835 unsigned long i, sum = 0;
3836
3837 for_each_online_cpu(i)
3838 sum += cpu_rq(i)->nr_running;
3839
3840 return sum;
f711f609 3841}
1da177e4 3842
2ee507c4 3843/*
d1ccc66d 3844 * Check if only the current task is running on the CPU.
00cc1633
DD
3845 *
3846 * Caution: this function does not check that the caller has disabled
3847 * preemption, thus the result might have a time-of-check-to-time-of-use
3848 * race. The caller is responsible to use it correctly, for example:
3849 *
dfcb245e 3850 * - from a non-preemptible section (of course)
00cc1633
DD
3851 *
3852 * - from a thread that is bound to a single CPU
3853 *
3854 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
3855 */
3856bool single_task_running(void)
3857{
00cc1633 3858 return raw_rq()->nr_running == 1;
2ee507c4
TC
3859}
3860EXPORT_SYMBOL(single_task_running);
3861
1da177e4 3862unsigned long long nr_context_switches(void)
46cb4b7c 3863{
cc94abfc
SR
3864 int i;
3865 unsigned long long sum = 0;
46cb4b7c 3866
0a945022 3867 for_each_possible_cpu(i)
1da177e4 3868 sum += cpu_rq(i)->nr_switches;
46cb4b7c 3869
1da177e4
LT
3870 return sum;
3871}
483b4ee6 3872
145d952a
DL
3873/*
3874 * Consumers of these two interfaces, like for example the cpuidle menu
3875 * governor, are using nonsensical data. Preferring shallow idle state selection
3876 * for a CPU that has IO-wait which might not even end up running the task when
3877 * it does become runnable.
3878 */
3879
3880unsigned long nr_iowait_cpu(int cpu)
3881{
3882 return atomic_read(&cpu_rq(cpu)->nr_iowait);
3883}
3884
e33a9bba
TH
3885/*
3886 * IO-wait accounting, and how its mostly bollocks (on SMP).
3887 *
3888 * The idea behind IO-wait account is to account the idle time that we could
3889 * have spend running if it were not for IO. That is, if we were to improve the
3890 * storage performance, we'd have a proportional reduction in IO-wait time.
3891 *
3892 * This all works nicely on UP, where, when a task blocks on IO, we account
3893 * idle time as IO-wait, because if the storage were faster, it could've been
3894 * running and we'd not be idle.
3895 *
3896 * This has been extended to SMP, by doing the same for each CPU. This however
3897 * is broken.
3898 *
3899 * Imagine for instance the case where two tasks block on one CPU, only the one
3900 * CPU will have IO-wait accounted, while the other has regular idle. Even
3901 * though, if the storage were faster, both could've ran at the same time,
3902 * utilising both CPUs.
3903 *
3904 * This means, that when looking globally, the current IO-wait accounting on
3905 * SMP is a lower bound, by reason of under accounting.
3906 *
3907 * Worse, since the numbers are provided per CPU, they are sometimes
3908 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
3909 * associated with any one particular CPU, it can wake to another CPU than it
3910 * blocked on. This means the per CPU IO-wait number is meaningless.
3911 *
3912 * Task CPU affinities can make all that even more 'interesting'.
3913 */
3914
1da177e4
LT
3915unsigned long nr_iowait(void)
3916{
3917 unsigned long i, sum = 0;
483b4ee6 3918
0a945022 3919 for_each_possible_cpu(i)
145d952a 3920 sum += nr_iowait_cpu(i);
46cb4b7c 3921
1da177e4
LT
3922 return sum;
3923}
483b4ee6 3924
dd41f596 3925#ifdef CONFIG_SMP
8a0be9ef 3926
46cb4b7c 3927/*
38022906
PZ
3928 * sched_exec - execve() is a valuable balancing opportunity, because at
3929 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 3930 */
38022906 3931void sched_exec(void)
46cb4b7c 3932{
38022906 3933 struct task_struct *p = current;
1da177e4 3934 unsigned long flags;
0017d735 3935 int dest_cpu;
46cb4b7c 3936
8f42ced9 3937 raw_spin_lock_irqsave(&p->pi_lock, flags);
ac66f547 3938 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
0017d735
PZ
3939 if (dest_cpu == smp_processor_id())
3940 goto unlock;
38022906 3941
8f42ced9 3942 if (likely(cpu_active(dest_cpu))) {
969c7921 3943 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 3944
8f42ced9
PZ
3945 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3946 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
3947 return;
3948 }
0017d735 3949unlock:
8f42ced9 3950 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 3951}
dd41f596 3952
1da177e4
LT
3953#endif
3954
1da177e4 3955DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 3956DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
3957
3958EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 3959EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 3960
6075620b
GG
3961/*
3962 * The function fair_sched_class.update_curr accesses the struct curr
3963 * and its field curr->exec_start; when called from task_sched_runtime(),
3964 * we observe a high rate of cache misses in practice.
3965 * Prefetching this data results in improved performance.
3966 */
3967static inline void prefetch_curr_exec_start(struct task_struct *p)
3968{
3969#ifdef CONFIG_FAIR_GROUP_SCHED
3970 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
3971#else
3972 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
3973#endif
3974 prefetch(curr);
3975 prefetch(&curr->exec_start);
3976}
3977
c5f8d995
HS
3978/*
3979 * Return accounted runtime for the task.
3980 * In case the task is currently running, return the runtime plus current's
3981 * pending runtime that have not been accounted yet.
3982 */
3983unsigned long long task_sched_runtime(struct task_struct *p)
3984{
eb580751 3985 struct rq_flags rf;
c5f8d995 3986 struct rq *rq;
6e998916 3987 u64 ns;
c5f8d995 3988
911b2898
PZ
3989#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
3990 /*
97fb7a0a 3991 * 64-bit doesn't need locks to atomically read a 64-bit value.
911b2898
PZ
3992 * So we have a optimization chance when the task's delta_exec is 0.
3993 * Reading ->on_cpu is racy, but this is ok.
3994 *
d1ccc66d
IM
3995 * If we race with it leaving CPU, we'll take a lock. So we're correct.
3996 * If we race with it entering CPU, unaccounted time is 0. This is
911b2898 3997 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
3998 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
3999 * been accounted, so we're correct here as well.
911b2898 4000 */
da0c1e65 4001 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
4002 return p->se.sum_exec_runtime;
4003#endif
4004
eb580751 4005 rq = task_rq_lock(p, &rf);
6e998916
SG
4006 /*
4007 * Must be ->curr _and_ ->on_rq. If dequeued, we would
4008 * project cycles that may never be accounted to this
4009 * thread, breaking clock_gettime().
4010 */
4011 if (task_current(rq, p) && task_on_rq_queued(p)) {
6075620b 4012 prefetch_curr_exec_start(p);
6e998916
SG
4013 update_rq_clock(rq);
4014 p->sched_class->update_curr(rq);
4015 }
4016 ns = p->se.sum_exec_runtime;
eb580751 4017 task_rq_unlock(rq, p, &rf);
c5f8d995
HS
4018
4019 return ns;
4020}
48f24c4d 4021
7835b98b
CL
4022/*
4023 * This function gets called by the timer code, with HZ frequency.
4024 * We call it with interrupts disabled.
7835b98b
CL
4025 */
4026void scheduler_tick(void)
4027{
7835b98b
CL
4028 int cpu = smp_processor_id();
4029 struct rq *rq = cpu_rq(cpu);
dd41f596 4030 struct task_struct *curr = rq->curr;
8a8c69c3 4031 struct rq_flags rf;
b4eccf5f 4032 unsigned long thermal_pressure;
3e51f33f 4033
1567c3e3 4034 arch_scale_freq_tick();
3e51f33f 4035 sched_clock_tick();
dd41f596 4036
8a8c69c3
PZ
4037 rq_lock(rq, &rf);
4038
3e51f33f 4039 update_rq_clock(rq);
b4eccf5f 4040 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
05289b90 4041 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
fa85ae24 4042 curr->sched_class->task_tick(rq, curr, 0);
3289bdb4 4043 calc_global_load_tick(rq);
eb414681 4044 psi_task_tick(rq);
8a8c69c3
PZ
4045
4046 rq_unlock(rq, &rf);
7835b98b 4047
e9d2b064 4048 perf_event_task_tick();
e220d2dc 4049
e418e1c2 4050#ifdef CONFIG_SMP
6eb57e0d 4051 rq->idle_balance = idle_cpu(cpu);
7caff66f 4052 trigger_load_balance(rq);
e418e1c2 4053#endif
1da177e4
LT
4054}
4055
265f22a9 4056#ifdef CONFIG_NO_HZ_FULL
d84b3131
FW
4057
4058struct tick_work {
4059 int cpu;
b55bd585 4060 atomic_t state;
d84b3131
FW
4061 struct delayed_work work;
4062};
b55bd585
PM
4063/* Values for ->state, see diagram below. */
4064#define TICK_SCHED_REMOTE_OFFLINE 0
4065#define TICK_SCHED_REMOTE_OFFLINING 1
4066#define TICK_SCHED_REMOTE_RUNNING 2
4067
4068/*
4069 * State diagram for ->state:
4070 *
4071 *
4072 * TICK_SCHED_REMOTE_OFFLINE
4073 * | ^
4074 * | |
4075 * | | sched_tick_remote()
4076 * | |
4077 * | |
4078 * +--TICK_SCHED_REMOTE_OFFLINING
4079 * | ^
4080 * | |
4081 * sched_tick_start() | | sched_tick_stop()
4082 * | |
4083 * V |
4084 * TICK_SCHED_REMOTE_RUNNING
4085 *
4086 *
4087 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
4088 * and sched_tick_start() are happy to leave the state in RUNNING.
4089 */
d84b3131
FW
4090
4091static struct tick_work __percpu *tick_work_cpu;
4092
4093static void sched_tick_remote(struct work_struct *work)
4094{
4095 struct delayed_work *dwork = to_delayed_work(work);
4096 struct tick_work *twork = container_of(dwork, struct tick_work, work);
4097 int cpu = twork->cpu;
4098 struct rq *rq = cpu_rq(cpu);
d9c0ffca 4099 struct task_struct *curr;
d84b3131 4100 struct rq_flags rf;
d9c0ffca 4101 u64 delta;
b55bd585 4102 int os;
d84b3131
FW
4103
4104 /*
4105 * Handle the tick only if it appears the remote CPU is running in full
4106 * dynticks mode. The check is racy by nature, but missing a tick or
4107 * having one too much is no big deal because the scheduler tick updates
4108 * statistics and checks timeslices in a time-independent way, regardless
4109 * of when exactly it is running.
4110 */
488603b8 4111 if (!tick_nohz_tick_stopped_cpu(cpu))
d9c0ffca 4112 goto out_requeue;
d84b3131 4113
d9c0ffca
FW
4114 rq_lock_irq(rq, &rf);
4115 curr = rq->curr;
488603b8 4116 if (cpu_is_offline(cpu))
d9c0ffca 4117 goto out_unlock;
d84b3131 4118
d9c0ffca 4119 update_rq_clock(rq);
d9c0ffca 4120
488603b8
SW
4121 if (!is_idle_task(curr)) {
4122 /*
4123 * Make sure the next tick runs within a reasonable
4124 * amount of time.
4125 */
4126 delta = rq_clock_task(rq) - curr->se.exec_start;
4127 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
4128 }
d9c0ffca
FW
4129 curr->sched_class->task_tick(rq, curr, 0);
4130
ebc0f83c 4131 calc_load_nohz_remote(rq);
d9c0ffca
FW
4132out_unlock:
4133 rq_unlock_irq(rq, &rf);
d9c0ffca 4134out_requeue:
ebc0f83c 4135
d84b3131
FW
4136 /*
4137 * Run the remote tick once per second (1Hz). This arbitrary
4138 * frequency is large enough to avoid overload but short enough
b55bd585
PM
4139 * to keep scheduler internal stats reasonably up to date. But
4140 * first update state to reflect hotplug activity if required.
d84b3131 4141 */
b55bd585
PM
4142 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
4143 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
4144 if (os == TICK_SCHED_REMOTE_RUNNING)
4145 queue_delayed_work(system_unbound_wq, dwork, HZ);
d84b3131
FW
4146}
4147
4148static void sched_tick_start(int cpu)
4149{
b55bd585 4150 int os;
d84b3131
FW
4151 struct tick_work *twork;
4152
4153 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4154 return;
4155
4156 WARN_ON_ONCE(!tick_work_cpu);
4157
4158 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
4159 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
4160 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
4161 if (os == TICK_SCHED_REMOTE_OFFLINE) {
4162 twork->cpu = cpu;
4163 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
4164 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
4165 }
d84b3131
FW
4166}
4167
4168#ifdef CONFIG_HOTPLUG_CPU
4169static void sched_tick_stop(int cpu)
4170{
4171 struct tick_work *twork;
b55bd585 4172 int os;
d84b3131
FW
4173
4174 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4175 return;
4176
4177 WARN_ON_ONCE(!tick_work_cpu);
4178
4179 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
4180 /* There cannot be competing actions, but don't rely on stop-machine. */
4181 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
4182 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
4183 /* Don't cancel, as this would mess up the state machine. */
d84b3131
FW
4184}
4185#endif /* CONFIG_HOTPLUG_CPU */
4186
4187int __init sched_tick_offload_init(void)
4188{
4189 tick_work_cpu = alloc_percpu(struct tick_work);
4190 BUG_ON(!tick_work_cpu);
d84b3131
FW
4191 return 0;
4192}
4193
4194#else /* !CONFIG_NO_HZ_FULL */
4195static inline void sched_tick_start(int cpu) { }
4196static inline void sched_tick_stop(int cpu) { }
265f22a9 4197#endif
1da177e4 4198
c1a280b6 4199#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
c3bc8fd6 4200 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
47252cfb
SR
4201/*
4202 * If the value passed in is equal to the current preempt count
4203 * then we just disabled preemption. Start timing the latency.
4204 */
4205static inline void preempt_latency_start(int val)
4206{
4207 if (preempt_count() == val) {
4208 unsigned long ip = get_lock_parent_ip();
4209#ifdef CONFIG_DEBUG_PREEMPT
4210 current->preempt_disable_ip = ip;
4211#endif
4212 trace_preempt_off(CALLER_ADDR0, ip);
4213 }
4214}
7e49fcce 4215
edafe3a5 4216void preempt_count_add(int val)
1da177e4 4217{
6cd8a4bb 4218#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4219 /*
4220 * Underflow?
4221 */
9a11b49a
IM
4222 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4223 return;
6cd8a4bb 4224#endif
bdb43806 4225 __preempt_count_add(val);
6cd8a4bb 4226#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4227 /*
4228 * Spinlock count overflowing soon?
4229 */
33859f7f
MOS
4230 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4231 PREEMPT_MASK - 10);
6cd8a4bb 4232#endif
47252cfb 4233 preempt_latency_start(val);
1da177e4 4234}
bdb43806 4235EXPORT_SYMBOL(preempt_count_add);
edafe3a5 4236NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 4237
47252cfb
SR
4238/*
4239 * If the value passed in equals to the current preempt count
4240 * then we just enabled preemption. Stop timing the latency.
4241 */
4242static inline void preempt_latency_stop(int val)
4243{
4244 if (preempt_count() == val)
4245 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
4246}
4247
edafe3a5 4248void preempt_count_sub(int val)
1da177e4 4249{
6cd8a4bb 4250#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4251 /*
4252 * Underflow?
4253 */
01e3eb82 4254 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 4255 return;
1da177e4
LT
4256 /*
4257 * Is the spinlock portion underflowing?
4258 */
9a11b49a
IM
4259 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4260 !(preempt_count() & PREEMPT_MASK)))
4261 return;
6cd8a4bb 4262#endif
9a11b49a 4263
47252cfb 4264 preempt_latency_stop(val);
bdb43806 4265 __preempt_count_sub(val);
1da177e4 4266}
bdb43806 4267EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 4268NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4 4269
47252cfb
SR
4270#else
4271static inline void preempt_latency_start(int val) { }
4272static inline void preempt_latency_stop(int val) { }
1da177e4
LT
4273#endif
4274
59ddbcb2
IM
4275static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
4276{
4277#ifdef CONFIG_DEBUG_PREEMPT
4278 return p->preempt_disable_ip;
4279#else
4280 return 0;
4281#endif
4282}
4283
1da177e4 4284/*
dd41f596 4285 * Print scheduling while atomic bug:
1da177e4 4286 */
dd41f596 4287static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 4288{
d1c6d149
VN
4289 /* Save this before calling printk(), since that will clobber it */
4290 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
4291
664dfa65
DJ
4292 if (oops_in_progress)
4293 return;
4294
3df0fc5b
PZ
4295 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4296 prev->comm, prev->pid, preempt_count());
838225b4 4297
dd41f596 4298 debug_show_held_locks(prev);
e21f5b15 4299 print_modules();
dd41f596
IM
4300 if (irqs_disabled())
4301 print_irqtrace_events(prev);
d1c6d149
VN
4302 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
4303 && in_atomic_preempt_off()) {
8f47b187 4304 pr_err("Preemption disabled at:");
2062a4e8 4305 print_ip_sym(KERN_ERR, preempt_disable_ip);
8f47b187 4306 }
748c7201
DBO
4307 if (panic_on_warn)
4308 panic("scheduling while atomic\n");
4309
6135fc1e 4310 dump_stack();
373d4d09 4311 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 4312}
1da177e4 4313
dd41f596
IM
4314/*
4315 * Various schedule()-time debugging checks and statistics:
4316 */
312364f3 4317static inline void schedule_debug(struct task_struct *prev, bool preempt)
dd41f596 4318{
0d9e2632 4319#ifdef CONFIG_SCHED_STACK_END_CHECK
29d64551
JH
4320 if (task_stack_end_corrupted(prev))
4321 panic("corrupted stack end detected inside scheduler\n");
88485be5
WD
4322
4323 if (task_scs_end_corrupted(prev))
4324 panic("corrupted shadow stack detected inside scheduler\n");
0d9e2632 4325#endif
b99def8b 4326
312364f3
DV
4327#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4328 if (!preempt && prev->state && prev->non_block_count) {
4329 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
4330 prev->comm, prev->pid, prev->non_block_count);
4331 dump_stack();
4332 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4333 }
4334#endif
4335
1dc0fffc 4336 if (unlikely(in_atomic_preempt_off())) {
dd41f596 4337 __schedule_bug(prev);
1dc0fffc
PZ
4338 preempt_count_set(PREEMPT_DISABLED);
4339 }
b3fbab05 4340 rcu_sleep_check();
dd41f596 4341
1da177e4
LT
4342 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4343
ae92882e 4344 schedstat_inc(this_rq()->sched_count);
dd41f596
IM
4345}
4346
457d1f46
CY
4347static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
4348 struct rq_flags *rf)
4349{
4350#ifdef CONFIG_SMP
4351 const struct sched_class *class;
4352 /*
4353 * We must do the balancing pass before put_prev_task(), such
4354 * that when we release the rq->lock the task is in the same
4355 * state as before we took rq->lock.
4356 *
4357 * We can terminate the balance pass as soon as we know there is
4358 * a runnable task of @class priority or higher.
4359 */
4360 for_class_range(class, prev->sched_class, &idle_sched_class) {
4361 if (class->balance(rq, prev, rf))
4362 break;
4363 }
4364#endif
4365
4366 put_prev_task(rq, prev);
4367}
4368
dd41f596
IM
4369/*
4370 * Pick up the highest-prio task:
4371 */
4372static inline struct task_struct *
d8ac8971 4373pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
dd41f596 4374{
49ee5768 4375 const struct sched_class *class;
dd41f596 4376 struct task_struct *p;
1da177e4
LT
4377
4378 /*
0ba87bb2
PZ
4379 * Optimization: we know that if all tasks are in the fair class we can
4380 * call that function directly, but only if the @prev task wasn't of a
4381 * higher scheduling class, because otherwise those loose the
4382 * opportunity to pull in more work from other CPUs.
1da177e4 4383 */
aa93cd53 4384 if (likely(prev->sched_class <= &fair_sched_class &&
0ba87bb2
PZ
4385 rq->nr_running == rq->cfs.h_nr_running)) {
4386
5d7d6056 4387 p = pick_next_task_fair(rq, prev, rf);
6ccdc84b 4388 if (unlikely(p == RETRY_TASK))
67692435 4389 goto restart;
6ccdc84b 4390
d1ccc66d 4391 /* Assumes fair_sched_class->next == idle_sched_class */
5d7d6056 4392 if (!p) {
f488e105 4393 put_prev_task(rq, prev);
98c2f700 4394 p = pick_next_task_idle(rq);
f488e105 4395 }
6ccdc84b
PZ
4396
4397 return p;
1da177e4
LT
4398 }
4399
67692435 4400restart:
457d1f46 4401 put_prev_task_balance(rq, prev, rf);
67692435 4402
34f971f6 4403 for_each_class(class) {
98c2f700 4404 p = class->pick_next_task(rq);
67692435 4405 if (p)
dd41f596 4406 return p;
dd41f596 4407 }
34f971f6 4408
d1ccc66d
IM
4409 /* The idle class should always have a runnable task: */
4410 BUG();
dd41f596 4411}
1da177e4 4412
dd41f596 4413/*
c259e01a 4414 * __schedule() is the main scheduler function.
edde96ea
PE
4415 *
4416 * The main means of driving the scheduler and thus entering this function are:
4417 *
4418 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4419 *
4420 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4421 * paths. For example, see arch/x86/entry_64.S.
4422 *
4423 * To drive preemption between tasks, the scheduler sets the flag in timer
4424 * interrupt handler scheduler_tick().
4425 *
4426 * 3. Wakeups don't really cause entry into schedule(). They add a
4427 * task to the run-queue and that's it.
4428 *
4429 * Now, if the new task added to the run-queue preempts the current
4430 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4431 * called on the nearest possible occasion:
4432 *
c1a280b6 4433 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
edde96ea
PE
4434 *
4435 * - in syscall or exception context, at the next outmost
4436 * preempt_enable(). (this might be as soon as the wake_up()'s
4437 * spin_unlock()!)
4438 *
4439 * - in IRQ context, return from interrupt-handler to
4440 * preemptible context
4441 *
c1a280b6 4442 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
edde96ea
PE
4443 * then at the next:
4444 *
4445 * - cond_resched() call
4446 * - explicit schedule() call
4447 * - return from syscall or exception to user-space
4448 * - return from interrupt-handler to user-space
bfd9b2b5 4449 *
b30f0e3f 4450 * WARNING: must be called with preemption disabled!
dd41f596 4451 */
499d7955 4452static void __sched notrace __schedule(bool preempt)
dd41f596
IM
4453{
4454 struct task_struct *prev, *next;
67ca7bde 4455 unsigned long *switch_count;
dbfb089d 4456 unsigned long prev_state;
d8ac8971 4457 struct rq_flags rf;
dd41f596 4458 struct rq *rq;
31656519 4459 int cpu;
dd41f596 4460
dd41f596
IM
4461 cpu = smp_processor_id();
4462 rq = cpu_rq(cpu);
dd41f596 4463 prev = rq->curr;
dd41f596 4464
312364f3 4465 schedule_debug(prev, preempt);
1da177e4 4466
31656519 4467 if (sched_feat(HRTICK))
f333fdc9 4468 hrtick_clear(rq);
8f4d37ec 4469
46a5d164 4470 local_irq_disable();
bcbfdd01 4471 rcu_note_context_switch(preempt);
46a5d164 4472
e0acd0a6
ON
4473 /*
4474 * Make sure that signal_pending_state()->signal_pending() below
4475 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
dbfb089d
PZ
4476 * done by the caller to avoid the race with signal_wake_up():
4477 *
4478 * __set_current_state(@state) signal_wake_up()
4479 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
4480 * wake_up_state(p, state)
4481 * LOCK rq->lock LOCK p->pi_state
4482 * smp_mb__after_spinlock() smp_mb__after_spinlock()
4483 * if (signal_pending_state()) if (p->state & @state)
306e0604 4484 *
dbfb089d 4485 * Also, the membarrier system call requires a full memory barrier
306e0604 4486 * after coming from user-space, before storing to rq->curr.
e0acd0a6 4487 */
8a8c69c3 4488 rq_lock(rq, &rf);
d89e588c 4489 smp_mb__after_spinlock();
1da177e4 4490
d1ccc66d
IM
4491 /* Promote REQ to ACT */
4492 rq->clock_update_flags <<= 1;
bce4dc80 4493 update_rq_clock(rq);
9edfbfed 4494
246d86b5 4495 switch_count = &prev->nivcsw;
d136122f 4496
dbfb089d 4497 /*
d136122f
PZ
4498 * We must load prev->state once (task_struct::state is volatile), such
4499 * that:
4500 *
4501 * - we form a control dependency vs deactivate_task() below.
4502 * - ptrace_{,un}freeze_traced() can change ->state underneath us.
dbfb089d 4503 */
d136122f
PZ
4504 prev_state = prev->state;
4505 if (!preempt && prev_state) {
dbfb089d 4506 if (signal_pending_state(prev_state, prev)) {
1da177e4 4507 prev->state = TASK_RUNNING;
21aa9af0 4508 } else {
dbfb089d
PZ
4509 prev->sched_contributes_to_load =
4510 (prev_state & TASK_UNINTERRUPTIBLE) &&
4511 !(prev_state & TASK_NOLOAD) &&
4512 !(prev->flags & PF_FROZEN);
4513
4514 if (prev->sched_contributes_to_load)
4515 rq->nr_uninterruptible++;
4516
4517 /*
4518 * __schedule() ttwu()
d136122f
PZ
4519 * prev_state = prev->state; if (p->on_rq && ...)
4520 * if (prev_state) goto out;
4521 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
4522 * p->state = TASK_WAKING
4523 *
4524 * Where __schedule() and ttwu() have matching control dependencies.
dbfb089d
PZ
4525 *
4526 * After this, schedule() must not care about p->state any more.
4527 */
bce4dc80 4528 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
2acca55e 4529
e33a9bba
TH
4530 if (prev->in_iowait) {
4531 atomic_inc(&rq->nr_iowait);
4532 delayacct_blkio_start();
4533 }
21aa9af0 4534 }
dd41f596 4535 switch_count = &prev->nvcsw;
1da177e4
LT
4536 }
4537
d8ac8971 4538 next = pick_next_task(rq, prev, &rf);
f26f9aff 4539 clear_tsk_need_resched(prev);
f27dde8d 4540 clear_preempt_need_resched();
1da177e4 4541
1da177e4 4542 if (likely(prev != next)) {
1da177e4 4543 rq->nr_switches++;
5311a98f
EB
4544 /*
4545 * RCU users of rcu_dereference(rq->curr) may not see
4546 * changes to task_struct made by pick_next_task().
4547 */
4548 RCU_INIT_POINTER(rq->curr, next);
22e4ebb9
MD
4549 /*
4550 * The membarrier system call requires each architecture
4551 * to have a full memory barrier after updating
306e0604
MD
4552 * rq->curr, before returning to user-space.
4553 *
4554 * Here are the schemes providing that barrier on the
4555 * various architectures:
4556 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
4557 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
4558 * - finish_lock_switch() for weakly-ordered
4559 * architectures where spin_unlock is a full barrier,
4560 * - switch_to() for arm64 (weakly-ordered, spin_unlock
4561 * is a RELEASE barrier),
22e4ebb9 4562 */
1da177e4
LT
4563 ++*switch_count;
4564
b05e75d6
JW
4565 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
4566
c73464b1 4567 trace_sched_switch(preempt, prev, next);
d1ccc66d
IM
4568
4569 /* Also unlocks the rq: */
4570 rq = context_switch(rq, prev, next, &rf);
cbce1a68 4571 } else {
cb42c9a3 4572 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
1da177e4 4573
565790d2
PZ
4574 rq_unpin_lock(rq, &rf);
4575 __balance_callbacks(rq);
4576 raw_spin_unlock_irq(&rq->lock);
4577 }
1da177e4 4578}
c259e01a 4579
9af6528e
PZ
4580void __noreturn do_task_dead(void)
4581{
d1ccc66d 4582 /* Causes final put_task_struct in finish_task_switch(): */
b5bf9a90 4583 set_special_state(TASK_DEAD);
d1ccc66d
IM
4584
4585 /* Tell freezer to ignore us: */
4586 current->flags |= PF_NOFREEZE;
4587
9af6528e
PZ
4588 __schedule(false);
4589 BUG();
d1ccc66d
IM
4590
4591 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
9af6528e 4592 for (;;)
d1ccc66d 4593 cpu_relax();
9af6528e
PZ
4594}
4595
9c40cef2
TG
4596static inline void sched_submit_work(struct task_struct *tsk)
4597{
c1cecf88
SAS
4598 unsigned int task_flags;
4599
b0fdc013 4600 if (!tsk->state)
9c40cef2 4601 return;
6d25be57 4602
c1cecf88 4603 task_flags = tsk->flags;
6d25be57
TG
4604 /*
4605 * If a worker went to sleep, notify and ask workqueue whether
4606 * it wants to wake up a task to maintain concurrency.
4607 * As this function is called inside the schedule() context,
4608 * we disable preemption to avoid it calling schedule() again
62849a96
SAS
4609 * in the possible wakeup of a kworker and because wq_worker_sleeping()
4610 * requires it.
6d25be57 4611 */
c1cecf88 4612 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6d25be57 4613 preempt_disable();
c1cecf88 4614 if (task_flags & PF_WQ_WORKER)
771b53d0
JA
4615 wq_worker_sleeping(tsk);
4616 else
4617 io_wq_worker_sleeping(tsk);
6d25be57
TG
4618 preempt_enable_no_resched();
4619 }
4620
b0fdc013
SAS
4621 if (tsk_is_pi_blocked(tsk))
4622 return;
4623
9c40cef2
TG
4624 /*
4625 * If we are going to sleep and we have plugged IO queued,
4626 * make sure to submit it to avoid deadlocks.
4627 */
4628 if (blk_needs_flush_plug(tsk))
4629 blk_schedule_flush_plug(tsk);
4630}
4631
6d25be57
TG
4632static void sched_update_worker(struct task_struct *tsk)
4633{
771b53d0
JA
4634 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
4635 if (tsk->flags & PF_WQ_WORKER)
4636 wq_worker_running(tsk);
4637 else
4638 io_wq_worker_running(tsk);
4639 }
6d25be57
TG
4640}
4641
722a9f92 4642asmlinkage __visible void __sched schedule(void)
c259e01a 4643{
9c40cef2
TG
4644 struct task_struct *tsk = current;
4645
4646 sched_submit_work(tsk);
bfd9b2b5 4647 do {
b30f0e3f 4648 preempt_disable();
fc13aeba 4649 __schedule(false);
b30f0e3f 4650 sched_preempt_enable_no_resched();
bfd9b2b5 4651 } while (need_resched());
6d25be57 4652 sched_update_worker(tsk);
c259e01a 4653}
1da177e4
LT
4654EXPORT_SYMBOL(schedule);
4655
8663effb
SRV
4656/*
4657 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
4658 * state (have scheduled out non-voluntarily) by making sure that all
4659 * tasks have either left the run queue or have gone into user space.
4660 * As idle tasks do not do either, they must not ever be preempted
4661 * (schedule out non-voluntarily).
4662 *
4663 * schedule_idle() is similar to schedule_preempt_disable() except that it
4664 * never enables preemption because it does not call sched_submit_work().
4665 */
4666void __sched schedule_idle(void)
4667{
4668 /*
4669 * As this skips calling sched_submit_work(), which the idle task does
4670 * regardless because that function is a nop when the task is in a
4671 * TASK_RUNNING state, make sure this isn't used someplace that the
4672 * current task can be in any other state. Note, idle is always in the
4673 * TASK_RUNNING state.
4674 */
4675 WARN_ON_ONCE(current->state);
4676 do {
4677 __schedule(false);
4678 } while (need_resched());
4679}
4680
91d1aa43 4681#ifdef CONFIG_CONTEXT_TRACKING
722a9f92 4682asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
4683{
4684 /*
4685 * If we come here after a random call to set_need_resched(),
4686 * or we have been woken up remotely but the IPI has not yet arrived,
4687 * we haven't yet exited the RCU idle mode. Do it here manually until
4688 * we find a better solution.
7cc78f8f
AL
4689 *
4690 * NB: There are buggy callers of this function. Ideally we
c467ea76 4691 * should warn if prev_state != CONTEXT_USER, but that will trigger
7cc78f8f 4692 * too frequently to make sense yet.
20ab65e3 4693 */
7cc78f8f 4694 enum ctx_state prev_state = exception_enter();
20ab65e3 4695 schedule();
7cc78f8f 4696 exception_exit(prev_state);
20ab65e3
FW
4697}
4698#endif
4699
c5491ea7
TG
4700/**
4701 * schedule_preempt_disabled - called with preemption disabled
4702 *
4703 * Returns with preemption disabled. Note: preempt_count must be 1
4704 */
4705void __sched schedule_preempt_disabled(void)
4706{
ba74c144 4707 sched_preempt_enable_no_resched();
c5491ea7
TG
4708 schedule();
4709 preempt_disable();
4710}
4711
06b1f808 4712static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
4713{
4714 do {
47252cfb
SR
4715 /*
4716 * Because the function tracer can trace preempt_count_sub()
4717 * and it also uses preempt_enable/disable_notrace(), if
4718 * NEED_RESCHED is set, the preempt_enable_notrace() called
4719 * by the function tracer will call this function again and
4720 * cause infinite recursion.
4721 *
4722 * Preemption must be disabled here before the function
4723 * tracer can trace. Break up preempt_disable() into two
4724 * calls. One to disable preemption without fear of being
4725 * traced. The other to still record the preemption latency,
4726 * which can also be traced by the function tracer.
4727 */
499d7955 4728 preempt_disable_notrace();
47252cfb 4729 preempt_latency_start(1);
fc13aeba 4730 __schedule(true);
47252cfb 4731 preempt_latency_stop(1);
499d7955 4732 preempt_enable_no_resched_notrace();
a18b5d01
FW
4733
4734 /*
4735 * Check again in case we missed a preemption opportunity
4736 * between schedule and now.
4737 */
a18b5d01
FW
4738 } while (need_resched());
4739}
4740
c1a280b6 4741#ifdef CONFIG_PREEMPTION
1da177e4 4742/*
a49b4f40
VS
4743 * This is the entry point to schedule() from in-kernel preemption
4744 * off of preempt_enable.
1da177e4 4745 */
722a9f92 4746asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 4747{
1da177e4
LT
4748 /*
4749 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 4750 * we do not want to preempt the current task. Just return..
1da177e4 4751 */
fbb00b56 4752 if (likely(!preemptible()))
1da177e4
LT
4753 return;
4754
a18b5d01 4755 preempt_schedule_common();
1da177e4 4756}
376e2424 4757NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 4758EXPORT_SYMBOL(preempt_schedule);
009f60e2 4759
009f60e2 4760/**
4eaca0a8 4761 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
4762 *
4763 * The tracing infrastructure uses preempt_enable_notrace to prevent
4764 * recursion and tracing preempt enabling caused by the tracing
4765 * infrastructure itself. But as tracing can happen in areas coming
4766 * from userspace or just about to enter userspace, a preempt enable
4767 * can occur before user_exit() is called. This will cause the scheduler
4768 * to be called when the system is still in usermode.
4769 *
4770 * To prevent this, the preempt_enable_notrace will use this function
4771 * instead of preempt_schedule() to exit user context if needed before
4772 * calling the scheduler.
4773 */
4eaca0a8 4774asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
4775{
4776 enum ctx_state prev_ctx;
4777
4778 if (likely(!preemptible()))
4779 return;
4780
4781 do {
47252cfb
SR
4782 /*
4783 * Because the function tracer can trace preempt_count_sub()
4784 * and it also uses preempt_enable/disable_notrace(), if
4785 * NEED_RESCHED is set, the preempt_enable_notrace() called
4786 * by the function tracer will call this function again and
4787 * cause infinite recursion.
4788 *
4789 * Preemption must be disabled here before the function
4790 * tracer can trace. Break up preempt_disable() into two
4791 * calls. One to disable preemption without fear of being
4792 * traced. The other to still record the preemption latency,
4793 * which can also be traced by the function tracer.
4794 */
3d8f74dd 4795 preempt_disable_notrace();
47252cfb 4796 preempt_latency_start(1);
009f60e2
ON
4797 /*
4798 * Needs preempt disabled in case user_exit() is traced
4799 * and the tracer calls preempt_enable_notrace() causing
4800 * an infinite recursion.
4801 */
4802 prev_ctx = exception_enter();
fc13aeba 4803 __schedule(true);
009f60e2
ON
4804 exception_exit(prev_ctx);
4805
47252cfb 4806 preempt_latency_stop(1);
3d8f74dd 4807 preempt_enable_no_resched_notrace();
009f60e2
ON
4808 } while (need_resched());
4809}
4eaca0a8 4810EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 4811
c1a280b6 4812#endif /* CONFIG_PREEMPTION */
1da177e4
LT
4813
4814/*
a49b4f40 4815 * This is the entry point to schedule() from kernel preemption
1da177e4
LT
4816 * off of irq context.
4817 * Note, that this is called and return with irqs disabled. This will
4818 * protect us against recursive calling from irq.
4819 */
722a9f92 4820asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 4821{
b22366cd 4822 enum ctx_state prev_state;
6478d880 4823
2ed6e34f 4824 /* Catch callers which need to be fixed */
f27dde8d 4825 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 4826
b22366cd
FW
4827 prev_state = exception_enter();
4828
3a5c359a 4829 do {
3d8f74dd 4830 preempt_disable();
3a5c359a 4831 local_irq_enable();
fc13aeba 4832 __schedule(true);
3a5c359a 4833 local_irq_disable();
3d8f74dd 4834 sched_preempt_enable_no_resched();
5ed0cec0 4835 } while (need_resched());
b22366cd
FW
4836
4837 exception_exit(prev_state);
1da177e4
LT
4838}
4839
ac6424b9 4840int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
95cdf3b7 4841 void *key)
1da177e4 4842{
062d3f95 4843 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
63859d4f 4844 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 4845}
1da177e4
LT
4846EXPORT_SYMBOL(default_wake_function);
4847
b29739f9
IM
4848#ifdef CONFIG_RT_MUTEXES
4849
acd58620
PZ
4850static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
4851{
4852 if (pi_task)
4853 prio = min(prio, pi_task->prio);
4854
4855 return prio;
4856}
4857
4858static inline int rt_effective_prio(struct task_struct *p, int prio)
4859{
4860 struct task_struct *pi_task = rt_mutex_get_top_task(p);
4861
4862 return __rt_effective_prio(pi_task, prio);
4863}
4864
b29739f9
IM
4865/*
4866 * rt_mutex_setprio - set the current priority of a task
acd58620
PZ
4867 * @p: task to boost
4868 * @pi_task: donor task
b29739f9
IM
4869 *
4870 * This function changes the 'effective' priority of a task. It does
4871 * not touch ->normal_prio like __setscheduler().
4872 *
c365c292
TG
4873 * Used by the rt_mutex code to implement priority inheritance
4874 * logic. Call site only calls if the priority of the task changed.
b29739f9 4875 */
acd58620 4876void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
b29739f9 4877{
acd58620 4878 int prio, oldprio, queued, running, queue_flag =
7a57f32a 4879 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
83ab0aa0 4880 const struct sched_class *prev_class;
eb580751
PZ
4881 struct rq_flags rf;
4882 struct rq *rq;
b29739f9 4883
acd58620
PZ
4884 /* XXX used to be waiter->prio, not waiter->task->prio */
4885 prio = __rt_effective_prio(pi_task, p->normal_prio);
4886
4887 /*
4888 * If nothing changed; bail early.
4889 */
4890 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
4891 return;
b29739f9 4892
eb580751 4893 rq = __task_rq_lock(p, &rf);
80f5c1b8 4894 update_rq_clock(rq);
acd58620
PZ
4895 /*
4896 * Set under pi_lock && rq->lock, such that the value can be used under
4897 * either lock.
4898 *
4899 * Note that there is loads of tricky to make this pointer cache work
4900 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
4901 * ensure a task is de-boosted (pi_task is set to NULL) before the
4902 * task is allowed to run again (and can exit). This ensures the pointer
4903 * points to a blocked task -- which guaratees the task is present.
4904 */
4905 p->pi_top_task = pi_task;
4906
4907 /*
4908 * For FIFO/RR we only need to set prio, if that matches we're done.
4909 */
4910 if (prio == p->prio && !dl_prio(prio))
4911 goto out_unlock;
b29739f9 4912
1c4dd99b
TG
4913 /*
4914 * Idle task boosting is a nono in general. There is one
4915 * exception, when PREEMPT_RT and NOHZ is active:
4916 *
4917 * The idle task calls get_next_timer_interrupt() and holds
4918 * the timer wheel base->lock on the CPU and another CPU wants
4919 * to access the timer (probably to cancel it). We can safely
4920 * ignore the boosting request, as the idle CPU runs this code
4921 * with interrupts disabled and will complete the lock
4922 * protected section without being interrupted. So there is no
4923 * real need to boost.
4924 */
4925 if (unlikely(p == rq->idle)) {
4926 WARN_ON(p != rq->curr);
4927 WARN_ON(p->pi_blocked_on);
4928 goto out_unlock;
4929 }
4930
b91473ff 4931 trace_sched_pi_setprio(p, pi_task);
d5f9f942 4932 oldprio = p->prio;
ff77e468
PZ
4933
4934 if (oldprio == prio)
4935 queue_flag &= ~DEQUEUE_MOVE;
4936
83ab0aa0 4937 prev_class = p->sched_class;
da0c1e65 4938 queued = task_on_rq_queued(p);
051a1d1a 4939 running = task_current(rq, p);
da0c1e65 4940 if (queued)
ff77e468 4941 dequeue_task(rq, p, queue_flag);
0e1f3483 4942 if (running)
f3cd1c4e 4943 put_prev_task(rq, p);
dd41f596 4944
2d3d891d
DF
4945 /*
4946 * Boosting condition are:
4947 * 1. -rt task is running and holds mutex A
4948 * --> -dl task blocks on mutex A
4949 *
4950 * 2. -dl task is running and holds mutex A
4951 * --> -dl task blocks on mutex A and could preempt the
4952 * running task
4953 */
4954 if (dl_prio(prio)) {
466af29b 4955 if (!dl_prio(p->normal_prio) ||
740797ce
JL
4956 (pi_task && dl_prio(pi_task->prio) &&
4957 dl_entity_preempt(&pi_task->dl, &p->dl))) {
2d3d891d 4958 p->dl.dl_boosted = 1;
ff77e468 4959 queue_flag |= ENQUEUE_REPLENISH;
2d3d891d
DF
4960 } else
4961 p->dl.dl_boosted = 0;
aab03e05 4962 p->sched_class = &dl_sched_class;
2d3d891d
DF
4963 } else if (rt_prio(prio)) {
4964 if (dl_prio(oldprio))
4965 p->dl.dl_boosted = 0;
4966 if (oldprio < prio)
ff77e468 4967 queue_flag |= ENQUEUE_HEAD;
dd41f596 4968 p->sched_class = &rt_sched_class;
2d3d891d
DF
4969 } else {
4970 if (dl_prio(oldprio))
4971 p->dl.dl_boosted = 0;
746db944
BS
4972 if (rt_prio(oldprio))
4973 p->rt.timeout = 0;
dd41f596 4974 p->sched_class = &fair_sched_class;
2d3d891d 4975 }
dd41f596 4976
b29739f9
IM
4977 p->prio = prio;
4978
da0c1e65 4979 if (queued)
ff77e468 4980 enqueue_task(rq, p, queue_flag);
a399d233 4981 if (running)
03b7fad1 4982 set_next_task(rq, p);
cb469845 4983
da7a735e 4984 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 4985out_unlock:
d1ccc66d
IM
4986 /* Avoid rq from going away on us: */
4987 preempt_disable();
4c9a4bc8 4988
565790d2
PZ
4989 rq_unpin_lock(rq, &rf);
4990 __balance_callbacks(rq);
4991 raw_spin_unlock(&rq->lock);
4992
4c9a4bc8 4993 preempt_enable();
b29739f9 4994}
acd58620
PZ
4995#else
4996static inline int rt_effective_prio(struct task_struct *p, int prio)
4997{
4998 return prio;
4999}
b29739f9 5000#endif
d50dde5a 5001
36c8b586 5002void set_user_nice(struct task_struct *p, long nice)
1da177e4 5003{
49bd21ef 5004 bool queued, running;
53a23364 5005 int old_prio;
eb580751 5006 struct rq_flags rf;
70b97a7f 5007 struct rq *rq;
1da177e4 5008
75e45d51 5009 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
1da177e4
LT
5010 return;
5011 /*
5012 * We have to be careful, if called from sys_setpriority(),
5013 * the task might be in the middle of scheduling on another CPU.
5014 */
eb580751 5015 rq = task_rq_lock(p, &rf);
2fb8d367
PZ
5016 update_rq_clock(rq);
5017
1da177e4
LT
5018 /*
5019 * The RT priorities are set via sched_setscheduler(), but we still
5020 * allow the 'normal' nice value to be set - but as expected
5021 * it wont have any effect on scheduling until the task is
aab03e05 5022 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
1da177e4 5023 */
aab03e05 5024 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1da177e4
LT
5025 p->static_prio = NICE_TO_PRIO(nice);
5026 goto out_unlock;
5027 }
da0c1e65 5028 queued = task_on_rq_queued(p);
49bd21ef 5029 running = task_current(rq, p);
da0c1e65 5030 if (queued)
7a57f32a 5031 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
49bd21ef
PZ
5032 if (running)
5033 put_prev_task(rq, p);
1da177e4 5034
1da177e4 5035 p->static_prio = NICE_TO_PRIO(nice);
9059393e 5036 set_load_weight(p, true);
b29739f9
IM
5037 old_prio = p->prio;
5038 p->prio = effective_prio(p);
1da177e4 5039
5443a0be 5040 if (queued)
7134b3e9 5041 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
49bd21ef 5042 if (running)
03b7fad1 5043 set_next_task(rq, p);
5443a0be
FW
5044
5045 /*
5046 * If the task increased its priority or is running and
5047 * lowered its priority, then reschedule its CPU:
5048 */
5049 p->sched_class->prio_changed(rq, p, old_prio);
5050
1da177e4 5051out_unlock:
eb580751 5052 task_rq_unlock(rq, p, &rf);
1da177e4 5053}
1da177e4
LT
5054EXPORT_SYMBOL(set_user_nice);
5055
e43379f1
MM
5056/*
5057 * can_nice - check if a task can reduce its nice value
5058 * @p: task
5059 * @nice: nice value
5060 */
36c8b586 5061int can_nice(const struct task_struct *p, const int nice)
e43379f1 5062{
d1ccc66d 5063 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7aa2c016 5064 int nice_rlim = nice_to_rlimit(nice);
48f24c4d 5065
78d7d407 5066 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
5067 capable(CAP_SYS_NICE));
5068}
5069
1da177e4
LT
5070#ifdef __ARCH_WANT_SYS_NICE
5071
5072/*
5073 * sys_nice - change the priority of the current process.
5074 * @increment: priority increment
5075 *
5076 * sys_setpriority is a more generic, but much slower function that
5077 * does similar things.
5078 */
5add95d4 5079SYSCALL_DEFINE1(nice, int, increment)
1da177e4 5080{
48f24c4d 5081 long nice, retval;
1da177e4
LT
5082
5083 /*
5084 * Setpriority might change our priority at the same moment.
5085 * We don't have to worry. Conceptually one call occurs first
5086 * and we have a single winner.
5087 */
a9467fa3 5088 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
d0ea0268 5089 nice = task_nice(current) + increment;
1da177e4 5090
a9467fa3 5091 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
e43379f1
MM
5092 if (increment < 0 && !can_nice(current, nice))
5093 return -EPERM;
5094
1da177e4
LT
5095 retval = security_task_setnice(current, nice);
5096 if (retval)
5097 return retval;
5098
5099 set_user_nice(current, nice);
5100 return 0;
5101}
5102
5103#endif
5104
5105/**
5106 * task_prio - return the priority value of a given task.
5107 * @p: the task in question.
5108 *
e69f6186 5109 * Return: The priority value as seen by users in /proc.
1da177e4
LT
5110 * RT tasks are offset by -200. Normal tasks are centered
5111 * around 0, value goes from -16 to +15.
5112 */
36c8b586 5113int task_prio(const struct task_struct *p)
1da177e4
LT
5114{
5115 return p->prio - MAX_RT_PRIO;
5116}
5117
1da177e4 5118/**
d1ccc66d 5119 * idle_cpu - is a given CPU idle currently?
1da177e4 5120 * @cpu: the processor in question.
e69f6186
YB
5121 *
5122 * Return: 1 if the CPU is currently idle. 0 otherwise.
1da177e4
LT
5123 */
5124int idle_cpu(int cpu)
5125{
908a3283
TG
5126 struct rq *rq = cpu_rq(cpu);
5127
5128 if (rq->curr != rq->idle)
5129 return 0;
5130
5131 if (rq->nr_running)
5132 return 0;
5133
5134#ifdef CONFIG_SMP
126c2092 5135 if (rq->ttwu_pending)
908a3283
TG
5136 return 0;
5137#endif
5138
5139 return 1;
1da177e4
LT
5140}
5141
943d355d
RJ
5142/**
5143 * available_idle_cpu - is a given CPU idle for enqueuing work.
5144 * @cpu: the CPU in question.
5145 *
5146 * Return: 1 if the CPU is currently idle. 0 otherwise.
5147 */
5148int available_idle_cpu(int cpu)
5149{
5150 if (!idle_cpu(cpu))
5151 return 0;
5152
247f2f6f
RJ
5153 if (vcpu_is_preempted(cpu))
5154 return 0;
5155
908a3283 5156 return 1;
1da177e4
LT
5157}
5158
1da177e4 5159/**
d1ccc66d 5160 * idle_task - return the idle task for a given CPU.
1da177e4 5161 * @cpu: the processor in question.
e69f6186 5162 *
d1ccc66d 5163 * Return: The idle task for the CPU @cpu.
1da177e4 5164 */
36c8b586 5165struct task_struct *idle_task(int cpu)
1da177e4
LT
5166{
5167 return cpu_rq(cpu)->idle;
5168}
5169
5170/**
5171 * find_process_by_pid - find a process with a matching PID value.
5172 * @pid: the pid in question.
e69f6186
YB
5173 *
5174 * The task of @pid, if found. %NULL otherwise.
1da177e4 5175 */
a9957449 5176static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 5177{
228ebcbe 5178 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
5179}
5180
c13db6b1
SR
5181/*
5182 * sched_setparam() passes in -1 for its policy, to let the functions
5183 * it calls know not to change it.
5184 */
5185#define SETPARAM_POLICY -1
5186
c365c292
TG
5187static void __setscheduler_params(struct task_struct *p,
5188 const struct sched_attr *attr)
1da177e4 5189{
d50dde5a
DF
5190 int policy = attr->sched_policy;
5191
c13db6b1 5192 if (policy == SETPARAM_POLICY)
39fd8fd2
PZ
5193 policy = p->policy;
5194
1da177e4 5195 p->policy = policy;
d50dde5a 5196
aab03e05
DF
5197 if (dl_policy(policy))
5198 __setparam_dl(p, attr);
39fd8fd2 5199 else if (fair_policy(policy))
d50dde5a
DF
5200 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
5201
39fd8fd2
PZ
5202 /*
5203 * __sched_setscheduler() ensures attr->sched_priority == 0 when
5204 * !rt_policy. Always setting this ensures that things like
5205 * getparam()/getattr() don't report silly values for !rt tasks.
5206 */
5207 p->rt_priority = attr->sched_priority;
383afd09 5208 p->normal_prio = normal_prio(p);
9059393e 5209 set_load_weight(p, true);
c365c292 5210}
39fd8fd2 5211
c365c292
TG
5212/* Actually do priority change: must hold pi & rq lock. */
5213static void __setscheduler(struct rq *rq, struct task_struct *p,
0782e63b 5214 const struct sched_attr *attr, bool keep_boost)
c365c292 5215{
a509a7cd
PB
5216 /*
5217 * If params can't change scheduling class changes aren't allowed
5218 * either.
5219 */
5220 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
5221 return;
5222
c365c292 5223 __setscheduler_params(p, attr);
d50dde5a 5224
383afd09 5225 /*
0782e63b
TG
5226 * Keep a potential priority boosting if called from
5227 * sched_setscheduler().
383afd09 5228 */
acd58620 5229 p->prio = normal_prio(p);
0782e63b 5230 if (keep_boost)
acd58620 5231 p->prio = rt_effective_prio(p, p->prio);
383afd09 5232
aab03e05
DF
5233 if (dl_prio(p->prio))
5234 p->sched_class = &dl_sched_class;
5235 else if (rt_prio(p->prio))
ffd44db5
PZ
5236 p->sched_class = &rt_sched_class;
5237 else
5238 p->sched_class = &fair_sched_class;
1da177e4 5239}
aab03e05 5240
c69e8d9c 5241/*
d1ccc66d 5242 * Check the target process has a UID that matches the current process's:
c69e8d9c
DH
5243 */
5244static bool check_same_owner(struct task_struct *p)
5245{
5246 const struct cred *cred = current_cred(), *pcred;
5247 bool match;
5248
5249 rcu_read_lock();
5250 pcred = __task_cred(p);
9c806aa0
EB
5251 match = (uid_eq(cred->euid, pcred->euid) ||
5252 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
5253 rcu_read_unlock();
5254 return match;
5255}
5256
d50dde5a
DF
5257static int __sched_setscheduler(struct task_struct *p,
5258 const struct sched_attr *attr,
dbc7f069 5259 bool user, bool pi)
1da177e4 5260{
383afd09
SR
5261 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
5262 MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65 5263 int retval, oldprio, oldpolicy = -1, queued, running;
0782e63b 5264 int new_effective_prio, policy = attr->sched_policy;
83ab0aa0 5265 const struct sched_class *prev_class;
565790d2 5266 struct callback_head *head;
eb580751 5267 struct rq_flags rf;
ca94c442 5268 int reset_on_fork;
7a57f32a 5269 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
eb580751 5270 struct rq *rq;
1da177e4 5271
896bbb25
SRV
5272 /* The pi code expects interrupts enabled */
5273 BUG_ON(pi && in_interrupt());
1da177e4 5274recheck:
d1ccc66d 5275 /* Double check policy once rq lock held: */
ca94c442
LP
5276 if (policy < 0) {
5277 reset_on_fork = p->sched_reset_on_fork;
1da177e4 5278 policy = oldpolicy = p->policy;
ca94c442 5279 } else {
7479f3c9 5280 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442 5281
20f9cd2a 5282 if (!valid_policy(policy))
ca94c442
LP
5283 return -EINVAL;
5284 }
5285
794a56eb 5286 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7479f3c9
PZ
5287 return -EINVAL;
5288
1da177e4
LT
5289 /*
5290 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
5291 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5292 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4 5293 */
0bb040a4 5294 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
d50dde5a 5295 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
1da177e4 5296 return -EINVAL;
aab03e05
DF
5297 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
5298 (rt_policy(policy) != (attr->sched_priority != 0)))
1da177e4
LT
5299 return -EINVAL;
5300
37e4ab3f
OC
5301 /*
5302 * Allow unprivileged RT tasks to decrease priority:
5303 */
961ccddd 5304 if (user && !capable(CAP_SYS_NICE)) {
d50dde5a 5305 if (fair_policy(policy)) {
d0ea0268 5306 if (attr->sched_nice < task_nice(p) &&
eaad4513 5307 !can_nice(p, attr->sched_nice))
d50dde5a
DF
5308 return -EPERM;
5309 }
5310
e05606d3 5311 if (rt_policy(policy)) {
a44702e8
ON
5312 unsigned long rlim_rtprio =
5313 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909 5314
d1ccc66d 5315 /* Can't set/change the rt policy: */
8dc3e909
ON
5316 if (policy != p->policy && !rlim_rtprio)
5317 return -EPERM;
5318
d1ccc66d 5319 /* Can't increase priority: */
d50dde5a
DF
5320 if (attr->sched_priority > p->rt_priority &&
5321 attr->sched_priority > rlim_rtprio)
8dc3e909
ON
5322 return -EPERM;
5323 }
c02aa73b 5324
d44753b8
JL
5325 /*
5326 * Can't set/change SCHED_DEADLINE policy at all for now
5327 * (safest behavior); in the future we would like to allow
5328 * unprivileged DL tasks to increase their relative deadline
5329 * or reduce their runtime (both ways reducing utilization)
5330 */
5331 if (dl_policy(policy))
5332 return -EPERM;
5333
dd41f596 5334 /*
c02aa73b
DH
5335 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5336 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 5337 */
1da1843f 5338 if (task_has_idle_policy(p) && !idle_policy(policy)) {
d0ea0268 5339 if (!can_nice(p, task_nice(p)))
c02aa73b
DH
5340 return -EPERM;
5341 }
5fe1d75f 5342
d1ccc66d 5343 /* Can't change other user's priorities: */
c69e8d9c 5344 if (!check_same_owner(p))
37e4ab3f 5345 return -EPERM;
ca94c442 5346
d1ccc66d 5347 /* Normal users shall not reset the sched_reset_on_fork flag: */
ca94c442
LP
5348 if (p->sched_reset_on_fork && !reset_on_fork)
5349 return -EPERM;
37e4ab3f 5350 }
1da177e4 5351
725aad24 5352 if (user) {
794a56eb
JL
5353 if (attr->sched_flags & SCHED_FLAG_SUGOV)
5354 return -EINVAL;
5355
b0ae1981 5356 retval = security_task_setscheduler(p);
725aad24
JF
5357 if (retval)
5358 return retval;
5359 }
5360
a509a7cd
PB
5361 /* Update task specific "requested" clamps */
5362 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
5363 retval = uclamp_validate(p, attr);
5364 if (retval)
5365 return retval;
5366 }
5367
710da3c8
JL
5368 if (pi)
5369 cpuset_read_lock();
5370
b29739f9 5371 /*
d1ccc66d 5372 * Make sure no PI-waiters arrive (or leave) while we are
b29739f9 5373 * changing the priority of the task:
0122ec5b 5374 *
25985edc 5375 * To be able to change p->policy safely, the appropriate
1da177e4
LT
5376 * runqueue lock must be held.
5377 */
eb580751 5378 rq = task_rq_lock(p, &rf);
80f5c1b8 5379 update_rq_clock(rq);
dc61b1d6 5380
34f971f6 5381 /*
d1ccc66d 5382 * Changing the policy of the stop threads its a very bad idea:
34f971f6
PZ
5383 */
5384 if (p == rq->stop) {
4b211f2b
MP
5385 retval = -EINVAL;
5386 goto unlock;
34f971f6
PZ
5387 }
5388
a51e9198 5389 /*
d6b1e911
TG
5390 * If not changing anything there's no need to proceed further,
5391 * but store a possible modification of reset_on_fork.
a51e9198 5392 */
d50dde5a 5393 if (unlikely(policy == p->policy)) {
d0ea0268 5394 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a
DF
5395 goto change;
5396 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
5397 goto change;
75381608 5398 if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05 5399 goto change;
a509a7cd
PB
5400 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
5401 goto change;
d50dde5a 5402
d6b1e911 5403 p->sched_reset_on_fork = reset_on_fork;
4b211f2b
MP
5404 retval = 0;
5405 goto unlock;
a51e9198 5406 }
d50dde5a 5407change:
a51e9198 5408
dc61b1d6 5409 if (user) {
332ac17e 5410#ifdef CONFIG_RT_GROUP_SCHED
dc61b1d6
PZ
5411 /*
5412 * Do not allow realtime tasks into groups that have no runtime
5413 * assigned.
5414 */
5415 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
5416 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5417 !task_group_is_autogroup(task_group(p))) {
4b211f2b
MP
5418 retval = -EPERM;
5419 goto unlock;
dc61b1d6 5420 }
dc61b1d6 5421#endif
332ac17e 5422#ifdef CONFIG_SMP
794a56eb
JL
5423 if (dl_bandwidth_enabled() && dl_policy(policy) &&
5424 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
332ac17e 5425 cpumask_t *span = rq->rd->span;
332ac17e
DF
5426
5427 /*
5428 * Don't allow tasks with an affinity mask smaller than
5429 * the entire root_domain to become SCHED_DEADLINE. We
5430 * will also fail if there's no bandwidth available.
5431 */
3bd37062 5432 if (!cpumask_subset(span, p->cpus_ptr) ||
e4099a5e 5433 rq->rd->dl_bw.bw == 0) {
4b211f2b
MP
5434 retval = -EPERM;
5435 goto unlock;
332ac17e
DF
5436 }
5437 }
5438#endif
5439 }
dc61b1d6 5440
d1ccc66d 5441 /* Re-check policy now with rq lock held: */
1da177e4
LT
5442 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5443 policy = oldpolicy = -1;
eb580751 5444 task_rq_unlock(rq, p, &rf);
710da3c8
JL
5445 if (pi)
5446 cpuset_read_unlock();
1da177e4
LT
5447 goto recheck;
5448 }
332ac17e
DF
5449
5450 /*
5451 * If setscheduling to SCHED_DEADLINE (or changing the parameters
5452 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
5453 * is available.
5454 */
06a76fe0 5455 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
4b211f2b
MP
5456 retval = -EBUSY;
5457 goto unlock;
332ac17e
DF
5458 }
5459
c365c292
TG
5460 p->sched_reset_on_fork = reset_on_fork;
5461 oldprio = p->prio;
5462
dbc7f069
PZ
5463 if (pi) {
5464 /*
5465 * Take priority boosted tasks into account. If the new
5466 * effective priority is unchanged, we just store the new
5467 * normal parameters and do not touch the scheduler class and
5468 * the runqueue. This will be done when the task deboost
5469 * itself.
5470 */
acd58620 5471 new_effective_prio = rt_effective_prio(p, newprio);
ff77e468
PZ
5472 if (new_effective_prio == oldprio)
5473 queue_flags &= ~DEQUEUE_MOVE;
c365c292
TG
5474 }
5475
da0c1e65 5476 queued = task_on_rq_queued(p);
051a1d1a 5477 running = task_current(rq, p);
da0c1e65 5478 if (queued)
ff77e468 5479 dequeue_task(rq, p, queue_flags);
0e1f3483 5480 if (running)
f3cd1c4e 5481 put_prev_task(rq, p);
f6b53205 5482
83ab0aa0 5483 prev_class = p->sched_class;
a509a7cd 5484
dbc7f069 5485 __setscheduler(rq, p, attr, pi);
a509a7cd 5486 __setscheduler_uclamp(p, attr);
f6b53205 5487
da0c1e65 5488 if (queued) {
81a44c54
TG
5489 /*
5490 * We enqueue to tail when the priority of a task is
5491 * increased (user space view).
5492 */
ff77e468
PZ
5493 if (oldprio < p->prio)
5494 queue_flags |= ENQUEUE_HEAD;
1de64443 5495
ff77e468 5496 enqueue_task(rq, p, queue_flags);
81a44c54 5497 }
a399d233 5498 if (running)
03b7fad1 5499 set_next_task(rq, p);
cb469845 5500
da7a735e 5501 check_class_changed(rq, p, prev_class, oldprio);
d1ccc66d
IM
5502
5503 /* Avoid rq from going away on us: */
5504 preempt_disable();
565790d2 5505 head = splice_balance_callbacks(rq);
eb580751 5506 task_rq_unlock(rq, p, &rf);
b29739f9 5507
710da3c8
JL
5508 if (pi) {
5509 cpuset_read_unlock();
dbc7f069 5510 rt_mutex_adjust_pi(p);
710da3c8 5511 }
95e02ca9 5512
d1ccc66d 5513 /* Run balance callbacks after we've adjusted the PI chain: */
565790d2 5514 balance_callbacks(rq, head);
4c9a4bc8 5515 preempt_enable();
95e02ca9 5516
1da177e4 5517 return 0;
4b211f2b
MP
5518
5519unlock:
5520 task_rq_unlock(rq, p, &rf);
710da3c8
JL
5521 if (pi)
5522 cpuset_read_unlock();
4b211f2b 5523 return retval;
1da177e4 5524}
961ccddd 5525
7479f3c9
PZ
5526static int _sched_setscheduler(struct task_struct *p, int policy,
5527 const struct sched_param *param, bool check)
5528{
5529 struct sched_attr attr = {
5530 .sched_policy = policy,
5531 .sched_priority = param->sched_priority,
5532 .sched_nice = PRIO_TO_NICE(p->static_prio),
5533 };
5534
c13db6b1
SR
5535 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
5536 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7479f3c9
PZ
5537 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
5538 policy &= ~SCHED_RESET_ON_FORK;
5539 attr.sched_policy = policy;
5540 }
5541
dbc7f069 5542 return __sched_setscheduler(p, &attr, check, true);
7479f3c9 5543}
961ccddd
RR
5544/**
5545 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5546 * @p: the task in question.
5547 * @policy: new policy.
5548 * @param: structure containing the new RT priority.
5549 *
7318d4cc
PZ
5550 * Use sched_set_fifo(), read its comment.
5551 *
e69f6186
YB
5552 * Return: 0 on success. An error code otherwise.
5553 *
961ccddd
RR
5554 * NOTE that the task may be already dead.
5555 */
5556int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 5557 const struct sched_param *param)
961ccddd 5558{
7479f3c9 5559 return _sched_setscheduler(p, policy, param, true);
961ccddd 5560}
1da177e4 5561
d50dde5a
DF
5562int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
5563{
dbc7f069 5564 return __sched_setscheduler(p, attr, true, true);
d50dde5a 5565}
d50dde5a 5566
794a56eb
JL
5567int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
5568{
5569 return __sched_setscheduler(p, attr, false, true);
5570}
5571
961ccddd
RR
5572/**
5573 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5574 * @p: the task in question.
5575 * @policy: new policy.
5576 * @param: structure containing the new RT priority.
5577 *
5578 * Just like sched_setscheduler, only don't bother checking if the
5579 * current context has permission. For example, this is needed in
5580 * stop_machine(): we create temporary high priority worker threads,
5581 * but our caller might not have that capability.
e69f6186
YB
5582 *
5583 * Return: 0 on success. An error code otherwise.
961ccddd
RR
5584 */
5585int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 5586 const struct sched_param *param)
961ccddd 5587{
7479f3c9 5588 return _sched_setscheduler(p, policy, param, false);
961ccddd
RR
5589}
5590
7318d4cc
PZ
5591/*
5592 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
5593 * incapable of resource management, which is the one thing an OS really should
5594 * be doing.
5595 *
5596 * This is of course the reason it is limited to privileged users only.
5597 *
5598 * Worse still; it is fundamentally impossible to compose static priority
5599 * workloads. You cannot take two correctly working static prio workloads
5600 * and smash them together and still expect them to work.
5601 *
5602 * For this reason 'all' FIFO tasks the kernel creates are basically at:
5603 *
5604 * MAX_RT_PRIO / 2
5605 *
5606 * The administrator _MUST_ configure the system, the kernel simply doesn't
5607 * know enough information to make a sensible choice.
5608 */
8b700983 5609void sched_set_fifo(struct task_struct *p)
7318d4cc
PZ
5610{
5611 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
8b700983 5612 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7318d4cc
PZ
5613}
5614EXPORT_SYMBOL_GPL(sched_set_fifo);
5615
5616/*
5617 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
5618 */
8b700983 5619void sched_set_fifo_low(struct task_struct *p)
7318d4cc
PZ
5620{
5621 struct sched_param sp = { .sched_priority = 1 };
8b700983 5622 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7318d4cc
PZ
5623}
5624EXPORT_SYMBOL_GPL(sched_set_fifo_low);
5625
8b700983 5626void sched_set_normal(struct task_struct *p, int nice)
7318d4cc
PZ
5627{
5628 struct sched_attr attr = {
5629 .sched_policy = SCHED_NORMAL,
5630 .sched_nice = nice,
5631 };
8b700983 5632 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
7318d4cc
PZ
5633}
5634EXPORT_SYMBOL_GPL(sched_set_normal);
961ccddd 5635
95cdf3b7
IM
5636static int
5637do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 5638{
1da177e4
LT
5639 struct sched_param lparam;
5640 struct task_struct *p;
36c8b586 5641 int retval;
1da177e4
LT
5642
5643 if (!param || pid < 0)
5644 return -EINVAL;
5645 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5646 return -EFAULT;
5fe1d75f
ON
5647
5648 rcu_read_lock();
5649 retval = -ESRCH;
1da177e4 5650 p = find_process_by_pid(pid);
710da3c8
JL
5651 if (likely(p))
5652 get_task_struct(p);
5fe1d75f 5653 rcu_read_unlock();
36c8b586 5654
710da3c8
JL
5655 if (likely(p)) {
5656 retval = sched_setscheduler(p, policy, &lparam);
5657 put_task_struct(p);
5658 }
5659
1da177e4
LT
5660 return retval;
5661}
5662
d50dde5a
DF
5663/*
5664 * Mimics kernel/events/core.c perf_copy_attr().
5665 */
d1ccc66d 5666static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
d50dde5a
DF
5667{
5668 u32 size;
5669 int ret;
5670
d1ccc66d 5671 /* Zero the full structure, so that a short copy will be nice: */
d50dde5a
DF
5672 memset(attr, 0, sizeof(*attr));
5673
5674 ret = get_user(size, &uattr->size);
5675 if (ret)
5676 return ret;
5677
d1ccc66d
IM
5678 /* ABI compatibility quirk: */
5679 if (!size)
d50dde5a 5680 size = SCHED_ATTR_SIZE_VER0;
dff3a85f 5681 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
d50dde5a
DF
5682 goto err_size;
5683
dff3a85f
AS
5684 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
5685 if (ret) {
5686 if (ret == -E2BIG)
5687 goto err_size;
5688 return ret;
d50dde5a
DF
5689 }
5690
a509a7cd
PB
5691 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
5692 size < SCHED_ATTR_SIZE_VER1)
5693 return -EINVAL;
5694
d50dde5a 5695 /*
d1ccc66d 5696 * XXX: Do we want to be lenient like existing syscalls; or do we want
d50dde5a
DF
5697 * to be strict and return an error on out-of-bounds values?
5698 */
75e45d51 5699 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
d50dde5a 5700
e78c7bca 5701 return 0;
d50dde5a
DF
5702
5703err_size:
5704 put_user(sizeof(*attr), &uattr->size);
e78c7bca 5705 return -E2BIG;
d50dde5a
DF
5706}
5707
1da177e4
LT
5708/**
5709 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5710 * @pid: the pid in question.
5711 * @policy: new policy.
5712 * @param: structure containing the new RT priority.
e69f6186
YB
5713 *
5714 * Return: 0 on success. An error code otherwise.
1da177e4 5715 */
d1ccc66d 5716SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
1da177e4 5717{
c21761f1
JB
5718 if (policy < 0)
5719 return -EINVAL;
5720
1da177e4
LT
5721 return do_sched_setscheduler(pid, policy, param);
5722}
5723
5724/**
5725 * sys_sched_setparam - set/change the RT priority of a thread
5726 * @pid: the pid in question.
5727 * @param: structure containing the new RT priority.
e69f6186
YB
5728 *
5729 * Return: 0 on success. An error code otherwise.
1da177e4 5730 */
5add95d4 5731SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 5732{
c13db6b1 5733 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1da177e4
LT
5734}
5735
d50dde5a
DF
5736/**
5737 * sys_sched_setattr - same as above, but with extended sched_attr
5738 * @pid: the pid in question.
5778fccf 5739 * @uattr: structure containing the extended parameters.
db66d756 5740 * @flags: for future extension.
d50dde5a 5741 */
6d35ab48
PZ
5742SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
5743 unsigned int, flags)
d50dde5a
DF
5744{
5745 struct sched_attr attr;
5746 struct task_struct *p;
5747 int retval;
5748
6d35ab48 5749 if (!uattr || pid < 0 || flags)
d50dde5a
DF
5750 return -EINVAL;
5751
143cf23d
MK
5752 retval = sched_copy_attr(uattr, &attr);
5753 if (retval)
5754 return retval;
d50dde5a 5755
b14ed2c2 5756 if ((int)attr.sched_policy < 0)
dbdb2275 5757 return -EINVAL;
1d6362fa
PB
5758 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
5759 attr.sched_policy = SETPARAM_POLICY;
d50dde5a
DF
5760
5761 rcu_read_lock();
5762 retval = -ESRCH;
5763 p = find_process_by_pid(pid);
a509a7cd
PB
5764 if (likely(p))
5765 get_task_struct(p);
d50dde5a
DF
5766 rcu_read_unlock();
5767
a509a7cd
PB
5768 if (likely(p)) {
5769 retval = sched_setattr(p, &attr);
5770 put_task_struct(p);
5771 }
5772
d50dde5a
DF
5773 return retval;
5774}
5775
1da177e4
LT
5776/**
5777 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5778 * @pid: the pid in question.
e69f6186
YB
5779 *
5780 * Return: On success, the policy of the thread. Otherwise, a negative error
5781 * code.
1da177e4 5782 */
5add95d4 5783SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 5784{
36c8b586 5785 struct task_struct *p;
3a5c359a 5786 int retval;
1da177e4
LT
5787
5788 if (pid < 0)
3a5c359a 5789 return -EINVAL;
1da177e4
LT
5790
5791 retval = -ESRCH;
5fe85be0 5792 rcu_read_lock();
1da177e4
LT
5793 p = find_process_by_pid(pid);
5794 if (p) {
5795 retval = security_task_getscheduler(p);
5796 if (!retval)
ca94c442
LP
5797 retval = p->policy
5798 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 5799 }
5fe85be0 5800 rcu_read_unlock();
1da177e4
LT
5801 return retval;
5802}
5803
5804/**
ca94c442 5805 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
5806 * @pid: the pid in question.
5807 * @param: structure containing the RT priority.
e69f6186
YB
5808 *
5809 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
5810 * code.
1da177e4 5811 */
5add95d4 5812SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4 5813{
ce5f7f82 5814 struct sched_param lp = { .sched_priority = 0 };
36c8b586 5815 struct task_struct *p;
3a5c359a 5816 int retval;
1da177e4
LT
5817
5818 if (!param || pid < 0)
3a5c359a 5819 return -EINVAL;
1da177e4 5820
5fe85be0 5821 rcu_read_lock();
1da177e4
LT
5822 p = find_process_by_pid(pid);
5823 retval = -ESRCH;
5824 if (!p)
5825 goto out_unlock;
5826
5827 retval = security_task_getscheduler(p);
5828 if (retval)
5829 goto out_unlock;
5830
ce5f7f82
PZ
5831 if (task_has_rt_policy(p))
5832 lp.sched_priority = p->rt_priority;
5fe85be0 5833 rcu_read_unlock();
1da177e4
LT
5834
5835 /*
5836 * This one might sleep, we cannot do it with a spinlock held ...
5837 */
5838 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5839
1da177e4
LT
5840 return retval;
5841
5842out_unlock:
5fe85be0 5843 rcu_read_unlock();
1da177e4
LT
5844 return retval;
5845}
5846
1251201c
IM
5847/*
5848 * Copy the kernel size attribute structure (which might be larger
5849 * than what user-space knows about) to user-space.
5850 *
5851 * Note that all cases are valid: user-space buffer can be larger or
5852 * smaller than the kernel-space buffer. The usual case is that both
5853 * have the same size.
5854 */
5855static int
5856sched_attr_copy_to_user(struct sched_attr __user *uattr,
5857 struct sched_attr *kattr,
5858 unsigned int usize)
d50dde5a 5859{
1251201c 5860 unsigned int ksize = sizeof(*kattr);
d50dde5a 5861
96d4f267 5862 if (!access_ok(uattr, usize))
d50dde5a
DF
5863 return -EFAULT;
5864
5865 /*
1251201c
IM
5866 * sched_getattr() ABI forwards and backwards compatibility:
5867 *
5868 * If usize == ksize then we just copy everything to user-space and all is good.
5869 *
5870 * If usize < ksize then we only copy as much as user-space has space for,
5871 * this keeps ABI compatibility as well. We skip the rest.
5872 *
5873 * If usize > ksize then user-space is using a newer version of the ABI,
5874 * which part the kernel doesn't know about. Just ignore it - tooling can
5875 * detect the kernel's knowledge of attributes from the attr->size value
5876 * which is set to ksize in this case.
d50dde5a 5877 */
1251201c 5878 kattr->size = min(usize, ksize);
d50dde5a 5879
1251201c 5880 if (copy_to_user(uattr, kattr, kattr->size))
d50dde5a
DF
5881 return -EFAULT;
5882
22400674 5883 return 0;
d50dde5a
DF
5884}
5885
5886/**
aab03e05 5887 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
d50dde5a 5888 * @pid: the pid in question.
5778fccf 5889 * @uattr: structure containing the extended parameters.
dff3a85f 5890 * @usize: sizeof(attr) for fwd/bwd comp.
db66d756 5891 * @flags: for future extension.
d50dde5a 5892 */
6d35ab48 5893SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1251201c 5894 unsigned int, usize, unsigned int, flags)
d50dde5a 5895{
1251201c 5896 struct sched_attr kattr = { };
d50dde5a
DF
5897 struct task_struct *p;
5898 int retval;
5899
1251201c
IM
5900 if (!uattr || pid < 0 || usize > PAGE_SIZE ||
5901 usize < SCHED_ATTR_SIZE_VER0 || flags)
d50dde5a
DF
5902 return -EINVAL;
5903
5904 rcu_read_lock();
5905 p = find_process_by_pid(pid);
5906 retval = -ESRCH;
5907 if (!p)
5908 goto out_unlock;
5909
5910 retval = security_task_getscheduler(p);
5911 if (retval)
5912 goto out_unlock;
5913
1251201c 5914 kattr.sched_policy = p->policy;
7479f3c9 5915 if (p->sched_reset_on_fork)
1251201c 5916 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
aab03e05 5917 if (task_has_dl_policy(p))
1251201c 5918 __getparam_dl(p, &kattr);
aab03e05 5919 else if (task_has_rt_policy(p))
1251201c 5920 kattr.sched_priority = p->rt_priority;
d50dde5a 5921 else
1251201c 5922 kattr.sched_nice = task_nice(p);
d50dde5a 5923
a509a7cd 5924#ifdef CONFIG_UCLAMP_TASK
13685c4a
QY
5925 /*
5926 * This could race with another potential updater, but this is fine
5927 * because it'll correctly read the old or the new value. We don't need
5928 * to guarantee who wins the race as long as it doesn't return garbage.
5929 */
1251201c
IM
5930 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
5931 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
a509a7cd
PB
5932#endif
5933
d50dde5a
DF
5934 rcu_read_unlock();
5935
1251201c 5936 return sched_attr_copy_to_user(uattr, &kattr, usize);
d50dde5a
DF
5937
5938out_unlock:
5939 rcu_read_unlock();
5940 return retval;
5941}
5942
96f874e2 5943long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 5944{
5a16f3d3 5945 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
5946 struct task_struct *p;
5947 int retval;
1da177e4 5948
23f5d142 5949 rcu_read_lock();
1da177e4
LT
5950
5951 p = find_process_by_pid(pid);
5952 if (!p) {
23f5d142 5953 rcu_read_unlock();
1da177e4
LT
5954 return -ESRCH;
5955 }
5956
23f5d142 5957 /* Prevent p going away */
1da177e4 5958 get_task_struct(p);
23f5d142 5959 rcu_read_unlock();
1da177e4 5960
14a40ffc
TH
5961 if (p->flags & PF_NO_SETAFFINITY) {
5962 retval = -EINVAL;
5963 goto out_put_task;
5964 }
5a16f3d3
RR
5965 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5966 retval = -ENOMEM;
5967 goto out_put_task;
5968 }
5969 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5970 retval = -ENOMEM;
5971 goto out_free_cpus_allowed;
5972 }
1da177e4 5973 retval = -EPERM;
4c44aaaf
EB
5974 if (!check_same_owner(p)) {
5975 rcu_read_lock();
5976 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
5977 rcu_read_unlock();
16303ab2 5978 goto out_free_new_mask;
4c44aaaf
EB
5979 }
5980 rcu_read_unlock();
5981 }
1da177e4 5982
b0ae1981 5983 retval = security_task_setscheduler(p);
e7834f8f 5984 if (retval)
16303ab2 5985 goto out_free_new_mask;
e7834f8f 5986
e4099a5e
PZ
5987
5988 cpuset_cpus_allowed(p, cpus_allowed);
5989 cpumask_and(new_mask, in_mask, cpus_allowed);
5990
332ac17e
DF
5991 /*
5992 * Since bandwidth control happens on root_domain basis,
5993 * if admission test is enabled, we only admit -deadline
5994 * tasks allowed to run on all the CPUs in the task's
5995 * root_domain.
5996 */
5997#ifdef CONFIG_SMP
f1e3a093
KT
5998 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
5999 rcu_read_lock();
6000 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
332ac17e 6001 retval = -EBUSY;
f1e3a093 6002 rcu_read_unlock();
16303ab2 6003 goto out_free_new_mask;
332ac17e 6004 }
f1e3a093 6005 rcu_read_unlock();
332ac17e
DF
6006 }
6007#endif
49246274 6008again:
25834c73 6009 retval = __set_cpus_allowed_ptr(p, new_mask, true);
1da177e4 6010
8707d8b8 6011 if (!retval) {
5a16f3d3
RR
6012 cpuset_cpus_allowed(p, cpus_allowed);
6013 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
6014 /*
6015 * We must have raced with a concurrent cpuset
6016 * update. Just reset the cpus_allowed to the
6017 * cpuset's cpus_allowed
6018 */
5a16f3d3 6019 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
6020 goto again;
6021 }
6022 }
16303ab2 6023out_free_new_mask:
5a16f3d3
RR
6024 free_cpumask_var(new_mask);
6025out_free_cpus_allowed:
6026 free_cpumask_var(cpus_allowed);
6027out_put_task:
1da177e4 6028 put_task_struct(p);
1da177e4
LT
6029 return retval;
6030}
6031
6032static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 6033 struct cpumask *new_mask)
1da177e4 6034{
96f874e2
RR
6035 if (len < cpumask_size())
6036 cpumask_clear(new_mask);
6037 else if (len > cpumask_size())
6038 len = cpumask_size();
6039
1da177e4
LT
6040 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6041}
6042
6043/**
d1ccc66d 6044 * sys_sched_setaffinity - set the CPU affinity of a process
1da177e4
LT
6045 * @pid: pid of the process
6046 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 6047 * @user_mask_ptr: user-space pointer to the new CPU mask
e69f6186
YB
6048 *
6049 * Return: 0 on success. An error code otherwise.
1da177e4 6050 */
5add95d4
HC
6051SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6052 unsigned long __user *, user_mask_ptr)
1da177e4 6053{
5a16f3d3 6054 cpumask_var_t new_mask;
1da177e4
LT
6055 int retval;
6056
5a16f3d3
RR
6057 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6058 return -ENOMEM;
1da177e4 6059
5a16f3d3
RR
6060 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6061 if (retval == 0)
6062 retval = sched_setaffinity(pid, new_mask);
6063 free_cpumask_var(new_mask);
6064 return retval;
1da177e4
LT
6065}
6066
96f874e2 6067long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 6068{
36c8b586 6069 struct task_struct *p;
31605683 6070 unsigned long flags;
1da177e4 6071 int retval;
1da177e4 6072
23f5d142 6073 rcu_read_lock();
1da177e4
LT
6074
6075 retval = -ESRCH;
6076 p = find_process_by_pid(pid);
6077 if (!p)
6078 goto out_unlock;
6079
e7834f8f
DQ
6080 retval = security_task_getscheduler(p);
6081 if (retval)
6082 goto out_unlock;
6083
013fdb80 6084 raw_spin_lock_irqsave(&p->pi_lock, flags);
3bd37062 6085 cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
013fdb80 6086 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
6087
6088out_unlock:
23f5d142 6089 rcu_read_unlock();
1da177e4 6090
9531b62f 6091 return retval;
1da177e4
LT
6092}
6093
6094/**
d1ccc66d 6095 * sys_sched_getaffinity - get the CPU affinity of a process
1da177e4
LT
6096 * @pid: pid of the process
6097 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
d1ccc66d 6098 * @user_mask_ptr: user-space pointer to hold the current CPU mask
e69f6186 6099 *
599b4840
ZW
6100 * Return: size of CPU mask copied to user_mask_ptr on success. An
6101 * error code otherwise.
1da177e4 6102 */
5add95d4
HC
6103SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6104 unsigned long __user *, user_mask_ptr)
1da177e4
LT
6105{
6106 int ret;
f17c8607 6107 cpumask_var_t mask;
1da177e4 6108
84fba5ec 6109 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
6110 return -EINVAL;
6111 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
6112 return -EINVAL;
6113
f17c8607
RR
6114 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6115 return -ENOMEM;
1da177e4 6116
f17c8607
RR
6117 ret = sched_getaffinity(pid, mask);
6118 if (ret == 0) {
4de373a1 6119 unsigned int retlen = min(len, cpumask_size());
cd3d8031
KM
6120
6121 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
6122 ret = -EFAULT;
6123 else
cd3d8031 6124 ret = retlen;
f17c8607
RR
6125 }
6126 free_cpumask_var(mask);
1da177e4 6127
f17c8607 6128 return ret;
1da177e4
LT
6129}
6130
6131/**
6132 * sys_sched_yield - yield the current processor to other threads.
6133 *
dd41f596
IM
6134 * This function yields the current CPU to other tasks. If there are no
6135 * other threads running on this CPU then this function will return.
e69f6186
YB
6136 *
6137 * Return: 0.
1da177e4 6138 */
7d4dd4f1 6139static void do_sched_yield(void)
1da177e4 6140{
8a8c69c3
PZ
6141 struct rq_flags rf;
6142 struct rq *rq;
6143
246b3b33 6144 rq = this_rq_lock_irq(&rf);
1da177e4 6145
ae92882e 6146 schedstat_inc(rq->yld_count);
4530d7ab 6147 current->sched_class->yield_task(rq);
1da177e4
LT
6148
6149 /*
6150 * Since we are going to call schedule() anyway, there's
6151 * no need to preempt or enable interrupts:
6152 */
8a8c69c3
PZ
6153 preempt_disable();
6154 rq_unlock(rq, &rf);
ba74c144 6155 sched_preempt_enable_no_resched();
1da177e4
LT
6156
6157 schedule();
7d4dd4f1 6158}
1da177e4 6159
7d4dd4f1
DB
6160SYSCALL_DEFINE0(sched_yield)
6161{
6162 do_sched_yield();
1da177e4
LT
6163 return 0;
6164}
6165
c1a280b6 6166#ifndef CONFIG_PREEMPTION
02b67cc3 6167int __sched _cond_resched(void)
1da177e4 6168{
fe32d3cd 6169 if (should_resched(0)) {
a18b5d01 6170 preempt_schedule_common();
1da177e4
LT
6171 return 1;
6172 }
f79c3ad6 6173 rcu_all_qs();
1da177e4
LT
6174 return 0;
6175}
02b67cc3 6176EXPORT_SYMBOL(_cond_resched);
35a773a0 6177#endif
1da177e4
LT
6178
6179/*
613afbf8 6180 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
6181 * call schedule, and on return reacquire the lock.
6182 *
c1a280b6 6183 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
1da177e4
LT
6184 * operations here to prevent schedule() from being called twice (once via
6185 * spin_unlock(), once by hand).
6186 */
613afbf8 6187int __cond_resched_lock(spinlock_t *lock)
1da177e4 6188{
fe32d3cd 6189 int resched = should_resched(PREEMPT_LOCK_OFFSET);
6df3cecb
JK
6190 int ret = 0;
6191
f607c668
PZ
6192 lockdep_assert_held(lock);
6193
4a81e832 6194 if (spin_needbreak(lock) || resched) {
1da177e4 6195 spin_unlock(lock);
d86ee480 6196 if (resched)
a18b5d01 6197 preempt_schedule_common();
95c354fe
NP
6198 else
6199 cpu_relax();
6df3cecb 6200 ret = 1;
1da177e4 6201 spin_lock(lock);
1da177e4 6202 }
6df3cecb 6203 return ret;
1da177e4 6204}
613afbf8 6205EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 6206
1da177e4
LT
6207/**
6208 * yield - yield the current processor to other threads.
6209 *
8e3fabfd
PZ
6210 * Do not ever use this function, there's a 99% chance you're doing it wrong.
6211 *
6212 * The scheduler is at all times free to pick the calling task as the most
6213 * eligible task to run, if removing the yield() call from your code breaks
6214 * it, its already broken.
6215 *
6216 * Typical broken usage is:
6217 *
6218 * while (!event)
d1ccc66d 6219 * yield();
8e3fabfd
PZ
6220 *
6221 * where one assumes that yield() will let 'the other' process run that will
6222 * make event true. If the current task is a SCHED_FIFO task that will never
6223 * happen. Never use yield() as a progress guarantee!!
6224 *
6225 * If you want to use yield() to wait for something, use wait_event().
6226 * If you want to use yield() to be 'nice' for others, use cond_resched().
6227 * If you still want to use yield(), do not!
1da177e4
LT
6228 */
6229void __sched yield(void)
6230{
6231 set_current_state(TASK_RUNNING);
7d4dd4f1 6232 do_sched_yield();
1da177e4 6233}
1da177e4
LT
6234EXPORT_SYMBOL(yield);
6235
d95f4122
MG
6236/**
6237 * yield_to - yield the current processor to another thread in
6238 * your thread group, or accelerate that thread toward the
6239 * processor it's on.
16addf95
RD
6240 * @p: target task
6241 * @preempt: whether task preemption is allowed or not
d95f4122
MG
6242 *
6243 * It's the caller's job to ensure that the target task struct
6244 * can't go away on us before we can do any checks.
6245 *
e69f6186 6246 * Return:
7b270f60
PZ
6247 * true (>0) if we indeed boosted the target task.
6248 * false (0) if we failed to boost the target.
6249 * -ESRCH if there's no task to yield to.
d95f4122 6250 */
fa93384f 6251int __sched yield_to(struct task_struct *p, bool preempt)
d95f4122
MG
6252{
6253 struct task_struct *curr = current;
6254 struct rq *rq, *p_rq;
6255 unsigned long flags;
c3c18640 6256 int yielded = 0;
d95f4122
MG
6257
6258 local_irq_save(flags);
6259 rq = this_rq();
6260
6261again:
6262 p_rq = task_rq(p);
7b270f60
PZ
6263 /*
6264 * If we're the only runnable task on the rq and target rq also
6265 * has only one task, there's absolutely no point in yielding.
6266 */
6267 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
6268 yielded = -ESRCH;
6269 goto out_irq;
6270 }
6271
d95f4122 6272 double_rq_lock(rq, p_rq);
39e24d8f 6273 if (task_rq(p) != p_rq) {
d95f4122
MG
6274 double_rq_unlock(rq, p_rq);
6275 goto again;
6276 }
6277
6278 if (!curr->sched_class->yield_to_task)
7b270f60 6279 goto out_unlock;
d95f4122
MG
6280
6281 if (curr->sched_class != p->sched_class)
7b270f60 6282 goto out_unlock;
d95f4122
MG
6283
6284 if (task_running(p_rq, p) || p->state)
7b270f60 6285 goto out_unlock;
d95f4122 6286
0900acf2 6287 yielded = curr->sched_class->yield_to_task(rq, p);
6d1cafd8 6288 if (yielded) {
ae92882e 6289 schedstat_inc(rq->yld_count);
6d1cafd8
VP
6290 /*
6291 * Make p's CPU reschedule; pick_next_entity takes care of
6292 * fairness.
6293 */
6294 if (preempt && rq != p_rq)
8875125e 6295 resched_curr(p_rq);
6d1cafd8 6296 }
d95f4122 6297
7b270f60 6298out_unlock:
d95f4122 6299 double_rq_unlock(rq, p_rq);
7b270f60 6300out_irq:
d95f4122
MG
6301 local_irq_restore(flags);
6302
7b270f60 6303 if (yielded > 0)
d95f4122
MG
6304 schedule();
6305
6306 return yielded;
6307}
6308EXPORT_SYMBOL_GPL(yield_to);
6309
10ab5643
TH
6310int io_schedule_prepare(void)
6311{
6312 int old_iowait = current->in_iowait;
6313
6314 current->in_iowait = 1;
6315 blk_schedule_flush_plug(current);
6316
6317 return old_iowait;
6318}
6319
6320void io_schedule_finish(int token)
6321{
6322 current->in_iowait = token;
6323}
6324
1da177e4 6325/*
41a2d6cf 6326 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 6327 * that process accounting knows that this is a task in IO wait state.
1da177e4 6328 */
1da177e4
LT
6329long __sched io_schedule_timeout(long timeout)
6330{
10ab5643 6331 int token;
1da177e4
LT
6332 long ret;
6333
10ab5643 6334 token = io_schedule_prepare();
1da177e4 6335 ret = schedule_timeout(timeout);
10ab5643 6336 io_schedule_finish(token);
9cff8ade 6337
1da177e4
LT
6338 return ret;
6339}
9cff8ade 6340EXPORT_SYMBOL(io_schedule_timeout);
1da177e4 6341
e3b929b0 6342void __sched io_schedule(void)
10ab5643
TH
6343{
6344 int token;
6345
6346 token = io_schedule_prepare();
6347 schedule();
6348 io_schedule_finish(token);
6349}
6350EXPORT_SYMBOL(io_schedule);
6351
1da177e4
LT
6352/**
6353 * sys_sched_get_priority_max - return maximum RT priority.
6354 * @policy: scheduling class.
6355 *
e69f6186
YB
6356 * Return: On success, this syscall returns the maximum
6357 * rt_priority that can be used by a given scheduling class.
6358 * On failure, a negative error code is returned.
1da177e4 6359 */
5add95d4 6360SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
6361{
6362 int ret = -EINVAL;
6363
6364 switch (policy) {
6365 case SCHED_FIFO:
6366 case SCHED_RR:
6367 ret = MAX_USER_RT_PRIO-1;
6368 break;
aab03e05 6369 case SCHED_DEADLINE:
1da177e4 6370 case SCHED_NORMAL:
b0a9499c 6371 case SCHED_BATCH:
dd41f596 6372 case SCHED_IDLE:
1da177e4
LT
6373 ret = 0;
6374 break;
6375 }
6376 return ret;
6377}
6378
6379/**
6380 * sys_sched_get_priority_min - return minimum RT priority.
6381 * @policy: scheduling class.
6382 *
e69f6186
YB
6383 * Return: On success, this syscall returns the minimum
6384 * rt_priority that can be used by a given scheduling class.
6385 * On failure, a negative error code is returned.
1da177e4 6386 */
5add95d4 6387SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
6388{
6389 int ret = -EINVAL;
6390
6391 switch (policy) {
6392 case SCHED_FIFO:
6393 case SCHED_RR:
6394 ret = 1;
6395 break;
aab03e05 6396 case SCHED_DEADLINE:
1da177e4 6397 case SCHED_NORMAL:
b0a9499c 6398 case SCHED_BATCH:
dd41f596 6399 case SCHED_IDLE:
1da177e4
LT
6400 ret = 0;
6401 }
6402 return ret;
6403}
6404
abca5fc5 6405static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1da177e4 6406{
36c8b586 6407 struct task_struct *p;
a4ec24b4 6408 unsigned int time_slice;
eb580751 6409 struct rq_flags rf;
dba091b9 6410 struct rq *rq;
3a5c359a 6411 int retval;
1da177e4
LT
6412
6413 if (pid < 0)
3a5c359a 6414 return -EINVAL;
1da177e4
LT
6415
6416 retval = -ESRCH;
1a551ae7 6417 rcu_read_lock();
1da177e4
LT
6418 p = find_process_by_pid(pid);
6419 if (!p)
6420 goto out_unlock;
6421
6422 retval = security_task_getscheduler(p);
6423 if (retval)
6424 goto out_unlock;
6425
eb580751 6426 rq = task_rq_lock(p, &rf);
a57beec5
PZ
6427 time_slice = 0;
6428 if (p->sched_class->get_rr_interval)
6429 time_slice = p->sched_class->get_rr_interval(rq, p);
eb580751 6430 task_rq_unlock(rq, p, &rf);
a4ec24b4 6431
1a551ae7 6432 rcu_read_unlock();
abca5fc5
AV
6433 jiffies_to_timespec64(time_slice, t);
6434 return 0;
3a5c359a 6435
1da177e4 6436out_unlock:
1a551ae7 6437 rcu_read_unlock();
1da177e4
LT
6438 return retval;
6439}
6440
2064a5ab
RD
6441/**
6442 * sys_sched_rr_get_interval - return the default timeslice of a process.
6443 * @pid: pid of the process.
6444 * @interval: userspace pointer to the timeslice value.
6445 *
6446 * this syscall writes the default timeslice value of a given process
6447 * into the user-space timespec buffer. A value of '0' means infinity.
6448 *
6449 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
6450 * an error code.
6451 */
abca5fc5 6452SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
474b9c77 6453 struct __kernel_timespec __user *, interval)
abca5fc5
AV
6454{
6455 struct timespec64 t;
6456 int retval = sched_rr_get_interval(pid, &t);
6457
6458 if (retval == 0)
6459 retval = put_timespec64(&t, interval);
6460
6461 return retval;
6462}
6463
474b9c77 6464#ifdef CONFIG_COMPAT_32BIT_TIME
8dabe724
AB
6465SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
6466 struct old_timespec32 __user *, interval)
abca5fc5
AV
6467{
6468 struct timespec64 t;
6469 int retval = sched_rr_get_interval(pid, &t);
6470
6471 if (retval == 0)
9afc5eee 6472 retval = put_old_timespec32(&t, interval);
abca5fc5
AV
6473 return retval;
6474}
6475#endif
6476
82a1fcb9 6477void sched_show_task(struct task_struct *p)
1da177e4 6478{
1da177e4 6479 unsigned long free = 0;
4e79752c 6480 int ppid;
c930b2c0 6481
38200502
TH
6482 if (!try_get_task_stack(p))
6483 return;
20435d84 6484
cc172ff3 6485 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
20435d84
XX
6486
6487 if (p->state == TASK_RUNNING)
cc172ff3 6488 pr_cont(" running task ");
1da177e4 6489#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 6490 free = stack_not_used(p);
1da177e4 6491#endif
a90e984c 6492 ppid = 0;
4e79752c 6493 rcu_read_lock();
a90e984c
ON
6494 if (pid_alive(p))
6495 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 6496 rcu_read_unlock();
cc172ff3
LZ
6497 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
6498 free, task_pid_nr(p), ppid,
aa47b7e0 6499 (unsigned long)task_thread_info(p)->flags);
1da177e4 6500
3d1cb205 6501 print_worker_info(KERN_INFO, p);
a8b62fd0 6502 print_stop_info(KERN_INFO, p);
9cb8f069 6503 show_stack(p, NULL, KERN_INFO);
38200502 6504 put_task_stack(p);
1da177e4 6505}
0032f4e8 6506EXPORT_SYMBOL_GPL(sched_show_task);
1da177e4 6507
5d68cc95
PZ
6508static inline bool
6509state_filter_match(unsigned long state_filter, struct task_struct *p)
6510{
6511 /* no filter, everything matches */
6512 if (!state_filter)
6513 return true;
6514
6515 /* filter, but doesn't match */
6516 if (!(p->state & state_filter))
6517 return false;
6518
6519 /*
6520 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
6521 * TASK_KILLABLE).
6522 */
6523 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
6524 return false;
6525
6526 return true;
6527}
6528
6529
e59e2ae2 6530void show_state_filter(unsigned long state_filter)
1da177e4 6531{
36c8b586 6532 struct task_struct *g, *p;
1da177e4 6533
510f5acc 6534 rcu_read_lock();
5d07f420 6535 for_each_process_thread(g, p) {
1da177e4
LT
6536 /*
6537 * reset the NMI-timeout, listing all files on a slow
25985edc 6538 * console might take a lot of time:
57675cb9
AR
6539 * Also, reset softlockup watchdogs on all CPUs, because
6540 * another CPU might be blocked waiting for us to process
6541 * an IPI.
1da177e4
LT
6542 */
6543 touch_nmi_watchdog();
57675cb9 6544 touch_all_softlockup_watchdogs();
5d68cc95 6545 if (state_filter_match(state_filter, p))
82a1fcb9 6546 sched_show_task(p);
5d07f420 6547 }
1da177e4 6548
dd41f596 6549#ifdef CONFIG_SCHED_DEBUG
fb90a6e9
RV
6550 if (!state_filter)
6551 sysrq_sched_debug_show();
dd41f596 6552#endif
510f5acc 6553 rcu_read_unlock();
e59e2ae2
IM
6554 /*
6555 * Only show locks if all tasks are dumped:
6556 */
93335a21 6557 if (!state_filter)
e59e2ae2 6558 debug_show_all_locks();
1da177e4
LT
6559}
6560
f340c0d1
IM
6561/**
6562 * init_idle - set up an idle thread for a given CPU
6563 * @idle: task in question
d1ccc66d 6564 * @cpu: CPU the idle task belongs to
f340c0d1
IM
6565 *
6566 * NOTE: this function does not set the idle thread's NEED_RESCHED
6567 * flag, to make booting more robust.
6568 */
0db0628d 6569void init_idle(struct task_struct *idle, int cpu)
1da177e4 6570{
70b97a7f 6571 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
6572 unsigned long flags;
6573
ff51ff84
PZ
6574 __sched_fork(0, idle);
6575
25834c73
PZ
6576 raw_spin_lock_irqsave(&idle->pi_lock, flags);
6577 raw_spin_lock(&rq->lock);
5cbd54ef 6578
06b83b5f 6579 idle->state = TASK_RUNNING;
dd41f596 6580 idle->se.exec_start = sched_clock();
c1de45ca 6581 idle->flags |= PF_IDLE;
dd41f596 6582
d08b9f0c 6583 scs_task_reset(idle);
e1b77c92
MR
6584 kasan_unpoison_task_stack(idle);
6585
de9b8f5d
PZ
6586#ifdef CONFIG_SMP
6587 /*
6588 * Its possible that init_idle() gets called multiple times on a task,
6589 * in that case do_set_cpus_allowed() will not do the right thing.
6590 *
6591 * And since this is boot we can forgo the serialization.
6592 */
6593 set_cpus_allowed_common(idle, cpumask_of(cpu));
6594#endif
6506cf6c
PZ
6595 /*
6596 * We're having a chicken and egg problem, even though we are
d1ccc66d 6597 * holding rq->lock, the CPU isn't yet set to this CPU so the
6506cf6c
PZ
6598 * lockdep check in task_group() will fail.
6599 *
6600 * Similar case to sched_fork(). / Alternatively we could
6601 * use task_rq_lock() here and obtain the other rq->lock.
6602 *
6603 * Silence PROVE_RCU
6604 */
6605 rcu_read_lock();
dd41f596 6606 __set_task_cpu(idle, cpu);
6506cf6c 6607 rcu_read_unlock();
1da177e4 6608
5311a98f
EB
6609 rq->idle = idle;
6610 rcu_assign_pointer(rq->curr, idle);
da0c1e65 6611 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 6612#ifdef CONFIG_SMP
3ca7a440 6613 idle->on_cpu = 1;
4866cde0 6614#endif
25834c73
PZ
6615 raw_spin_unlock(&rq->lock);
6616 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
6617
6618 /* Set the preempt count _outside_ the spinlocks! */
01028747 6619 init_idle_preempt_count(idle, cpu);
55cd5340 6620
dd41f596
IM
6621 /*
6622 * The idle tasks have their own, simple scheduling class:
6623 */
6624 idle->sched_class = &idle_sched_class;
868baf07 6625 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 6626 vtime_init_idle(idle, cpu);
de9b8f5d 6627#ifdef CONFIG_SMP
f1c6f1a7
CE
6628 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
6629#endif
19978ca6
IM
6630}
6631
e1d4eeec
NP
6632#ifdef CONFIG_SMP
6633
f82f8042
JL
6634int cpuset_cpumask_can_shrink(const struct cpumask *cur,
6635 const struct cpumask *trial)
6636{
06a76fe0 6637 int ret = 1;
f82f8042 6638
bb2bc55a
MG
6639 if (!cpumask_weight(cur))
6640 return ret;
6641
06a76fe0 6642 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
f82f8042
JL
6643
6644 return ret;
6645}
6646
7f51412a
JL
6647int task_can_attach(struct task_struct *p,
6648 const struct cpumask *cs_cpus_allowed)
6649{
6650 int ret = 0;
6651
6652 /*
6653 * Kthreads which disallow setaffinity shouldn't be moved
d1ccc66d 6654 * to a new cpuset; we don't want to change their CPU
7f51412a
JL
6655 * affinity and isolating such threads by their set of
6656 * allowed nodes is unnecessary. Thus, cpusets are not
6657 * applicable for such threads. This prevents checking for
6658 * success of set_cpus_allowed_ptr() on all attached tasks
3bd37062 6659 * before cpus_mask may be changed.
7f51412a
JL
6660 */
6661 if (p->flags & PF_NO_SETAFFINITY) {
6662 ret = -EINVAL;
6663 goto out;
6664 }
6665
7f51412a 6666 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
06a76fe0
NP
6667 cs_cpus_allowed))
6668 ret = dl_task_can_attach(p, cs_cpus_allowed);
7f51412a 6669
7f51412a
JL
6670out:
6671 return ret;
6672}
6673
f2cb1360 6674bool sched_smp_initialized __read_mostly;
e26fbffd 6675
e6628d5b
MG
6676#ifdef CONFIG_NUMA_BALANCING
6677/* Migrate current task p to target_cpu */
6678int migrate_task_to(struct task_struct *p, int target_cpu)
6679{
6680 struct migration_arg arg = { p, target_cpu };
6681 int curr_cpu = task_cpu(p);
6682
6683 if (curr_cpu == target_cpu)
6684 return 0;
6685
3bd37062 6686 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
e6628d5b
MG
6687 return -EINVAL;
6688
6689 /* TODO: This is not properly updating schedstats */
6690
286549dc 6691 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
6692 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
6693}
0ec8aa00
PZ
6694
6695/*
6696 * Requeue a task on a given node and accurately track the number of NUMA
6697 * tasks on the runqueues
6698 */
6699void sched_setnuma(struct task_struct *p, int nid)
6700{
da0c1e65 6701 bool queued, running;
eb580751
PZ
6702 struct rq_flags rf;
6703 struct rq *rq;
0ec8aa00 6704
eb580751 6705 rq = task_rq_lock(p, &rf);
da0c1e65 6706 queued = task_on_rq_queued(p);
0ec8aa00
PZ
6707 running = task_current(rq, p);
6708
da0c1e65 6709 if (queued)
1de64443 6710 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 6711 if (running)
f3cd1c4e 6712 put_prev_task(rq, p);
0ec8aa00
PZ
6713
6714 p->numa_preferred_nid = nid;
0ec8aa00 6715
da0c1e65 6716 if (queued)
7134b3e9 6717 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 6718 if (running)
03b7fad1 6719 set_next_task(rq, p);
eb580751 6720 task_rq_unlock(rq, p, &rf);
0ec8aa00 6721}
5cc389bc 6722#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 6723
1da177e4 6724#ifdef CONFIG_HOTPLUG_CPU
054b9108 6725/*
d1ccc66d 6726 * Ensure that the idle task is using init_mm right before its CPU goes
48c5ccae 6727 * offline.
054b9108 6728 */
48c5ccae 6729void idle_task_exit(void)
1da177e4 6730{
48c5ccae 6731 struct mm_struct *mm = current->active_mm;
e76bd8d9 6732
48c5ccae 6733 BUG_ON(cpu_online(smp_processor_id()));
bf2c59fc 6734 BUG_ON(current != this_rq()->idle);
e76bd8d9 6735
a53efe5f 6736 if (mm != &init_mm) {
252d2a41 6737 switch_mm(mm, &init_mm, current);
a53efe5f
MS
6738 finish_arch_post_lock_switch();
6739 }
bf2c59fc
PZ
6740
6741 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
1da177e4
LT
6742}
6743
6744/*
5d180232
PZ
6745 * Since this CPU is going 'away' for a while, fold any nr_active delta
6746 * we might have. Assumes we're called after migrate_tasks() so that the
d60585c5
TG
6747 * nr_active count is stable. We need to take the teardown thread which
6748 * is calling this into account, so we hand in adjust = 1 to the load
6749 * calculation.
5d180232
PZ
6750 *
6751 * Also see the comment "Global load-average calculations".
1da177e4 6752 */
5d180232 6753static void calc_load_migrate(struct rq *rq)
1da177e4 6754{
d60585c5 6755 long delta = calc_load_fold_active(rq, 1);
5d180232
PZ
6756 if (delta)
6757 atomic_long_add(delta, &calc_load_tasks);
1da177e4
LT
6758}
6759
10e7071b 6760static struct task_struct *__pick_migrate_task(struct rq *rq)
3f1d2a31 6761{
10e7071b
PZ
6762 const struct sched_class *class;
6763 struct task_struct *next;
3f1d2a31 6764
10e7071b 6765 for_each_class(class) {
98c2f700 6766 next = class->pick_next_task(rq);
10e7071b 6767 if (next) {
6e2df058 6768 next->sched_class->put_prev_task(rq, next);
10e7071b
PZ
6769 return next;
6770 }
6771 }
3f1d2a31 6772
10e7071b
PZ
6773 /* The idle class should always have a runnable task */
6774 BUG();
6775}
3f1d2a31 6776
48f24c4d 6777/*
48c5ccae
PZ
6778 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6779 * try_to_wake_up()->select_task_rq().
6780 *
6781 * Called with rq->lock held even though we'er in stop_machine() and
6782 * there's no concurrency possible, we hold the required locks anyway
6783 * because of lock validation efforts.
1da177e4 6784 */
8a8c69c3 6785static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
1da177e4 6786{
5e16bbc2 6787 struct rq *rq = dead_rq;
48c5ccae 6788 struct task_struct *next, *stop = rq->stop;
8a8c69c3 6789 struct rq_flags orf = *rf;
48c5ccae 6790 int dest_cpu;
1da177e4
LT
6791
6792 /*
48c5ccae
PZ
6793 * Fudge the rq selection such that the below task selection loop
6794 * doesn't get stuck on the currently eligible stop task.
6795 *
6796 * We're currently inside stop_machine() and the rq is either stuck
6797 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6798 * either way we should never end up calling schedule() until we're
6799 * done here.
1da177e4 6800 */
48c5ccae 6801 rq->stop = NULL;
48f24c4d 6802
77bd3970
FW
6803 /*
6804 * put_prev_task() and pick_next_task() sched
6805 * class method both need to have an up-to-date
6806 * value of rq->clock[_task]
6807 */
6808 update_rq_clock(rq);
6809
5e16bbc2 6810 for (;;) {
48c5ccae
PZ
6811 /*
6812 * There's this thread running, bail when that's the only
d1ccc66d 6813 * remaining thread:
48c5ccae
PZ
6814 */
6815 if (rq->nr_running == 1)
dd41f596 6816 break;
48c5ccae 6817
10e7071b 6818 next = __pick_migrate_task(rq);
e692ab53 6819
5473e0cc 6820 /*
3bd37062 6821 * Rules for changing task_struct::cpus_mask are holding
5473e0cc
WL
6822 * both pi_lock and rq->lock, such that holding either
6823 * stabilizes the mask.
6824 *
6825 * Drop rq->lock is not quite as disastrous as it usually is
6826 * because !cpu_active at this point, which means load-balance
6827 * will not interfere. Also, stop-machine.
6828 */
8a8c69c3 6829 rq_unlock(rq, rf);
5473e0cc 6830 raw_spin_lock(&next->pi_lock);
8a8c69c3 6831 rq_relock(rq, rf);
5473e0cc
WL
6832
6833 /*
6834 * Since we're inside stop-machine, _nothing_ should have
6835 * changed the task, WARN if weird stuff happened, because in
6836 * that case the above rq->lock drop is a fail too.
6837 */
6838 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
6839 raw_spin_unlock(&next->pi_lock);
6840 continue;
6841 }
6842
48c5ccae 6843 /* Find suitable destination for @next, with force if needed. */
5e16bbc2 6844 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
8a8c69c3 6845 rq = __migrate_task(rq, rf, next, dest_cpu);
5e16bbc2 6846 if (rq != dead_rq) {
8a8c69c3 6847 rq_unlock(rq, rf);
5e16bbc2 6848 rq = dead_rq;
8a8c69c3
PZ
6849 *rf = orf;
6850 rq_relock(rq, rf);
5e16bbc2 6851 }
5473e0cc 6852 raw_spin_unlock(&next->pi_lock);
1da177e4 6853 }
dce48a84 6854
48c5ccae 6855 rq->stop = stop;
dce48a84 6856}
2558aacf
PZ
6857
6858static int __balance_push_cpu_stop(void *arg)
6859{
6860 struct task_struct *p = arg;
6861 struct rq *rq = this_rq();
6862 struct rq_flags rf;
6863 int cpu;
6864
6865 raw_spin_lock_irq(&p->pi_lock);
6866 rq_lock(rq, &rf);
6867
6868 update_rq_clock(rq);
6869
6870 if (task_rq(p) == rq && task_on_rq_queued(p)) {
6871 cpu = select_fallback_rq(rq->cpu, p);
6872 rq = __migrate_task(rq, &rf, p, cpu);
6873 }
6874
6875 rq_unlock(rq, &rf);
6876 raw_spin_unlock_irq(&p->pi_lock);
6877
6878 put_task_struct(p);
6879
6880 return 0;
6881}
6882
6883static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
6884
6885/*
6886 * Ensure we only run per-cpu kthreads once the CPU goes !active.
6887 */
6888static void balance_push(struct rq *rq)
6889{
6890 struct task_struct *push_task = rq->curr;
6891
6892 lockdep_assert_held(&rq->lock);
6893 SCHED_WARN_ON(rq->cpu != smp_processor_id());
6894
6895 /*
6896 * Both the cpu-hotplug and stop task are in this case and are
6897 * required to complete the hotplug process.
6898 */
6899 if (is_per_cpu_kthread(push_task))
6900 return;
6901
6902 get_task_struct(push_task);
6903 /*
6904 * Temporarily drop rq->lock such that we can wake-up the stop task.
6905 * Both preemption and IRQs are still disabled.
6906 */
6907 raw_spin_unlock(&rq->lock);
6908 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
6909 this_cpu_ptr(&push_work));
6910 /*
6911 * At this point need_resched() is true and we'll take the loop in
6912 * schedule(). The next pick is obviously going to be the stop task
6913 * which is_per_cpu_kthread() and will push this task away.
6914 */
6915 raw_spin_lock(&rq->lock);
6916}
6917
6918static void balance_push_set(int cpu, bool on)
6919{
6920 struct rq *rq = cpu_rq(cpu);
6921 struct rq_flags rf;
6922
6923 rq_lock_irqsave(rq, &rf);
6924 if (on)
6925 rq->balance_flags |= BALANCE_PUSH;
6926 else
6927 rq->balance_flags &= ~BALANCE_PUSH;
6928 rq_unlock_irqrestore(rq, &rf);
6929}
6930
6931#else
6932
6933static inline void balance_push(struct rq *rq)
6934{
6935}
6936
6937static inline void balance_push_set(int cpu, bool on)
6938{
6939}
6940
1da177e4
LT
6941#endif /* CONFIG_HOTPLUG_CPU */
6942
f2cb1360 6943void set_rq_online(struct rq *rq)
1f11eb6a
GH
6944{
6945 if (!rq->online) {
6946 const struct sched_class *class;
6947
c6c4927b 6948 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6949 rq->online = 1;
6950
6951 for_each_class(class) {
6952 if (class->rq_online)
6953 class->rq_online(rq);
6954 }
6955 }
6956}
6957
f2cb1360 6958void set_rq_offline(struct rq *rq)
1f11eb6a
GH
6959{
6960 if (rq->online) {
6961 const struct sched_class *class;
6962
6963 for_each_class(class) {
6964 if (class->rq_offline)
6965 class->rq_offline(rq);
6966 }
6967
c6c4927b 6968 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6969 rq->online = 0;
6970 }
6971}
6972
d1ccc66d
IM
6973/*
6974 * used to mark begin/end of suspend/resume:
6975 */
6976static int num_cpus_frozen;
d35be8ba 6977
1da177e4 6978/*
3a101d05
TH
6979 * Update cpusets according to cpu_active mask. If cpusets are
6980 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6981 * around partition_sched_domains().
d35be8ba
SB
6982 *
6983 * If we come here as part of a suspend/resume, don't touch cpusets because we
6984 * want to restore it back to its original state upon resume anyway.
1da177e4 6985 */
40190a78 6986static void cpuset_cpu_active(void)
e761b772 6987{
40190a78 6988 if (cpuhp_tasks_frozen) {
d35be8ba
SB
6989 /*
6990 * num_cpus_frozen tracks how many CPUs are involved in suspend
6991 * resume sequence. As long as this is not the last online
6992 * operation in the resume sequence, just build a single sched
6993 * domain, ignoring cpusets.
6994 */
50e76632
PZ
6995 partition_sched_domains(1, NULL, NULL);
6996 if (--num_cpus_frozen)
135fb3e1 6997 return;
d35be8ba
SB
6998 /*
6999 * This is the last CPU online operation. So fall through and
7000 * restore the original sched domains by considering the
7001 * cpuset configurations.
7002 */
50e76632 7003 cpuset_force_rebuild();
3a101d05 7004 }
30e03acd 7005 cpuset_update_active_cpus();
3a101d05 7006}
e761b772 7007
40190a78 7008static int cpuset_cpu_inactive(unsigned int cpu)
3a101d05 7009{
40190a78 7010 if (!cpuhp_tasks_frozen) {
06a76fe0 7011 if (dl_cpu_busy(cpu))
135fb3e1 7012 return -EBUSY;
30e03acd 7013 cpuset_update_active_cpus();
135fb3e1 7014 } else {
d35be8ba
SB
7015 num_cpus_frozen++;
7016 partition_sched_domains(1, NULL, NULL);
e761b772 7017 }
135fb3e1 7018 return 0;
e761b772 7019}
e761b772 7020
40190a78 7021int sched_cpu_activate(unsigned int cpu)
135fb3e1 7022{
7d976699 7023 struct rq *rq = cpu_rq(cpu);
8a8c69c3 7024 struct rq_flags rf;
7d976699 7025
2558aacf
PZ
7026 balance_push_set(cpu, false);
7027
ba2591a5
PZ
7028#ifdef CONFIG_SCHED_SMT
7029 /*
c5511d03 7030 * When going up, increment the number of cores with SMT present.
ba2591a5 7031 */
c5511d03
PZI
7032 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7033 static_branch_inc_cpuslocked(&sched_smt_present);
ba2591a5 7034#endif
40190a78 7035 set_cpu_active(cpu, true);
135fb3e1 7036
40190a78 7037 if (sched_smp_initialized) {
135fb3e1 7038 sched_domains_numa_masks_set(cpu);
40190a78 7039 cpuset_cpu_active();
e761b772 7040 }
7d976699
TG
7041
7042 /*
7043 * Put the rq online, if not already. This happens:
7044 *
7045 * 1) In the early boot process, because we build the real domains
d1ccc66d 7046 * after all CPUs have been brought up.
7d976699
TG
7047 *
7048 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7049 * domains.
7050 */
8a8c69c3 7051 rq_lock_irqsave(rq, &rf);
7d976699
TG
7052 if (rq->rd) {
7053 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7054 set_rq_online(rq);
7055 }
8a8c69c3 7056 rq_unlock_irqrestore(rq, &rf);
7d976699 7057
40190a78 7058 return 0;
135fb3e1
TG
7059}
7060
40190a78 7061int sched_cpu_deactivate(unsigned int cpu)
135fb3e1 7062{
135fb3e1
TG
7063 int ret;
7064
40190a78 7065 set_cpu_active(cpu, false);
b2454caa
PZ
7066 /*
7067 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
7068 * users of this state to go away such that all new such users will
7069 * observe it.
7070 *
b2454caa
PZ
7071 * Do sync before park smpboot threads to take care the rcu boost case.
7072 */
309ba859 7073 synchronize_rcu();
40190a78 7074
2558aacf
PZ
7075 balance_push_set(cpu, true);
7076
c5511d03
PZI
7077#ifdef CONFIG_SCHED_SMT
7078 /*
7079 * When going down, decrement the number of cores with SMT present.
7080 */
7081 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7082 static_branch_dec_cpuslocked(&sched_smt_present);
7083#endif
7084
40190a78
TG
7085 if (!sched_smp_initialized)
7086 return 0;
7087
7088 ret = cpuset_cpu_inactive(cpu);
7089 if (ret) {
2558aacf 7090 balance_push_set(cpu, false);
40190a78
TG
7091 set_cpu_active(cpu, true);
7092 return ret;
135fb3e1 7093 }
40190a78
TG
7094 sched_domains_numa_masks_clear(cpu);
7095 return 0;
135fb3e1
TG
7096}
7097
94baf7a5
TG
7098static void sched_rq_cpu_starting(unsigned int cpu)
7099{
7100 struct rq *rq = cpu_rq(cpu);
7101
7102 rq->calc_load_update = calc_load_update;
94baf7a5
TG
7103 update_max_interval();
7104}
7105
135fb3e1
TG
7106int sched_cpu_starting(unsigned int cpu)
7107{
94baf7a5 7108 sched_rq_cpu_starting(cpu);
d84b3131 7109 sched_tick_start(cpu);
135fb3e1 7110 return 0;
e761b772 7111}
e761b772 7112
f2785ddb
TG
7113#ifdef CONFIG_HOTPLUG_CPU
7114int sched_cpu_dying(unsigned int cpu)
7115{
7116 struct rq *rq = cpu_rq(cpu);
8a8c69c3 7117 struct rq_flags rf;
f2785ddb
TG
7118
7119 /* Handle pending wakeups and then migrate everything off */
d84b3131 7120 sched_tick_stop(cpu);
8a8c69c3
PZ
7121
7122 rq_lock_irqsave(rq, &rf);
f2785ddb
TG
7123 if (rq->rd) {
7124 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7125 set_rq_offline(rq);
7126 }
8a8c69c3 7127 migrate_tasks(rq, &rf);
f2785ddb 7128 BUG_ON(rq->nr_running != 1);
8a8c69c3
PZ
7129 rq_unlock_irqrestore(rq, &rf);
7130
f2785ddb
TG
7131 calc_load_migrate(rq);
7132 update_max_interval();
00357f5e 7133 nohz_balance_exit_idle(rq);
e5ef27d0 7134 hrtick_clear(rq);
f2785ddb
TG
7135 return 0;
7136}
7137#endif
7138
1da177e4
LT
7139void __init sched_init_smp(void)
7140{
cb83b629
PZ
7141 sched_init_numa();
7142
6acce3ef
PZ
7143 /*
7144 * There's no userspace yet to cause hotplug operations; hence all the
d1ccc66d 7145 * CPU masks are stable and all blatant races in the below code cannot
b5a4e2bb 7146 * happen.
6acce3ef 7147 */
712555ee 7148 mutex_lock(&sched_domains_mutex);
8d5dc512 7149 sched_init_domains(cpu_active_mask);
712555ee 7150 mutex_unlock(&sched_domains_mutex);
e761b772 7151
5c1e1767 7152 /* Move init over to a non-isolated CPU */
edb93821 7153 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
5c1e1767 7154 BUG();
19978ca6 7155 sched_init_granularity();
4212823f 7156
0e3900e6 7157 init_sched_rt_class();
1baca4ce 7158 init_sched_dl_class();
1b568f0a 7159
e26fbffd 7160 sched_smp_initialized = true;
1da177e4 7161}
e26fbffd
TG
7162
7163static int __init migration_init(void)
7164{
77a5352b 7165 sched_cpu_starting(smp_processor_id());
e26fbffd 7166 return 0;
1da177e4 7167}
e26fbffd
TG
7168early_initcall(migration_init);
7169
1da177e4
LT
7170#else
7171void __init sched_init_smp(void)
7172{
19978ca6 7173 sched_init_granularity();
1da177e4
LT
7174}
7175#endif /* CONFIG_SMP */
7176
7177int in_sched_functions(unsigned long addr)
7178{
1da177e4
LT
7179 return in_lock_functions(addr) ||
7180 (addr >= (unsigned long)__sched_text_start
7181 && addr < (unsigned long)__sched_text_end);
7182}
7183
029632fb 7184#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7185/*
7186 * Default task group.
7187 * Every task in system belongs to this group at bootup.
7188 */
029632fb 7189struct task_group root_task_group;
35cf4e50 7190LIST_HEAD(task_groups);
b0367629
WL
7191
7192/* Cacheline aligned slab cache for task_group */
7193static struct kmem_cache *task_group_cache __read_mostly;
052f1dc7 7194#endif
6f505b16 7195
e6252c3e 7196DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
10e2f1ac 7197DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
6f505b16 7198
1da177e4
LT
7199void __init sched_init(void)
7200{
a1dc0446 7201 unsigned long ptr = 0;
55627e3c 7202 int i;
434d53b0 7203
c3a340f7
SRV
7204 /* Make sure the linker didn't screw up */
7205 BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
7206 &fair_sched_class + 1 != &rt_sched_class ||
7207 &rt_sched_class + 1 != &dl_sched_class);
7208#ifdef CONFIG_SMP
7209 BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
7210#endif
7211
5822a454 7212 wait_bit_init();
9dcb8b68 7213
434d53b0 7214#ifdef CONFIG_FAIR_GROUP_SCHED
a1dc0446 7215 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0
MT
7216#endif
7217#ifdef CONFIG_RT_GROUP_SCHED
a1dc0446 7218 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0 7219#endif
a1dc0446
QC
7220 if (ptr) {
7221 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
434d53b0
MT
7222
7223#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7224 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7225 ptr += nr_cpu_ids * sizeof(void **);
7226
07e06b01 7227 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7228 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7229
b1d1779e
WY
7230 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7231 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6d6bc0ad 7232#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7233#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7234 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7235 ptr += nr_cpu_ids * sizeof(void **);
7236
07e06b01 7237 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7238 ptr += nr_cpu_ids * sizeof(void **);
7239
6d6bc0ad 7240#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 7241 }
df7c8e84 7242#ifdef CONFIG_CPUMASK_OFFSTACK
b74e6278
AT
7243 for_each_possible_cpu(i) {
7244 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7245 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
10e2f1ac
PZ
7246 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
7247 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
434d53b0 7248 }
b74e6278 7249#endif /* CONFIG_CPUMASK_OFFSTACK */
dd41f596 7250
d1ccc66d
IM
7251 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
7252 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
332ac17e 7253
57d885fe
GH
7254#ifdef CONFIG_SMP
7255 init_defrootdomain();
7256#endif
7257
d0b27fa7 7258#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7259 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7260 global_rt_period(), global_rt_runtime());
6d6bc0ad 7261#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7262
7c941438 7263#ifdef CONFIG_CGROUP_SCHED
b0367629
WL
7264 task_group_cache = KMEM_CACHE(task_group, 0);
7265
07e06b01
YZ
7266 list_add(&root_task_group.list, &task_groups);
7267 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7268 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7269 autogroup_init(&init_task);
7c941438 7270#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7271
0a945022 7272 for_each_possible_cpu(i) {
70b97a7f 7273 struct rq *rq;
1da177e4
LT
7274
7275 rq = cpu_rq(i);
05fa785c 7276 raw_spin_lock_init(&rq->lock);
7897986b 7277 rq->nr_running = 0;
dce48a84
TG
7278 rq->calc_load_active = 0;
7279 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 7280 init_cfs_rq(&rq->cfs);
07c54f7a
AV
7281 init_rt_rq(&rq->rt);
7282 init_dl_rq(&rq->dl);
dd41f596 7283#ifdef CONFIG_FAIR_GROUP_SCHED
6f505b16 7284 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
9c2791f9 7285 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
354d60c2 7286 /*
d1ccc66d 7287 * How much CPU bandwidth does root_task_group get?
354d60c2
DG
7288 *
7289 * In case of task-groups formed thr' the cgroup filesystem, it
d1ccc66d
IM
7290 * gets 100% of the CPU resources in the system. This overall
7291 * system CPU resource is divided among the tasks of
07e06b01 7292 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7293 * based on each entity's (task or task-group's) weight
7294 * (se->load.weight).
7295 *
07e06b01 7296 * In other words, if root_task_group has 10 tasks of weight
354d60c2 7297 * 1024) and two child groups A0 and A1 (of weight 1024 each),
d1ccc66d 7298 * then A0's share of the CPU resource is:
354d60c2 7299 *
0d905bca 7300 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7301 *
07e06b01
YZ
7302 * We achieve this by letting root_task_group's tasks sit
7303 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7304 */
07e06b01 7305 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7306#endif /* CONFIG_FAIR_GROUP_SCHED */
7307
7308 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7309#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7310 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7311#endif
1da177e4 7312#ifdef CONFIG_SMP
41c7ce9a 7313 rq->sd = NULL;
57d885fe 7314 rq->rd = NULL;
ca6d75e6 7315 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
e3fca9e7 7316 rq->balance_callback = NULL;
1da177e4 7317 rq->active_balance = 0;
dd41f596 7318 rq->next_balance = jiffies;
1da177e4 7319 rq->push_cpu = 0;
0a2966b4 7320 rq->cpu = i;
1f11eb6a 7321 rq->online = 0;
eae0c9df
MG
7322 rq->idle_stamp = 0;
7323 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 7324 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
7325
7326 INIT_LIST_HEAD(&rq->cfs_tasks);
7327
dc938520 7328 rq_attach_root(rq, &def_root_domain);
3451d024 7329#ifdef CONFIG_NO_HZ_COMMON
e022e0d3 7330 rq->last_blocked_load_update_tick = jiffies;
a22e47a4 7331 atomic_set(&rq->nohz_flags, 0);
90b5363a
PZI
7332
7333 rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
83cd4fe2 7334#endif
9fd81dd5 7335#endif /* CONFIG_SMP */
77a021be 7336 hrtick_rq_init(rq);
1da177e4 7337 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7338 }
7339
9059393e 7340 set_load_weight(&init_task, false);
b50f60ce 7341
1da177e4
LT
7342 /*
7343 * The boot idle thread does lazy MMU switching as well:
7344 */
f1f10076 7345 mmgrab(&init_mm);
1da177e4
LT
7346 enter_lazy_tlb(&init_mm, current);
7347
7348 /*
7349 * Make us the idle thread. Technically, schedule() should not be
7350 * called from this thread, however somewhere below it might be,
7351 * but because we are the idle thread, we just pick up running again
7352 * when this runqueue becomes "idle".
7353 */
7354 init_idle(current, smp_processor_id());
dce48a84
TG
7355
7356 calc_load_update = jiffies + LOAD_FREQ;
7357
bf4d83f6 7358#ifdef CONFIG_SMP
29d5e047 7359 idle_thread_set_boot_cpu();
029632fb
PZ
7360#endif
7361 init_sched_fair_class();
6a7b3dc3 7362
4698f88c
JP
7363 init_schedstats();
7364
eb414681
JW
7365 psi_init();
7366
69842cba
PB
7367 init_uclamp();
7368
6892b75e 7369 scheduler_running = 1;
1da177e4
LT
7370}
7371
d902db1e 7372#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7373static inline int preempt_count_equals(int preempt_offset)
7374{
da7142e2 7375 int nested = preempt_count() + rcu_preempt_depth();
e4aafea2 7376
4ba8216c 7377 return (nested == preempt_offset);
e4aafea2
FW
7378}
7379
d894837f 7380void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7381{
8eb23b9f
PZ
7382 /*
7383 * Blocking primitives will set (and therefore destroy) current->state,
7384 * since we will exit with TASK_RUNNING make sure we enter with it,
7385 * otherwise we will destroy state.
7386 */
00845eb9 7387 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
8eb23b9f
PZ
7388 "do not call blocking ops when !TASK_RUNNING; "
7389 "state=%lx set at [<%p>] %pS\n",
7390 current->state,
7391 (void *)current->task_state_change,
00845eb9 7392 (void *)current->task_state_change);
8eb23b9f 7393
3427445a
PZ
7394 ___might_sleep(file, line, preempt_offset);
7395}
7396EXPORT_SYMBOL(__might_sleep);
7397
7398void ___might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7399{
d1ccc66d
IM
7400 /* Ratelimiting timestamp: */
7401 static unsigned long prev_jiffy;
7402
d1c6d149 7403 unsigned long preempt_disable_ip;
1da177e4 7404
d1ccc66d
IM
7405 /* WARN_ON_ONCE() by default, no rate limit required: */
7406 rcu_sleep_check();
7407
db273be2 7408 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
312364f3 7409 !is_idle_task(current) && !current->non_block_count) ||
1c3c5eab
TG
7410 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
7411 oops_in_progress)
aef745fc 7412 return;
1c3c5eab 7413
aef745fc
IM
7414 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7415 return;
7416 prev_jiffy = jiffies;
7417
d1ccc66d 7418 /* Save this before calling printk(), since that will clobber it: */
d1c6d149
VN
7419 preempt_disable_ip = get_preempt_disable_ip(current);
7420
3df0fc5b
PZ
7421 printk(KERN_ERR
7422 "BUG: sleeping function called from invalid context at %s:%d\n",
7423 file, line);
7424 printk(KERN_ERR
312364f3
DV
7425 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
7426 in_atomic(), irqs_disabled(), current->non_block_count,
3df0fc5b 7427 current->pid, current->comm);
aef745fc 7428
a8b686b3
ES
7429 if (task_stack_end_corrupted(current))
7430 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7431
aef745fc
IM
7432 debug_show_held_locks(current);
7433 if (irqs_disabled())
7434 print_irqtrace_events(current);
d1c6d149
VN
7435 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
7436 && !preempt_count_equals(preempt_offset)) {
8f47b187 7437 pr_err("Preemption disabled at:");
2062a4e8 7438 print_ip_sym(KERN_ERR, preempt_disable_ip);
8f47b187 7439 }
aef745fc 7440 dump_stack();
f0b22e39 7441 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
1da177e4 7442}
3427445a 7443EXPORT_SYMBOL(___might_sleep);
568f1967
PZ
7444
7445void __cant_sleep(const char *file, int line, int preempt_offset)
7446{
7447 static unsigned long prev_jiffy;
7448
7449 if (irqs_disabled())
7450 return;
7451
7452 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
7453 return;
7454
7455 if (preempt_count() > preempt_offset)
7456 return;
7457
7458 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7459 return;
7460 prev_jiffy = jiffies;
7461
7462 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
7463 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7464 in_atomic(), irqs_disabled(),
7465 current->pid, current->comm);
7466
7467 debug_show_held_locks(current);
7468 dump_stack();
7469 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
7470}
7471EXPORT_SYMBOL_GPL(__cant_sleep);
1da177e4
LT
7472#endif
7473
7474#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 7475void normalize_rt_tasks(void)
3a5e4dc1 7476{
dbc7f069 7477 struct task_struct *g, *p;
d50dde5a
DF
7478 struct sched_attr attr = {
7479 .sched_policy = SCHED_NORMAL,
7480 };
1da177e4 7481
3472eaa1 7482 read_lock(&tasklist_lock);
5d07f420 7483 for_each_process_thread(g, p) {
178be793
IM
7484 /*
7485 * Only normalize user tasks:
7486 */
3472eaa1 7487 if (p->flags & PF_KTHREAD)
178be793
IM
7488 continue;
7489
4fa8d299
JP
7490 p->se.exec_start = 0;
7491 schedstat_set(p->se.statistics.wait_start, 0);
7492 schedstat_set(p->se.statistics.sleep_start, 0);
7493 schedstat_set(p->se.statistics.block_start, 0);
dd41f596 7494
aab03e05 7495 if (!dl_task(p) && !rt_task(p)) {
dd41f596
IM
7496 /*
7497 * Renice negative nice level userspace
7498 * tasks back to 0:
7499 */
3472eaa1 7500 if (task_nice(p) < 0)
dd41f596 7501 set_user_nice(p, 0);
1da177e4 7502 continue;
dd41f596 7503 }
1da177e4 7504
dbc7f069 7505 __sched_setscheduler(p, &attr, false, false);
5d07f420 7506 }
3472eaa1 7507 read_unlock(&tasklist_lock);
1da177e4
LT
7508}
7509
7510#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7511
67fc4e0c 7512#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7513/*
67fc4e0c 7514 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7515 *
7516 * They can only be called when the whole system has been
7517 * stopped - every CPU needs to be quiescent, and no scheduling
7518 * activity can take place. Using them for anything else would
7519 * be a serious bug, and as a result, they aren't even visible
7520 * under any other configuration.
7521 */
7522
7523/**
d1ccc66d 7524 * curr_task - return the current task for a given CPU.
1df5c10a
LT
7525 * @cpu: the processor in question.
7526 *
7527 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
7528 *
7529 * Return: The current task for @cpu.
1df5c10a 7530 */
36c8b586 7531struct task_struct *curr_task(int cpu)
1df5c10a
LT
7532{
7533 return cpu_curr(cpu);
7534}
7535
67fc4e0c
JW
7536#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7537
7538#ifdef CONFIG_IA64
1df5c10a 7539/**
5feeb783 7540 * ia64_set_curr_task - set the current task for a given CPU.
1df5c10a
LT
7541 * @cpu: the processor in question.
7542 * @p: the task pointer to set.
7543 *
7544 * Description: This function must only be used when non-maskable interrupts
41a2d6cf 7545 * are serviced on a separate stack. It allows the architecture to switch the
d1ccc66d 7546 * notion of the current task on a CPU in a non-blocking manner. This function
1df5c10a
LT
7547 * must be called with all CPU's synchronized, and interrupts disabled, the
7548 * and caller must save the original value of the current task (see
7549 * curr_task() above) and restore that value before reenabling interrupts and
7550 * re-starting the system.
7551 *
7552 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7553 */
a458ae2e 7554void ia64_set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7555{
7556 cpu_curr(cpu) = p;
7557}
7558
7559#endif
29f59db3 7560
7c941438 7561#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7562/* task_group_lock serializes the addition/removal of task groups */
7563static DEFINE_SPINLOCK(task_group_lock);
7564
2480c093
PB
7565static inline void alloc_uclamp_sched_group(struct task_group *tg,
7566 struct task_group *parent)
7567{
7568#ifdef CONFIG_UCLAMP_TASK_GROUP
0413d7f3 7569 enum uclamp_id clamp_id;
2480c093
PB
7570
7571 for_each_clamp_id(clamp_id) {
7572 uclamp_se_set(&tg->uclamp_req[clamp_id],
7573 uclamp_none(clamp_id), false);
0b60ba2d 7574 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
2480c093
PB
7575 }
7576#endif
7577}
7578
2f5177f0 7579static void sched_free_group(struct task_group *tg)
bccbe08a
PZ
7580{
7581 free_fair_sched_group(tg);
7582 free_rt_sched_group(tg);
e9aa1dd1 7583 autogroup_free(tg);
b0367629 7584 kmem_cache_free(task_group_cache, tg);
bccbe08a
PZ
7585}
7586
7587/* allocate runqueue etc for a new task group */
ec7dc8ac 7588struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7589{
7590 struct task_group *tg;
bccbe08a 7591
b0367629 7592 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
bccbe08a
PZ
7593 if (!tg)
7594 return ERR_PTR(-ENOMEM);
7595
ec7dc8ac 7596 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7597 goto err;
7598
ec7dc8ac 7599 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7600 goto err;
7601
2480c093
PB
7602 alloc_uclamp_sched_group(tg, parent);
7603
ace783b9
LZ
7604 return tg;
7605
7606err:
2f5177f0 7607 sched_free_group(tg);
ace783b9
LZ
7608 return ERR_PTR(-ENOMEM);
7609}
7610
7611void sched_online_group(struct task_group *tg, struct task_group *parent)
7612{
7613 unsigned long flags;
7614
8ed36996 7615 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7616 list_add_rcu(&tg->list, &task_groups);
f473aa5e 7617
d1ccc66d
IM
7618 /* Root should already exist: */
7619 WARN_ON(!parent);
f473aa5e
PZ
7620
7621 tg->parent = parent;
f473aa5e 7622 INIT_LIST_HEAD(&tg->children);
09f2724a 7623 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7624 spin_unlock_irqrestore(&task_group_lock, flags);
8663e24d
PZ
7625
7626 online_fair_sched_group(tg);
29f59db3
SV
7627}
7628
9b5b7751 7629/* rcu callback to free various structures associated with a task group */
2f5177f0 7630static void sched_free_group_rcu(struct rcu_head *rhp)
29f59db3 7631{
d1ccc66d 7632 /* Now it should be safe to free those cfs_rqs: */
2f5177f0 7633 sched_free_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7634}
7635
4cf86d77 7636void sched_destroy_group(struct task_group *tg)
ace783b9 7637{
d1ccc66d 7638 /* Wait for possible concurrent references to cfs_rqs complete: */
2f5177f0 7639 call_rcu(&tg->rcu, sched_free_group_rcu);
ace783b9
LZ
7640}
7641
7642void sched_offline_group(struct task_group *tg)
29f59db3 7643{
8ed36996 7644 unsigned long flags;
29f59db3 7645
d1ccc66d 7646 /* End participation in shares distribution: */
6fe1f348 7647 unregister_fair_sched_group(tg);
3d4b47b4
PZ
7648
7649 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7650 list_del_rcu(&tg->list);
f473aa5e 7651 list_del_rcu(&tg->siblings);
8ed36996 7652 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7653}
7654
ea86cb4b 7655static void sched_change_group(struct task_struct *tsk, int type)
29f59db3 7656{
8323f26c 7657 struct task_group *tg;
29f59db3 7658
f7b8a47d
KT
7659 /*
7660 * All callers are synchronized by task_rq_lock(); we do not use RCU
7661 * which is pointless here. Thus, we pass "true" to task_css_check()
7662 * to prevent lockdep warnings.
7663 */
7664 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
7665 struct task_group, css);
7666 tg = autogroup_task_group(tsk, tg);
7667 tsk->sched_task_group = tg;
7668
810b3817 7669#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
7670 if (tsk->sched_class->task_change_group)
7671 tsk->sched_class->task_change_group(tsk, type);
b2b5ce02 7672 else
810b3817 7673#endif
b2b5ce02 7674 set_task_rq(tsk, task_cpu(tsk));
ea86cb4b
VG
7675}
7676
7677/*
7678 * Change task's runqueue when it moves between groups.
7679 *
7680 * The caller of this function should have put the task in its new group by
7681 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
7682 * its new group.
7683 */
7684void sched_move_task(struct task_struct *tsk)
7685{
7a57f32a
PZ
7686 int queued, running, queue_flags =
7687 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
ea86cb4b
VG
7688 struct rq_flags rf;
7689 struct rq *rq;
7690
7691 rq = task_rq_lock(tsk, &rf);
1b1d6225 7692 update_rq_clock(rq);
ea86cb4b
VG
7693
7694 running = task_current(rq, tsk);
7695 queued = task_on_rq_queued(tsk);
7696
7697 if (queued)
7a57f32a 7698 dequeue_task(rq, tsk, queue_flags);
bb3bac2c 7699 if (running)
ea86cb4b
VG
7700 put_prev_task(rq, tsk);
7701
7702 sched_change_group(tsk, TASK_MOVE_GROUP);
810b3817 7703
da0c1e65 7704 if (queued)
7a57f32a 7705 enqueue_task(rq, tsk, queue_flags);
2a4b03ff 7706 if (running) {
03b7fad1 7707 set_next_task(rq, tsk);
2a4b03ff
VG
7708 /*
7709 * After changing group, the running task may have joined a
7710 * throttled one but it's still the running task. Trigger a
7711 * resched to make sure that task can still run.
7712 */
7713 resched_curr(rq);
7714 }
29f59db3 7715
eb580751 7716 task_rq_unlock(rq, tsk, &rf);
29f59db3 7717}
68318b8e 7718
a7c6d554 7719static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
68318b8e 7720{
a7c6d554 7721 return css ? container_of(css, struct task_group, css) : NULL;
68318b8e
SV
7722}
7723
eb95419b
TH
7724static struct cgroup_subsys_state *
7725cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 7726{
eb95419b
TH
7727 struct task_group *parent = css_tg(parent_css);
7728 struct task_group *tg;
68318b8e 7729
eb95419b 7730 if (!parent) {
68318b8e 7731 /* This is early initialization for the top cgroup */
07e06b01 7732 return &root_task_group.css;
68318b8e
SV
7733 }
7734
ec7dc8ac 7735 tg = sched_create_group(parent);
68318b8e
SV
7736 if (IS_ERR(tg))
7737 return ERR_PTR(-ENOMEM);
7738
68318b8e
SV
7739 return &tg->css;
7740}
7741
96b77745
KK
7742/* Expose task group only after completing cgroup initialization */
7743static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
7744{
7745 struct task_group *tg = css_tg(css);
7746 struct task_group *parent = css_tg(css->parent);
7747
7748 if (parent)
7749 sched_online_group(tg, parent);
7226017a
QY
7750
7751#ifdef CONFIG_UCLAMP_TASK_GROUP
7752 /* Propagate the effective uclamp value for the new group */
7753 cpu_util_update_eff(css);
7754#endif
7755
96b77745
KK
7756 return 0;
7757}
7758
2f5177f0 7759static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
ace783b9 7760{
eb95419b 7761 struct task_group *tg = css_tg(css);
ace783b9 7762
2f5177f0 7763 sched_offline_group(tg);
ace783b9
LZ
7764}
7765
eb95419b 7766static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 7767{
eb95419b 7768 struct task_group *tg = css_tg(css);
68318b8e 7769
2f5177f0
PZ
7770 /*
7771 * Relies on the RCU grace period between css_released() and this.
7772 */
7773 sched_free_group(tg);
ace783b9
LZ
7774}
7775
ea86cb4b
VG
7776/*
7777 * This is called before wake_up_new_task(), therefore we really only
7778 * have to set its group bits, all the other stuff does not apply.
7779 */
b53202e6 7780static void cpu_cgroup_fork(struct task_struct *task)
eeb61e53 7781{
ea86cb4b
VG
7782 struct rq_flags rf;
7783 struct rq *rq;
7784
7785 rq = task_rq_lock(task, &rf);
7786
80f5c1b8 7787 update_rq_clock(rq);
ea86cb4b
VG
7788 sched_change_group(task, TASK_SET_GROUP);
7789
7790 task_rq_unlock(rq, task, &rf);
eeb61e53
KT
7791}
7792
1f7dd3e5 7793static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
68318b8e 7794{
bb9d97b6 7795 struct task_struct *task;
1f7dd3e5 7796 struct cgroup_subsys_state *css;
7dc603c9 7797 int ret = 0;
bb9d97b6 7798
1f7dd3e5 7799 cgroup_taskset_for_each(task, css, tset) {
b68aa230 7800#ifdef CONFIG_RT_GROUP_SCHED
eb95419b 7801 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 7802 return -EINVAL;
b68aa230 7803#endif
7dc603c9
PZ
7804 /*
7805 * Serialize against wake_up_new_task() such that if its
7806 * running, we're sure to observe its full state.
7807 */
7808 raw_spin_lock_irq(&task->pi_lock);
7809 /*
7810 * Avoid calling sched_move_task() before wake_up_new_task()
7811 * has happened. This would lead to problems with PELT, due to
7812 * move wanting to detach+attach while we're not attached yet.
7813 */
7814 if (task->state == TASK_NEW)
7815 ret = -EINVAL;
7816 raw_spin_unlock_irq(&task->pi_lock);
7817
7818 if (ret)
7819 break;
bb9d97b6 7820 }
7dc603c9 7821 return ret;
be367d09 7822}
68318b8e 7823
1f7dd3e5 7824static void cpu_cgroup_attach(struct cgroup_taskset *tset)
68318b8e 7825{
bb9d97b6 7826 struct task_struct *task;
1f7dd3e5 7827 struct cgroup_subsys_state *css;
bb9d97b6 7828
1f7dd3e5 7829 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 7830 sched_move_task(task);
68318b8e
SV
7831}
7832
2480c093 7833#ifdef CONFIG_UCLAMP_TASK_GROUP
0b60ba2d
PB
7834static void cpu_util_update_eff(struct cgroup_subsys_state *css)
7835{
7836 struct cgroup_subsys_state *top_css = css;
7837 struct uclamp_se *uc_parent = NULL;
7838 struct uclamp_se *uc_se = NULL;
7839 unsigned int eff[UCLAMP_CNT];
0413d7f3 7840 enum uclamp_id clamp_id;
0b60ba2d
PB
7841 unsigned int clamps;
7842
7843 css_for_each_descendant_pre(css, top_css) {
7844 uc_parent = css_tg(css)->parent
7845 ? css_tg(css)->parent->uclamp : NULL;
7846
7847 for_each_clamp_id(clamp_id) {
7848 /* Assume effective clamps matches requested clamps */
7849 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
7850 /* Cap effective clamps with parent's effective clamps */
7851 if (uc_parent &&
7852 eff[clamp_id] > uc_parent[clamp_id].value) {
7853 eff[clamp_id] = uc_parent[clamp_id].value;
7854 }
7855 }
7856 /* Ensure protection is always capped by limit */
7857 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
7858
7859 /* Propagate most restrictive effective clamps */
7860 clamps = 0x0;
7861 uc_se = css_tg(css)->uclamp;
7862 for_each_clamp_id(clamp_id) {
7863 if (eff[clamp_id] == uc_se[clamp_id].value)
7864 continue;
7865 uc_se[clamp_id].value = eff[clamp_id];
7866 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
7867 clamps |= (0x1 << clamp_id);
7868 }
babbe170 7869 if (!clamps) {
0b60ba2d 7870 css = css_rightmost_descendant(css);
babbe170
PB
7871 continue;
7872 }
7873
7874 /* Immediately update descendants RUNNABLE tasks */
7875 uclamp_update_active_tasks(css, clamps);
0b60ba2d
PB
7876 }
7877}
2480c093
PB
7878
7879/*
7880 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
7881 * C expression. Since there is no way to convert a macro argument (N) into a
7882 * character constant, use two levels of macros.
7883 */
7884#define _POW10(exp) ((unsigned int)1e##exp)
7885#define POW10(exp) _POW10(exp)
7886
7887struct uclamp_request {
7888#define UCLAMP_PERCENT_SHIFT 2
7889#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
7890 s64 percent;
7891 u64 util;
7892 int ret;
7893};
7894
7895static inline struct uclamp_request
7896capacity_from_percent(char *buf)
7897{
7898 struct uclamp_request req = {
7899 .percent = UCLAMP_PERCENT_SCALE,
7900 .util = SCHED_CAPACITY_SCALE,
7901 .ret = 0,
7902 };
7903
7904 buf = strim(buf);
7905 if (strcmp(buf, "max")) {
7906 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
7907 &req.percent);
7908 if (req.ret)
7909 return req;
b562d140 7910 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
2480c093
PB
7911 req.ret = -ERANGE;
7912 return req;
7913 }
7914
7915 req.util = req.percent << SCHED_CAPACITY_SHIFT;
7916 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
7917 }
7918
7919 return req;
7920}
7921
7922static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
7923 size_t nbytes, loff_t off,
7924 enum uclamp_id clamp_id)
7925{
7926 struct uclamp_request req;
7927 struct task_group *tg;
7928
7929 req = capacity_from_percent(buf);
7930 if (req.ret)
7931 return req.ret;
7932
46609ce2
QY
7933 static_branch_enable(&sched_uclamp_used);
7934
2480c093
PB
7935 mutex_lock(&uclamp_mutex);
7936 rcu_read_lock();
7937
7938 tg = css_tg(of_css(of));
7939 if (tg->uclamp_req[clamp_id].value != req.util)
7940 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
7941
7942 /*
7943 * Because of not recoverable conversion rounding we keep track of the
7944 * exact requested value
7945 */
7946 tg->uclamp_pct[clamp_id] = req.percent;
7947
0b60ba2d
PB
7948 /* Update effective clamps to track the most restrictive value */
7949 cpu_util_update_eff(of_css(of));
7950
2480c093
PB
7951 rcu_read_unlock();
7952 mutex_unlock(&uclamp_mutex);
7953
7954 return nbytes;
7955}
7956
7957static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
7958 char *buf, size_t nbytes,
7959 loff_t off)
7960{
7961 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
7962}
7963
7964static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
7965 char *buf, size_t nbytes,
7966 loff_t off)
7967{
7968 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
7969}
7970
7971static inline void cpu_uclamp_print(struct seq_file *sf,
7972 enum uclamp_id clamp_id)
7973{
7974 struct task_group *tg;
7975 u64 util_clamp;
7976 u64 percent;
7977 u32 rem;
7978
7979 rcu_read_lock();
7980 tg = css_tg(seq_css(sf));
7981 util_clamp = tg->uclamp_req[clamp_id].value;
7982 rcu_read_unlock();
7983
7984 if (util_clamp == SCHED_CAPACITY_SCALE) {
7985 seq_puts(sf, "max\n");
7986 return;
7987 }
7988
7989 percent = tg->uclamp_pct[clamp_id];
7990 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
7991 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
7992}
7993
7994static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
7995{
7996 cpu_uclamp_print(sf, UCLAMP_MIN);
7997 return 0;
7998}
7999
8000static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
8001{
8002 cpu_uclamp_print(sf, UCLAMP_MAX);
8003 return 0;
8004}
8005#endif /* CONFIG_UCLAMP_TASK_GROUP */
8006
052f1dc7 8007#ifdef CONFIG_FAIR_GROUP_SCHED
182446d0
TH
8008static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8009 struct cftype *cftype, u64 shareval)
68318b8e 8010{
5b61d50a
KK
8011 if (shareval > scale_load_down(ULONG_MAX))
8012 shareval = MAX_SHARES;
182446d0 8013 return sched_group_set_shares(css_tg(css), scale_load(shareval));
68318b8e
SV
8014}
8015
182446d0
TH
8016static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8017 struct cftype *cft)
68318b8e 8018{
182446d0 8019 struct task_group *tg = css_tg(css);
68318b8e 8020
c8b28116 8021 return (u64) scale_load_down(tg->shares);
68318b8e 8022}
ab84d31e
PT
8023
8024#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8025static DEFINE_MUTEX(cfs_constraints_mutex);
8026
ab84d31e 8027const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
b1546edc 8028static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
d505b8af
HC
8029/* More than 203 days if BW_SHIFT equals 20. */
8030static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
ab84d31e 8031
a790de99
PT
8032static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8033
ab84d31e
PT
8034static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8035{
56f570e5 8036 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8037 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8038
8039 if (tg == &root_task_group)
8040 return -EINVAL;
8041
8042 /*
8043 * Ensure we have at some amount of bandwidth every period. This is
8044 * to prevent reaching a state of large arrears when throttled via
8045 * entity_tick() resulting in prolonged exit starvation.
8046 */
8047 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8048 return -EINVAL;
8049
8050 /*
8051 * Likewise, bound things on the otherside by preventing insane quota
8052 * periods. This also allows us to normalize in computing quota
8053 * feasibility.
8054 */
8055 if (period > max_cfs_quota_period)
8056 return -EINVAL;
8057
d505b8af
HC
8058 /*
8059 * Bound quota to defend quota against overflow during bandwidth shift.
8060 */
8061 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
8062 return -EINVAL;
8063
0e59bdae
KT
8064 /*
8065 * Prevent race between setting of cfs_rq->runtime_enabled and
8066 * unthrottle_offline_cfs_rqs().
8067 */
8068 get_online_cpus();
a790de99
PT
8069 mutex_lock(&cfs_constraints_mutex);
8070 ret = __cfs_schedulable(tg, period, quota);
8071 if (ret)
8072 goto out_unlock;
8073
58088ad0 8074 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8075 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
8076 /*
8077 * If we need to toggle cfs_bandwidth_used, off->on must occur
8078 * before making related changes, and on->off must occur afterwards
8079 */
8080 if (runtime_enabled && !runtime_was_enabled)
8081 cfs_bandwidth_usage_inc();
ab84d31e
PT
8082 raw_spin_lock_irq(&cfs_b->lock);
8083 cfs_b->period = ns_to_ktime(period);
8084 cfs_b->quota = quota;
58088ad0 8085
a9cf55b2 8086 __refill_cfs_bandwidth_runtime(cfs_b);
d1ccc66d
IM
8087
8088 /* Restart the period timer (if active) to handle new period expiry: */
77a4d1a1
PZ
8089 if (runtime_enabled)
8090 start_cfs_bandwidth(cfs_b);
d1ccc66d 8091
ab84d31e
PT
8092 raw_spin_unlock_irq(&cfs_b->lock);
8093
0e59bdae 8094 for_each_online_cpu(i) {
ab84d31e 8095 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8096 struct rq *rq = cfs_rq->rq;
8a8c69c3 8097 struct rq_flags rf;
ab84d31e 8098
8a8c69c3 8099 rq_lock_irq(rq, &rf);
58088ad0 8100 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8101 cfs_rq->runtime_remaining = 0;
671fd9da 8102
029632fb 8103 if (cfs_rq->throttled)
671fd9da 8104 unthrottle_cfs_rq(cfs_rq);
8a8c69c3 8105 rq_unlock_irq(rq, &rf);
ab84d31e 8106 }
1ee14e6c
BS
8107 if (runtime_was_enabled && !runtime_enabled)
8108 cfs_bandwidth_usage_dec();
a790de99
PT
8109out_unlock:
8110 mutex_unlock(&cfs_constraints_mutex);
0e59bdae 8111 put_online_cpus();
ab84d31e 8112
a790de99 8113 return ret;
ab84d31e
PT
8114}
8115
b1546edc 8116static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
ab84d31e
PT
8117{
8118 u64 quota, period;
8119
029632fb 8120 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8121 if (cfs_quota_us < 0)
8122 quota = RUNTIME_INF;
1a8b4540 8123 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
ab84d31e 8124 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
1a8b4540
KK
8125 else
8126 return -EINVAL;
ab84d31e
PT
8127
8128 return tg_set_cfs_bandwidth(tg, period, quota);
8129}
8130
b1546edc 8131static long tg_get_cfs_quota(struct task_group *tg)
ab84d31e
PT
8132{
8133 u64 quota_us;
8134
029632fb 8135 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8136 return -1;
8137
029632fb 8138 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8139 do_div(quota_us, NSEC_PER_USEC);
8140
8141 return quota_us;
8142}
8143
b1546edc 8144static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
ab84d31e
PT
8145{
8146 u64 quota, period;
8147
1a8b4540
KK
8148 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
8149 return -EINVAL;
8150
ab84d31e 8151 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8152 quota = tg->cfs_bandwidth.quota;
ab84d31e 8153
ab84d31e
PT
8154 return tg_set_cfs_bandwidth(tg, period, quota);
8155}
8156
b1546edc 8157static long tg_get_cfs_period(struct task_group *tg)
ab84d31e
PT
8158{
8159 u64 cfs_period_us;
8160
029632fb 8161 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8162 do_div(cfs_period_us, NSEC_PER_USEC);
8163
8164 return cfs_period_us;
8165}
8166
182446d0
TH
8167static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8168 struct cftype *cft)
ab84d31e 8169{
182446d0 8170 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
8171}
8172
182446d0
TH
8173static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8174 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 8175{
182446d0 8176 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
8177}
8178
182446d0
TH
8179static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8180 struct cftype *cft)
ab84d31e 8181{
182446d0 8182 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
8183}
8184
182446d0
TH
8185static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8186 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 8187{
182446d0 8188 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
8189}
8190
a790de99
PT
8191struct cfs_schedulable_data {
8192 struct task_group *tg;
8193 u64 period, quota;
8194};
8195
8196/*
8197 * normalize group quota/period to be quota/max_period
8198 * note: units are usecs
8199 */
8200static u64 normalize_cfs_quota(struct task_group *tg,
8201 struct cfs_schedulable_data *d)
8202{
8203 u64 quota, period;
8204
8205 if (tg == d->tg) {
8206 period = d->period;
8207 quota = d->quota;
8208 } else {
8209 period = tg_get_cfs_period(tg);
8210 quota = tg_get_cfs_quota(tg);
8211 }
8212
8213 /* note: these should typically be equivalent */
8214 if (quota == RUNTIME_INF || quota == -1)
8215 return RUNTIME_INF;
8216
8217 return to_ratio(period, quota);
8218}
8219
8220static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8221{
8222 struct cfs_schedulable_data *d = data;
029632fb 8223 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8224 s64 quota = 0, parent_quota = -1;
8225
8226 if (!tg->parent) {
8227 quota = RUNTIME_INF;
8228 } else {
029632fb 8229 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8230
8231 quota = normalize_cfs_quota(tg, d);
9c58c79a 8232 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
8233
8234 /*
c53593e5
TH
8235 * Ensure max(child_quota) <= parent_quota. On cgroup2,
8236 * always take the min. On cgroup1, only inherit when no
d1ccc66d 8237 * limit is set:
a790de99 8238 */
c53593e5
TH
8239 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
8240 quota = min(quota, parent_quota);
8241 } else {
8242 if (quota == RUNTIME_INF)
8243 quota = parent_quota;
8244 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8245 return -EINVAL;
8246 }
a790de99 8247 }
9c58c79a 8248 cfs_b->hierarchical_quota = quota;
a790de99
PT
8249
8250 return 0;
8251}
8252
8253static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8254{
8277434e 8255 int ret;
a790de99
PT
8256 struct cfs_schedulable_data data = {
8257 .tg = tg,
8258 .period = period,
8259 .quota = quota,
8260 };
8261
8262 if (quota != RUNTIME_INF) {
8263 do_div(data.period, NSEC_PER_USEC);
8264 do_div(data.quota, NSEC_PER_USEC);
8265 }
8266
8277434e
PT
8267 rcu_read_lock();
8268 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8269 rcu_read_unlock();
8270
8271 return ret;
a790de99 8272}
e8da1b18 8273
a1f7164c 8274static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
e8da1b18 8275{
2da8ca82 8276 struct task_group *tg = css_tg(seq_css(sf));
029632fb 8277 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 8278
44ffc75b
TH
8279 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8280 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8281 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18 8282
3d6c50c2
YW
8283 if (schedstat_enabled() && tg != &root_task_group) {
8284 u64 ws = 0;
8285 int i;
8286
8287 for_each_possible_cpu(i)
8288 ws += schedstat_val(tg->se[i]->statistics.wait_sum);
8289
8290 seq_printf(sf, "wait_sum %llu\n", ws);
8291 }
8292
e8da1b18
NR
8293 return 0;
8294}
ab84d31e 8295#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8296#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8297
052f1dc7 8298#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
8299static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8300 struct cftype *cft, s64 val)
6f505b16 8301{
182446d0 8302 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
8303}
8304
182446d0
TH
8305static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8306 struct cftype *cft)
6f505b16 8307{
182446d0 8308 return sched_group_rt_runtime(css_tg(css));
6f505b16 8309}
d0b27fa7 8310
182446d0
TH
8311static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8312 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 8313{
182446d0 8314 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
8315}
8316
182446d0
TH
8317static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8318 struct cftype *cft)
d0b27fa7 8319{
182446d0 8320 return sched_group_rt_period(css_tg(css));
d0b27fa7 8321}
6d6bc0ad 8322#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8323
a1f7164c 8324static struct cftype cpu_legacy_files[] = {
052f1dc7 8325#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8326 {
8327 .name = "shares",
f4c753b7
PM
8328 .read_u64 = cpu_shares_read_u64,
8329 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8330 },
052f1dc7 8331#endif
ab84d31e
PT
8332#ifdef CONFIG_CFS_BANDWIDTH
8333 {
8334 .name = "cfs_quota_us",
8335 .read_s64 = cpu_cfs_quota_read_s64,
8336 .write_s64 = cpu_cfs_quota_write_s64,
8337 },
8338 {
8339 .name = "cfs_period_us",
8340 .read_u64 = cpu_cfs_period_read_u64,
8341 .write_u64 = cpu_cfs_period_write_u64,
8342 },
e8da1b18
NR
8343 {
8344 .name = "stat",
a1f7164c 8345 .seq_show = cpu_cfs_stat_show,
e8da1b18 8346 },
ab84d31e 8347#endif
052f1dc7 8348#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8349 {
9f0c1e56 8350 .name = "rt_runtime_us",
06ecb27c
PM
8351 .read_s64 = cpu_rt_runtime_read,
8352 .write_s64 = cpu_rt_runtime_write,
6f505b16 8353 },
d0b27fa7
PZ
8354 {
8355 .name = "rt_period_us",
f4c753b7
PM
8356 .read_u64 = cpu_rt_period_read_uint,
8357 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8358 },
2480c093
PB
8359#endif
8360#ifdef CONFIG_UCLAMP_TASK_GROUP
8361 {
8362 .name = "uclamp.min",
8363 .flags = CFTYPE_NOT_ON_ROOT,
8364 .seq_show = cpu_uclamp_min_show,
8365 .write = cpu_uclamp_min_write,
8366 },
8367 {
8368 .name = "uclamp.max",
8369 .flags = CFTYPE_NOT_ON_ROOT,
8370 .seq_show = cpu_uclamp_max_show,
8371 .write = cpu_uclamp_max_write,
8372 },
052f1dc7 8373#endif
d1ccc66d 8374 { } /* Terminate */
68318b8e
SV
8375};
8376
d41bf8c9
TH
8377static int cpu_extra_stat_show(struct seq_file *sf,
8378 struct cgroup_subsys_state *css)
0d593634 8379{
0d593634
TH
8380#ifdef CONFIG_CFS_BANDWIDTH
8381 {
d41bf8c9 8382 struct task_group *tg = css_tg(css);
0d593634
TH
8383 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8384 u64 throttled_usec;
8385
8386 throttled_usec = cfs_b->throttled_time;
8387 do_div(throttled_usec, NSEC_PER_USEC);
8388
8389 seq_printf(sf, "nr_periods %d\n"
8390 "nr_throttled %d\n"
8391 "throttled_usec %llu\n",
8392 cfs_b->nr_periods, cfs_b->nr_throttled,
8393 throttled_usec);
8394 }
8395#endif
8396 return 0;
8397}
8398
8399#ifdef CONFIG_FAIR_GROUP_SCHED
8400static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
8401 struct cftype *cft)
8402{
8403 struct task_group *tg = css_tg(css);
8404 u64 weight = scale_load_down(tg->shares);
8405
8406 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
8407}
8408
8409static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
8410 struct cftype *cft, u64 weight)
8411{
8412 /*
8413 * cgroup weight knobs should use the common MIN, DFL and MAX
8414 * values which are 1, 100 and 10000 respectively. While it loses
8415 * a bit of range on both ends, it maps pretty well onto the shares
8416 * value used by scheduler and the round-trip conversions preserve
8417 * the original value over the entire range.
8418 */
8419 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
8420 return -ERANGE;
8421
8422 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
8423
8424 return sched_group_set_shares(css_tg(css), scale_load(weight));
8425}
8426
8427static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
8428 struct cftype *cft)
8429{
8430 unsigned long weight = scale_load_down(css_tg(css)->shares);
8431 int last_delta = INT_MAX;
8432 int prio, delta;
8433
8434 /* find the closest nice value to the current weight */
8435 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
8436 delta = abs(sched_prio_to_weight[prio] - weight);
8437 if (delta >= last_delta)
8438 break;
8439 last_delta = delta;
8440 }
8441
8442 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
8443}
8444
8445static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
8446 struct cftype *cft, s64 nice)
8447{
8448 unsigned long weight;
7281c8de 8449 int idx;
0d593634
TH
8450
8451 if (nice < MIN_NICE || nice > MAX_NICE)
8452 return -ERANGE;
8453
7281c8de
PZ
8454 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
8455 idx = array_index_nospec(idx, 40);
8456 weight = sched_prio_to_weight[idx];
8457
0d593634
TH
8458 return sched_group_set_shares(css_tg(css), scale_load(weight));
8459}
8460#endif
8461
8462static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
8463 long period, long quota)
8464{
8465 if (quota < 0)
8466 seq_puts(sf, "max");
8467 else
8468 seq_printf(sf, "%ld", quota);
8469
8470 seq_printf(sf, " %ld\n", period);
8471}
8472
8473/* caller should put the current value in *@periodp before calling */
8474static int __maybe_unused cpu_period_quota_parse(char *buf,
8475 u64 *periodp, u64 *quotap)
8476{
8477 char tok[21]; /* U64_MAX */
8478
4c47acd8 8479 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
0d593634
TH
8480 return -EINVAL;
8481
8482 *periodp *= NSEC_PER_USEC;
8483
8484 if (sscanf(tok, "%llu", quotap))
8485 *quotap *= NSEC_PER_USEC;
8486 else if (!strcmp(tok, "max"))
8487 *quotap = RUNTIME_INF;
8488 else
8489 return -EINVAL;
8490
8491 return 0;
8492}
8493
8494#ifdef CONFIG_CFS_BANDWIDTH
8495static int cpu_max_show(struct seq_file *sf, void *v)
8496{
8497 struct task_group *tg = css_tg(seq_css(sf));
8498
8499 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
8500 return 0;
8501}
8502
8503static ssize_t cpu_max_write(struct kernfs_open_file *of,
8504 char *buf, size_t nbytes, loff_t off)
8505{
8506 struct task_group *tg = css_tg(of_css(of));
8507 u64 period = tg_get_cfs_period(tg);
8508 u64 quota;
8509 int ret;
8510
8511 ret = cpu_period_quota_parse(buf, &period, &quota);
8512 if (!ret)
8513 ret = tg_set_cfs_bandwidth(tg, period, quota);
8514 return ret ?: nbytes;
8515}
8516#endif
8517
8518static struct cftype cpu_files[] = {
0d593634
TH
8519#ifdef CONFIG_FAIR_GROUP_SCHED
8520 {
8521 .name = "weight",
8522 .flags = CFTYPE_NOT_ON_ROOT,
8523 .read_u64 = cpu_weight_read_u64,
8524 .write_u64 = cpu_weight_write_u64,
8525 },
8526 {
8527 .name = "weight.nice",
8528 .flags = CFTYPE_NOT_ON_ROOT,
8529 .read_s64 = cpu_weight_nice_read_s64,
8530 .write_s64 = cpu_weight_nice_write_s64,
8531 },
8532#endif
8533#ifdef CONFIG_CFS_BANDWIDTH
8534 {
8535 .name = "max",
8536 .flags = CFTYPE_NOT_ON_ROOT,
8537 .seq_show = cpu_max_show,
8538 .write = cpu_max_write,
8539 },
2480c093
PB
8540#endif
8541#ifdef CONFIG_UCLAMP_TASK_GROUP
8542 {
8543 .name = "uclamp.min",
8544 .flags = CFTYPE_NOT_ON_ROOT,
8545 .seq_show = cpu_uclamp_min_show,
8546 .write = cpu_uclamp_min_write,
8547 },
8548 {
8549 .name = "uclamp.max",
8550 .flags = CFTYPE_NOT_ON_ROOT,
8551 .seq_show = cpu_uclamp_max_show,
8552 .write = cpu_uclamp_max_write,
8553 },
0d593634
TH
8554#endif
8555 { } /* terminate */
8556};
8557
073219e9 8558struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748 8559 .css_alloc = cpu_cgroup_css_alloc,
96b77745 8560 .css_online = cpu_cgroup_css_online,
2f5177f0 8561 .css_released = cpu_cgroup_css_released,
92fb9748 8562 .css_free = cpu_cgroup_css_free,
d41bf8c9 8563 .css_extra_stat_show = cpu_extra_stat_show,
eeb61e53 8564 .fork = cpu_cgroup_fork,
bb9d97b6
TH
8565 .can_attach = cpu_cgroup_can_attach,
8566 .attach = cpu_cgroup_attach,
a1f7164c 8567 .legacy_cftypes = cpu_legacy_files,
0d593634 8568 .dfl_cftypes = cpu_files,
b38e42e9 8569 .early_init = true,
0d593634 8570 .threaded = true,
68318b8e
SV
8571};
8572
052f1dc7 8573#endif /* CONFIG_CGROUP_SCHED */
d842de87 8574
b637a328
PM
8575void dump_cpu_task(int cpu)
8576{
8577 pr_info("Task dump for CPU %d:\n", cpu);
8578 sched_show_task(cpu_curr(cpu));
8579}
ed82b8a1
AK
8580
8581/*
8582 * Nice levels are multiplicative, with a gentle 10% change for every
8583 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
8584 * nice 1, it will get ~10% less CPU time than another CPU-bound task
8585 * that remained on nice 0.
8586 *
8587 * The "10% effect" is relative and cumulative: from _any_ nice level,
8588 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
8589 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
8590 * If a task goes up by ~10% and another task goes down by ~10% then
8591 * the relative distance between them is ~25%.)
8592 */
8593const int sched_prio_to_weight[40] = {
8594 /* -20 */ 88761, 71755, 56483, 46273, 36291,
8595 /* -15 */ 29154, 23254, 18705, 14949, 11916,
8596 /* -10 */ 9548, 7620, 6100, 4904, 3906,
8597 /* -5 */ 3121, 2501, 1991, 1586, 1277,
8598 /* 0 */ 1024, 820, 655, 526, 423,
8599 /* 5 */ 335, 272, 215, 172, 137,
8600 /* 10 */ 110, 87, 70, 56, 45,
8601 /* 15 */ 36, 29, 23, 18, 15,
8602};
8603
8604/*
8605 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
8606 *
8607 * In cases where the weight does not change often, we can use the
8608 * precalculated inverse to speed up arithmetics by turning divisions
8609 * into multiplications:
8610 */
8611const u32 sched_prio_to_wmult[40] = {
8612 /* -20 */ 48388, 59856, 76040, 92818, 118348,
8613 /* -15 */ 147320, 184698, 229616, 287308, 360437,
8614 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
8615 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
8616 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
8617 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
8618 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
8619 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
8620};
14a7405b 8621
9d246053
PA
8622void call_trace_sched_update_nr_running(struct rq *rq, int count)
8623{
8624 trace_sched_update_nr_running_tp(rq, count);
8625}