// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
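
/*
 * Usage sketch (illustrative, not taken from a real caller):
 * cpu_idle_force_poll is a refcount, so enable/disable calls must be
 * paired:
 *
 *	cpu_idle_poll_ctrl(true);
 *	... window in which CPUs must spin instead of entering idle states ...
 *	cpu_idle_poll_ctrl(false);
 */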

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

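/*
 * Poll the CPU instead of entering an idle state: spin with interrupts
 * enabled, calling cpu_relax(), until a reschedule is pending or the
 * polling condition (forced poll or an expired tick-broadcast timer)
 * goes away.
 */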
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled, so it is pointless to go idle;
	 * just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}

			rcu_idle_exit();
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick_protected();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

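/**
 * cpu_in_idle - Check whether an instruction address is in idle code.
 * @pc: Program counter value to check.
 *
 * Returns true if @pc falls inside the linker-delimited __cpuidle text
 * section declared at the top of this file.
 */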
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
	       pc < (unsigned long)__cpuidle_text_end;
}

struct idle_timer {
	struct hrtimer timer;
	int done;
};

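/*
 * Timer callback for play_idle(): mark the injection period as done and
 * set TIF_NEED_RESCHED so do_idle() breaks out of its loop.
 */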
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

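/**
 * play_idle - Inject an idle period on the current CPU.
 * @duration_ms: Length of the forced idle period, in milliseconds.
 *
 * Marks the current task with PF_IDLE, asks cpuidle for the deepest
 * available state and runs do_idle() until the hrtimer armed below
 * fires. Callers must satisfy the preconditions checked by the
 * WARN_ON_ONCE()s: a SCHED_FIFO kthread pinned to a single CPU.
 *
 * Usage sketch (illustrative, not from the kernel tree): a per-CPU
 * idle-injection kthread could throttle its CPU in bursts:
 *
 *	while (!kthread_should_stop()) {
 *		play_idle(5);
 *		usleep_range(20000, 21000);
 *	}
 */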
void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms),
		      HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);

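/*
 * Entry point for each CPU's idle thread: after arch preparation and
 * hotplug notification it loops in do_idle() forever.
 */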
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	put_prev_task(rq, prev);
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);

	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};