/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

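/**
 * cpu_idle_poll_ctrl - Enable or disable forced idle polling.
 * @enable: true to add a polling request, false to drop one.
 *
 * Requests nest: each enable increments cpu_idle_force_poll and each
 * disable decrements it, so the idle loop keeps polling while at least
 * one requester is active.
 */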
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

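/*
 * Boot parameters: "nohlt" forces the idle loop to poll, "hlt" clears
 * any forced polling so the idle loop falls back to the normal
 * cpuidle/arch idle path.
 */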
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

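/*
 * cpu_idle_poll - busy-wait in the idle loop with interrupts enabled,
 * spinning in cpu_relax() until a reschedule is needed or the
 * polling/broadcast condition goes away.
 */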
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * Used when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

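/*
 * call_cpuidle - enter the idle state selected for this CPU, bailing
 * out with -EBUSY when a reschedule is already pending.
 */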
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled, so it is pointless to go to
	 * idle; just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this is called with
 * polling set, and it returns with polling set.  If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case
	 * bypass the cpuidle governor and go straight for the deepest idle
	 * state available.  Possibly also suspend the local tick and the
	 * entire timekeeping to prevent timer interrupts from kicking us out
	 * of idle until a proper wakeup interrupt happens.
	 */

	if (idle_should_freeze() || dev->use_deepest_state) {
		if (idle_should_freeze()) {
			entered_state = cpuidle_enter_freeze(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}
		}

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_preempt_disabled();
}

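/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text
 * section, i.e. inside one of the idle entry functions.
 */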
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

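/*
 * Timer used by play_idle() to end an injected idle period: when it
 * fires it marks the request as done and sets need_resched on the
 * current task so do_idle() drops out of its loop.
 */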
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

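/**
 * play_idle - Inject an idle period on the current CPU.
 * @duration_ms: Length of the injected idle period, in milliseconds.
 *
 * Runs do_idle() with the deepest available cpuidle state until a
 * pinned hrtimer armed for @duration_ms expires. Callers are expected
 * to be per-CPU FIFO kthreads affined to a single CPU, as the
 * WARN_ON_ONCE() checks below enforce.
 */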
void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);

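/*
 * cpu_startup_entry - final entry point for a CPU's idle thread; never
 * returns, it loops in do_idle() forever.
 */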
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}