/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>
#include <linux/livepatch.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

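/*
 * Reference count of forced-polling requests: non-zero when polling idle
 * has been forced, either via the "nohlt" boot parameter or by callers of
 * cpu_idle_poll_ctrl().
 */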
static int __read_mostly cpu_idle_force_poll;

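/*
 * cpu_idle_poll_ctrl - Enable or disable forced polling idle.
 * @enable: true adds a forced-poll reference, false drops one.
 *
 * Calls nest, so each cpu_idle_poll_ctrl(true) should be paired with a
 * later cpu_idle_poll_ctrl(false).
 */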
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

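/*
 * Polling idle loop: spin with interrupts enabled, calling cpu_relax(),
 * until a reschedule is needed. Used instead of a low-power idle state
 * when polling has been forced or the tick broadcast device has expired.
 */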
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be rescheduled, so it is pointless to enter an
	 * idle state; just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is
 * called with polling set, and it returns with polling set. If it ever
 * stops polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more RCU read-side critical sections and one more
	 * step towards the grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
		if (idle_should_enter_s2idle()) {
			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}
		}

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	quiet_vmstat();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

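/*
 * cpu_in_idle - Return true if @pc lies within the __cpuidle text section,
 * i.e. the instruction pointer was in idle code when it was sampled.
 */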
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

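/*
 * Helper for play_idle(): an on-stack hrtimer whose expiry marks the idle
 * injection period as done and sets NEED_RESCHED on the injecting task so
 * that do_idle() breaks out of its loop.
 */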
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

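/*
 * play_idle - Inject an idle period of @duration_ms milliseconds on the
 * current CPU. Must be called from a per-CPU, non-migratable SCHED_FIFO
 * kthread (see the WARN_ON_ONCE() checks below); the task then runs
 * do_idle() with the deepest available idle state until the on-stack
 * hrtimer fires.
 */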
void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);

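/*
 * cpu_startup_entry - Final entry point into the idle loop for a CPU coming
 * online. Never returns; the CPU loops in do_idle() from here on.
 */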
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non-boot CPUs!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}