]>
Commit | Line | Data |
---|---|---|
1da1180c PM |
1 | /* |
2 | * The idle loop for all SuperH platforms. | |
3 | * | |
2e046b94 | 4 | * Copyright (C) 2002 - 2009 Paul Mundt |
1da1180c PM |
5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | |
7 | * License. See the file "COPYING" in the main directory of this archive | |
8 | * for more details. | |
9 | */ | |
10 | #include <linux/module.h> | |
11 | #include <linux/init.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/pm.h> | |
14 | #include <linux/tick.h> | |
15 | #include <linux/preempt.h> | |
16 | #include <linux/thread_info.h> | |
17 | #include <linux/irqflags.h> | |
2e046b94 | 18 | #include <linux/smp.h> |
a0bfa137 | 19 | #include <linux/cpuidle.h> |
1da1180c PM |
20 | #include <asm/pgalloc.h> |
21 | #include <asm/system.h> | |
60063497 | 22 | #include <linux/atomic.h> |
763142d1 | 23 | #include <asm/smp.h> |
1da1180c | 24 | |
/*
 * Hook for the platform-selected idle routine; installed either by the
 * platform code directly or by select_idle_routine() below.
 */
void (*pm_idle)(void);

/* Non-zero disables the sleep instruction ("nohlt" boot parameter). */
static int hlt_counter;
28 | |
29 | static int __init nohlt_setup(char *__unused) | |
30 | { | |
31 | hlt_counter = 1; | |
32 | return 1; | |
33 | } | |
34 | __setup("nohlt", nohlt_setup); | |
35 | ||
36 | static int __init hlt_setup(char *__unused) | |
37 | { | |
38 | hlt_counter = 0; | |
39 | return 1; | |
40 | } | |
41 | __setup("hlt", hlt_setup); | |
42 | ||
f533c3d3 PM |
43 | static inline int hlt_works(void) |
44 | { | |
45 | return !hlt_counter; | |
46 | } | |
47 | ||
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	for (;;) {
		if (need_resched())
			break;
		cpu_relax();
	}
}
e869a90e | 60 | void default_idle(void) |
1da1180c | 61 | { |
f533c3d3 | 62 | if (hlt_works()) { |
1da1180c PM |
63 | clear_thread_flag(TIF_POLLING_NRFLAG); |
64 | smp_mb__after_clear_bit(); | |
1da1180c | 65 | |
73a38b83 | 66 | set_bl_bit(); |
f533c3d3 PM |
67 | if (!need_resched()) { |
68 | local_irq_enable(); | |
1da1180c | 69 | cpu_sleep(); |
9dbe00a5 PM |
70 | } else |
71 | local_irq_enable(); | |
1da1180c | 72 | |
1da1180c | 73 | set_thread_flag(TIF_POLLING_NRFLAG); |
73a38b83 | 74 | clear_bl_bit(); |
1da1180c | 75 | } else |
f533c3d3 | 76 | poll_idle(); |
1da1180c PM |
77 | } |
78 | ||
f533c3d3 PM |
79 | /* |
80 | * The idle thread. There's no useful work to be done, so just try to conserve | |
81 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | |
82 | * say that they'd like to reschedule) | |
83 | */ | |
1da1180c PM |
84 | void cpu_idle(void) |
85 | { | |
f533c3d3 PM |
86 | unsigned int cpu = smp_processor_id(); |
87 | ||
1da1180c PM |
88 | set_thread_flag(TIF_POLLING_NRFLAG); |
89 | ||
90 | /* endless idle loop with no priority at all */ | |
91 | while (1) { | |
1268fbc7 FW |
92 | tick_nohz_idle_enter(); |
93 | rcu_idle_enter(); | |
1da1180c | 94 | |
763142d1 | 95 | while (!need_resched()) { |
0e6d4986 PM |
96 | check_pgt_cache(); |
97 | rmb(); | |
98 | ||
763142d1 PM |
99 | if (cpu_is_offline(cpu)) |
100 | play_dead(); | |
101 | ||
f533c3d3 PM |
102 | local_irq_disable(); |
103 | /* Don't trace irqs off for idle */ | |
104 | stop_critical_timings(); | |
cbc158d6 | 105 | if (cpuidle_idle_call()) |
a0bfa137 | 106 | pm_idle(); |
f533c3d3 PM |
107 | /* |
108 | * Sanity check to ensure that pm_idle() returns | |
109 | * with IRQs enabled | |
110 | */ | |
111 | WARN_ON(irqs_disabled()); | |
112 | start_critical_timings(); | |
113 | } | |
1da1180c | 114 | |
1268fbc7 FW |
115 | rcu_idle_exit(); |
116 | tick_nohz_idle_exit(); | |
1da1180c PM |
117 | preempt_enable_no_resched(); |
118 | schedule(); | |
119 | preempt_disable(); | |
1da1180c PM |
120 | } |
121 | } | |
2e046b94 | 122 | |
90851c40 | 123 | void __init select_idle_routine(void) |
f533c3d3 PM |
124 | { |
125 | /* | |
126 | * If a platform has set its own idle routine, leave it alone. | |
127 | */ | |
128 | if (pm_idle) | |
129 | return; | |
130 | ||
131 | if (hlt_works()) | |
132 | pm_idle = default_idle; | |
133 | else | |
134 | pm_idle = poll_idle; | |
135 | } | |
136 | ||
2e046b94 PM |
137 | static void do_nothing(void *unused) |
138 | { | |
139 | } | |
140 | ||
fbb82b03 PM |
141 | void stop_this_cpu(void *unused) |
142 | { | |
143 | local_irq_disable(); | |
f0ccf277 | 144 | set_cpu_online(smp_processor_id(), false); |
fbb82b03 PM |
145 | |
146 | for (;;) | |
147 | cpu_sleep(); | |
148 | } | |
149 | ||
2e046b94 PM |
150 | /* |
151 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | |
152 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | |
153 | * handler on SMP systems. | |
154 | * | |
155 | * Caller must have changed pm_idle to the new value before the call. Old | |
156 | * pm_idle value will not be used by any CPU after the return of this function. | |
157 | */ | |
158 | void cpu_idle_wait(void) | |
159 | { | |
160 | smp_mb(); | |
161 | /* kick all the CPUs so that they exit out of pm_idle */ | |
162 | smp_call_function(do_nothing, NULL, 1); | |
163 | } | |
164 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |