]>
Commit | Line | Data |
---|---|---|
1da1180c PM |
1 | /* |
2 | * The idle loop for all SuperH platforms. | |
3 | * | |
2e046b94 | 4 | * Copyright (C) 2002 - 2009 Paul Mundt |
1da1180c PM |
5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | |
7 | * License. See the file "COPYING" in the main directory of this archive | |
8 | * for more details. | |
9 | */ | |
10 | #include <linux/module.h> | |
11 | #include <linux/init.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/pm.h> | |
14 | #include <linux/tick.h> | |
15 | #include <linux/preempt.h> | |
16 | #include <linux/thread_info.h> | |
17 | #include <linux/irqflags.h> | |
2e046b94 | 18 | #include <linux/smp.h> |
1da1180c PM |
19 | #include <asm/pgalloc.h> |
20 | #include <asm/system.h> | |
21 | #include <asm/atomic.h> | |
763142d1 | 22 | #include <asm/smp.h> |
1da1180c | 23 | |
/*
 * Hook for the platform-selected idle routine; NULL until
 * select_idle_routine() (or a platform) installs one.
 */
void (*pm_idle)(void) = NULL;

/* Non-zero disables cpu_sleep()-based idling ("nohlt" boot param). */
static int hlt_counter;
1da1180c PM |
27 | |
28 | static int __init nohlt_setup(char *__unused) | |
29 | { | |
30 | hlt_counter = 1; | |
31 | return 1; | |
32 | } | |
33 | __setup("nohlt", nohlt_setup); | |
34 | ||
35 | static int __init hlt_setup(char *__unused) | |
36 | { | |
37 | hlt_counter = 0; | |
38 | return 1; | |
39 | } | |
40 | __setup("hlt", hlt_setup); | |
41 | ||
f533c3d3 PM |
42 | static inline int hlt_works(void) |
43 | { | |
44 | return !hlt_counter; | |
45 | } | |
46 | ||
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();

	/* Spin with IRQs on until somebody wants the CPU back. */
	while (!need_resched())
		cpu_relax();
}
58 | ||
e869a90e | 59 | void default_idle(void) |
1da1180c | 60 | { |
f533c3d3 | 61 | if (hlt_works()) { |
1da1180c PM |
62 | clear_thread_flag(TIF_POLLING_NRFLAG); |
63 | smp_mb__after_clear_bit(); | |
1da1180c | 64 | |
73a38b83 | 65 | set_bl_bit(); |
f533c3d3 PM |
66 | if (!need_resched()) { |
67 | local_irq_enable(); | |
1da1180c | 68 | cpu_sleep(); |
9dbe00a5 PM |
69 | } else |
70 | local_irq_enable(); | |
1da1180c | 71 | |
1da1180c | 72 | set_thread_flag(TIF_POLLING_NRFLAG); |
73a38b83 | 73 | clear_bl_bit(); |
1da1180c | 74 | } else |
f533c3d3 | 75 | poll_idle(); |
1da1180c PM |
76 | } |
77 | ||
f533c3d3 PM |
78 | /* |
79 | * The idle thread. There's no useful work to be done, so just try to conserve | |
80 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | |
81 | * say that they'd like to reschedule) | |
82 | */ | |
1da1180c PM |
83 | void cpu_idle(void) |
84 | { | |
f533c3d3 PM |
85 | unsigned int cpu = smp_processor_id(); |
86 | ||
1da1180c PM |
87 | set_thread_flag(TIF_POLLING_NRFLAG); |
88 | ||
89 | /* endless idle loop with no priority at all */ | |
90 | while (1) { | |
f533c3d3 | 91 | tick_nohz_stop_sched_tick(1); |
1da1180c | 92 | |
763142d1 | 93 | while (!need_resched()) { |
0e6d4986 PM |
94 | check_pgt_cache(); |
95 | rmb(); | |
96 | ||
763142d1 PM |
97 | if (cpu_is_offline(cpu)) |
98 | play_dead(); | |
99 | ||
f533c3d3 PM |
100 | local_irq_disable(); |
101 | /* Don't trace irqs off for idle */ | |
102 | stop_critical_timings(); | |
103 | pm_idle(); | |
104 | /* | |
105 | * Sanity check to ensure that pm_idle() returns | |
106 | * with IRQs enabled | |
107 | */ | |
108 | WARN_ON(irqs_disabled()); | |
109 | start_critical_timings(); | |
110 | } | |
1da1180c | 111 | |
1da1180c | 112 | tick_nohz_restart_sched_tick(); |
1da1180c PM |
113 | preempt_enable_no_resched(); |
114 | schedule(); | |
115 | preempt_disable(); | |
1da1180c PM |
116 | } |
117 | } | |
2e046b94 | 118 | |
90851c40 | 119 | void __init select_idle_routine(void) |
f533c3d3 PM |
120 | { |
121 | /* | |
122 | * If a platform has set its own idle routine, leave it alone. | |
123 | */ | |
124 | if (pm_idle) | |
125 | return; | |
126 | ||
127 | if (hlt_works()) | |
128 | pm_idle = default_idle; | |
129 | else | |
130 | pm_idle = poll_idle; | |
131 | } | |
132 | ||
2e046b94 PM |
/* Empty IPI payload used by cpu_idle_wait() to kick CPUs out of idle. */
static void do_nothing(void *unused)
{
}
136 | ||
fbb82b03 PM |
137 | void stop_this_cpu(void *unused) |
138 | { | |
139 | local_irq_disable(); | |
f0ccf277 | 140 | set_cpu_online(smp_processor_id(), false); |
fbb82b03 PM |
141 | |
142 | for (;;) | |
143 | cpu_sleep(); | |
144 | } | |
145 | ||
2e046b94 PM |
146 | /* |
147 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | |
148 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | |
149 | * handler on SMP systems. | |
150 | * | |
151 | * Caller must have changed pm_idle to the new value before the call. Old | |
152 | * pm_idle value will not be used by any CPU after the return of this function. | |
153 | */ | |
154 | void cpu_idle_wait(void) | |
155 | { | |
156 | smp_mb(); | |
157 | /* kick all the CPUs so that they exit out of pm_idle */ | |
158 | smp_call_function(do_nothing, NULL, 1); | |
159 | } | |
160 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |