/*
 * processor_idle - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>

#include "plpar_wrappers.h"
struct cpuidle_driver pseries_idle_driver = {
	.name =		"pseries_idle",
	.owner =	THIS_MODULE,
};
#define MAX_IDLE_STATE_COUNT	2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;
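/*
 * Called when the snooze delay changes (typically via the per-cpu
 * smt_snooze_delay sysfs attribute, e.g.
 * "echo 100 > /sys/devices/system/cpu/cpu0/smt_snooze_delay"):
 * state 0 (snooze) then polls for at most that many microseconds
 * before giving up the processor.
 */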
void update_smt_snooze_delay(int snooze)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();

	if (drv)
		drv->states[0].target_residency = snooze;
}
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
	*kt_before = ktime_get_real();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
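/*
 * On exit from idle: report the PURR cycles spent waiting to the
 * hypervisor and return the elapsed wall-clock time in microseconds.
 */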
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;

	return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
}
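/*
 * snooze_loop: spin at low SMT thread priority until work arrives or
 * the snooze interval expires; a negative target_residency means poll
 * indefinitely.
 */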
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
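/*
 * Cede the processor to the hypervisor via H_CEDE, but only after the
 * lazily-disabled interrupt state has been reconciled, so that a
 * pending interrupt is not lost while we sleep.
 */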
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked,
	 * also checks if no interrupt has occurred while we
	 * were soft-disabled
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}
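/*
 * dedicated_cede_loop: on a dedicated-processor partition, mark this
 * CPU as donated so the hypervisor can run other partitions' work on
 * it, then cede until an interrupt or a prod wakes us.
 */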
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
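/*
 * shared_cede_loop: on a shared-processor partition the hypervisor
 * already time-slices the physical CPU, so cede immediately.
 */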
static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	check_and_cede_processor();

	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 10,
		.enter = &dedicated_cede_loop },
};
/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};
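/*
 * CPU hotplug callback: enable or disable the per-cpu cpuidle device
 * as CPUs come online or go offline, under the cpuidle lock.
 */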
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
			per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}
static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};
/*
 * pseries_cpuidle_driver_init()
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

		if (idle_state > max_idle_state)
			break;

		/* is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		if (cpuidle_state_table == dedicated_states)
			drv->states[drv->state_count].target_residency =
				__get_cpu_var(smt_snooze_delay);

		drv->state_count += 1;
	}

	return 0;
}
/* pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(pseries_cpuidle_devices);
}
/* pseries_idle_devices_init()
 * allocate, initialize and register cpuidle device
 */
static int pseries_idle_devices_init(void)
{
	int i;
	struct cpuidle_driver *drv = &pseries_idle_driver;
	struct cpuidle_device *dev;

	pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (pseries_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		dev->state_count = drv->state_count;
		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			printk(KERN_DEBUG
				"cpuidle_register_device %d failed!\n", i);
			return -EIO;
		}
	}

	return 0;
}
/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (get_lppaca()->shared_proc)
		cpuidle_state_table = shared_states;
	else
		cpuidle_state_table = dedicated_states;

	return 0;
}
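/*
 * Module init: probe the platform, fill in the driver's state table,
 * register the driver and the per-cpu devices, and finally register
 * the hotplug notifier so late-onlined CPUs get a cpuidle device.
 */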
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register_driver(&pseries_idle_driver);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = pseries_idle_devices_init();
	if (retval) {
		pseries_idle_devices_uninit();
		cpuidle_unregister_driver(&pseries_idle_driver);
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");

	return 0;
}
static void __exit pseries_processor_idle_exit(void)
{
	unregister_cpu_notifier(&setup_hotplug_notifier);
	pseries_idle_devices_uninit();
	cpuidle_unregister_driver(&pseries_idle_driver);
}
module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);
MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");