#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/apic.h>
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		WARN_ON((unsigned long)dst->thread.xstate & 15);
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}
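
/*
 * Note on the path above: arch_dup_task_struct() runs when fork()
 * copies a task, so the child gets its own xstate buffer rather than
 * sharing the parent's. The WARN_ON checks the 16-byte alignment
 * that FXSAVE/FXRSTOR require for the extended state area.
 */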
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC, NULL);
}
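
/*
 * SLAB_PANIC makes kmem_cache_create() panic instead of returning
 * NULL: there is no way to run without this cache, so failing the
 * boot loudly is the only sensible error handling here.
 */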
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;

void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif
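
/*
 * On 64-bit builds there is no hlt_counter machinery and no CPU with
 * a broken HLT, so hlt_use_halt() is always true and default_idle()
 * below always takes the halt path.
 */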
/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, 1);
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end(&it);
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
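
/*
 * Ordering note for default_idle(): the scheduler looks at TS_POLLING
 * to decide whether waking an idle CPU needs an IPI. Clearing
 * TS_POLLING must therefore be visible before the need_resched()
 * test, hence the smp_mb(); otherwise a wakeup slipping in between
 * could be missed and the CPU would halt with work pending.
 */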
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}
static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old
 * value of pm_idle and start using the new one. Required when
 * changing the pm_idle handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this
 * function returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
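
/*
 * Usage sketch (my_idle is a hypothetical replacement handler, not
 * part of this file):
 *
 *	pm_idle = my_idle;
 *	cpu_idle_wait();
 *
 * Once cpu_idle_wait() returns, no CPU can still be inside the old
 * handler.
 */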
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with
 * PNI, which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait
 * state through MWAIT. Whenever someone changes need_resched, we are
 * woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, (ax >> 4) + 1);
	if (!need_resched()) {
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
	trace_power_end(&it);
}
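
/*
 * The hint in ax encodes the target C-state in bits 7:4, which is why
 * trace_power_start() above reports (ax >> 4) + 1. The
 * monitor-then-recheck sequence closes the race where need_resched()
 * becomes true after MONITOR arms the cacheline: any write to the
 * monitored flags word makes MWAIT return immediately.
 */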
/* Default MONITOR/MWAIT with no hints, used for the default C1 state */
static void mwait_idle(void)
{
	struct power_trace it;

	if (!need_resched()) {
		trace_power_start(&it, POWER_CSTATE, 1);
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end(&it);
	} else
		local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, 0);
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(&it);
}
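
/*
 * cpu_relax() is the PAUSE instruction on x86: it keeps this spin
 * loop from saturating the pipeline and is friendlier to a sibling
 * hyperthread, but the CPU stays in C0 and burns power the whole
 * time.
 */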
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power
 * savings then depend on a clock divisor and the current P-state of
 * the core. If all cores of a processor are in halt state (C1) the
 * processor can enter the C1E (C1 enhanced) state. If mwait is used
 * this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;
#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (force_mwait)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * EDX enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}
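
/*
 * Background on the CPUID leaf used above: leaf 0x05 ECX bit 0 says
 * whether the MWAIT extension enumeration in EDX is valid; EDX then
 * carries a 4-bit sub-state count per C-state, with C1's count in
 * bits 7:4 (the 0xf0 mask). A zero count means MWAIT cannot enter
 * C1, so HLT is the better choice.
 */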
/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if (c->x86 < 0x0f)
		return 0;

	/* Family 0x0f models < rev F do not have C1E */
	if (c->x86 == 0x0f && c->x86_model < 0x40)
		return 0;

	return 1;
}
static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
	cpu_clear(cpu, c1e_mask);
}
/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop).
 */
static void c1e_idle(void)
{
	if (need_resched())
		return;

	if (!c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			c1e_detected = 1;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
		}
	}

	if (c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpu_isset(cpu, c1e_mask)) {
			cpu_set(cpu, c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere. Needs
			 * to run with interrupts enabled as it uses
			 * smp_call_function.
			 */
			local_irq_enable();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
			local_irq_disable();
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}
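
/*
 * Why the broadcast dance: with C1E active the local APIC timer stops
 * just as in C3, so per-CPU timer interrupts would be lost. Handing
 * wakeups to the global broadcast clockevent before halting, and
 * taking them back afterwards, keeps timers correct at the cost of
 * extra cross-CPU traffic.
 */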
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => all CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (check_c1e_idle(c)) {
		printk(KERN_INFO "using C1E aware idle routine\n");
		pm_idle = c1e_idle;
	} else
		pm_idle = default_idle;
}
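
/*
 * Resulting priority: an explicit "idle=" boot option wins (it sets
 * pm_idle before this function runs), then MWAIT where usable, then
 * the C1E aware routine on affected AMD parts, and finally plain HLT
 * via default_idle().
 */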
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else if (!strcmp(str, "halt")) {
		/*
		 * With idle=halt, HLT is forced for CPU idle and the
		 * C2/C3 states will not be entered.
		 * boot_option_idle_override is left untouched so the
		 * CPU idle driver can still be loaded.
		 */
		pm_idle = default_idle;
		idle_halt = 1;
		return 0;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With idle=nomwait, MWAIT is disabled for the CPU
		 * C2/C3 states. boot_option_idle_override is left
		 * untouched here as well.
		 */
		idle_nomwait = 1;
		return 0;
	} else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
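
/*
 * Quick reference for the "idle=" options handled above:
 *
 *	idle=poll	busy-wait in poll_idle(); lowest wakeup
 *			latency, highest power draw
 *	idle=mwait	force MWAIT even where mwait_usable() would
 *			reject it
 *	idle=halt	force HLT; C2/C3 states are not entered
 *	idle=nomwait	do not use MWAIT for the C2/C3 states
 */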