/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
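/*
 * Worked example (added note, not in the original source): the ACPI PM
 * timer runs at PM_TIMER_FREQUENCY = 3579545 Hz, so one tick is about
 * 279 ns (PM_TIMER_TICK_NS = 10^9 / 3579545), and a 100 us interval is
 * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 ticks.
 */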
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
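/*
 * Illustrative values (added note, not in the original source): with
 * HZ = 250 the expression above yields (1U << 10) - 1 = 0x3FF, i.e.
 * 10 jiffies = 40 ms of history; with HZ >= 800 it picks 0xFFFFFFFF,
 * i.e. 32 jiffies of history.
 */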
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
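/*
 * Added note (assumption drawn from acpi_processor_setup_cpuidle() below):
 * cpuidle computes target_residency as latency * latency_factor, so a
 * C-state with a 100 us exit latency and the default latency_factor of 2
 * is only considered for expected idle periods of at least 200 us.
 */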
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
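/*
 * Worked example (added note, not in the original source): with a 24-bit
 * PM timer, t1 = 0x00FFFFF0 followed by t2 = 0x00000010 has wrapped, so
 * ticks_elapsed() returns ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010) &
 * 0x00FFFFFF = 0x1F = 31 ticks instead of a huge bogus delta.
 */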
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
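/*
 * Added summary (assumption based on the function below): tsc_halts_in_c()
 * returns nonzero when the TSC is expected to stop in the given C-state on
 * the boot CPU's vendor, so callers can mark the TSC unstable as a
 * clocksource.
 */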
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		if (irqs_disabled())
			local_irq_enable();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		if (irqs_disabled())
			local_irq_enable();
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->
					      promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
	    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
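/*
 * Added summary (assumption drawn from the thresholds set above): with the
 * default policy a single "short" sleep is enough to demote one level
 * (demotion count threshold of 1), while promotion needs ten long sleeps
 * from C1 and four from C2 or deeper, so the policy backs off quickly and
 * climbs back conservatively.
 */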
#endif /* !CONFIG_CPU_IDLE */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
				acpi_idle_enter_bm :
				acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				    &acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle). Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				       &acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}