/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
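
/*
 * The knobs above are module parameters of the processor driver, so with
 * a built-in kernel they are typically set on the command line, e.g.
 * "processor.max_cstate=1 processor.nocst=1".  max_cstate caps the
 * deepest C-state used, nocst ignores _CST in favor of FADT data, and
 * latency_factor scales a state's exit latency into its target
 * residency (see acpi_processor_setup_cpuidle_states() below).
 */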

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
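		/*
		 * safe_halt() re-enables interrupts on wakeup, so disable
		 * them again to honor the contract above: interrupts are
		 * expected to be off when we return to the caller.
		 */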
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_processor_resume(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	u64 count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

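	/*
	 * Expected _CST layout, which the loop below walks entry by entry
	 * (per the ACPI spec):
	 *
	 *	Package {
	 *		Count,			// Integer
	 *		Package {		// one per C-state
	 *			Register,	// Buffer (generic address)
	 *			Type,		// Integer (C1..C3)
	 *			Latency,	// Integer (microseconds)
	 *			Power		// Integer (milliwatts)
	 *		},
	 *		...
	 *	}
	 */
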
	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

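	/*
	 * For FFH (MWAIT-style) entry, set the TIF polling flag first:
	 * MWAIT monitors the thread flags word, so remote CPUs can wake
	 * us by setting need_resched() instead of sending an IPI.  Bail
	 * out if a reschedule is already pending.
	 */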
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}


/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

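/*
 * c3_cpu_count tracks how many online CPUs currently sit in C3 so that
 * bus master arbitration is disabled only once the last CPU has entered
 * C3 and re-enabled as soon as any CPU leaves it; c3_lock serializes the
 * counter and the ARB_DIS writes.  See acpi_idle_enter_bm() below.
 */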
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
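	/*
	 * Note: counting starts at CPUIDLE_DRIVER_STATE_START because, on
	 * architectures with CPU_RELAX, cpuidle reserves table index 0
	 * for the polling state.
	 */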
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
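		/*
		 * _CST supplies only a worst-case exit latency, so the
		 * target residency is a heuristic: the state is considered
		 * worthwhile once the expected idle time reaches
		 * latency_factor (default 2) times its exit latency.
		 */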
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status = 0;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}