/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

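/*
 * These module parameters can also be set on the kernel command line;
 * since this code is normally built in, they take the "processor."
 * prefix there, e.g. booting with
 *
 *	processor.max_cstate=1 processor.nocst=1
 *
 * would limit the driver to C1 and skip _CST evaluation.
 */
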
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

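/*
 * Machines known to misbehave in deeper C-states. For each entry,
 * driver_data is the deepest C-state considered safe; set_max_cstate()
 * clamps max_cstate to it (e.g. the Clevo 5600D below is held to C2).
 */
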
static struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

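/*
 * On many x86 parts the local APIC timer stops in deep C-states
 * (ARCH_APICTIMER_STOPS_ON_C3). The helpers below record, per processor,
 * the shallowest state at which the LAPIC timer is unreliable
 * (timer_broadcast_on_state, left at INT_MAX when it never stops) and
 * switch affected CPUs to the clockevents broadcast mechanism around
 * idle entry and exit.
 */
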
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_processor_resume(void)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

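/*
 * Fallback discovery via FADT/P_BLK for systems without _CST: per ACPI,
 * reading the I/O port at P_BLK + 4 (P_LVL2) enters C2 and at P_BLK + 5
 * (P_LVL3) enters C3, with worst-case latencies taken from the FADT.
 */
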
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

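/*
 * _CST layout as parsed below (field names per the ACPI spec):
 *
 *	Package {
 *		Count,			// Integer
 *		Package {		// one sub-package per C-state
 *			Register,	// Buffer: acpi_power_register
 *			Type,		// Integer: ACPI_STATE_C1..C3
 *			Latency,	// Integer: worst case, in us
 *			Power		// Integer: average, in mW
 *		},
 *		...
 *	}
 *
 * The Power element is type-checked below but its value is not used by
 * this driver.
 */
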
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status;
	u64 count;
	int current_count;
	int i, ret = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		ret = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		ret = -EFAULT;

end:
	kfree(buffer.pointer);

	return ret;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

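/*
 * Idle entry proper. cx->entry_method was chosen during _CST/FADT
 * parsing: ACPI_CSTATE_FFH dispatches to the architectural FFH path
 * (MWAIT on x86), ACPI_CSTATE_HALT uses the HLT instruction, and
 * ACPI_CSTATE_SYSTEMIO triggers the transition by reading an I/O port.
 */
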
/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
		!pr->flags.has_cst;
}

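/*
 * Bus-master handling for C3: when bm_control is available, the last CPU
 * to enter C3 disables bus-master arbitration (ARB_DIS) and the first CPU
 * to leave re-enables it. c3_cpu_count, protected by c3_lock, tracks how
 * many CPUs currently sit in C3.
 */
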
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	lapic_timer_state_broadcast(pr, cx, 0);
}

static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr)) {
			index = CPUIDLE_DRIVER_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

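/*
 * cpuidle setup is split in two: acpi_processor_setup_cpuidle_cx() fills
 * the per-CPU acpi_cstate[] table consulted at idle-entry time, while
 * acpi_processor_setup_cpuidle_states() fills the global driver state
 * table (names, latencies, enter callbacks) shared by all CPUs.
 */
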
/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

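/*
 * Note that _CST/FADT supply an exit latency but no target residency, so
 * target_residency below is estimated as latency * latency_factor
 * (default 2, tunable through the latency_factor module parameter).
 */
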
/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

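/*
 * Registration order: the first processor to initialize registers
 * acpi_idle_driver itself, then every processor registers its own
 * cpuidle_device. acpi_processor_registered counts the live devices so
 * the driver can be unregistered when the last one goes away.
 */
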
int acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}