		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
-		 * Today all CPUs that support C3 share cache.
-		 * TBD: This needs to look at cache shared map, once
-		 * multi-core detection patch makes to the base.
+		 * Today all MP CPUs that support C3 share cache.
+		 * And caches should not be flushed by software while
+		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}
+
+	/*
+	 * On all recent Intel platforms, ARB_DISABLE is a nop.
+	 * So set bm_control to zero to indicate that ARB_DISABLE
+	 * is not required when entering a C3-type state on
+	 * P4, Core, and later CPUs.
+	 */
+	if (c->x86_vendor == X86_VENDOR_INTEL &&
+	    (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)))
+		flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
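For context, the new Intel check decodes as: any family above 6 (NetBurst/P4 parts report family 0xF), or family 6 at model 14 or later (model 14 is Yonah, the first Core-generation CPU). Below is a minimal standalone sketch of the same predicate; struct cpu_id and arb_disable_is_nop() are hypothetical stand-ins for the kernel's struct cpuinfo_x86 and the inline test above, not kernel code:

#include <stdbool.h>

#define X86_VENDOR_INTEL 0	/* same value the kernel uses */

/* Hypothetical stand-in for the kernel's struct cpuinfo_x86. */
struct cpu_id {
	int vendor;	/* c->x86_vendor */
	int family;	/* c->x86        */
	int model;	/* c->x86_model  */
};

/*
 * Same predicate as the patch: true on Intel family > 6
 * (NetBurst/P4, family 0xF) and on family 6 from model 14
 * (Yonah, the first Core part) onward.
 */
static bool arb_disable_is_nop(const struct cpu_id *c)
{
	return c->vendor == X86_VENDOR_INTEL &&
	       (c->family > 0x6 || (c->family == 6 && c->model >= 14));
}

So a Core 2 (family 6, model 15) qualifies and gets bm_control cleared, while a Pentium M (family 6, model 13) does not and keeps using ARB_DISABLE.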
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
-	static int bm_check_flag;
+	static int bm_check_flag = -1;
+	static int bm_control_flag = -1;

	if (!cx->address)
		return;
	/* All the logic here assumes flags.bm_check is same across all CPUs */
-	if (!bm_check_flag) {
+	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
+		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
+		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
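
The -1 sentinels matter because 0 is a legitimate probed value for both flags (bm_control is expected to be 0 on the newer platforms handled above): a zero-initialized static cannot distinguish "not yet probed" from "probed, result 0", so the old test if (!bm_check_flag) would re-run the probe on every CPU whenever the first result was 0. A minimal sketch of the same probe-once pattern, with a hypothetical probe_bm_flags() standing in for acpi_processor_power_init_bm_check():

/*
 * Hypothetical probe standing in for
 * acpi_processor_power_init_bm_check(); 0 is a valid result.
 */
static int probe_bm_flags(void)
{
	return 0;
}

int bm_flags_for_this_cpu(void)
{
	static int cached = -1;	/* -1 means "not probed yet" */

	/*
	 * "if (!cached)" would re-run the probe whenever the cached
	 * result is 0; testing against the -1 sentinel makes the
	 * probe run exactly once, whatever it returns.
	 */
	if (cached == -1)
		cached = probe_bm_flags();
	return cached;
}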