/*
 * arch/i386/kernel/acpi/cstate.c
 *
 * Copyright (C) 2005 Intel Corporation
 *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>

#include <acpi/processor.h>
#include <asm/acpi.h>

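/*
 * Build the argument that will later be handed to the _PDC (Processor
 * Driver Capabilities) method: an acpi_object_list wrapping a single
 * buffer object that advertises SMP C-state capability to the firmware.
 */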
static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
						*pow)
{
	struct acpi_object_list *obj_list;
	union acpi_object *obj;
	u32 *buf;

	/* allocate and initialize pdc. It will be used later. */
	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
	if (!obj_list) {
		printk(KERN_ERR "Memory allocation error\n");
		return;
	}

	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
	if (!obj) {
		printk(KERN_ERR "Memory allocation error\n");
		kfree(obj_list);
		return;
	}

	buf = kmalloc(12, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "Memory allocation error\n");
		kfree(obj);
		kfree(obj_list);
		return;
	}

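	/*
	 * _PDC buffer layout (three DWORDs, hence the 12-byte allocation
	 * above): revision ID, number of capability DWORDs that follow,
	 * then the capability bits themselves.
	 */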
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;

	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = 12;
	obj->buffer.pointer = (u8 *) buf;
	obj_list->count = 1;
	obj_list->pointer = obj;
	pow->pdc = obj_list;

	return;
}

/* Initialize _PDC data based on the CPU vendor */
void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
				   unsigned int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;

	pow->pdc = NULL;
	if (c->x86_vendor == X86_VENDOR_INTEL)
		acpi_processor_power_init_intel_pdc(pow);

	return;
}

EXPORT_SYMBOL(acpi_processor_power_init_pdc);
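
/*
 * Usage sketch (hypothetical caller, not part of this file): the generic
 * ACPI processor driver is expected to call the helper above to fill in
 * pr->power.pdc before it evaluates the _PDC method, roughly:
 *
 *	acpi_processor_power_init_pdc(&pr->power, pr->id);
 *	if (pr->power.pdc)
 *		acpi_evaluate_object(pr->handle, "_PDC", pr->power.pdc, NULL);
 */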

/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;

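	/* Default: assume the cache must be flushed before entering C3 */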
	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all CPUs that support C3 share cache.
		 * TBD: This needs to look at the cache shared map, once the
		 * multi-core detection patch makes it into the base kernel.
		 */
		flags->bm_check = 1;
	}
}

EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
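
/*
 * Note on consumers (outside this file): when bm_check is set, the generic
 * ACPI processor idle code is expected to rely on the chipset's bus-master
 * status instead of flushing the CPU cache before entering C3; when it is
 * clear, the cache is flushed explicitly before C3 entry.
 */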