/*
 *	Pentium 4/Xeon CPU on demand clock modulation/speed scaling
 *	(C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *	(C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
 *	(C) 2002 Arjan van de Ven <arjanv@redhat.com>
 *	(C) 2002 Tora T. Engstad
 *	All Rights Reserved
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	The author(s) of this software shall not be held liable for damages
 *	of any nature resulting from the use of this software. This
 *	software is provided AS-IS with no warranties.
 *
 *	Date		Errata		Description
 *	20020525	N44, O17	12.5% or 25% DC causes lockup
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/timex.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timer.h>

#include "speedstep-lib.h"

#define PFX	"p4-clockmod: "
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
		"p4-clockmod", msg)

/*
 * Duty Cycle (3 bits). Note that DC_DISABLE is not specified in the
 * Intel docs; it is used here simply to mean "modulation disabled".
 */
enum {
	DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
	DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};

#define DC_ENTRIES	8


static int has_N44_O17_errata[NR_CPUS];
static unsigned int stock_freq;
static struct cpufreq_driver p4clockmod_driver;
static unsigned int cpufreq_p4_get(unsigned int cpu);

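/*
 * Program the on-demand clock modulation duty cycle for one CPU via
 * MSR_IA32_THERM_CONTROL: bit 4 enables modulation and bits 3:1 select
 * the duty-cycle step. For example, newstate == DC_50PT (4) writes
 * (1 << 4) | (4 << 1), i.e. a 4/8 = 50% duty cycle.
 */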
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;

	if (!cpu_online(cpu) ||
	    (newstate > DC_DISABLE) || (newstate == DC_RESV))
		return -EINVAL;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);

	if (l & 0x01)
		dprintk("CPU#%d currently thermal throttled\n", cpu);

	if (has_N44_O17_errata[cpu] &&
	    (newstate == DC_25PT || newstate == DC_DFLT))
		newstate = DC_38PT;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
	if (newstate == DC_DISABLE) {
		dprintk("CPU#%d disabling modulation\n", cpu);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
	} else {
		dprintk("CPU#%d setting duty cycle to %d%%\n",
			cpu, ((125 * newstate) / 10));
		/* bits 63 - 5	: reserved
		 * bit  4	: enable/disable
		 * bits 3-1	: duty cycle
		 * bit  0	: reserved
		 */
		l = (l & ~14);
		l = l | (1<<4) | ((newstate & 0x7)<<1);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
	}

	return 0;
}


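/*
 * One entry per duty-cycle step plus DC_DISABLE (full speed). The
 * .frequency fields are filled in by cpufreq_p4_cpu_init() as
 * stock_freq * i / 8; e.g. with a hypothetical 2400000 kHz stock
 * frequency, the DC_50PT entry becomes 1200000 kHz and the DC_DISABLE
 * entry the full 2400000 kHz.
 */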
static struct cpufreq_frequency_table p4clockmod_table[] = {
	{DC_RESV, CPUFREQ_ENTRY_INVALID},
	{DC_DFLT, 0},
	{DC_25PT, 0},
	{DC_38PT, 0},
	{DC_50PT, 0},
	{DC_64PT, 0},
	{DC_75PT, 0},
	{DC_88PT, 0},
	{DC_DISABLE, 0},
	{DC_RESV, CPUFREQ_TABLE_END},
};


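/*
 * Look up the table entry matching target_freq, send CPUFREQ_PRECHANGE
 * notifications, program the new duty cycle on every CPU in
 * policy->cpus (see the SDM note below), then send CPUFREQ_POSTCHANGE
 * notifications.
 */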
static int cpufreq_p4_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	unsigned int newstate = DC_RESV;
	struct cpufreq_freqs freqs;
	int i;

	if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
					   target_freq, relation, &newstate))
		return -EINVAL;

	freqs.old = cpufreq_p4_get(policy->cpu);
	freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;

	if (freqs.new == freqs.old)
		return 0;

	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	/* run on each logical CPU,
	 * see section 13.15.3 of IA32 Intel Architecture Software
	 * Developer's Manual, Volume 3
	 */
	for_each_cpu(i, policy->cpus)
		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);

	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}


static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}


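/*
 * Determine the stock (unthrottled) frequency via speedstep-lib, and
 * warn when a driver with real voltage scaling (acpi-cpufreq or
 * speedstep-ich) would serve this CPU better. Returns 0 for
 * unsupported CPUs.
 */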
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
	if (c->x86 == 0x06) {
		if (cpu_has(c, X86_FEATURE_EST))
			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
			       "detected. The acpi-cpufreq module offers "
			       "voltage scaling in addition to frequency "
			       "scaling. You should use that instead of "
			       "p4-clockmod, if possible.\n");
		switch (c->x86_model) {
		case 0x0E: /* Core */
		case 0x0F: /* Core Duo */
		case 0x16: /* Celeron Core */
		case 0x1C: /* Atom */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
		case 0x0D: /* Pentium M (Dothan) */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			/* fall through */
		case 0x09: /* Pentium M (Banias) */
			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
		}
	}

	if (c->x86 != 0xF) {
		if (!cpu_has(c, X86_FEATURE_EST))
			printk(KERN_WARNING PFX "Unknown CPU. "
			       "Please send an e-mail to "
			       "<cpufreq@vger.kernel.org>\n");
		return 0;
	}

	/* on P-4s, the TSC runs with constant frequency independent of
	 * whether throttling is active or not. */
	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
		printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
		       "The speedstep-ich or acpi-cpufreq modules offer "
		       "voltage scaling in addition to frequency scaling. "
		       "You should use either one instead of p4-clockmod, "
		       "if possible.\n");
		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
	}

	return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}



static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	int cpuid = 0;
	unsigned int i;

#ifdef CONFIG_SMP
	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif

	/* Errata workaround */
	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
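	/*
	 * For example, family 0xf, model 1, stepping 2 encodes as 0x0f12,
	 * one of the steppings hit by the N44/O17 errata handled below.
	 */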
	switch (cpuid) {
	case 0x0f07:
	case 0x0f0a:
	case 0x0f11:
	case 0x0f12:
		has_N44_O17_errata[policy->cpu] = 1;
		dprintk("has errata -- disabling low frequencies\n");
	}

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
	    c->x86_model < 2) {
		/* switch to maximum frequency and measure result */
		cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
		recalibrate_cpu_khz();
	}
	/* get max frequency */
	stock_freq = cpufreq_p4_get_frequency(c);
	if (!stock_freq)
		return -EINVAL;

	/* table init */
	for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
		else
			p4clockmod_table[i].frequency = (stock_freq * i)/8;
	}
	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);

	/* cpuinfo and default policy values */

	/* the transition latency is set to be 1 higher than the maximum
	 * transition latency of the ondemand governor */
	policy->cpuinfo.transition_latency = 10000001;
	policy->cur = stock_freq;

	return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
}


static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}

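/*
 * Report the current effective frequency: if bit 4 of
 * MSR_IA32_THERM_CONTROL is set, modulation is active and bits 3:1
 * give the duty-cycle step, so the frequency is stock_freq * step / 8;
 * otherwise the CPU runs at the full stock frequency.
 */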
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);

	if (l & 0x10) {
		l = l >> 1;
		l &= 0x7;
	} else
		l = DC_DISABLE;

	if (l != DC_DISABLE)
		return stock_freq * l / 8;

	return stock_freq;
}

static struct freq_attr *p4clockmod_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver p4clockmod_driver = {
	.verify		= cpufreq_p4_verify,
	.target		= cpufreq_p4_target,
	.init		= cpufreq_p4_cpu_init,
	.exit		= cpufreq_p4_cpu_exit,
	.get		= cpufreq_p4_get,
	.name		= "p4-clockmod",
	.owner		= THIS_MODULE,
	.attr		= p4clockmod_attr,
};


static int __init cpufreq_p4_init(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	int ret;

	/*
	 * THERM_CONTROL is architectural for IA32 now, so
	 * we can rely on the capability checks
	 */
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

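	/*
	 * The driver requires both the software-controlled clock
	 * modulation facilities (X86_FEATURE_ACPI) and the thermal
	 * monitor (X86_FEATURE_ACC, the CPUID "tm" flag).
	 */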
	if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
	    !test_cpu_cap(c, X86_FEATURE_ACC))
		return -ENODEV;

	ret = cpufreq_register_driver(&p4clockmod_driver);
	if (!ret)
		printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
		       "Modulation available\n");

	return ret;
}


static void __exit cpufreq_p4_exit(void)
{
	cpufreq_unregister_driver(&p4clockmod_driver);
}


MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");

late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);