/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
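
/*
 * Fixed-point example (illustrative only, not driver code): with
 * FRAC_BITS == 8, int_tofp(3) == 768 (0x300) and fp_toint(768) == 3.
 * A percentage such as 60% becomes div_fp(int_tofp(60), int_tofp(100))
 * == 153 (~0.6 in fixed point), and mul_fp() of that with int_tofp(200)
 * yields ~int_tofp(120), i.e. 60% of 200.
 */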

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
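
/*
 * Controller sketch (comment only, not driver code): the value returned
 * above is the classic discrete PID sum
 *
 *	result = p_gain * err + i_gain * sum(err) + d_gain * (err - last_err)
 *
 * evaluated in FRAC_BITS fixed point; adding (1 << (FRAC_BITS - 1))
 * before fp_toint() makes the conversion round to nearest rather than
 * truncate.
 */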

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(void)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
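
/*
 * Worked example for the mapping above (illustrative numbers only):
 * with hw_min == 8 and hw_max == 40, range == 32; min_perf_pct == 25
 * gives adj_range == 25 * 32 / 100 == 8, so HWP_MIN_PERF is set to
 * hw_min + 8 == 16. The same scaling is applied for HWP_MAX_PERF.
 */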

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/
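
/*
 * Usage example for the files created above (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/pstate_snb/setpoint
 *	echo 95 > /sys/kernel/debug/pstate_snb/setpoint
 *
 * Writing any of the parameters resets the PID state on every online
 * CPU via pid_param_set().
 */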

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
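
/*
 * Illustrative numbers for the calculation above: with min_pstate == 8,
 * max_pstate == 24 and turbo_pstate == 32, total == 25 and
 * no_turbo == 17, so turbo_pct == 100 - (100 * 17 / 25) == 32, i.e.
 * 32% of the available P-state range is turbo-only.
 */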

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
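
/*
 * Usage example for the attributes registered above (created under the
 * cpu subsystem kobject, i.e. /sys/devices/system/cpu/intel_pstate/):
 *
 *	cat /sys/devices/system/cpu/intel_pstate/turbo_pct
 *	echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 *
 * Note that store_no_turbo() returns -EPERM when turbo has been
 * disabled by the BIOS.
 */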

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
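
/*
 * Layout of the MSR_IA32_PERF_CTL value composed above, as this driver
 * uses it: bits [15:8] carry the requested ratio (pstate), bits [7:0]
 * the voltage ID, and bit 32 disengages turbo when the user has turned
 * turbo off while the hardware still allows it.
 */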

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
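
/*
 * The ratio computed above makes the VID a linear interpolation between
 * the min and max P-states, i.e. (in FRAC_BITS fixed point):
 *
 *	vid(pstate) = vid.min + (pstate - min_pstate) *
 *		      (vid.max - vid.min) / (max_pstate - min_pstate)
 *
 * which is exactly what atom_set_pstate() evaluates before clamping.
 */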

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
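
/*
 * Example for the limit math above (illustrative numbers): with
 * turbo_pstate == 32, min_pstate == 8 and limits->max_perf representing
 * 75%, max_perf_adj == fp_toint(32 * 0.75) == 24, clamped to [8, 32].
 * Note that *min is scaled from the same max_perf base, not from the
 * clamped *max.
 */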

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
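
/*
 * In plain terms (sketch, not driver code), the function above computes
 *
 *	core_pct_busy = 100 * delta_aperf / delta_mperf
 *	freq = max_pstate_physical * scaling * core_pct_busy / 100
 *
 * so sample->freq reports the average effective frequency (in kHz,
 * given core_get_scaling() returns 100000) over the last sample.
 */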

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	if (cpu->prev_mperf == mperf) {
		local_irq_restore(flags);
		return;
	}

	tsc = rdtsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0. So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval. If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
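
/*
 * Normalization example (illustrative numbers): if the core measured
 * 60% busy (core_pct_busy == int_tofp(60)) while running at pstate 16
 * against a physical max of 32, the scaled value is 60 * 32 / 16 == 120,
 * i.e. the observed load re-expressed against the maximum P-state that
 * the PID setpoint is defined for.
 */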

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set();
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf = round_up(limits->max_perf, 8);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}
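
/*
 * Example of the policy-to-percent mapping above (illustrative
 * numbers): with cpuinfo.max_freq == 3200000 kHz and a policy range of
 * [1600000, 3200000], min_policy_pct == 50 and max_policy_pct == 100
 * before the sysfs limits are folded in.
 */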

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
		pr_info("intel_pstate: HWP enabled\n");
		hwp_active++;
	}

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
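
/*
 * Kernel command line usage of the parameter parsed above:
 *
 *	intel_pstate=disable	never load this driver
 *	intel_pstate=no_hwp	do not enable hardware P-states (HWP)
 *	intel_pstate=force	load even when ACPI _PPC support was found
 *	intel_pstate=hwp_only	load only on processors with HWP support
 */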

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");