/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
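/*
 * Worked example of the fixed-point helpers above (illustrative values,
 * not from the original source; assumes FRAC_BITS == 8, i.e. Q24.8):
 *
 *   int_tofp(3)                      == 3 << 8 == 768
 *   mul_fp(int_tofp(3), int_tofp(2)) == (768 * 512) >> 8 == int_tofp(6)
 *   div_fp(int_tofp(3), int_tofp(2)) == (768 << 8) / 512 == 384  (i.e. 1.5)
 *   ceiling_fp(384)                  == 2, since the fraction bits are set
 */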
struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};
static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}
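/*
 * Illustrative trace of pid_calc() (example values, not from the original
 * source), using the core_params gains defined further down (setpoint 97,
 * P gain 20%, I and D gains 0) and busy == int_tofp(100):
 *
 *   fp_error = int_tofp(97) - int_tofp(100) == -768   (-3.0)
 *   pterm    = mul_fp(p_gain, fp_error)     == -153   (~-0.6)
 *   result   = -153 + 128 (rounding term)   == -25
 *   return fp_toint(-25)                    == -1
 *
 * The caller subtracts the return value from the current P-state, so a
 * busy value above the setpoint steps the P-state up by one.
 */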
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}
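/*
 * Illustrative mapping (hypothetical capability values, not from the
 * original source): if HWP_CAPABILITIES reports lowest_perf 8 and
 * highest_perf 40, then range == 32. With limits->min_perf_pct == 50 and
 * limits->max_perf_pct == 100, the request becomes min == 8 + 16 == 24
 * and max == 8 + 32 == 40, i.e. the percentage limits scale the hardware
 * performance range rather than the raw ratio values.
 */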
static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}
static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	int32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
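/*
 * Example output (hypothetical ratios, not from the original source):
 * with min_pstate 8, max_pstate 27 and turbo_pstate 35, total == 28 and
 * no_turbo == 20, so turbo_pct == 100 - 71 == 29, i.e. roughly 29% of
 * the available P-state range is only reachable in turbo mode.
 */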
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}
static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}
static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}
static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static inline int core_get_scaling(void)
{
	return 100000;
}
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
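/*
 * Worked example (hypothetical limits, not from the original source):
 * with turbo_pstate == 35, limits->max_perf == int_tofp(1) and
 * limits->min_perf == 128 (i.e. 50% in Q24.8), this yields
 * *max == fp_toint(35 * 256) == 35 and *min == fp_toint(35 * 128) == 17,
 * so the requested P-state is clamped to the 17..35 ratio window.
 */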
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
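/*
 * Worked example (hypothetical counter deltas, not from the original
 * source): with sample->aperf == 8,000,000 and sample->mperf ==
 * 10,000,000, core_pct_busy becomes int_tofp(80), i.e. the core ran at
 * 80% of its guaranteed frequency while unhalted. With
 * max_pstate_physical == 24 and scaling == 100000 kHz per ratio step,
 * sample->freq works out to 80% of 2,400,000 kHz == 1,920,000 kHz.
 */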
static inline void intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) {
		local_irq_restore(flags);
		return;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cummulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cummulative_iowait = cummulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}
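/*
 * Worked example (hypothetical deltas, not from the original source):
 * if the iowait-adjusted mperf delta is 4,000,000 while the TSC delta is
 * 10,000,000, then cpu_load == int_tofp(100) * 4/10 == int_tofp(40),
 * i.e. the CPU is treated as 40% loaded and that value drives the PID.
 */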
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	intel_pstate_calc_busy(cpu);

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our busyness.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
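/*
 * Worked example of the normalization above (hypothetical values, not
 * from the original source): with core_pct_busy == int_tofp(50),
 * max_pstate_physical == 24 and current_pstate == 12, core_busy becomes
 * mul_fp(int_tofp(50), int_tofp(2)) == int_tofp(100): the core was fully
 * busy *at the requested P-state*. If the sample is additionally 4x older
 * than the sample interval, sample_ratio == 0.25 scales it back down to
 * int_tofp(25) to account for the long idle period.
 */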
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_set_pstate(cpu, target_pstate, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		sample->freq);
}
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		intel_pstate_sample(cpu, time);
		if (!hwp_active)
			intel_pstate_adjust_busy_pstate(cpu);
	}
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;
	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set(policy->cpus);
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}
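/*
 * Worked example of the percentage conversion above (hypothetical
 * policy, not from the original source): with policy->min == 1,600,000,
 * policy->max == 2,400,000 and cpuinfo.max_freq == 3,200,000 (kHz),
 * min_policy_pct == 50 and max_policy_pct == DIV_ROUND_UP(2400000 * 100,
 * 3200000) == 75, before being combined with the sysfs limits.
 */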
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	cpufreq_set_update_util_data(cpu_num, NULL);
	synchronize_sched();

	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
;
1224 static int __initdata no_hwp
;
1225 static int __initdata hwp_only
;
1226 static unsigned int force_load
;
static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set       = funcs->set;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
			!strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			cpufreq_set_update_util_data(cpu, NULL);
			synchronize_sched();
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");