/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}
static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
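
/*
 * Illustrative sketch (not part of the original driver): converting a
 * percentage into the 8.8 fixed point format used by the helpers above.
 * example_pct_to_fp() is a hypothetical helper added only to show the
 * arithmetic: with FRAC_BITS == 8, div_fp(75, 100) == 0xC0 (0.75),
 * fp_toint(0xC0) truncates to 0, and ceiling_fp(0xC0) rounds up to 1.
 */
static inline int32_t example_pct_to_fp(int pct)
{
	return div_fp(pct, 100);
}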
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};
/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};
/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};
/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};
/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is minimum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is maximum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @perf_limits:	Pointer to perf_limit unique to this CPU
 *			Not all fields in the structure are applicable
 *			when per cpu controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
};

static struct cpudata **all_cpu_data;
/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};
/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:		Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

static bool driver_registered __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);
#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}
#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here. Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}
#else
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry _PSS, simply ignore _PSS and continue as
	 * usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain whole turbo frequency range.
	 * This just contains +1 MHZ above the max non turbo frequency,
	 * with control value corresponding to max turbo ratio. But
	 * when cpufreq set policy is called, it will call with this
	 * max frequency, which will cause a reduced performance as
	 * this driver uses real max turbo frequency as the max
	 * frequency. So correct this frequency in _PSS table to
	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}
static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}
#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif /* CONFIG_ACPI */
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
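
/*
 * Illustrative sketch (not part of the original driver): one isolated PID
 * step using the default core gains. example_pid_step() is a hypothetical
 * helper; with a setpoint of 97% and a busy value of 80%, pid_calc()
 * returns roughly 3, which the performance governor subtracts from the
 * current P-state.
 */
static inline signed int example_pid_step(void)
{
	struct _pid pid;

	pid_p_gain_set(&pid, 20);		/* core default p_gain_pct */
	pid_i_gain_set(&pid, 0);
	pid_d_gain_set(&pid, 0);
	pid_reset(&pid, 97, 100, 0, 0);		/* setpoint 97%, deadband 0 */

	return pid_calc(&pid, int_tofp(80));	/* CPU is 80% busy */
}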
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means that the caller didn't
		 * read MSR_HWP_REQUEST, so read it here to get the EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}
static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}
/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};
static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8 bit value, but our ranges restrict the
		 * value which can be set. Here only the top two bits are
		 * used effectively.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here only the top two bits are
		 * used effectively.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}
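
/*
 * Worked example (illustrative): an HWP EPP value of 0x80 maps to index
 * (0x80 >> 6) + 1 == 3, i.e. "balance_power" in energy_perf_strings[],
 * while an EPB value of 0x06 maps to (0x06 >> 2) + 1 == 2, i.e.
 * "balance_performance".
 */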
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not default, convert from index into
		 * energy_perf_strings to epp value, by shifting 6
		 * bits left to use only top two bits in epp.
		 * The resultant epp needs to be shifted by 24 bits to
		 * the epp position in MSR_HWP_REQUEST.
		 */
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 6;

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}
static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);
static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret, i = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	while (energy_perf_strings[i] != NULL) {
		if (!strcmp(str_preference, energy_perf_strings[i])) {
			intel_pstate_set_energy_pref_index(cpu_data, i);
			return count;
		}
		++i;
	}

	return -EINVAL;
}
static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};
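
/*
 * Illustrative usage sketch (not part of the driver): with HWP active the
 * two attributes above appear under each policy in sysfs, e.g.
 *
 *	# cat /sys/devices/system/cpu/cpufreq/policy0/energy_performance_available_preferences
 *	default performance balance_performance balance_power power
 *	# echo balance_power > /sys/devices/system/cpu/cpufreq/policy0/energy_performance_preference
 */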
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, policy->cpus) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		if (limits->no_turbo)
			hw_max = HWP_GUARANTEED_PERF(cap);
		else
			hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->epp_saved >= 0) {
			epp = cpu_data->epp_saved;
			cpu_data->epp_saved = -EINVAL;
			goto update_epp;
		}

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			cpu_data->epp_powersave = epp;
			/* If the EPP read failed, then don't try to write */
			if (epp < 0)
				goto skip_epp;

			epp = 0;
		} else {
			/* skip setting EPP, when saved value is invalid */
			if (cpu_data->epp_powersave < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero. This
			 * means:
			 *  - Policy is not changed
			 *  - user has manually changed
			 *  - Error reading EPB
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_powersave;
		}
update_epp:
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy);

	return 0;
}
static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	int ret;

	if (!hwp_active)
		return 0;

	mutex_lock(&intel_pstate_limits_lock);

	all_cpu_data[policy->cpu]->epp_policy = 0;

	ret = intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}
static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
static struct dentry *debugfs_parent;

struct pid_param {
	char *name;
	void *value;
	struct dentry *dentry;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms, },
	{"d_gain_pct", &pid_params.d_gain_pct, },
	{"i_gain_pct", &pid_params.i_gain_pct, },
	{"deadband", &pid_params.deadband, },
	{"setpoint", &pid_params.setpoint, },
	{"p_gain_pct", &pid_params.p_gain_pct, },
	{NULL, NULL, }
};
static void intel_pstate_debug_expose_params(void)
{
	int i;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;

	for (i = 0; pid_files[i].name; i++) {
		struct dentry *dentry;

		dentry = debugfs_create_file(pid_files[i].name, 0660,
					     debugfs_parent, pid_files[i].value,
					     &fops_pid_param);
		if (!IS_ERR(dentry))
			pid_files[i].dentry = dentry;
	}
}
static void intel_pstate_debug_hide_params(void)
{
	int i;

	if (IS_ERR_OR_NULL(debugfs_parent))
		return;

	for (i = 0; pid_files[i].name; i++) {
		debugfs_remove(pid_files[i].dentry);
		pid_files[i].dentry = NULL;
	}

	debugfs_remove(debugfs_parent);
	debugfs_parent = NULL;
}

/************************** debugfs end ************************/
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
			   struct attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}
static ssize_t store_status(struct kobject *a, struct attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}
static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	int32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}
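
/*
 * Worked example (illustrative): with min_pstate == 8, max_pstate == 23
 * and turbo_pstate == 39, total == 32 states of which no_turbo == 16 are
 * non-turbo, so div_fp(16, 32) == 0.5 and turbo_pct == 100 - 50 == 50.
 */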
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);
}
/************************** sysfs end ************************/
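
/*
 * Illustrative usage sketch (not part of the driver): the global attributes
 * created above live under /sys/devices/system/cpu/intel_pstate/, e.g.
 *
 *	# echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 *	# echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	# cat /sys/devices/system/cpu/intel_pstate/turbo_pct
 *
 * max_perf_pct and min_perf_pct are not created when per_cpu_limits is set.
 */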
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
#define MSR_IA32_POWER_CTL_BIT_EE	19

/* Disable energy efficiency optimization */
static void intel_pstate_disable_ee(int cpu)
{
	u64 power_ctl;
	int ret;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
	if (ret)
		return;

	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
		pr_info("Disabling energy efficiency optimization\n");
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
	}
}
static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}
static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}
static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_tdp_ratio(u64 plat_info)
{
	/* Check how many TDP levels are present */
	if (plat_info & 0x600000000) {
		u64 tdp_ctrl;
		u64 tdp_ratio;
		int tdp_msr;
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSRs are continuous starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
		if (err)
			return err;

		/* For level 1 and 2, bits[23:16] contain the ratio */
		if (tdp_ctrl & 0x03)
			tdp_ratio >>= 16;

		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

		return (int)tdp_ratio;
	}

	return -ENXIO;
}
static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int tdp_ratio;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

	if (hwp_active) {
		/* Turbo activation ratio is not used on HWP platforms */
		return tdp_ratio;
	}

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

		/* Do some sanity checking for safety */
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}
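
/*
 * Worked example (illustrative): core parts scale P-states by 100000 kHz
 * per unit of ratio, so a ratio of 24 corresponds to 2.4 GHz, and
 * core_get_val() encodes it as 24 << 8 == 0x1800 in the PERF_CTL value
 * (with bit 32 set to disengage turbo when it is disabled but available).
 */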
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}
static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}
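
/*
 * Worked example (illustrative): if APERF advanced half as fast as MPERF
 * during the last sample, core_avg_perf == 0.5 in 6.8 extended fixed
 * point. With max_pstate_physical == 32 and core scaling of 100000,
 * get_avg_frequency() yields mul_ext_fp(0.5, 3200000) == 1600000 kHz
 * (1.6 GHz) and get_avg_pstate() yields 16.
 */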
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
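
/*
 * Worked example (illustrative): with turbo available, turbo_pstate == 32
 * and the CPU 50% busy (busy_frac == 0.5 fixed point), the base target is
 * 32 + (32 >> 2) == 40 and mul_fp(40, 0.5) == 20; the result is floored
 * at min_pstate and may be pulled halfway up towards the previous average
 * P-state by the check above.
 */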
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval. If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id)
			intel_pstate_disable_ee(cpunum);

		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}
static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_ext_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq &&
		    !limits->no_turbo) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		perf_limits = &performance_limits;
	else
		perf_limits = &powersave_limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
					perf_limits->no_turbo ?
					cpu->pstate.max_freq :
					cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	/* When per-CPU limits are used, sysfs limits are not used */
	if (!per_cpu_limits) {
		unsigned int max_freq, min_freq;

		max_freq = policy->cpuinfo.max_freq *
					limits->max_sysfs_pct / 100;
		min_freq = policy->cpuinfo.max_freq *
					limits->min_sysfs_pct / 100;
		cpufreq_verify_within_limits(policy, min_freq, max_freq);
	}

	return 0;
}
*policy
)
2243 intel_pstate_set_min_pstate(all_cpu_data
[policy
->cpu
]);
2246 static void intel_pstate_stop_cpu(struct cpufreq_policy
*policy
)
2248 pr_debug("CPU %d exiting\n", policy
->cpu
);
2250 intel_pstate_clear_update_util_hook(policy
->cpu
);
2252 intel_pstate_hwp_save_state(policy
);
2254 intel_cpufreq_stop_cpu(policy
);
2257 static int intel_pstate_cpu_exit(struct cpufreq_policy
*policy
)
2259 intel_pstate_exit_perf_limits(policy
);
2261 policy
->fast_switch_possible
= false;
2266 static int __intel_pstate_cpu_init(struct cpufreq_policy
*policy
)
2268 struct cpudata
*cpu
;
2271 rc
= intel_pstate_init_cpu(policy
->cpu
);
2275 cpu
= all_cpu_data
[policy
->cpu
];
2278 * We need sane value in the cpu->perf_limits, so inherit from global
2279 * perf_limits limits, which are seeded with values based on the
2280 * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
2283 memcpy(cpu
->perf_limits
, limits
, sizeof(struct perf_limits
));
2285 policy
->min
= cpu
->pstate
.min_pstate
* cpu
->pstate
.scaling
;
2286 policy
->max
= cpu
->pstate
.turbo_pstate
* cpu
->pstate
.scaling
;
2288 /* cpuinfo and default policy values */
2289 policy
->cpuinfo
.min_freq
= cpu
->pstate
.min_pstate
* cpu
->pstate
.scaling
;
2290 update_turbo_state();
2291 policy
->cpuinfo
.max_freq
= limits
->turbo_disabled
?
2292 cpu
->pstate
.max_pstate
: cpu
->pstate
.turbo_pstate
;
2293 policy
->cpuinfo
.max_freq
*= cpu
->pstate
.scaling
;
2295 intel_pstate_init_acpi_perf_limits(policy
);
2296 cpumask_set_cpu(policy
->cpu
, policy
->cpus
);
2298 policy
->fast_switch_possible
= true;
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}
static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
*policy
)
2334 struct cpudata
*cpu
= all_cpu_data
[policy
->cpu
];
2335 struct perf_limits
*perf_limits
= limits
;
2337 update_turbo_state();
2338 policy
->cpuinfo
.max_freq
= limits
->turbo_disabled
?
2339 cpu
->pstate
.max_freq
: cpu
->pstate
.turbo_freq
;
2341 cpufreq_verify_within_cpu_limits(policy
);
2344 perf_limits
= cpu
->perf_limits
;
2346 mutex_lock(&intel_pstate_limits_lock
);
2348 intel_pstate_update_perf_limits(policy
, perf_limits
);
2350 mutex_unlock(&intel_pstate_limits_lock
);
static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new,
						  cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

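/*
 * Fast switching is invoked from scheduler context with the transition
 * notifiers bypassed, so this path must not sleep; it simply clamps the
 * request against the current turbo state and writes the new P-state.
 * Rounding up mirrors the CPUFREQ_RELATION_L case of the ->target() path.
 */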
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

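/*
 * Driver (de)registration helpers below back the global "status" sysfs
 * attribute, which allows switching between the active and passive modes
 * at run time.
 */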
static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
}

static int intel_pstate_register_driver(void)
{
	int ret;

	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	mutex_lock(&intel_pstate_limits_lock);
	driver_registered = true;
	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_expose_params();

	return 0;
}

static int intel_pstate_unregister_driver(void)
{
	if (hwp_active)
		return -EBUSY;

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_hide_params();

	mutex_lock(&intel_pstate_limits_lock);
	driver_registered = false;
	mutex_unlock(&intel_pstate_limits_lock);

	cpufreq_unregister_driver(intel_pstate_driver);
	intel_pstate_driver_cleanup();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!driver_registered)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}

static int intel_pstate_update_status(const char *buf, size_t size)
{
	int ret;

	if (size == 3 && !strncmp(buf, "off", size))
		return driver_registered ?
					intel_pstate_unregister_driver() : -EINVAL;

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (driver_registered) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		intel_pstate_driver = &intel_pstate;
		return intel_pstate_register_driver();
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (driver_registered) {
			if (intel_pstate_driver != &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		intel_pstate_driver = &intel_cpufreq;
		return intel_pstate_register_driver();
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

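/*
 * On ACPI systems whose FADT declares a mobile preferred profile
 * (PM_MOBILE), the CPU-load based target algorithm is substituted for
 * the default one.
 */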
#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
			get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

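/*
 * Returns true when P-state control belongs to the platform: either the
 * CPU is in the out-of-band list and MSR_MISC_PWR_MGMT has the OOB bit
 * (bit 8) set, meaning firmware manages P-states directly, or the FADT
 * OEM fields match a vendor table entry above, in which case the ACPI
 * _PSS/_PPC checks decide (subject to the "force" command-line option).
 */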
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC
	 * support has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;
	int rc = 0;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		intel_pstate.attr = hwp_cpufreq_attrs;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver();
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc)
		return rc;

	if (hwp_active)
		pr_info("HWP enabled\n");

	return 0;
}
device_initcall(intel_pstate_init);

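/*
 * Early command-line parsing; e.g. booting with "intel_pstate=passive"
 * selects the intel_cpufreq driver (and also sets no_hwp), while
 * "intel_pstate=disable" keeps the driver from loading at all. The
 * remaining keywords (no_hwp, force, hwp_only, per_cpu_perf_limits,
 * support_acpi_ppc) toggle the matching flags declared earlier.
 */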
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		intel_pstate_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");