// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
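
/*
 * Worked example for the fixed-point helpers above (illustrative only, not
 * part of the original driver; assumes FRAC_BITS == 8, so 1.0 is stored
 * as 256):
 *
 *	int_tofp(3)                       == 768   // 3.0
 *	mul_fp(int_tofp(3), int_tofp(2))  == 1536  // 6.0 = (768 * 512) >> 8
 *	div_fp(3, 2)                      == 384   // 1.5 = (3 << 8) / 2
 *	fp_toint(384)                     == 1     // truncates the fraction
 *	ceiling_fp(384)                   == 2     // rounds the fraction up
 */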
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	perf_ctl_scaling;
	int	scaling;
	int	turbo_pstate;
	unsigned int min_freq;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;

	u64	last_update;
	u64	last_sample_time;
	u64	aperf_mperf_shift;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_cached;
	u64 hwp_req_cached;
	u64 hwp_cap_cached;
	u64 last_io_update;
	unsigned int sched_flags;
	u32 hwp_boost_min;
	bool suspended;
};

static struct cpudata **all_cpu_data;
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}
#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}

static int intel_pstate_get_cppc_guaranteed(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return ret;

	if (cppc_perf.guaranteed_perf)
		return cppc_perf.guaranteed_perf;

	return cppc_perf.nominal_perf;
}

#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain whole turbo frequency range.
	 * This just contains +1 MHZ above the max non turbo frequency,
	 * with control value corresponding to max turbo ratio. But
	 * when cpufreq set policy is called, it will call with this
	 * max frequency, which will cause a reduced performance as
	 * this driver uses real max turbo frequency as the max
	 * frequency. So correct this frequency in _PSS table to
	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!global.turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
{
	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
}

static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
					struct cppc_perf_caps *caps)
{
	if (cppc_get_perf_caps(cpu->cpu, caps))
		return false;

	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
{
	pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);

	cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
}
/**
 * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface. If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, the scaling factor between HWP performance levels and CPU
 * frequency needs to be determined which can be done with the help of the
 * observation that certain HWP performance levels should correspond to certain
 * P-states, like for example the HWP highest performance should correspond
 * to the maximum turbo P-state of the CPU.
 */
static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
{
	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo();
	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
	int perf_ctl_max = pstate_funcs.get_max();
	int max_freq = perf_ctl_max * perf_ctl_scaling;
	int scaling = INT_MAX;
	int freq;

	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
	pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);

	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);

	if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
		struct cppc_perf_caps caps;

		if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
			if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
				pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);

				/*
				 * If the CPPC nominal performance is valid, it
				 * can be assumed to correspond to cpu_khz.
				 */
				if (caps.nominal_perf == perf_ctl_max_phys) {
					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
					return;
				}
				scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
			} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
				pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);

				/*
				 * If the CPPC guaranteed performance is valid,
				 * it can be assumed to correspond to max_freq.
				 */
				if (caps.guaranteed_perf == perf_ctl_max) {
					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
					return;
				}
				scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
			}
		}
	}
	/*
	 * If using the CPPC data to compute the HWP-to-frequency scaling factor
	 * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
	 * the assumption that it corresponds to max_freq.
	 */
	if (scaling > perf_ctl_scaling) {
		pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);

		if (cpu->pstate.max_pstate == perf_ctl_max) {
			intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
			return;
		}
		scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
		if (scaling > perf_ctl_scaling) {
			/*
			 * This should not happen, because it would mean that
			 * the number of HWP perf levels was less than the
			 * number of P-states, so use the PERF_CTL scaling in
			 * that case.
			 */
			pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
				 scaling);

			intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
			return;
		}
	}

	/*
	 * If the product of the HWP performance scaling factor obtained above
	 * and the HWP_CAP highest performance is greater than the maximum turbo
	 * frequency corresponding to the pstate_funcs.get_turbo() return value,
	 * the scaling factor is too high, so recompute it so that the HWP_CAP
	 * highest performance corresponds to the maximum turbo frequency.
	 */
	if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
		pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);

		cpu->pstate.turbo_freq = turbo_freq;
		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
	}

	cpu->pstate.scaling = scaling;

	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
					 perf_ctl_scaling);

	freq = perf_ctl_max_phys * perf_ctl_scaling;
	cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);

	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
	/*
	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
	 * the effective range of HWP performance levels.
	 */
	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];
	int turbo_pstate = cpu->pstate.turbo_pstate;

	return turbo_pstate ?
		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
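
/*
 * For example (hypothetical values): with min_pstate == 8 and
 * turbo_pstate == 40, min_perf_pct_min() returns 8 * 100 / 40 == 20,
 * i.e. the sysfs min_perf_pct tunable cannot be set below 20 percent.
 */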
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means that the caller didn't read
		 * MSR_HWP_REQUEST, so it needs to be read here to get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};
static const unsigned int epp_values[] = {
	HWP_EPP_PERFORMANCE,
	HWP_EPP_BALANCE_PERFORMANCE,
	HWP_EPP_BALANCE_POWERSAVE,
	HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
	s16 epp;
	int index = -EINVAL;

	*raw_epp = 0;
	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (epp == HWP_EPP_PERFORMANCE)
			return 1;
		if (epp == HWP_EPP_BALANCE_PERFORMANCE)
			return 2;
		if (epp == HWP_EPP_BALANCE_POWERSAVE)
			return 3;
		if (epp == HWP_EPP_POWERSAVE)
			return 4;
		*raw_epp = epp;
		return 0;
	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here only using top two bits
		 * effectively.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}
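
/*
 * Example of the EPB mapping above (illustrative): a raw EPB value of
 * 0x06 falls in the 0x04-0x07 "balance performance" range, and
 * (0x06 >> 2) + 1 == 2, which indexes "balance_performance" in
 * energy_perf_strings[].
 */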
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
	int ret;

	/*
	 * Use the cached HWP Request MSR value, because in the active mode the
	 * register itself may be updated by intel_pstate_hwp_boost_up() or
	 * intel_pstate_hwp_boost_down() at any time.
	 */
	u64 value = READ_ONCE(cpu->hwp_req_cached);

	value &= ~GENMASK_ULL(31, 24);
	value |= (u64)epp << 24;
	/*
	 * The only other updater of hwp_req_cached in the active mode,
	 * intel_pstate_hwp_set(), is called under the same lock as this
	 * function, so it cannot run in parallel with the update below.
	 */
	WRITE_ONCE(cpu->hwp_req_cached, value);
	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
	if (!ret)
		cpu->epp_cached = epp;

	return ret;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index, bool use_raw,
					      u32 raw_epp)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (use_raw)
			epp = raw_epp;
		else if (epp == -EINVAL)
			epp = epp_values[pref_index - 1];

		/*
		 * To avoid confusion, refuse to set EPP to any values different
		 * from 0 (performance) if the current policy is "performance",
		 * because those values would be overridden.
		 */
		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
			return -EBUSY;

		ret = intel_pstate_set_epp(cpu_data, epp);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	char str_preference[21];
	bool raw = false;
	ssize_t ret;
	u32 epp = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0) {
		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
			return ret;

		ret = kstrtouint(buf, 10, &epp);
		if (ret)
			return ret;

		if (epp > 255)
			return -EINVAL;

		raw = true;
	}

	/*
	 * This function runs with the policy R/W semaphore held, which
	 * guarantees that the driver pointer will not change while it is
	 * running.
	 */
	if (!intel_pstate_driver)
		return -EAGAIN;

	mutex_lock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate) {
		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
	} else {
		/*
		 * In the passive mode the governor needs to be stopped on the
		 * target CPU before the EPP update and restarted after it,
		 * which is super-heavy-weight, so make sure it is worth doing
		 * upfront.
		 */
		if (!raw)
			epp = ret ? epp_values[ret - 1] : cpu->epp_default;

		if (cpu->epp_cached != epp) {
			int err;

			cpufreq_stop_governor(policy);
			ret = intel_pstate_set_epp(cpu, epp);
			err = cpufreq_start_governor(policy);
			if (!ret)
				ret = err;
		}
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return ret ?: count;
}
static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference, raw_epp;

	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
	if (preference < 0)
		return preference;

	if (raw_epp)
		return sprintf(buf, "%d\n", raw_epp);

	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int ratio, freq;

	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
	if (ratio <= 0) {
		u64 cap;

		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
		ratio = HWP_GUARANTEED_PERF(cap);
	}

	freq = ratio * cpu->pstate.scaling;
	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);

	return sprintf(buf, "%d\n", freq);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	&base_frequency,
	NULL,
};

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	u64 cap;

	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
	WRITE_ONCE(cpu->hwp_cap_cached, cap);
	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	int scaling = cpu->pstate.scaling;

	__intel_pstate_get_hwp_cap(cpu);

	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (scaling != cpu->pstate.perf_ctl_scaling) {
		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;

		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
						 perf_ctl_scaling);
		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
						   perf_ctl_scaling);
	}
}
static void intel_pstate_hwp_set(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];
	int max, min;
	u64 value;
	s16 epp;

	max = cpu_data->max_perf_ratio;
	min = cpu_data->min_perf_ratio;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
		min = max;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	if (cpu_data->epp_policy == cpu_data->policy)
		goto skip_epp;

	cpu_data->epp_policy = cpu_data->policy;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, then don't try to write */
		if (epp < 0)
			goto skip_epp;

		epp = 0;
	} else {
		/* skip setting EPP, when saved value is invalid */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero. This
		 * means:
		 *  - Policy is not changed
		 *  - user has manually changed
		 *  - Error reading EPB
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
		if (epp)
			goto skip_epp;

		epp = cpu_data->epp_powersave;
	}
	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	} else {
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
	u64 value = READ_ONCE(cpu->hwp_req_cached);
	int min_perf;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * In case the EPP has been set to "performance" by the
		 * active mode "performance" scaling algorithm, replace that
		 * temporary value with the cached EPP one.
		 */
		value &= ~GENMASK_ULL(31, 24);
		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
		WRITE_ONCE(cpu->hwp_req_cached, value);
	}

	value &= ~GENMASK_ULL(31, 0);
	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

	/* Set hwp_max = hwp_min */
	value |= HWP_MAX_PERF(min_perf);
	value |= HWP_MIN_PERF(min_perf);

	/* Set EPP to min */
	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
	u64 power_ctl;

	mutex_lock(&intel_pstate_driver_lock);
	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	if (input) {
		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
	} else {
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
	}
	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
	mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
	intel_pstate_hwp_enable(cpu);
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d suspending\n", cpu->cpu);

	cpu->suspended = true;

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d resuming\n", cpu->cpu);

	/* Only restore if the system default is changed */
	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
		set_power_ctl_ee_state(true);
	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
		set_power_ctl_ee_state(false);

	if (cpu->suspended && hwp_active) {
		mutex_lock(&intel_pstate_limits_lock);

		/* Re-enable HWP, because "online" has not done that. */
		intel_pstate_hwp_reenable(cpu);

		mutex_unlock(&intel_pstate_limits_lock);
	}

	cpu->suspended = false;

	return 0;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpudata *cpudata;

	if (!policy)
		return;

	cpudata = all_cpu_data[cpu];
	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

	refresh_frequency_limits(policy);

	cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
	mutex_lock(&intel_pstate_driver_lock);

	update_turbo_state();
	/*
	 * If turbo has been turned on or off globally, policy limits for
	 * all CPUs need to be updated to reflect that.
	 */
	if (global.turbo_disabled_mf != global.turbo_disabled) {
		global.turbo_disabled_mf = global.turbo_disabled;
		arch_set_max_freq_ratio(global.turbo_disabled);
		for_each_possible_cpu(cpu)
			intel_pstate_update_max_freq(cpu);
	} else {
		cpufreq_update_policy(cpu);
	}

	mutex_unlock(&intel_pstate_driver_lock);
}
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}
static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (global.turbo_disabled)
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (global.turbo_disabled) {
		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	global.no_turbo = clamp_t(int, input, 0, 1);

	if (global.no_turbo) {
		struct cpudata *cpu = all_cpu_data[0];
		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

		/* Squash the global minimum into the permitted range. */
		if (global.min_perf_pct > pct)
			global.min_perf_pct = pct;
	}

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}
static void update_qos_request(enum freq_qos_req_type type)
{
	struct freq_qos_request *req;
	struct cpufreq_policy *policy;
	int i;

	for_each_possible_cpu(i) {
		struct cpudata *cpu = all_cpu_data[i];
		unsigned int freq, perf_pct;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			continue;

		req = policy->driver_data;
		cpufreq_cpu_put(policy);

		if (!req)
			continue;

		if (hwp_active)
			intel_pstate_get_hwp_cap(cpu);

		if (type == FREQ_QOS_MIN) {
			perf_pct = global.min_perf_pct;
		} else {
			req++;
			perf_pct = global.max_perf_pct;
		}

		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);

		if (freq_qos_update_request(req, freq) < 0)
			pr_warn("Failed to update freq constraint: CPU%d\n", i);
	}
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MAX);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.min_perf_pct = clamp_t(int, input,
				      min_perf_pct_min(), global.max_perf_pct);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MIN);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 10, &input);
	if (ret)
		return ret;

	mutex_lock(&intel_pstate_driver_lock);
	hwp_boost = !!input;
	intel_pstate_update_policies();
	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
				      char *buf)
{
	u64 power_ctl;
	int enable;

	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
	return sprintf(buf, "%d\n", !enable);
}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	bool input;
	int ret;

	ret = kstrtobool(buf, &input);
	if (ret)
		return ret;

	set_power_ctl_ee_state(input);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	NULL
};

static const struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;
static void __init intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
		WARN_ON(rc);

		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
		WARN_ON(rc);
	}

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
		WARN_ON(rc);
	}
}

static void __init intel_pstate_sysfs_remove(void)
{
	if (!intel_pstate_kobject)
		return;

	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
	}

	if (!per_cpu_limits) {
		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
	}

	kobject_put(intel_pstate_kobject);
}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
	int rc;

	if (!hwp_active)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
	WARN_ON_ONCE(rc);
}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
	if (!hwp_active)
		return;

	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}
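
/*
 * For example (hypothetical): if MSR_FSB_FREQ reports index 1, the bus
 * clock scaling factor is 100000 kHz, so a P-state ratio of 20
 * corresponds to 20 * 100000 kHz = 2 GHz.
 */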
static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(u64 plat_info)
{
	/* Check how many TDP levels are present */
	if (plat_info & 0x600000000) {
		u64 tdp_ctrl;
		u64 tdp_ratio;
		int tdp_msr;
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSRs are continuous starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
		if (err)
			return err;

		/* For level 1 and 2, bits[23:16] contain the ratio */
		if (tdp_ctrl & 0x03)
			tdp_ratio >>= 16;

		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

		return (int)tdp_ratio;
	}

	return -ENXIO;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int tdp_ratio;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

	if (hwp_active) {
		/* Turbo activation ratio is not used on HWP platforms */
		return tdp_ratio;
	}

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

		/* Do some sanity checking for safety */
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
	int perf_ctl_max_phys = pstate_funcs.get_max_physical();
	int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
					    pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

	if (hwp_active && !hwp_mode_bdw) {
		__intel_pstate_get_hwp_cap(cpu);

		if (hybrid_cpu)
			intel_pstate_hybrid_hwp_calibrate(cpu);
		else
			cpu->pstate.scaling = perf_ctl_scaling;
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max();
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {
		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * Long hold time will keep high perf limits for long time,
 * which negatively impacts perf/watt for some workloads,
 * like specpower. 3ms is based on experiments on some
 * workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (User changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost at all.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *     Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *     Should result in two level boost:
	 *         (min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *     Should result in three level boost:
	 *         (min + p1)/2, P1 and P0.
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at half way mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}
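
/*
 * Example boost ladder (hypothetical values): with min == 10,
 * HWP guaranteed == 20 and max == 30, consecutive boost-up passes move
 * hwp_boost_min from 10 to (10 + 20) / 2 == 15, then to 20, then to 30,
 * matching the three-level boost described above.
 */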
static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
	if (cpu->hwp_boost_min) {
		bool expired;

		/* Check if we are idle for hold time to boost down */
		expired = time_after64(cpu->sample.time, cpu->last_update +
				       hwp_boost_hold_time_ns);
		if (expired) {
			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
			cpu->hwp_boost_min = 0;
		}
	}
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{
	cpu->sample.time = time;

	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
		bool do_io = false;

		cpu->sched_flags = 0;
		/*
		 * Set iowait_boost flag and update time. Since IO WAIT flag
		 * is set all the time, we can't just conclude that some IO
		 * bound activity is scheduled on this CPU with just one
		 * occurrence. If we receive at least two in two consecutive
		 * ticks, then we treat it as a boost candidate.
		 */
		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
			do_io = true;

		cpu->last_io_update = time;

		if (do_io)
			intel_pstate_hwp_boost_up(cpu);

	} else {
		intel_pstate_hwp_boost_down(cpu);
	}
}

static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
						u64 time, unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);

	cpu->sched_flags |= flags;

	if (smp_processor_id() == cpu->cpu)
		intel_pstate_update_util_hwp_local(cpu, time);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	if (cpu->last_sample_time) {
		intel_pstate_calc_avg_perf(cpu);
		return true;
	}
	return false;
}
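
/*
 * Illustrative sample (hypothetical deltas): if APERF advanced by
 * 8,000,000 and MPERF by 10,000,000 between two samples, then
 * core_avg_perf = div_ext_fp(8000000, 10000000) represents 0.8, i.e.
 * the CPU ran at 80% of its maximum non-turbo frequency on average.
 */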
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
			   sample->tsc);

	if (busy_frac < cpu->iowait_boost)
		busy_frac = cpu->iowait_boost;

	sample->busy_scaled = busy_frac * 100;

	target = global.no_turbo || global.turbo_disabled ?
		cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
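
/*
 * Worked example (hypothetical values): with turbo_pstate == 40 and a
 * measured busy_frac of about 0.7 (fixed-point value 179 with
 * FRAC_BITS == 8), the code computes target = 40 + 40/4 = 50, then
 * mul_fp(50, 179) == 34, so the CPU would be asked to run at ratio 34
 * unless the previous-cycle average P-state pulls it higher.
 */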
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
	int max_pstate = max(min_pstate, cpu->max_perf_ratio);

	return clamp_t(int, pstate, min_pstate, max_pstate);
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
	int from = cpu->pstate.current_pstate;
	struct sample *sample;
	int target_pstate;

	update_turbo_state();

	target_pstate = get_target_pstate(cpu);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	/* Don't allow remote callbacks */
	if (smp_processor_id() != cpu->cpu)
		return;

	delta_ns = time - cpu->last_update;
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* Start over if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC) {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
			cpu->iowait_boost <<= 1;
			if (cpu->iowait_boost > int_tofp(1))
				cpu->iowait_boost = int_tofp(1);
		} else {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		}
	} else if (cpu->iowait_boost) {
		/* Clear iowait_boost if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC)
			cpu->iowait_boost = 0;
		else
			cpu->iowait_boost >>= 1;
	}
	cpu->last_update = time;
	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
		return;

	if (intel_pstate_sample(cpu, time))
		intel_pstate_adjust_pstate(cpu);
}
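
/*
 * Example of the iowait boost progression above (illustrative): each
 * tick with SCHED_CPUFREQ_IOWAIT set doubles the boost, so iowait_boost
 * moves through 1/8 -> 1/4 -> 1/2 -> 1 of full scale (ONE_EIGHTH_FP ==
 * 32 up to int_tofp(1) == 256 with FRAC_BITS == 8), and it halves on
 * every non-iowait update until it reaches zero.
 */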
2215 static struct pstate_funcs core_funcs
= {
2216 .get_max
= core_get_max_pstate
,
2217 .get_max_physical
= core_get_max_pstate_physical
,
2218 .get_min
= core_get_min_pstate
,
2219 .get_turbo
= core_get_turbo_pstate
,
2220 .get_scaling
= core_get_scaling
,
2221 .get_val
= core_get_val
,
2224 static const struct pstate_funcs silvermont_funcs
= {
2225 .get_max
= atom_get_max_pstate
,
2226 .get_max_physical
= atom_get_max_pstate
,
2227 .get_min
= atom_get_min_pstate
,
2228 .get_turbo
= atom_get_turbo_pstate
,
2229 .get_val
= atom_get_val
,
2230 .get_scaling
= silvermont_get_scaling
,
2231 .get_vid
= atom_get_vid
,
2234 static const struct pstate_funcs airmont_funcs
= {
2235 .get_max
= atom_get_max_pstate
,
2236 .get_max_physical
= atom_get_max_pstate
,
2237 .get_min
= atom_get_min_pstate
,
2238 .get_turbo
= atom_get_turbo_pstate
,
2239 .get_val
= atom_get_val
,
2240 .get_scaling
= airmont_get_scaling
,
2241 .get_vid
= atom_get_vid
,
2244 static const struct pstate_funcs knl_funcs
= {
2245 .get_max
= core_get_max_pstate
,
2246 .get_max_physical
= core_get_max_pstate_physical
,
2247 .get_min
= core_get_min_pstate
,
2248 .get_turbo
= knl_get_turbo_pstate
,
2249 .get_aperf_mperf_shift
= knl_get_aperf_mperf_shift
,
2250 .get_scaling
= core_get_scaling
,
2251 .get_val
= core_get_val
,
#define X86_MATCH(model, policy)					 \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_APERFMPERF, &policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	X86_MATCH(SANDYBRIDGE,		core_funcs),
	X86_MATCH(SANDYBRIDGE_X,	core_funcs),
	X86_MATCH(ATOM_SILVERMONT,	silvermont_funcs),
	X86_MATCH(IVYBRIDGE,		core_funcs),
	X86_MATCH(HASWELL,		core_funcs),
	X86_MATCH(BROADWELL,		core_funcs),
	X86_MATCH(IVYBRIDGE_X,		core_funcs),
	X86_MATCH(HASWELL_X,		core_funcs),
	X86_MATCH(HASWELL_L,		core_funcs),
	X86_MATCH(HASWELL_G,		core_funcs),
	X86_MATCH(BROADWELL_G,		core_funcs),
	X86_MATCH(ATOM_AIRMONT,		airmont_funcs),
	X86_MATCH(SKYLAKE_L,		core_funcs),
	X86_MATCH(BROADWELL_X,		core_funcs),
	X86_MATCH(SKYLAKE,		core_funcs),
	X86_MATCH(BROADWELL_D,		core_funcs),
	X86_MATCH(XEON_PHI_KNL,		knl_funcs),
	X86_MATCH(XEON_PHI_KNM,		knl_funcs),
	X86_MATCH(ATOM_GOLDMONT,	core_funcs),
	X86_MATCH(ATOM_GOLDMONT_PLUS,	core_funcs),
	X86_MATCH(SKYLAKE_X,		core_funcs),
	X86_MATCH(COMETLAKE,		core_funcs),
	X86_MATCH(ICELAKE_X,		core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(BROADWELL_D,		core_funcs),
	X86_MATCH(BROADWELL_X,		core_funcs),
	X86_MATCH(SKYLAKE_X,		core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	X86_MATCH(KABYLAKE,		core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
	X86_MATCH(SKYLAKE_X,		core_funcs),
	X86_MATCH(SKYLAKE,		core_funcs),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;

		cpu->cpu = cpunum;

		cpu->epp_default = -EINVAL;

		if (hwp_active) {
			const struct x86_cpu_id *id;

			intel_pstate_hwp_enable(cpu);

			id = x86_match_cpu(intel_pstate_hwp_boost_ids);
			if (id && intel_pstate_acpi_pm_profile_server())
				hwp_boost = true;
		}
	} else if (hwp_active) {
		/*
		 * Re-enable HWP in case this happens after a resume from ACPI
		 * S3 if the CPU was offline during the whole system/resume
		 * cycle.
		 */
		intel_pstate_hwp_reenable(cpu);
	}

	cpu->epp_powersave = -EINVAL;
	cpu->epp_policy = 0;

	intel_pstate_get_cpu_pstates(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (hwp_active && !hwp_boost)
		return;

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     (hwp_active ?
				      intel_pstate_update_util_hwp :
				      intel_pstate_update_util));
	cpu->update_util_set = true;
}
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_rcu();
}
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
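
/*
 * Translate the policy's min/max frequencies into P-state ratios, roughly
 * freq / perf_ctl_scaling, and normalize them against the global
 * min_perf_pct/max_perf_pct limits (which are percentages of the maximum
 * turbo P-state), unless per-CPU limits are in effect.
 */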
static void intel_pstate_update_perf_limits(struct cpudata *cpu,
					    unsigned int policy_min,
					    unsigned int policy_max)
{
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int32_t max_policy_perf, min_policy_perf;

	max_policy_perf = policy_max / perf_ctl_scaling;
	if (policy_max == policy_min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = policy_min / perf_ctl_scaling;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	/*
	 * HWP needs some special consideration, because HWP_REQUEST uses
	 * abstract values to represent performance rather than pure ratios.
	 */
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);

		if (cpu->pstate.scaling != perf_ctl_scaling) {
			int scaling = cpu->pstate.scaling;
			int freq;

			freq = max_policy_perf * perf_ctl_scaling;
			max_policy_perf = DIV_ROUND_UP(freq, scaling);
			freq = min_policy_perf * perf_ctl_scaling;
			min_policy_perf = DIV_ROUND_UP(freq, scaling);
		}
	}

	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
		 cpu->cpu, min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int turbo_max = cpu->pstate.turbo_pstate;
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);
	}

	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * When hwp_boost was active before and dynamically it
		 * was turned off, in that case we need to clear the
		 * update util hook.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}
static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}
static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	int max_freq;

	update_turbo_state();
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);
		max_freq = global.no_turbo || global.turbo_disabled ?
				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	} else {
		max_freq = intel_pstate_get_max_freq(cpu);
	}
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);

	intel_pstate_adjust_policy_max(cpu, policy);
}

static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);

	return 0;
}
static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going offline\n", cpu->cpu);

	if (cpu->suspended)
		return 0;

	/*
	 * If the CPU is an SMT thread and it goes offline with the performance
	 * settings different from the minimum, it will prevent its sibling
	 * from getting to lower performance levels, so force the minimum
	 * performance on CPU offline to prevent that from happening.
	 */
	if (hwp_active)
		intel_pstate_hwp_offline(cpu);
	else
		intel_pstate_set_min_pstate(cpu);

	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d going online\n", cpu->cpu);

	intel_pstate_init_acpi_perf_limits(policy);

	if (hwp_active) {
		/*
		 * Re-enable HWP and clear the "suspended" flag to let "resume"
		 * know that it need not do that.
		 */
		intel_pstate_hwp_reenable(cpu);
		cpu->suspended = false;
	}

	return 0;
}

static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
	intel_pstate_clear_update_util_hook(policy->cpu);

	return intel_cpufreq_cpu_offline(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	policy->fast_switch_possible = false;

	return 0;
}
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
	update_turbo_state();
	global.turbo_disabled_mf = global.turbo_disabled;
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	/*
	 * Set the policy to powersave to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	policy->policy = CPUFREQ_POLICY_POWERSAVE;

	if (hwp_active) {
		struct cpudata *cpu = all_cpu_data[policy->cpu];

		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
	}

	return 0;
}
static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_suspend,
	.resume		= intel_pstate_resume,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.offline	= intel_pstate_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_pstate",
};
static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	intel_pstate_verify_cpu_policy(cpu, policy);
	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	return 0;
}
/* Use of trace in passive mode:
 *
 * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
 * core_avg_perf) is not needed and so is re-assigned to indicate if the
 * driver call was via the normal or fast switch path. Various graphs
 * output from the intel_pstate_tracer.py utility that include core_busy
 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
 * so we use 10 to indicate the normal path through the driver, and
 * 90 to indicate the fast switch path through the driver.
 * The scaled_busy field is not used, and is set to 0.
 */

#define INTEL_PSTATE_TRACE_TARGET 10
#define INTEL_PSTATE_TRACE_FAST_SWITCH 90
static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
	struct sample *sample;

	if (!trace_pstate_sample_enabled())
		return;

	if (!intel_pstate_sample(cpu, ktime_get()))
		return;

	sample = &cpu->sample;
	trace_pstate_sample(trace_type,
		0,
		old_pstate,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}
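
/*
 * Pack the min, max and desired performance levels into the cached
 * HWP_REQUEST value and write the MSR only when something has actually
 * changed. Plain wrmsrl() is safe on the fast-switch path because that
 * path runs on the target CPU.
 */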
static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
				     u32 desired, bool fast_switch)
{
	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	value &= ~HWP_DESIRED_PERF(~0L);
	value |= HWP_DESIRED_PERF(desired);

	if (value == prev)
		return;

	WRITE_ONCE(cpu->hwp_req_cached, value);
	if (fast_switch)
		wrmsrl(MSR_HWP_REQUEST, value);
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
					  u32 target_pstate, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_IA32_PERF_CTL,
		       pstate_funcs.get_val(cpu, target_pstate));
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
}
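
/*
 * Common helper for the target() and fast_switch() paths: clip the
 * requested P-state, then program it either through HWP_REQUEST (with the
 * desired field left at 0, so the hardware picks within [target, max]) or
 * through the legacy PERF_CTL MSR.
 */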
static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
				       int target_pstate, bool fast_switch)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int old_pstate = cpu->pstate.current_pstate;

	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (hwp_active) {
		int max_pstate = policy->strict_target ?
					target_pstate : cpu->max_perf_ratio;

		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
					 fast_switch);
	} else if (target_pstate != old_pstate) {
		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
	}

	cpu->pstate.current_pstate = target_pstate;

	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
			    INTEL_PSTATE_TRACE_TARGET, old_pstate);

	return target_pstate;
}
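
/*
 * Frequency-to-P-state conversion depends on the relation:
 * CPUFREQ_RELATION_L wants the lowest frequency at or above the target,
 * hence DIV_ROUND_UP; CPUFREQ_RELATION_H wants the highest one at or below
 * it, hence the truncating division; anything else gets the closest
 * P-state.
 */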
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);

	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

	freqs.new = target_pstate * cpu->pstate.scaling;

	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);

	return target_pstate * cpu->pstate.scaling;
}
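
/*
 * schedutil's adjust_perf() hook (used with HWP only): scale the abstract
 * target_perf/capacity and min_perf/capacity ratios to the highest
 * available P-state (or the guaranteed one when turbo is disabled), so
 * that roughly target_pstate = cap_pstate * target_perf / capacity.
 */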
static void intel_cpufreq_adjust_perf(unsigned int cpunum,
				      unsigned long min_perf,
				      unsigned long target_perf,
				      unsigned long capacity)
{
	struct cpudata *cpu = all_cpu_data[cpunum];
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	int old_pstate = cpu->pstate.current_pstate;
	int cap_pstate, min_pstate, max_pstate, target_pstate;

	update_turbo_state();
	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
					     HWP_HIGHEST_PERF(hwp_cap);

	/* Optimization: Avoid unnecessary divisions. */

	target_pstate = cap_pstate;
	if (target_perf < capacity)
		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);

	min_pstate = cap_pstate;
	if (min_perf < capacity)
		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);

	if (min_pstate < cpu->pstate.min_pstate)
		min_pstate = cpu->pstate.min_pstate;

	if (min_pstate < cpu->min_perf_ratio)
		min_pstate = cpu->min_perf_ratio;

	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
	if (max_pstate < min_pstate)
		max_pstate = min_pstate;

	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);

	intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);

	cpu->pstate.current_pstate = target_pstate;
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
}
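
/*
 * In addition to the common init work, the passive-mode driver registers a
 * pair of frequency QoS requests mirroring the global min_perf_pct and
 * max_perf_pct limits, so that later changes to those limits can be
 * applied through the policy's constraints.
 */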
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;
	struct cpudata *cpu;
	struct device *dev;
	int ret, freq;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	ret = __intel_pstate_cpu_init(policy);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto pstate_exit;
	}

	cpu = all_cpu_data[policy->cpu];

	if (hwp_active) {
		u64 value;

		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;

		intel_pstate_get_hwp_cap(cpu);

		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
		WRITE_ONCE(cpu->hwp_req_cached, value);

		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
	} else {
		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	}

	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_req;
	}

	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);

	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
				   freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto remove_min_req;
	}

	policy->driver_data = req;

	return 0;

remove_min_req:
	freq_qos_remove_request(req);
free_req:
	kfree(req);
pstate_exit:
	intel_pstate_exit_perf_limits(policy);

	return ret;
}
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;

	req = policy->driver_data;

	freq_qos_remove_request(req + 1);
	freq_qos_remove_request(req);
	kfree(req);

	return intel_pstate_cpu_exit(policy);
}
static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_cpufreq_cpu_exit,
	.offline	= intel_cpufreq_cpu_offline,
	.online		= intel_pstate_cpu_online,
	.suspend	= intel_pstate_suspend,
	.resume		= intel_pstate_resume,
	.update_limits	= intel_pstate_update_limits,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *default_driver;
static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	cpus_read_unlock();

	intel_pstate_driver = NULL;
}
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	if (driver == &intel_pstate)
		intel_pstate_sysfs_expose_hwp_dynamic_boost();

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	return 0;
}
static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}
static int intel_pstate_update_status(const char *buf, size_t size)
{
	if (size == 3 && !strncmp(buf, "off", size)) {
		if (!intel_pstate_driver)
			return -EINVAL;

		if (hwp_active)
			return -EBUSY;

		cpufreq_unregister_driver(intel_pstate_driver);
		intel_pstate_driver_cleanup();
		return 0;
	}

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			cpufreq_unregister_driver(intel_pstate_driver);
			intel_pstate_sysfs_hide_hwp_dynamic_boost();
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}

#ifdef CONFIG_ACPI
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	pr_debug("ACPI _PSS not found\n");
	return true;
}
static bool __init intel_pstate_no_acpi_pcch(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		goto not_found;

	if (acpi_has_method(handle, "PCCH"))
		return false;

not_found:
	pr_debug("ACPI PCCH not found\n");
	return true;
}
static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	pr_debug("ACPI _PPC not found\n");
	return false;
}

enum {
	PSS,
	PPC,
};
/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{ } /* End */
};
#define BITMASK_OOB	(BIT(8) | BIT(18))

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & BITMASK_OOB) {
			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
			return true;
		}
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		if (!intel_pstate_no_acpi_pss())
			return true;

		return intel_pstate_no_acpi_pcch();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}
static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define X86_MATCH_HWP(model, hwp_mode)					 \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_HWP, hwp_mode)

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	X86_MATCH_HWP(BROADWELL_X,	INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(BROADWELL_D,	INTEL_PSTATE_HWP_BROADWELL),
	X86_MATCH_HWP(ANY,		0),
	{}
};
static bool intel_pstate_hwp_is_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return !!(value & 0x1);
}
static int __init intel_pstate_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(hwp_support_ids);
	if (id) {
		copy_cpu_funcs(&core_funcs);
		/*
		 * Avoid enabling HWP for processors without EPP support,
		 * because that means incomplete HWP implementation which is a
		 * corner case and supporting it is generally problematic.
		 *
		 * If HWP is enabled already, though, there is no choice but to
		 * deal with it.
		 */
		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
		    intel_pstate_hwp_is_enabled()) {
			hwp_active++;
			hwp_mode_bdw = id->driver_data;
			intel_pstate.attr = hwp_cpufreq_attrs;
			intel_cpufreq.attr = hwp_cpufreq_attrs;
			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
			if (!default_driver)
				default_driver = &intel_pstate;

			goto hwp_cpu_matched;
		}
	} else {
		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id) {
			pr_info("CPU model not supported\n");
			return -ENODEV;
		}

		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
	}

	if (intel_pstate_msrs_not_valid()) {
		pr_info("Invalid MSRs\n");
		return -ENODEV;
	}
	/* Without HWP start in the passive mode. */
	if (!default_driver)
		default_driver = &intel_cpufreq;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists()) {
		pr_info("P-states controlled by the platform\n");
		return -ENODEV;
	}

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc) {
		intel_pstate_sysfs_remove();
		return rc;
	}

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id) {
			set_power_ctl_ee_state(false);
			pr_info("Disabling energy efficiency optimization\n");
		}

		pr_info("HWP enabled\n");
	} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
	}

	return 0;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	else if (!strcmp(str, "active"))
		default_driver = &intel_pstate;
	else if (!strcmp(str, "passive"))
		default_driver = &intel_cpufreq;

	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");