// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
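/*
 * Illustrative note (editorial, not in the original source): with
 * FRAC_BITS == 8 the fixed-point unit is 1/256, so int_tofp(5) == 1280
 * and fp_toint(1280) == 5.  Multiplication shifts the double-width
 * product back down, e.g. mul_fp(int_tofp(3), int_tofp(2)) ==
 * (768 * 512) >> 8 == 1536 == int_tofp(6).
 */
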
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different than core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	perf_ctl_scaling;
	int	scaling;
	int	turbo_pstate;
	unsigned int	min_freq;
	unsigned int	max_freq;
	unsigned int	turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;

	u64	last_update;
	u64	last_sample_time;
	u64	aperf_mperf_shift;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_cached;
	u64 hwp_req_cached;
	u64 hwp_cap_cached;
	u64 last_io_update;
	unsigned int sched_flags;
	u32 hwp_boost_min;
	bool suspended;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here. Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}
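/*
 * Editorial note (not in the original source): because max_highest_perf
 * starts at 0 and min_highest_perf at U32_MAX, the block above keeps
 * updating both values until two CPUs report different highest_perf
 * levels; only then is ITMT support queued, i.e. only on parts that
 * actually have favored cores.  Once the work item has been scheduled,
 * max_highest_perf > min_highest_perf holds and it is not queued again.
 */
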
static int intel_pstate_get_cppc_guaranteed(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return ret;

	if (cppc_perf.guaranteed_perf)
		return cppc_perf.guaranteed_perf;

	return cppc_perf.nominal_perf;
}

#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain whole turbo frequency range.
	 * This just contains +1 MHZ above the max non turbo frequency,
	 * with control value corresponding to max turbo ratio. But
	 * when cpufreq set policy is called, it will call with this
	 * max frequency, which will cause a reduced performance as
	 * this driver uses real max turbo frequency as the max
	 * frequency. So correct this frequency in _PSS table to
	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!global.turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
{
	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
}

static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
					struct cppc_perf_caps *caps)
{
	if (cppc_get_perf_caps(cpu->cpu, caps))
		return false;

	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
{
	pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);

	cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
}

/**
 * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface.  If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, the scaling factor between HWP performance levels and CPU
 * frequency needs to be determined, which can be done with the help of the
 * observation that certain HWP performance levels should correspond to certain
 * P-states, like for example the HWP highest performance should correspond
 * to the maximum turbo P-state of the CPU.
 */
static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
{
	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo();
	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
	int perf_ctl_max = pstate_funcs.get_max();
	int max_freq = perf_ctl_max * perf_ctl_scaling;
	int scaling = INT_MAX;
	int freq;

	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
	pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);

	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);

#ifdef CONFIG_ACPI
	if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
		struct cppc_perf_caps caps;

		if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
			if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
				pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);

				/*
				 * If the CPPC nominal performance is valid, it
				 * can be assumed to correspond to cpu_khz.
				 */
				if (caps.nominal_perf == perf_ctl_max_phys) {
					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
					return;
				}
				scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
			} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
				pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);

				/*
				 * If the CPPC guaranteed performance is valid,
				 * it can be assumed to correspond to max_freq.
				 */
				if (caps.guaranteed_perf == perf_ctl_max) {
					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
					return;
				}
				scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
			}
		}
	}
#endif
	/*
	 * If using the CPPC data to compute the HWP-to-frequency scaling factor
	 * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
	 * the assumption that it corresponds to max_freq.
	 */
	if (scaling > perf_ctl_scaling) {
		pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);

		if (cpu->pstate.max_pstate == perf_ctl_max) {
			intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
			return;
		}
		scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
		if (scaling > perf_ctl_scaling) {
			/*
			 * This should not happen, because it would mean that
			 * the number of HWP perf levels was less than the
			 * number of P-states, so use the PERF_CTL scaling in
			 * that case.
			 */
			pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
				 scaling);

			intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
			return;
		}
	}

	/*
	 * If the product of the HWP performance scaling factor obtained above
	 * and the HWP_CAP highest performance is greater than the maximum turbo
	 * frequency corresponding to the pstate_funcs.get_turbo() return value,
	 * the scaling factor is too high, so recompute it so that the HWP_CAP
	 * highest performance corresponds to the maximum turbo frequency.
	 */
	if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
		pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);

		cpu->pstate.turbo_freq = turbo_freq;
		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
	}

	cpu->pstate.scaling = scaling;

	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
					 perf_ctl_scaling);

	freq = perf_ctl_max_phys * perf_ctl_scaling;
	cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);

	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
	/*
	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
	 * the effective range of HWP performance levels.
	 */
	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
}
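/*
 * Editorial worked example (hypothetical numbers, not from the source):
 * assume perf_ctl_scaling = 100000 (kHz per P-state step), a maximum
 * non-turbo P-state of 24 (max_freq = 2,400,000 kHz) and an HWP_CAP
 * guaranteed level of 30.  The fallback path above then derives
 * scaling = DIV_ROUND_UP(2400000, 30) = 80000 kHz per HWP level, which
 * is below perf_ctl_scaling and is therefore accepted.
 */
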
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
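/*
 * Editorial note (not in the original source): turbo ends up disabled
 * either when the firmware has set the turbo-disable bit in
 * MSR_IA32_MISC_ENABLE or when the reported maximum turbo ratio equals
 * the maximum non-turbo ratio, i.e. the part has no turbo headroom.
 */
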
static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];
	int turbo_pstate = cpu->pstate.turbo_pstate;

	return turbo_pstate ?
		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
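/*
 * Editorial worked example (hypothetical numbers): with min_pstate = 8
 * and turbo_pstate = 32, the lowest permitted value of min_perf_pct is
 * 8 * 100 / 32 = 25 (percent of the maximum turbo P-state).
 */
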
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means the caller didn't read
		 * MSR_HWP_REQUEST, so it needs to be read here to get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};
static const unsigned int epp_values[] = {
	HWP_EPP_PERFORMANCE,
	HWP_EPP_BALANCE_PERFORMANCE,
	HWP_EPP_BALANCE_POWERSAVE,
	HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
	s16 epp;
	int index = -EINVAL;

	*raw_epp = 0;
	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (epp == HWP_EPP_PERFORMANCE)
			return 1;
		if (epp == HWP_EPP_BALANCE_PERFORMANCE)
			return 2;
		if (epp == HWP_EPP_BALANCE_POWERSAVE)
			return 3;
		if (epp == HWP_EPP_POWERSAVE)
			return 4;
		*raw_epp = epp;
		return 0;
	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here only using top two bits
		 * effectively.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}
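/*
 * Editorial worked example (not in the original source): on an EPB-only
 * system a bias value of 0x06 lies in the 0x04-0x07 "balance performance"
 * range, so the function above returns (0x06 >> 2) + 1 == 2, which indexes
 * "balance_performance" in energy_perf_strings[].
 */
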
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
	int ret;

	/*
	 * Use the cached HWP Request MSR value, because in the active mode the
	 * register itself may be updated by intel_pstate_hwp_boost_up() or
	 * intel_pstate_hwp_boost_down() at any time.
	 */
	u64 value = READ_ONCE(cpu->hwp_req_cached);

	value &= ~GENMASK_ULL(31, 24);
	value |= (u64)epp << 24;
	/*
	 * The only other updater of hwp_req_cached in the active mode,
	 * intel_pstate_hwp_set(), is called under the same lock as this
	 * function, so it cannot run in parallel with the update below.
	 */
	WRITE_ONCE(cpu->hwp_req_cached, value);
	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
	if (!ret)
		cpu->epp_cached = epp;

	return ret;
}
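/*
 * Editorial note (not in the original source): the EPP field occupies
 * bits 31:24 of MSR_HWP_REQUEST, so GENMASK_ULL(31, 24) clears the old
 * preference and "(u64)epp << 24" installs the new one; an EPP of 0x80,
 * for instance, is stored as 0x80000000 in the register image.
 */
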
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index, bool use_raw,
					      u32 raw_epp)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (use_raw)
			epp = raw_epp;
		else if (epp == -EINVAL)
			epp = epp_values[pref_index - 1];

		/*
		 * To avoid confusion, refuse to set EPP to any values different
		 * from 0 (performance) if the current policy is "performance",
		 * because those values would be overridden.
		 */
		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
			return -EBUSY;

		ret = intel_pstate_set_epp(cpu_data, epp);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	char str_preference[21];
	bool raw = false;
	ssize_t ret;
	u32 epp = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0) {
		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
			return ret;

		ret = kstrtouint(buf, 10, &epp);
		if (ret)
			return ret;

		if (epp > 255)
			return -EINVAL;

		raw = true;
	}

	/*
	 * This function runs with the policy R/W semaphore held, which
	 * guarantees that the driver pointer will not change while it is
	 * running.
	 */
	if (!intel_pstate_driver)
		return -EAGAIN;

	mutex_lock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate) {
		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
	} else {
		/*
		 * In the passive mode the governor needs to be stopped on the
		 * target CPU before the EPP update and restarted after it,
		 * which is super-heavy-weight, so make sure it is worth doing
		 * upfront.
		 */
		if (!raw)
			epp = ret ? epp_values[ret - 1] : cpu->epp_default;

		if (cpu->epp_cached != epp) {
			int err;

			cpufreq_stop_governor(policy);
			ret = intel_pstate_set_epp(cpu, epp);
			err = cpufreq_start_governor(policy);
			if (!ret)
				ret = err;
		}
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return ret ?: count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference, raw_epp;

	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
	if (preference < 0)
		return preference;

	if (raw_epp)
		return sprintf(buf, "%d\n", raw_epp);
	else
		return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int ratio, freq;

	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
	if (ratio <= 0) {
		u64 cap;

		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
		ratio = HWP_GUARANTEED_PERF(cap);
	}

	freq = ratio * cpu->pstate.scaling;
	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);

	return sprintf(buf, "%d\n", freq);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	&base_frequency,
	NULL,
};

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	u64 cap;

	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
	WRITE_ONCE(cpu->hwp_cap_cached, cap);
	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	int scaling = cpu->pstate.scaling;

	__intel_pstate_get_hwp_cap(cpu);

	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (scaling != cpu->pstate.perf_ctl_scaling) {
		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;

		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
						 perf_ctl_scaling);
		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
						   perf_ctl_scaling);
	}
}
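/*
 * Editorial worked example (hypothetical numbers): with an HWP scaling
 * factor of 80000 and perf_ctl_scaling of 100000, an HWP highest level of
 * 36 gives 36 * 80000 = 2,880,000 kHz, which rounddown() aligns to the
 * PERF_CTL grid as 2,800,000 kHz.
 */
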
static void intel_pstate_hwp_set(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];
	int max, min;
	u64 value;
	s16 epp;

	max = cpu_data->max_perf_ratio;
	min = cpu_data->min_perf_ratio;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
		min = max;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	if (cpu_data->epp_policy == cpu_data->policy)
		goto skip_epp;

	cpu_data->epp_policy = cpu_data->policy;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write. */
		if (epp < 0)
			goto skip_epp;

		epp = 0;
	} else {
		/* Skip setting EPP when the saved value is invalid. */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero. This
		 * means:
		 *  - Policy is not changed
		 *  - user has manually changed
		 *  - Error reading EPB
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
		if (epp)
			goto skip_epp;

		epp = cpu_data->epp_powersave;
	}
	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	} else {
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
	u64 value = READ_ONCE(cpu->hwp_req_cached);
	int min_perf;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * In case the EPP has been set to "performance" by the
		 * active mode "performance" scaling algorithm, replace that
		 * temporary value with the cached EPP one.
		 */
		value &= ~GENMASK_ULL(31, 24);
		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
		WRITE_ONCE(cpu->hwp_req_cached, value);
	}

	value &= ~GENMASK_ULL(31, 0);
	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

	/* Set hwp_max = hwp_min */
	value |= HWP_MAX_PERF(min_perf);
	value |= HWP_MIN_PERF(min_perf);

	/* Set EPP to min */
	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
	u64 power_ctl;

	mutex_lock(&intel_pstate_driver_lock);
	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	if (input) {
		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
	} else {
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
	}
	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
	mutex_unlock(&intel_pstate_driver_lock);
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
	intel_pstate_hwp_enable(cpu);
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d suspending\n", cpu->cpu);

	cpu->suspended = true;

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d resuming\n", cpu->cpu);

	/* Only restore if the system default is changed */
	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
		set_power_ctl_ee_state(true);
	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
		set_power_ctl_ee_state(false);

	if (cpu->suspended && hwp_active) {
		mutex_lock(&intel_pstate_limits_lock);

		/* Re-enable HWP, because "online" has not done that. */
		intel_pstate_hwp_reenable(cpu);

		mutex_unlock(&intel_pstate_limits_lock);
	}

	cpu->suspended = false;

	return 0;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpudata *cpudata;

	if (!policy)
		return;

	cpudata = all_cpu_data[cpu];
	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

	refresh_frequency_limits(policy);

	cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
	mutex_lock(&intel_pstate_driver_lock);

	update_turbo_state();
	/*
	 * If turbo has been turned on or off globally, policy limits for
	 * all CPUs need to be updated to reflect that.
	 */
	if (global.turbo_disabled_mf != global.turbo_disabled) {
		global.turbo_disabled_mf = global.turbo_disabled;
		arch_set_max_freq_ratio(global.turbo_disabled);
		for_each_possible_cpu(cpu)
			intel_pstate_update_max_freq(cpu);
	} else {
		cpufreq_update_policy(cpu);
	}

	mutex_unlock(&intel_pstate_driver_lock);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}
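/*
 * Editorial note (not in the original source): each show_one() use below
 * expands to a sysfs show routine for one field of "global"; for example,
 * show_one(max_perf_pct, max_perf_pct) defines show_max_perf_pct(), which
 * prints global.max_perf_pct.
 */
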
fb1fe104 RW |
1218 | static ssize_t intel_pstate_show_status(char *buf); |
1219 | static int intel_pstate_update_status(const char *buf, size_t size); | |
1220 | ||
1221 | static ssize_t show_status(struct kobject *kobj, | |
625c85a6 | 1222 | struct kobj_attribute *attr, char *buf) |
fb1fe104 RW |
1223 | { |
1224 | ssize_t ret; | |
1225 | ||
1226 | mutex_lock(&intel_pstate_driver_lock); | |
1227 | ret = intel_pstate_show_status(buf); | |
1228 | mutex_unlock(&intel_pstate_driver_lock); | |
1229 | ||
1230 | return ret; | |
1231 | } | |
1232 | ||
625c85a6 | 1233 | static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, |
fb1fe104 RW |
1234 | const char *buf, size_t count) |
1235 | { | |
1236 | char *p = memchr(buf, '\n', count); | |
1237 | int ret; | |
1238 | ||
1239 | mutex_lock(&intel_pstate_driver_lock); | |
1240 | ret = intel_pstate_update_status(buf, p ? p - buf : count); | |
1241 | mutex_unlock(&intel_pstate_driver_lock); | |
1242 | ||
1243 | return ret < 0 ? ret : count; | |
1244 | } | |
1245 | ||
d01b1f48 | 1246 | static ssize_t show_turbo_pct(struct kobject *kobj, |
625c85a6 | 1247 | struct kobj_attribute *attr, char *buf) |
d01b1f48 KCA |
1248 | { |
1249 | struct cpudata *cpu; | |
1250 | int total, no_turbo, turbo_pct; | |
1251 | uint32_t turbo_fp; | |
1252 | ||
0c30b65b RW |
1253 | mutex_lock(&intel_pstate_driver_lock); |
1254 | ||
ee8df89a | 1255 | if (!intel_pstate_driver) { |
0c30b65b RW |
1256 | mutex_unlock(&intel_pstate_driver_lock); |
1257 | return -EAGAIN; | |
1258 | } | |
1259 | ||
d01b1f48 KCA |
1260 | cpu = all_cpu_data[0]; |
1261 | ||
1262 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | |
1263 | no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; | |
22590efb | 1264 | turbo_fp = div_fp(no_turbo, total); |
d01b1f48 | 1265 | turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); |
0c30b65b RW |
1266 | |
1267 | mutex_unlock(&intel_pstate_driver_lock); | |
1268 | ||
d01b1f48 KCA |
1269 | return sprintf(buf, "%u\n", turbo_pct); |
1270 | } | |
1271 | ||
0522424e | 1272 | static ssize_t show_num_pstates(struct kobject *kobj, |
625c85a6 | 1273 | struct kobj_attribute *attr, char *buf) |
0522424e KCA |
1274 | { |
1275 | struct cpudata *cpu; | |
1276 | int total; | |
1277 | ||
0c30b65b RW |
1278 | mutex_lock(&intel_pstate_driver_lock); |
1279 | ||
ee8df89a | 1280 | if (!intel_pstate_driver) { |
0c30b65b RW |
1281 | mutex_unlock(&intel_pstate_driver_lock); |
1282 | return -EAGAIN; | |
1283 | } | |
1284 | ||
0522424e KCA |
1285 | cpu = all_cpu_data[0]; |
1286 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | |
0c30b65b RW |
1287 | |
1288 | mutex_unlock(&intel_pstate_driver_lock); | |
1289 | ||
0522424e KCA |
1290 | return sprintf(buf, "%u\n", total); |
1291 | } | |
1292 | ||
4521e1a0 | 1293 | static ssize_t show_no_turbo(struct kobject *kobj, |
625c85a6 | 1294 | struct kobj_attribute *attr, char *buf) |
4521e1a0 GM |
1295 | { |
1296 | ssize_t ret; | |
1297 | ||
0c30b65b RW |
1298 | mutex_lock(&intel_pstate_driver_lock); |
1299 | ||
ee8df89a | 1300 | if (!intel_pstate_driver) { |
0c30b65b RW |
1301 | mutex_unlock(&intel_pstate_driver_lock); |
1302 | return -EAGAIN; | |
1303 | } | |
1304 | ||
4521e1a0 | 1305 | update_turbo_state(); |
7de32556 RW |
1306 | if (global.turbo_disabled) |
1307 | ret = sprintf(buf, "%u\n", global.turbo_disabled); | |
4521e1a0 | 1308 | else |
7de32556 | 1309 | ret = sprintf(buf, "%u\n", global.no_turbo); |
4521e1a0 | 1310 | |
0c30b65b RW |
1311 | mutex_unlock(&intel_pstate_driver_lock); |
1312 | ||
4521e1a0 GM |
1313 | return ret; |
1314 | } | |
1315 | ||
625c85a6 | 1316 | static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, |
c410833a | 1317 | const char *buf, size_t count) |
93f0822d DB |
1318 | { |
1319 | unsigned int input; | |
1320 | int ret; | |
845c1cbe | 1321 | |
93f0822d DB |
1322 | ret = sscanf(buf, "%u", &input); |
1323 | if (ret != 1) | |
1324 | return -EINVAL; | |
4521e1a0 | 1325 | |
0c30b65b RW |
1326 | mutex_lock(&intel_pstate_driver_lock); |
1327 | ||
ee8df89a | 1328 | if (!intel_pstate_driver) { |
0c30b65b RW |
1329 | mutex_unlock(&intel_pstate_driver_lock); |
1330 | return -EAGAIN; | |
1331 | } | |
1332 | ||
a410c03d SP |
1333 | mutex_lock(&intel_pstate_limits_lock); |
1334 | ||
4521e1a0 | 1335 | update_turbo_state(); |
7de32556 | 1336 | if (global.turbo_disabled) { |
8c539776 | 1337 | pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); |
a410c03d | 1338 | mutex_unlock(&intel_pstate_limits_lock); |
0c30b65b | 1339 | mutex_unlock(&intel_pstate_driver_lock); |
4521e1a0 | 1340 | return -EPERM; |
dd5fbf70 | 1341 | } |
2f86dc4c | 1342 | |
7de32556 | 1343 | global.no_turbo = clamp_t(int, input, 0, 1); |
111b8b3f | 1344 | |
c5a2ee7d RW |
1345 | if (global.no_turbo) { |
1346 | struct cpudata *cpu = all_cpu_data[0]; | |
1347 | int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; | |
1348 | ||
1349 | /* Squash the global minimum into the permitted range. */ | |
1350 | if (global.min_perf_pct > pct) | |
1351 | global.min_perf_pct = pct; | |
1352 | } | |
1353 | ||
cd59b4be RW |
1354 | mutex_unlock(&intel_pstate_limits_lock); |
1355 | ||
7de32556 RW |
1356 | intel_pstate_update_policies(); |
1357 | ||
0c30b65b RW |
1358 | mutex_unlock(&intel_pstate_driver_lock); |
1359 | ||
93f0822d DB |
1360 | return count; |
1361 | } | |
1362 | ||
3000ce3c | 1363 | static void update_qos_request(enum freq_qos_req_type type) |
da5c504c | 1364 | { |
3000ce3c | 1365 | struct freq_qos_request *req; |
da5c504c | 1366 | struct cpufreq_policy *policy; |
de5bcf40 | 1367 | int i; |
da5c504c VK |
1368 | |
1369 | for_each_possible_cpu(i) { | |
1370 | struct cpudata *cpu = all_cpu_data[i]; | |
de5bcf40 | 1371 | unsigned int freq, perf_pct; |
da5c504c VK |
1372 | |
1373 | policy = cpufreq_cpu_get(i); | |
1374 | if (!policy) | |
1375 | continue; | |
1376 | ||
1377 | req = policy->driver_data; | |
1378 | cpufreq_cpu_put(policy); | |
1379 | ||
1380 | if (!req) | |
1381 | continue; | |
1382 | ||
1383 | if (hwp_active) | |
de5bcf40 | 1384 | intel_pstate_get_hwp_cap(cpu); |
da5c504c | 1385 | |
3000ce3c | 1386 | if (type == FREQ_QOS_MIN) { |
da5c504c VK |
1387 | perf_pct = global.min_perf_pct; |
1388 | } else { | |
1389 | req++; | |
1390 | perf_pct = global.max_perf_pct; | |
1391 | } | |
1392 | ||
de5bcf40 | 1393 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); |
da5c504c | 1394 | |
3000ce3c | 1395 | if (freq_qos_update_request(req, freq) < 0) |
da5c504c VK |
1396 | pr_warn("Failed to update freq constraint: CPU%d\n", i); |
1397 | } | |
1398 | } | |
1399 | ||
625c85a6 | 1400 | static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, |
c410833a | 1401 | const char *buf, size_t count) |
93f0822d DB |
1402 | { |
1403 | unsigned int input; | |
1404 | int ret; | |
845c1cbe | 1405 | |
93f0822d DB |
1406 | ret = sscanf(buf, "%u", &input); |
1407 | if (ret != 1) | |
1408 | return -EINVAL; | |
1409 | ||
0c30b65b RW |
1410 | mutex_lock(&intel_pstate_driver_lock); |
1411 | ||
ee8df89a | 1412 | if (!intel_pstate_driver) { |
0c30b65b RW |
1413 | mutex_unlock(&intel_pstate_driver_lock); |
1414 | return -EAGAIN; | |
1415 | } | |
1416 | ||
a410c03d SP |
1417 | mutex_lock(&intel_pstate_limits_lock); |
1418 | ||
c5a2ee7d | 1419 | global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); |
111b8b3f | 1420 | |
cd59b4be RW |
1421 | mutex_unlock(&intel_pstate_limits_lock); |
1422 | ||
da5c504c VK |
1423 | if (intel_pstate_driver == &intel_pstate) |
1424 | intel_pstate_update_policies(); | |
1425 | else | |
3000ce3c | 1426 | update_qos_request(FREQ_QOS_MAX); |
7de32556 | 1427 | |
0c30b65b RW |
1428 | mutex_unlock(&intel_pstate_driver_lock); |
1429 | ||
93f0822d DB |
1430 | return count; |
1431 | } | |
1432 | ||
625c85a6 | 1433 | static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, |
c410833a | 1434 | const char *buf, size_t count) |
93f0822d DB |
1435 | { |
1436 | unsigned int input; | |
1437 | int ret; | |
845c1cbe | 1438 | |
93f0822d DB |
1439 | ret = sscanf(buf, "%u", &input); |
1440 | if (ret != 1) | |
1441 | return -EINVAL; | |
a0475992 | 1442 | |
0c30b65b RW |
1443 | mutex_lock(&intel_pstate_driver_lock); |
1444 | ||
ee8df89a | 1445 | if (!intel_pstate_driver) { |
0c30b65b RW |
1446 | mutex_unlock(&intel_pstate_driver_lock); |
1447 | return -EAGAIN; | |
1448 | } | |
1449 | ||
a410c03d SP |
1450 | mutex_lock(&intel_pstate_limits_lock); |
1451 | ||
c5a2ee7d RW |
1452 | global.min_perf_pct = clamp_t(int, input, |
1453 | min_perf_pct_min(), global.max_perf_pct); | |
111b8b3f | 1454 | |
cd59b4be RW |
1455 | mutex_unlock(&intel_pstate_limits_lock); |
1456 | ||
da5c504c VK |
1457 | if (intel_pstate_driver == &intel_pstate) |
1458 | intel_pstate_update_policies(); | |
1459 | else | |
3000ce3c | 1460 | update_qos_request(FREQ_QOS_MIN); |
7de32556 | 1461 | |
0c30b65b RW |
1462 | mutex_unlock(&intel_pstate_driver_lock); |
1463 | ||
93f0822d DB |
1464 | return count; |
1465 | } | |
1466 | ||
aaaece3d | 1467 | static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, |
625c85a6 | 1468 | struct kobj_attribute *attr, char *buf) |
aaaece3d SP |
1469 | { |
1470 | return sprintf(buf, "%u\n", hwp_boost); | |
1471 | } | |
1472 | ||
625c85a6 VK |
1473 | static ssize_t store_hwp_dynamic_boost(struct kobject *a, |
1474 | struct kobj_attribute *b, | |
aaaece3d SP |
1475 | const char *buf, size_t count) |
1476 | { | |
1477 | unsigned int input; | |
1478 | int ret; | |
1479 | ||
1480 | ret = kstrtouint(buf, 10, &input); | |
1481 | if (ret) | |
1482 | return ret; | |
1483 | ||
1484 | mutex_lock(&intel_pstate_driver_lock); | |
1485 | hwp_boost = !!input; | |
1486 | intel_pstate_update_policies(); | |
1487 | mutex_unlock(&intel_pstate_driver_lock); | |
1488 | ||
1489 | return count; | |
1490 | } | |
1491 | ||
ed7bde7a SP |
1492 | static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, |
1493 | char *buf) | |
1494 | { | |
1495 | u64 power_ctl; | |
1496 | int enable; | |
1497 | ||
1498 | rdmsrl(MSR_IA32_POWER_CTL, power_ctl); | |
1499 | enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); | |
1500 | return sprintf(buf, "%d\n", !enable); | |
1501 | } | |
1502 | ||
1503 | static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, | |
1504 | const char *buf, size_t count) | |
1505 | { | |
1506 | bool input; | |
1507 | int ret; | |
1508 | ||
1509 | ret = kstrtobool(buf, &input); | |
1510 | if (ret) | |
1511 | return ret; | |
1512 | ||
1513 | set_power_ctl_ee_state(input); | |
1514 | ||
1515 | return count; | |
1516 | } | |
1517 | ||
93f0822d DB |
1518 | show_one(max_perf_pct, max_perf_pct); |
1519 | show_one(min_perf_pct, min_perf_pct); | |
1520 | ||
fb1fe104 | 1521 | define_one_global_rw(status); |
93f0822d DB |
1522 | define_one_global_rw(no_turbo); |
1523 | define_one_global_rw(max_perf_pct); | |
1524 | define_one_global_rw(min_perf_pct); | |
d01b1f48 | 1525 | define_one_global_ro(turbo_pct); |
0522424e | 1526 | define_one_global_ro(num_pstates); |
aaaece3d | 1527 | define_one_global_rw(hwp_dynamic_boost); |
ed7bde7a | 1528 | define_one_global_rw(energy_efficiency); |
93f0822d DB |
1529 | |
1530 | static struct attribute *intel_pstate_attributes[] = { | |
fb1fe104 | 1531 | &status.attr, |
93f0822d | 1532 | &no_turbo.attr, |
93f0822d DB |
1533 | NULL |
1534 | }; | |
1535 | ||
106c9c77 | 1536 | static const struct attribute_group intel_pstate_attr_group = { |
93f0822d DB |
1537 | .attrs = intel_pstate_attributes, |
1538 | }; | |
93f0822d | 1539 | |
ed7bde7a SP |
1540 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; |
1541 | ||
f6ebbcf0 RW |
1542 | static struct kobject *intel_pstate_kobject; |
1543 | ||
317dd50e | 1544 | static void __init intel_pstate_sysfs_expose_params(void) |
93f0822d DB |
1545 | { |
1546 | int rc; | |
1547 | ||
1548 | intel_pstate_kobject = kobject_create_and_add("intel_pstate", | |
1549 | &cpu_subsys.dev_root->kobj); | |
eae48f04 SP |
1550 | if (WARN_ON(!intel_pstate_kobject)) |
1551 | return; | |
1552 | ||
2d8d1f18 | 1553 | rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); |
eae48f04 SP |
1554 | if (WARN_ON(rc)) |
1555 | return; | |
1556 | ||
c3d175e4 RW |
1557 | if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { |
1558 | rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr); | |
1559 | WARN_ON(rc); | |
1560 | ||
1561 | rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr); | |
1562 | WARN_ON(rc); | |
1563 | } | |
1564 | ||
eae48f04 SP |
1565 | /* |
1566 | * If per-CPU limits are enforced, there are no global limits, so | |
1567 | * return without creating the max/min_perf_pct attributes. | |
1568 | */ | |
1569 | if (per_cpu_limits) | |
1570 | return; | |
1571 | ||
1572 | rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); | |
1573 | WARN_ON(rc); | |
1574 | ||
1575 | rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); | |
1576 | WARN_ON(rc); | |
1577 | ||
ed7bde7a SP |
1578 | if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { |
1579 | rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); | |
1580 | WARN_ON(rc); | |
1581 | } | |
93f0822d | 1582 | } |
f6ebbcf0 | 1583 | |
cdc1719c CY |
1584 | static void __init intel_pstate_sysfs_remove(void) |
1585 | { | |
1586 | if (!intel_pstate_kobject) | |
1587 | return; | |
1588 | ||
1589 | sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); | |
1590 | ||
c3d175e4 RW |
1591 | if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { |
1592 | sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr); | |
1593 | sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr); | |
1594 | } | |
1595 | ||
cdc1719c CY |
1596 | if (!per_cpu_limits) { |
1597 | sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); | |
1598 | sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); | |
1599 | ||
1600 | if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) | |
1601 | sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr); | |
1602 | } | |
1603 | ||
1604 | kobject_put(intel_pstate_kobject); | |
1605 | } | |
1606 | ||
f6ebbcf0 RW |
1607 | static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) |
1608 | { | |
1609 | int rc; | |
1610 | ||
1611 | if (!hwp_active) | |
1612 | return; | |
1613 | ||
1614 | rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); | |
1615 | WARN_ON_ONCE(rc); | |
1616 | } | |
1617 | ||
1618 | static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) | |
1619 | { | |
1620 | if (!hwp_active) | |
1621 | return; | |
1622 | ||
1623 | sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); | |
1624 | } | |
1625 | ||
93f0822d | 1626 | /************************** sysfs end ************************/ |
2f86dc4c | 1627 | |
ba88d433 | 1628 | static void intel_pstate_hwp_enable(struct cpudata *cpudata) |
2f86dc4c | 1629 | { |
f05c9665 | 1630 | /* First disable HWP notification interrupts, as we don't process them */ | |
108ec36b | 1631 | if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) |
da7de91c | 1632 | wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); |
f05c9665 | 1633 | |
ba88d433 | 1634 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); |
984edbdc SP |
1635 | if (cpudata->epp_default == -EINVAL) |
1636 | cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); | |
2f86dc4c DB |
1637 | } |
1638 | ||
938d21a2 | 1639 | static int atom_get_min_pstate(void) |
19e77c28 DB |
1640 | { |
1641 | u64 value; | |
845c1cbe | 1642 | |
92134bdb | 1643 | rdmsrl(MSR_ATOM_CORE_RATIOS, value); |
c16ed060 | 1644 | return (value >> 8) & 0x7F; |
19e77c28 DB |
1645 | } |
1646 | ||
938d21a2 | 1647 | static int atom_get_max_pstate(void) |
19e77c28 DB |
1648 | { |
1649 | u64 value; | |
845c1cbe | 1650 | |
92134bdb | 1651 | rdmsrl(MSR_ATOM_CORE_RATIOS, value); |
c16ed060 | 1652 | return (value >> 16) & 0x7F; |
19e77c28 | 1653 | } |
93f0822d | 1654 | |
938d21a2 | 1655 | static int atom_get_turbo_pstate(void) |
61d8d2ab DB |
1656 | { |
1657 | u64 value; | |
845c1cbe | 1658 | |
92134bdb | 1659 | rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); |
c16ed060 | 1660 | return value & 0x7F; |
61d8d2ab DB |
1661 | } |
1662 | ||
fdfdb2b1 | 1663 | static u64 atom_get_val(struct cpudata *cpudata, int pstate) |
007bea09 DB |
1664 | { |
1665 | u64 val; | |
1666 | int32_t vid_fp; | |
1667 | u32 vid; | |
1668 | ||
144c8e17 | 1669 | val = (u64)pstate << 8; |
7de32556 | 1670 | if (global.no_turbo && !global.turbo_disabled) |
007bea09 DB |
1671 | val |= (u64)1 << 32; |
1672 | ||
1673 | vid_fp = cpudata->vid.min + mul_fp( | |
1674 | int_tofp(pstate - cpudata->pstate.min_pstate), | |
1675 | cpudata->vid.ratio); | |
1676 | ||
1677 | vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); | |
d022a65e | 1678 | vid = ceiling_fp(vid_fp); |
007bea09 | 1679 | |
21855ff5 DB |
1680 | if (pstate > cpudata->pstate.max_pstate) |
1681 | vid = cpudata->vid.turbo; | |
1682 | ||
fdfdb2b1 | 1683 | return val | vid; |
007bea09 DB |
1684 | } |
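
/*
 * Illustrative sketch (not part of the driver): the linear VID interpolation
 * performed by atom_get_val() above, restated with plain 8.8 fixed-point
 * helpers. All numeric values are assumptions for the example.
 */
#include <stdint.h>

#define EX_FRAC_BITS	8
#define ex_fp(x)	((int32_t)((x) << EX_FRAC_BITS))

static int example_vid_for_pstate(int pstate)
{
	int min_pstate = 6, max_pstate = 22;			/* example ratios */
	int32_t vid_min = ex_fp(0x20), vid_max = ex_fp(0x54);	/* example VID codes */
	/* slope = (vid_max - vid_min) / (max_pstate - min_pstate), in 8.8 */
	int32_t ratio = (int32_t)(((int64_t)(vid_max - vid_min) << EX_FRAC_BITS) /
				  ex_fp(max_pstate - min_pstate));
	int32_t vid_fp = vid_min +
		(int32_t)(((int64_t)ratio * ex_fp(pstate - min_pstate)) >> EX_FRAC_BITS);

	if (vid_fp < vid_min)
		vid_fp = vid_min;
	if (vid_fp > vid_max)
		vid_fp = vid_max;

	/* ceiling_fp(): round up so the voltage never ends up below the target */
	return (vid_fp >> EX_FRAC_BITS) +
	       ((vid_fp & ((1 << EX_FRAC_BITS) - 1)) ? 1 : 0);
}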
1685 | ||
1421df63 | 1686 | static int silvermont_get_scaling(void) |
b27580b0 DB |
1687 | { |
1688 | u64 value; | |
1689 | int i; | |
1421df63 PL |
1690 | /* Defined in Table 35-6 from SDM (Sept 2015) */ |
1691 | static int silvermont_freq_table[] = { | |
1692 | 83300, 100000, 133300, 116700, 80000}; | |
b27580b0 DB |
1693 | |
1694 | rdmsrl(MSR_FSB_FREQ, value); | |
1421df63 PL |
1695 | i = value & 0x7; |
1696 | WARN_ON(i > 4); | |
b27580b0 | 1697 | |
1421df63 PL |
1698 | return silvermont_freq_table[i]; |
1699 | } | |
b27580b0 | 1700 | |
1421df63 PL |
1701 | static int airmont_get_scaling(void) |
1702 | { | |
1703 | u64 value; | |
1704 | int i; | |
1705 | /* Defined in Table 35-10 from SDM (Sept 2015) */ | |
1706 | static int airmont_freq_table[] = { | |
1707 | 83300, 100000, 133300, 116700, 80000, | |
1708 | 93300, 90000, 88900, 87500}; | |
1709 | ||
1710 | rdmsrl(MSR_FSB_FREQ, value); | |
1711 | i = value & 0xF; | |
1712 | WARN_ON(i > 8); | |
1713 | ||
1714 | return airmont_freq_table[i]; | |
b27580b0 DB |
1715 | } |
1716 | ||
938d21a2 | 1717 | static void atom_get_vid(struct cpudata *cpudata) |
007bea09 DB |
1718 | { |
1719 | u64 value; | |
1720 | ||
92134bdb | 1721 | rdmsrl(MSR_ATOM_CORE_VIDS, value); |
c16ed060 DB |
1722 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
1723 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); | |
007bea09 DB |
1724 | cpudata->vid.ratio = div_fp( |
1725 | cpudata->vid.max - cpudata->vid.min, | |
1726 | int_tofp(cpudata->pstate.max_pstate - | |
1727 | cpudata->pstate.min_pstate)); | |
21855ff5 | 1728 | |
92134bdb | 1729 | rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); |
21855ff5 | 1730 | cpudata->vid.turbo = value & 0x7f; |
007bea09 DB |
1731 | } |
1732 | ||
016c8150 | 1733 | static int core_get_min_pstate(void) |
93f0822d DB |
1734 | { |
1735 | u64 value; | |
845c1cbe | 1736 | |
05e99c8c | 1737 | rdmsrl(MSR_PLATFORM_INFO, value); |
93f0822d DB |
1738 | return (value >> 40) & 0xFF; |
1739 | } | |
1740 | ||
3bcc6fa9 | 1741 | static int core_get_max_pstate_physical(void) |
93f0822d DB |
1742 | { |
1743 | u64 value; | |
845c1cbe | 1744 | |
05e99c8c | 1745 | rdmsrl(MSR_PLATFORM_INFO, value); |
93f0822d DB |
1746 | return (value >> 8) & 0xFF; |
1747 | } | |
1748 | ||
8fc7554a SP |
1749 | static int core_get_tdp_ratio(u64 plat_info) |
1750 | { | |
1751 | /* Check how many TDP levels present */ | |
1752 | if (plat_info & 0x600000000) { | |
1753 | u64 tdp_ctrl; | |
1754 | u64 tdp_ratio; | |
1755 | int tdp_msr; | |
1756 | int err; | |
1757 | ||
1758 | /* Get the TDP level (0, 1, 2) to get ratios */ | |
1759 | err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); | |
1760 | if (err) | |
1761 | return err; | |
1762 | ||
1763 | /* TDP MSRs are contiguous starting at 0x648 */ | |
1764 | tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); | |
1765 | err = rdmsrl_safe(tdp_msr, &tdp_ratio); | |
1766 | if (err) | |
1767 | return err; | |
1768 | ||
1769 | /* For level 1 and 2, bits[23:16] contain the ratio */ | |
1770 | if (tdp_ctrl & 0x03) | |
1771 | tdp_ratio >>= 16; | |
1772 | ||
1773 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ | |
1774 | pr_debug("tdp_ratio %x\n", (int)tdp_ratio); | |
1775 | ||
1776 | return (int)tdp_ratio; | |
1777 | } | |
1778 | ||
1779 | return -ENXIO; | |
1780 | } | |
1781 | ||
016c8150 | 1782 | static int core_get_max_pstate(void) |
93f0822d | 1783 | { |
6a35fc2d SP |
1784 | u64 tar; |
1785 | u64 plat_info; | |
1786 | int max_pstate; | |
8fc7554a | 1787 | int tdp_ratio; |
6a35fc2d SP |
1788 | int err; |
1789 | ||
1790 | rdmsrl(MSR_PLATFORM_INFO, plat_info); | |
1791 | max_pstate = (plat_info >> 8) & 0xFF; | |
1792 | ||
8fc7554a SP |
1793 | tdp_ratio = core_get_tdp_ratio(plat_info); |
1794 | if (tdp_ratio <= 0) | |
1795 | return max_pstate; | |
1796 | ||
1797 | if (hwp_active) { | |
1798 | /* Turbo activation ratio is not used on HWP platforms */ | |
1799 | return tdp_ratio; | |
1800 | } | |
1801 | ||
6a35fc2d SP |
1802 | err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); |
1803 | if (!err) { | |
8fc7554a SP |
1804 | int tar_levels; |
1805 | ||
6a35fc2d | 1806 | /* Do some sanity checking for safety */ |
8fc7554a SP |
1807 | tar_levels = tar & 0xff; |
1808 | if (tdp_ratio - 1 == tar_levels) { | |
1809 | max_pstate = tar_levels; | |
1810 | pr_debug("max_pstate=TAC %x\n", max_pstate); | |
6a35fc2d SP |
1811 | } |
1812 | } | |
845c1cbe | 1813 | |
6a35fc2d | 1814 | return max_pstate; |
93f0822d DB |
1815 | } |
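
/*
 * Illustrative sketch (not part of the driver): the bit extraction done by
 * core_get_tdp_ratio() above. The register contents are invented example
 * values; tdp_ctrl selects config-TDP level 0, 1 or 2, and the level's ratio
 * is read from MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03).
 */
#include <stdint.h>

static int example_tdp_ratio(uint64_t plat_info, uint64_t tdp_ctrl,
			     uint64_t tdp_msr_value)
{
	/* Same MSR_PLATFORM_INFO bits the driver checks for extra TDP levels. */
	if (!(plat_info & 0x600000000ULL))
		return -1;

	/* Levels 1 and 2 keep their ratio in bits 23:16, level 0 in bits 7:0. */
	if (tdp_ctrl & 0x03)
		tdp_msr_value >>= 16;

	return (int)(tdp_msr_value & 0xff);	/* ratios are 8 bits wide */
}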
1816 | ||
016c8150 | 1817 | static int core_get_turbo_pstate(void) |
93f0822d DB |
1818 | { |
1819 | u64 value; | |
1820 | int nont, ret; | |
845c1cbe | 1821 | |
100cf6f2 | 1822 | rdmsrl(MSR_TURBO_RATIO_LIMIT, value); |
016c8150 | 1823 | nont = core_get_max_pstate(); |
285cb990 | 1824 | ret = (value) & 255; |
93f0822d DB |
1825 | if (ret <= nont) |
1826 | ret = nont; | |
1827 | return ret; | |
1828 | } | |
1829 | ||
b27580b0 DB |
1830 | static inline int core_get_scaling(void) |
1831 | { | |
1832 | return 100000; | |
1833 | } | |
1834 | ||
fdfdb2b1 | 1835 | static u64 core_get_val(struct cpudata *cpudata, int pstate) |
016c8150 DB |
1836 | { |
1837 | u64 val; | |
1838 | ||
144c8e17 | 1839 | val = (u64)pstate << 8; |
7de32556 | 1840 | if (global.no_turbo && !global.turbo_disabled) |
016c8150 DB |
1841 | val |= (u64)1 << 32; |
1842 | ||
fdfdb2b1 | 1843 | return val; |
016c8150 DB |
1844 | } |
1845 | ||
6e34e1f2 SP |
1846 | static int knl_get_aperf_mperf_shift(void) |
1847 | { | |
1848 | return 10; | |
1849 | } | |
1850 | ||
b34ef932 DC |
1851 | static int knl_get_turbo_pstate(void) |
1852 | { | |
1853 | u64 value; | |
1854 | int nont, ret; | |
1855 | ||
100cf6f2 | 1856 | rdmsrl(MSR_TURBO_RATIO_LIMIT, value); |
b34ef932 DC |
1857 | nont = core_get_max_pstate(); |
1858 | ret = (((value) >> 8) & 0xFF); | |
1859 | if (ret <= nont) | |
1860 | ret = nont; | |
1861 | return ret; | |
1862 | } | |
1863 | ||
a6c6ead1 | 1864 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
fdfdb2b1 | 1865 | { |
bc95a454 RW |
1866 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); |
1867 | cpu->pstate.current_pstate = pstate; | |
fdfdb2b1 RW |
1868 | /* |
1869 | * Generally, there is no guarantee that this code will always run on | |
1870 | * the CPU being updated, so force the register update to run on the | |
1871 | * right CPU. | |
1872 | */ | |
1873 | wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, | |
1874 | pstate_funcs.get_val(cpu, pstate)); | |
93f0822d DB |
1875 | } |
1876 | ||
a6c6ead1 RW |
1877 | static void intel_pstate_set_min_pstate(struct cpudata *cpu) |
1878 | { | |
1879 | intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); | |
1880 | } | |
1881 | ||
1882 | static void intel_pstate_max_within_limits(struct cpudata *cpu) | |
1883 | { | |
fa93b51c | 1884 | int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
a6c6ead1 RW |
1885 | |
1886 | update_turbo_state(); | |
b02aabe8 | 1887 | intel_pstate_set_pstate(cpu, pstate); |
a6c6ead1 RW |
1888 | } |
1889 | ||
93f0822d DB |
1890 | static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) |
1891 | { | |
eb3693f0 RW |
1892 | bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU); |
1893 | int perf_ctl_max_phys = pstate_funcs.get_max_physical(); | |
1894 | int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys : | |
1895 | pstate_funcs.get_scaling(); | |
1896 | ||
016c8150 | 1897 | cpu->pstate.min_pstate = pstate_funcs.get_min(); |
eb3693f0 RW |
1898 | cpu->pstate.max_pstate_physical = perf_ctl_max_phys; |
1899 | cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; | |
ff7c9917 SP |
1900 | |
1901 | if (hwp_active && !hwp_mode_bdw) { | |
de5bcf40 | 1902 | __intel_pstate_get_hwp_cap(cpu); |
eb3693f0 RW |
1903 | |
1904 | if (hybrid_cpu) | |
1905 | intel_pstate_hybrid_hwp_calibrate(cpu); | |
1906 | else | |
1907 | cpu->pstate.scaling = perf_ctl_scaling; | |
ff7c9917 | 1908 | } else { |
eb3693f0 | 1909 | cpu->pstate.scaling = perf_ctl_scaling; |
6f67e060 | 1910 | cpu->pstate.max_pstate = pstate_funcs.get_max(); |
de5bcf40 | 1911 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); |
ff7c9917 | 1912 | } |
de5bcf40 | 1913 | |
eb3693f0 RW |
1914 | if (cpu->pstate.scaling == perf_ctl_scaling) { |
1915 | cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; | |
1916 | cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling; | |
1917 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling; | |
1918 | } | |
93f0822d | 1919 | |
6e34e1f2 SP |
1920 | if (pstate_funcs.get_aperf_mperf_shift) |
1921 | cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); | |
1922 | ||
007bea09 DB |
1923 | if (pstate_funcs.get_vid) |
1924 | pstate_funcs.get_vid(cpu); | |
fdfdb2b1 RW |
1925 | |
1926 | intel_pstate_set_min_pstate(cpu); | |
93f0822d DB |
1927 | } |
1928 | ||
e0efd5be SP |
1929 | /* |
1930 | * Long hold time will keep high perf limits for long time, | |
1931 | * which negatively impacts perf/watt for some workloads, | |
1932 | * like specpower. 3ms is based on experiements on some | |
1933 | * workoads. | |
1934 | */ | |
1935 | static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC; | |
1936 | ||
1937 | static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) | |
1938 | { | |
1939 | u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); | |
9dd04ec6 | 1940 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
e0efd5be SP |
1941 | u32 max_limit = (hwp_req & 0xff00) >> 8; |
1942 | u32 min_limit = (hwp_req & 0xff); | |
1943 | u32 boost_level1; | |
1944 | ||
1945 | /* | |
1946 | * Cases to consider (User changes via sysfs or boot time): | |
1947 | * If, P0 (Turbo max) = P1 (Guaranteed max) = min: | |
1948 | * No boost, return. | |
1949 | * If, P0 (Turbo max) > P1 (Guaranteed max) = min: | |
1950 | * Should result in one level boost only for P0. | |
1951 | * If, P0 (Turbo max) = P1 (Guaranteed max) > min: | |
1952 | * Should result in two level boost: | |
1953 | * (min + p1)/2 and P1. | |
1954 | * If, P0 (Turbo max) > P1 (Guaranteed max) > min: | |
1955 | * Should result in three level boost: | |
1956 | * (min + p1)/2, P1 and P0. | |
1957 | */ | |
1958 | ||
1959 | /* If max and min are equal or already at max, nothing to boost */ | |
1960 | if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) | |
1961 | return; | |
1962 | ||
1963 | if (!cpu->hwp_boost_min) | |
1964 | cpu->hwp_boost_min = min_limit; | |
1965 | ||
1966 | /* boost level at the halfway mark between min and guaranteed */ | |
9dd04ec6 | 1967 | boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1; |
e0efd5be SP |
1968 | |
1969 | if (cpu->hwp_boost_min < boost_level1) | |
1970 | cpu->hwp_boost_min = boost_level1; | |
9dd04ec6 RW |
1971 | else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) |
1972 | cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); | |
1973 | else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && | |
1974 | max_limit != HWP_GUARANTEED_PERF(hwp_cap)) | |
e0efd5be SP |
1975 | cpu->hwp_boost_min = max_limit; |
1976 | else | |
1977 | return; | |
1978 | ||
1979 | hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; | |
1980 | wrmsrl(MSR_HWP_REQUEST, hwp_req); | |
1981 | cpu->last_update = cpu->sample.time; | |
1982 | } | |
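
/*
 * Illustrative sketch (not part of the driver): the boost ladder walked by
 * intel_pstate_hwp_boost_up() above on successive I/O wakeups, restated as a
 * pure function. All arguments are HWP performance levels.
 */
static unsigned int example_next_boost_min(unsigned int cur_boost_min,
					   unsigned int min_limit,
					   unsigned int guaranteed,
					   unsigned int max_limit)
{
	unsigned int level1 = (guaranteed + min_limit) >> 1;

	if (min_limit == max_limit || cur_boost_min >= max_limit)
		return cur_boost_min;		/* nothing to boost */

	if (!cur_boost_min)
		cur_boost_min = min_limit;	/* first wakeup starts from min */

	if (cur_boost_min < level1)
		return level1;			/* halfway between min and guaranteed */
	if (cur_boost_min < guaranteed)
		return guaranteed;
	if (cur_boost_min == guaranteed && max_limit != guaranteed)
		return max_limit;		/* final step up to turbo max */

	return cur_boost_min;			/* already as high as allowed */
}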
1983 | ||
1984 | static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) | |
1985 | { | |
1986 | if (cpu->hwp_boost_min) { | |
1987 | bool expired; | |
1988 | ||
1989 | /* Check if we are idle for hold time to boost down */ | |
1990 | expired = time_after64(cpu->sample.time, cpu->last_update + | |
1991 | hwp_boost_hold_time_ns); | |
1992 | if (expired) { | |
1993 | wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); | |
1994 | cpu->hwp_boost_min = 0; | |
1995 | } | |
1996 | } | |
1997 | cpu->last_update = cpu->sample.time; | |
1998 | } | |
1999 | ||
52ccc431 SP |
2000 | static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, |
2001 | u64 time) | |
2002 | { | |
2003 | cpu->sample.time = time; | |
2004 | ||
2005 | if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { | |
2006 | bool do_io = false; | |
2007 | ||
2008 | cpu->sched_flags = 0; | |
2009 | /* | |
2010 | * Set the iowait_boost flag and update the time. Since the IO WAIT | |
2011 | * flag is set all the time, we can't conclude from just one | |
2012 | * occurrence that some IO-bound activity is scheduled on this CPU. | |
2013 | * If we receive at least two in two consecutive ticks, then we | |
2014 | * treat the CPU as a boost candidate. | |
2015 | */ | |
2016 | if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) | |
2017 | do_io = true; | |
2018 | ||
2019 | cpu->last_io_update = time; | |
2020 | ||
2021 | if (do_io) | |
2022 | intel_pstate_hwp_boost_up(cpu); | |
2023 | ||
2024 | } else { | |
2025 | intel_pstate_hwp_boost_down(cpu); | |
2026 | } | |
2027 | } | |
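
/*
 * Illustrative sketch (not part of the driver): the "two I/O wakeups within
 * two ticks" filter applied above before boosting. The tick length is an
 * assumption (1 ms, i.e. CONFIG_HZ=1000); times are in nanoseconds.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_TICK_NS	1000000ULL

static bool example_should_boost(uint64_t now, uint64_t *last_io_update)
{
	/* Boost only if the previous I/O wakeup was less than two ticks ago. */
	bool do_io = now < *last_io_update + 2 * EX_TICK_NS;

	*last_io_update = now;
	return do_io;
}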
2028 | ||
e0efd5be SP |
2029 | static inline void intel_pstate_update_util_hwp(struct update_util_data *data, |
2030 | u64 time, unsigned int flags) | |
2031 | { | |
52ccc431 SP |
2032 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
2033 | ||
2034 | cpu->sched_flags |= flags; | |
2035 | ||
2036 | if (smp_processor_id() == cpu->cpu) | |
2037 | intel_pstate_update_util_hwp_local(cpu, time); | |
e0efd5be SP |
2038 | } |
2039 | ||
a1c9787d | 2040 | static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) |
93f0822d | 2041 | { |
6b17ddb2 | 2042 | struct sample *sample = &cpu->sample; |
e66c1768 | 2043 | |
a1c9787d | 2044 | sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); |
93f0822d DB |
2045 | } |
2046 | ||
4fec7ad5 | 2047 | static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) |
93f0822d | 2048 | { |
93f0822d | 2049 | u64 aperf, mperf; |
4ab60c3f | 2050 | unsigned long flags; |
4055fad3 | 2051 | u64 tsc; |
93f0822d | 2052 | |
4ab60c3f | 2053 | local_irq_save(flags); |
93f0822d DB |
2054 | rdmsrl(MSR_IA32_APERF, aperf); |
2055 | rdmsrl(MSR_IA32_MPERF, mperf); | |
e70eed2b | 2056 | tsc = rdtsc(); |
4fec7ad5 | 2057 | if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { |
8e601a9f | 2058 | local_irq_restore(flags); |
4fec7ad5 | 2059 | return false; |
8e601a9f | 2060 | } |
4ab60c3f | 2061 | local_irq_restore(flags); |
b69880f9 | 2062 | |
c4ee841f | 2063 | cpu->last_sample_time = cpu->sample.time; |
a4675fbc | 2064 | cpu->sample.time = time; |
d37e2b76 DB |
2065 | cpu->sample.aperf = aperf; |
2066 | cpu->sample.mperf = mperf; | |
4055fad3 | 2067 | cpu->sample.tsc = tsc; |
d37e2b76 DB |
2068 | cpu->sample.aperf -= cpu->prev_aperf; |
2069 | cpu->sample.mperf -= cpu->prev_mperf; | |
4055fad3 | 2070 | cpu->sample.tsc -= cpu->prev_tsc; |
1abc4b20 | 2071 | |
93f0822d DB |
2072 | cpu->prev_aperf = aperf; |
2073 | cpu->prev_mperf = mperf; | |
4055fad3 | 2074 | cpu->prev_tsc = tsc; |
febce40f RW |
2075 | /* |
2076 | * The first time this function is invoked in a given cycle, all of the | |
2077 | * previous sample data fields are either zero or stale and must be | |
2078 | * populated with meaningful numbers for things to work, so assume that | |
2079 | * sample.time will always be reset before setting the utilization | |
2080 | * update hook and make the caller skip the sample then. | |
2081 | */ | |
eabd22c6 RW |
2082 | if (cpu->last_sample_time) { |
2083 | intel_pstate_calc_avg_perf(cpu); | |
2084 | return true; | |
2085 | } | |
2086 | return false; | |
93f0822d DB |
2087 | } |
2088 | ||
8fa520af PL |
2089 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
2090 | { | |
c587c79f | 2091 | return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); |
8fa520af PL |
2092 | } |
2093 | ||
bdcaa23f PL |
2094 | static inline int32_t get_avg_pstate(struct cpudata *cpu) |
2095 | { | |
8edb0a6e RW |
2096 | return mul_ext_fp(cpu->pstate.max_pstate_physical, |
2097 | cpu->sample.core_avg_perf); | |
bdcaa23f PL |
2098 | } |
2099 | ||
d77d4888 | 2100 | static inline int32_t get_target_pstate(struct cpudata *cpu) |
e70eed2b PL |
2101 | { |
2102 | struct sample *sample = &cpu->sample; | |
b8bd1581 | 2103 | int32_t busy_frac; |
0843e83c | 2104 | int target, avg_pstate; |
e70eed2b | 2105 | |
6e34e1f2 SP |
2106 | busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, |
2107 | sample->tsc); | |
63d1d656 | 2108 | |
b8bd1581 RW |
2109 | if (busy_frac < cpu->iowait_boost) |
2110 | busy_frac = cpu->iowait_boost; | |
63d1d656 | 2111 | |
09c448d3 | 2112 | sample->busy_scaled = busy_frac * 100; |
0843e83c | 2113 | |
7de32556 | 2114 | target = global.no_turbo || global.turbo_disabled ? |
0843e83c RW |
2115 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
2116 | target += target >> 2; | |
2117 | target = mul_fp(target, busy_frac); | |
2118 | if (target < cpu->pstate.min_pstate) | |
2119 | target = cpu->pstate.min_pstate; | |
2120 | ||
2121 | /* | |
2122 | * If the average P-state during the previous cycle was higher than the | |
2123 | * current target, add 50% of the difference to the target to reduce | |
2124 | * possible performance oscillations and offset possible performance | |
2125 | * loss related to moving the workload from one CPU to another within | |
2126 | * a package/module. | |
2127 | */ | |
2128 | avg_pstate = get_avg_pstate(cpu); | |
2129 | if (avg_pstate > target) | |
2130 | target += (avg_pstate - target) >> 1; | |
2131 | ||
2132 | return target; | |
e70eed2b PL |
2133 | } |
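
/*
 * Illustrative sketch (not part of the driver): the arithmetic of
 * get_target_pstate() above with plain integers instead of fixed point.
 * Example values: turbo_pstate = 32, min_pstate = 8, 50% busy.
 */
static int example_target_pstate(int busy_pct, int turbo_pstate,
				 int min_pstate, int avg_pstate)
{
	/* target = 1.25 * turbo * busy_frac, i.e. full speed at 80% busy */
	int target = (turbo_pstate + (turbo_pstate >> 2)) * busy_pct / 100;

	if (target < min_pstate)
		target = min_pstate;

	/*
	 * Pull halfway toward the previous cycle's average P-state to damp
	 * oscillations: busy_pct=50, turbo=32 gives target=20; with
	 * avg_pstate=28 the result becomes 24.
	 */
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}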
2134 | ||
001c76f0 | 2135 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) |
fdfdb2b1 | 2136 | { |
fa93b51c RW |
2137 | int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
2138 | int max_pstate = max(min_pstate, cpu->max_perf_ratio); | |
fdfdb2b1 | 2139 | |
b02aabe8 | 2140 | return clamp_t(int, pstate, min_pstate, max_pstate); |
001c76f0 RW |
2141 | } |
2142 | ||
2143 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) | |
2144 | { | |
fdfdb2b1 RW |
2145 | if (pstate == cpu->pstate.current_pstate) |
2146 | return; | |
2147 | ||
bc95a454 | 2148 | cpu->pstate.current_pstate = pstate; |
fdfdb2b1 RW |
2149 | wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); |
2150 | } | |
2151 | ||
a891283e | 2152 | static void intel_pstate_adjust_pstate(struct cpudata *cpu) |
93f0822d | 2153 | { |
67dd9bf4 | 2154 | int from = cpu->pstate.current_pstate; |
4055fad3 | 2155 | struct sample *sample; |
a891283e | 2156 | int target_pstate; |
4055fad3 | 2157 | |
001c76f0 RW |
2158 | update_turbo_state(); |
2159 | ||
d77d4888 | 2160 | target_pstate = get_target_pstate(cpu); |
64078299 RW |
2161 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
2162 | trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); | |
fdfdb2b1 | 2163 | intel_pstate_update_pstate(cpu, target_pstate); |
4055fad3 DS |
2164 | |
2165 | sample = &cpu->sample; | |
a1c9787d | 2166 | trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), |
157386b6 | 2167 | fp_toint(sample->busy_scaled), |
4055fad3 DS |
2168 | from, |
2169 | cpu->pstate.current_pstate, | |
2170 | sample->mperf, | |
2171 | sample->aperf, | |
2172 | sample->tsc, | |
3ba7bcaa SP |
2173 | get_avg_frequency(cpu), |
2174 | fp_toint(cpu->iowait_boost * 100)); | |
93f0822d DB |
2175 | } |
2176 | ||
a4675fbc | 2177 | static void intel_pstate_update_util(struct update_util_data *data, u64 time, |
58919e83 | 2178 | unsigned int flags) |
93f0822d | 2179 | { |
a4675fbc | 2180 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
09c448d3 RW |
2181 | u64 delta_ns; |
2182 | ||
674e7541 VK |
2183 | /* Don't allow remote callbacks */ |
2184 | if (smp_processor_id() != cpu->cpu) | |
2185 | return; | |
2186 | ||
b8bd1581 | 2187 | delta_ns = time - cpu->last_update; |
eabd22c6 | 2188 | if (flags & SCHED_CPUFREQ_IOWAIT) { |
b8bd1581 RW |
2189 | /* Start over if the CPU may have been idle. */ |
2190 | if (delta_ns > TICK_NSEC) { | |
2191 | cpu->iowait_boost = ONE_EIGHTH_FP; | |
8e3b4039 | 2192 | } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { |
b8bd1581 RW |
2193 | cpu->iowait_boost <<= 1; |
2194 | if (cpu->iowait_boost > int_tofp(1)) | |
2195 | cpu->iowait_boost = int_tofp(1); | |
2196 | } else { | |
2197 | cpu->iowait_boost = ONE_EIGHTH_FP; | |
2198 | } | |
eabd22c6 RW |
2199 | } else if (cpu->iowait_boost) { |
2200 | /* Clear iowait_boost if the CPU may have been idle. */ | |
eabd22c6 RW |
2201 | if (delta_ns > TICK_NSEC) |
2202 | cpu->iowait_boost = 0; | |
b8bd1581 RW |
2203 | else |
2204 | cpu->iowait_boost >>= 1; | |
09c448d3 | 2205 | } |
eabd22c6 | 2206 | cpu->last_update = time; |
09c448d3 | 2207 | delta_ns = time - cpu->sample.time; |
d77d4888 | 2208 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) |
eabd22c6 | 2209 | return; |
4fec7ad5 | 2210 | |
a891283e RW |
2211 | if (intel_pstate_sample(cpu, time)) |
2212 | intel_pstate_adjust_pstate(cpu); | |
67dd9bf4 | 2213 | } |
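
/*
 * Illustrative sketch (not part of the driver): the iowait boost ramp used by
 * intel_pstate_update_util() above, in 8-bit fixed point where 256 == 1.0
 * (matching FRAC_BITS == 8, so ONE_EIGHTH_FP == 32).
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_FP_ONE	(1 << 8)
#define EX_ONE_EIGHTH	(1 << 5)

static int32_t example_iowait_boost(int32_t boost, bool iowait, bool was_idle)
{
	if (iowait) {
		if (was_idle || boost < EX_ONE_EIGHTH)
			return EX_ONE_EIGHTH;		/* (re)start the ramp at 1/8 */
		boost <<= 1;				/* 1/8 -> 1/4 -> 1/2 -> 1 */
		return boost > EX_FP_ONE ? EX_FP_ONE : boost;
	}
	if (was_idle)
		return 0;				/* clear after an idle tick */
	return boost >> 1;				/* decay otherwise */
}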
eabd22c6 | 2214 | |
2f49afc2 RW |
2215 | static struct pstate_funcs core_funcs = { |
2216 | .get_max = core_get_max_pstate, | |
2217 | .get_max_physical = core_get_max_pstate_physical, | |
2218 | .get_min = core_get_min_pstate, | |
2219 | .get_turbo = core_get_turbo_pstate, | |
2220 | .get_scaling = core_get_scaling, | |
2221 | .get_val = core_get_val, | |
de4a76cb RW |
2222 | }; |
2223 | ||
2f49afc2 RW |
2224 | static const struct pstate_funcs silvermont_funcs = { |
2225 | .get_max = atom_get_max_pstate, | |
2226 | .get_max_physical = atom_get_max_pstate, | |
2227 | .get_min = atom_get_min_pstate, | |
2228 | .get_turbo = atom_get_turbo_pstate, | |
2229 | .get_val = atom_get_val, | |
2230 | .get_scaling = silvermont_get_scaling, | |
2231 | .get_vid = atom_get_vid, | |
de4a76cb RW |
2232 | }; |
2233 | ||
2f49afc2 RW |
2234 | static const struct pstate_funcs airmont_funcs = { |
2235 | .get_max = atom_get_max_pstate, | |
2236 | .get_max_physical = atom_get_max_pstate, | |
2237 | .get_min = atom_get_min_pstate, | |
2238 | .get_turbo = atom_get_turbo_pstate, | |
2239 | .get_val = atom_get_val, | |
2240 | .get_scaling = airmont_get_scaling, | |
2241 | .get_vid = atom_get_vid, | |
de4a76cb RW |
2242 | }; |
2243 | ||
2f49afc2 RW |
2244 | static const struct pstate_funcs knl_funcs = { |
2245 | .get_max = core_get_max_pstate, | |
2246 | .get_max_physical = core_get_max_pstate_physical, | |
2247 | .get_min = core_get_min_pstate, | |
2248 | .get_turbo = knl_get_turbo_pstate, | |
6e34e1f2 | 2249 | .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, |
2f49afc2 RW |
2250 | .get_scaling = core_get_scaling, |
2251 | .get_val = core_get_val, | |
de4a76cb RW |
2252 | }; |
2253 | ||
b11d77fa TG |
2254 | #define X86_MATCH(model, policy) \ |
2255 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ | |
2256 | X86_FEATURE_APERFMPERF, &policy) | |
93f0822d DB |
2257 | |
2258 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | |
b11d77fa TG |
2259 | X86_MATCH(SANDYBRIDGE, core_funcs), |
2260 | X86_MATCH(SANDYBRIDGE_X, core_funcs), | |
2261 | X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), | |
2262 | X86_MATCH(IVYBRIDGE, core_funcs), | |
2263 | X86_MATCH(HASWELL, core_funcs), | |
2264 | X86_MATCH(BROADWELL, core_funcs), | |
2265 | X86_MATCH(IVYBRIDGE_X, core_funcs), | |
2266 | X86_MATCH(HASWELL_X, core_funcs), | |
2267 | X86_MATCH(HASWELL_L, core_funcs), | |
2268 | X86_MATCH(HASWELL_G, core_funcs), | |
2269 | X86_MATCH(BROADWELL_G, core_funcs), | |
2270 | X86_MATCH(ATOM_AIRMONT, airmont_funcs), | |
2271 | X86_MATCH(SKYLAKE_L, core_funcs), | |
2272 | X86_MATCH(BROADWELL_X, core_funcs), | |
2273 | X86_MATCH(SKYLAKE, core_funcs), | |
2274 | X86_MATCH(BROADWELL_D, core_funcs), | |
2275 | X86_MATCH(XEON_PHI_KNL, knl_funcs), | |
2276 | X86_MATCH(XEON_PHI_KNM, knl_funcs), | |
2277 | X86_MATCH(ATOM_GOLDMONT, core_funcs), | |
2278 | X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), | |
2279 | X86_MATCH(SKYLAKE_X, core_funcs), | |
706c5328 | 2280 | X86_MATCH(COMETLAKE, core_funcs), |
fbdc21e9 | 2281 | X86_MATCH(ICELAKE_X, core_funcs), |
93f0822d DB |
2282 | {} |
2283 | }; | |
2284 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); | |
2285 | ||
29327c84 | 2286 | static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { |
b11d77fa TG |
2287 | X86_MATCH(BROADWELL_D, core_funcs), |
2288 | X86_MATCH(BROADWELL_X, core_funcs), | |
2289 | X86_MATCH(SKYLAKE_X, core_funcs), | |
2f86dc4c DB |
2290 | {} |
2291 | }; | |
2292 | ||
6e978b22 | 2293 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { |
b11d77fa | 2294 | X86_MATCH(KABYLAKE, core_funcs), |
6e978b22 SP |
2295 | {} |
2296 | }; | |
2297 | ||
41ab43c9 | 2298 | static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { |
b11d77fa TG |
2299 | X86_MATCH(SKYLAKE_X, core_funcs), |
2300 | X86_MATCH(SKYLAKE, core_funcs), | |
41ab43c9 SP |
2301 | {} |
2302 | }; | |
2303 | ||
93f0822d DB |
2304 | static int intel_pstate_init_cpu(unsigned int cpunum) |
2305 | { | |
93f0822d DB |
2306 | struct cpudata *cpu; |
2307 | ||
eae48f04 SP |
2308 | cpu = all_cpu_data[cpunum]; |
2309 | ||
2310 | if (!cpu) { | |
c5a2ee7d | 2311 | cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); |
eae48f04 SP |
2312 | if (!cpu) |
2313 | return -ENOMEM; | |
2314 | ||
2315 | all_cpu_data[cpunum] = cpu; | |
eae48f04 | 2316 | |
55671ea3 | 2317 | cpu->cpu = cpunum; |
93f0822d | 2318 | |
55671ea3 | 2319 | cpu->epp_default = -EINVAL; |
ba88d433 | 2320 | |
55671ea3 RW |
2321 | if (hwp_active) { |
2322 | const struct x86_cpu_id *id; | |
6e978b22 | 2323 | |
55671ea3 | 2324 | intel_pstate_hwp_enable(cpu); |
41ab43c9 | 2325 | |
55671ea3 RW |
2326 | id = x86_match_cpu(intel_pstate_hwp_boost_ids); |
2327 | if (id && intel_pstate_acpi_pm_profile_server()) | |
2328 | hwp_boost = true; | |
2329 | } | |
2330 | } else if (hwp_active) { | |
2331 | /* | |
2332 | * Re-enable HWP in case this happens after a resume from ACPI | |
2333 | * S3 if the CPU was offline during the whole system/resume | |
2334 | * cycle. | |
2335 | */ | |
2336 | intel_pstate_hwp_reenable(cpu); | |
a4675fbc | 2337 | } |
ba88d433 | 2338 | |
55671ea3 RW |
2339 | cpu->epp_powersave = -EINVAL; |
2340 | cpu->epp_policy = 0; | |
2341 | ||
179e8471 | 2342 | intel_pstate_get_cpu_pstates(cpu); |
016c8150 | 2343 | |
4836df17 | 2344 | pr_debug("controlling: cpu %d\n", cpunum); |
93f0822d DB |
2345 | |
2346 | return 0; | |
2347 | } | |
2348 | ||
febce40f | 2349 | static void intel_pstate_set_update_util_hook(unsigned int cpu_num) |
bb6ab52f | 2350 | { |
febce40f RW |
2351 | struct cpudata *cpu = all_cpu_data[cpu_num]; |
2352 | ||
e0efd5be | 2353 | if (hwp_active && !hwp_boost) |
62611cb9 LB |
2354 | return; |
2355 | ||
5ab666e0 RW |
2356 | if (cpu->update_util_set) |
2357 | return; | |
2358 | ||
febce40f RW |
2359 | /* Prevent intel_pstate_update_util() from using stale data. */ |
2360 | cpu->sample.time = 0; | |
67dd9bf4 | 2361 | cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, |
e0efd5be SP |
2362 | (hwp_active ? |
2363 | intel_pstate_update_util_hwp : | |
2364 | intel_pstate_update_util)); | |
4578ee7e | 2365 | cpu->update_util_set = true; |
bb6ab52f RW |
2366 | } |
2367 | ||
2368 | static void intel_pstate_clear_update_util_hook(unsigned int cpu) | |
2369 | { | |
4578ee7e CY |
2370 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
2371 | ||
2372 | if (!cpu_data->update_util_set) | |
2373 | return; | |
2374 | ||
0bed612b | 2375 | cpufreq_remove_update_util_hook(cpu); |
4578ee7e | 2376 | cpu_data->update_util_set = false; |
09659af3 | 2377 | synchronize_rcu(); |
bb6ab52f RW |
2378 | } |
2379 | ||
80b120ca RW |
2380 | static int intel_pstate_get_max_freq(struct cpudata *cpu) |
2381 | { | |
2382 | return global.turbo_disabled || global.no_turbo ? | |
2383 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | |
2384 | } | |
2385 | ||
1e4f63ae RW |
2386 | static void intel_pstate_update_perf_limits(struct cpudata *cpu, |
2387 | unsigned int policy_min, | |
2388 | unsigned int policy_max) | |
eae48f04 | 2389 | { |
eb3693f0 | 2390 | int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; |
e4c204ce | 2391 | int32_t max_policy_perf, min_policy_perf; |
a410c03d | 2392 | |
eb3693f0 RW |
2393 | max_policy_perf = policy_max / perf_ctl_scaling; |
2394 | if (policy_max == policy_min) { | |
2395 | min_policy_perf = max_policy_perf; | |
2396 | } else { | |
2397 | min_policy_perf = policy_min / perf_ctl_scaling; | |
2398 | min_policy_perf = clamp_t(int32_t, min_policy_perf, | |
2399 | 0, max_policy_perf); | |
2400 | } | |
2401 | ||
1a4fe38a | 2402 | /* |
de5bcf40 RW |
2403 | * HWP needs some special consideration, because HWP_REQUEST uses |
2404 | * abstract values to represent performance rather than pure ratios. | |
1a4fe38a | 2405 | */ |
eb3693f0 | 2406 | if (hwp_active) { |
de5bcf40 RW |
2407 | intel_pstate_get_hwp_cap(cpu); |
2408 | ||
eb3693f0 RW |
2409 | if (cpu->pstate.scaling != perf_ctl_scaling) { |
2410 | int scaling = cpu->pstate.scaling; | |
2411 | int freq; | |
2412 | ||
2413 | freq = max_policy_perf * perf_ctl_scaling; | |
2414 | max_policy_perf = DIV_ROUND_UP(freq, scaling); | |
2415 | freq = min_policy_perf * perf_ctl_scaling; | |
2416 | min_policy_perf = DIV_ROUND_UP(freq, scaling); | |
2417 | } | |
5879f877 | 2418 | } |
eae48f04 | 2419 | |
b989bc0f RW |
2420 | pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", |
2421 | cpu->cpu, min_policy_perf, max_policy_perf); | |
1a4fe38a | 2422 | |
e4c204ce | 2423 | /* Normalize user input to [min_perf, max_perf] */ |
c5a2ee7d | 2424 | if (per_cpu_limits) { |
1a4fe38a SP |
2425 | cpu->min_perf_ratio = min_policy_perf; |
2426 | cpu->max_perf_ratio = max_policy_perf; | |
c5a2ee7d | 2427 | } else { |
b989bc0f | 2428 | int turbo_max = cpu->pstate.turbo_pstate; |
c5a2ee7d RW |
2429 | int32_t global_min, global_max; |
2430 | ||
2431 | /* Global limits are in percent of the maximum turbo P-state. */ | |
1a4fe38a SP |
2432 | global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); |
2433 | global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); | |
c5a2ee7d | 2434 | global_min = clamp_t(int32_t, global_min, 0, global_max); |
eae48f04 | 2435 | |
1e4f63ae | 2436 | pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, |
1a4fe38a | 2437 | global_min, global_max); |
c5a2ee7d | 2438 | |
1a4fe38a SP |
2439 | cpu->min_perf_ratio = max(min_policy_perf, global_min); |
2440 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); | |
2441 | cpu->max_perf_ratio = min(max_policy_perf, global_max); | |
2442 | cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); | |
eae48f04 | 2443 | |
1a4fe38a SP |
2444 | /* Make sure min_perf <= max_perf */ |
2445 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, | |
2446 | cpu->max_perf_ratio); | |
eae48f04 | 2447 | |
1a4fe38a | 2448 | } |
1e4f63ae | 2449 | pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, |
1a4fe38a SP |
2450 | cpu->max_perf_ratio, |
2451 | cpu->min_perf_ratio); | |
eae48f04 SP |
2452 | } |
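
/*
 * Illustrative sketch (not part of the driver): a simplified version of the
 * non-per-CPU-limits path above, combining a policy's min/max frequencies
 * with the global percent limits into min/max performance ratios. The
 * scaling, turbo ratio and percent values are example assumptions.
 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void example_perf_limits(unsigned int policy_min_khz,
				unsigned int policy_max_khz,
				int *min_ratio, int *max_ratio)
{
	int scaling = 100000;			/* kHz per ratio step */
	int turbo_pstate = 36;
	int global_min_pct = 20, global_max_pct = 80;

	int max_policy_perf = policy_max_khz / scaling;
	int min_policy_perf = policy_min_khz / scaling;

	/* Global limits are percentages of the maximum turbo P-state. */
	int global_max = DIV_ROUND_UP(turbo_pstate * global_max_pct, 100);
	int global_min = DIV_ROUND_UP(turbo_pstate * global_min_pct, 100);

	/* Take the tighter of the policy and global constraints... */
	*min_ratio = min_policy_perf > global_min ? min_policy_perf : global_min;
	*max_ratio = max_policy_perf < global_max ? max_policy_perf : global_max;

	/* ...and make sure min_perf <= max_perf. */
	if (*min_ratio > *max_ratio)
		*min_ratio = *max_ratio;
}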
2453 | ||
93f0822d DB |
2454 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
2455 | { | |
3be9200d SP |
2456 | struct cpudata *cpu; |
2457 | ||
d3929b83 DB |
2458 | if (!policy->cpuinfo.max_freq) |
2459 | return -ENODEV; | |
2460 | ||
2c2c1af4 SP |
2461 | pr_debug("set_policy cpuinfo.max %u policy->max %u\n", |
2462 | policy->cpuinfo.max_freq, policy->max); | |
2463 | ||
a6c6ead1 | 2464 | cpu = all_cpu_data[policy->cpu]; |
2f1d407a RW |
2465 | cpu->policy = policy->policy; |
2466 | ||
b59fe540 SP |
2467 | mutex_lock(&intel_pstate_limits_lock); |
2468 | ||
1e4f63ae | 2469 | intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
a240c4aa | 2470 | |
2f1d407a | 2471 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
a6c6ead1 RW |
2472 | /* |
2473 | * NOHZ_FULL CPUs need this as the governor callback may not | |
2474 | * be invoked on them. | |
2475 | */ | |
2476 | intel_pstate_clear_update_util_hook(policy->cpu); | |
2477 | intel_pstate_max_within_limits(cpu); | |
82b4e03e LB |
2478 | } else { |
2479 | intel_pstate_set_update_util_hook(policy->cpu); | |
a6c6ead1 RW |
2480 | } |
2481 | ||
e0efd5be SP |
2482 | if (hwp_active) { |
2483 | /* | |
2484 | * When hwp_boost was active before and it has been | |
2485 | * turned off dynamically, we need to clear the | |
2486 | * update util hook. | |
2487 | */ | |
2488 | if (!hwp_boost) | |
2489 | intel_pstate_clear_update_util_hook(policy->cpu); | |
2bfc4cbb | 2490 | intel_pstate_hwp_set(policy->cpu); |
e0efd5be | 2491 | } |
2f86dc4c | 2492 | |
b59fe540 SP |
2493 | mutex_unlock(&intel_pstate_limits_lock); |
2494 | ||
93f0822d DB |
2495 | return 0; |
2496 | } | |
2497 | ||
1e4f63ae RW |
2498 | static void intel_pstate_adjust_policy_max(struct cpudata *cpu, |
2499 | struct cpufreq_policy_data *policy) | |
80b120ca | 2500 | { |
d3264f75 SP |
2501 | if (!hwp_active && |
2502 | cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && | |
80b120ca RW |
2503 | policy->max < policy->cpuinfo.max_freq && |
2504 | policy->max > cpu->pstate.max_freq) { | |
2505 | pr_debug("policy->max > max non turbo frequency\n"); | |
2506 | policy->max = policy->cpuinfo.max_freq; | |
2507 | } | |
2508 | } | |
2509 | ||
d5a2a6bb RW |
2510 | static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, |
2511 | struct cpufreq_policy_data *policy) | |
93f0822d | 2512 | { |
e40ad84c RW |
2513 | int max_freq; |
2514 | ||
7d9a8a9f | 2515 | update_turbo_state(); |
e40ad84c | 2516 | if (hwp_active) { |
de5bcf40 RW |
2517 | intel_pstate_get_hwp_cap(cpu); |
2518 | max_freq = global.no_turbo || global.turbo_disabled ? | |
2519 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | |
e40ad84c RW |
2520 | } else { |
2521 | max_freq = intel_pstate_get_max_freq(cpu); | |
2522 | } | |
2523 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); | |
93f0822d | 2524 | |
1e4f63ae | 2525 | intel_pstate_adjust_policy_max(cpu, policy); |
d5a2a6bb RW |
2526 | } |
2527 | ||
2528 | static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) | |
2529 | { | |
2530 | intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); | |
80b120ca | 2531 | |
93f0822d DB |
2532 | return 0; |
2533 | } | |
2534 | ||
49d6feef | 2535 | static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) |
001c76f0 | 2536 | { |
4adcf2e5 RW |
2537 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2538 | ||
2539 | pr_debug("CPU %d going offline\n", cpu->cpu); | |
2540 | ||
2541 | if (cpu->suspended) | |
2542 | return 0; | |
2543 | ||
2544 | /* | |
2545 | * If the CPU is an SMT thread and it goes offline with the performance | |
2546 | * settings different from the minimum, it will prevent its sibling | |
2547 | * from getting to lower performance levels, so force the minimum | |
2548 | * performance on CPU offline to prevent that from happening. | |
2549 | */ | |
f6ebbcf0 | 2550 | if (hwp_active) |
4adcf2e5 | 2551 | intel_pstate_hwp_offline(cpu); |
f6ebbcf0 | 2552 | else |
4adcf2e5 RW |
2553 | intel_pstate_set_min_pstate(cpu); |
2554 | ||
2555 | intel_pstate_exit_perf_limits(policy); | |
2556 | ||
2557 | return 0; | |
2558 | } | |
2559 | ||
2560 | static int intel_pstate_cpu_online(struct cpufreq_policy *policy) | |
2561 | { | |
2562 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2563 | ||
2564 | pr_debug("CPU %d going online\n", cpu->cpu); | |
2565 | ||
2566 | intel_pstate_init_acpi_perf_limits(policy); | |
2567 | ||
2568 | if (hwp_active) { | |
2569 | /* | |
2570 | * Re-enable HWP and clear the "suspended" flag to let "resume" | |
2571 | * know that it need not do that. | |
2572 | */ | |
2573 | intel_pstate_hwp_reenable(cpu); | |
2574 | cpu->suspended = false; | |
2575 | } | |
2576 | ||
2577 | return 0; | |
001c76f0 RW |
2578 | } |
2579 | ||
49d6feef | 2580 | static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) |
93f0822d | 2581 | { |
001c76f0 | 2582 | intel_pstate_clear_update_util_hook(policy->cpu); |
49d6feef RW |
2583 | |
2584 | return intel_cpufreq_cpu_offline(policy); | |
001c76f0 | 2585 | } |
bb18008f | 2586 | |
001c76f0 RW |
2587 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) |
2588 | { | |
4adcf2e5 | 2589 | pr_debug("CPU %d exiting\n", policy->cpu); |
a4675fbc | 2590 | |
001c76f0 | 2591 | policy->fast_switch_possible = false; |
2f86dc4c | 2592 | |
001c76f0 | 2593 | return 0; |
93f0822d DB |
2594 | } |
2595 | ||
001c76f0 | 2596 | static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) |
93f0822d | 2597 | { |
93f0822d | 2598 | struct cpudata *cpu; |
52e0a509 | 2599 | int rc; |
93f0822d DB |
2600 | |
2601 | rc = intel_pstate_init_cpu(policy->cpu); | |
2602 | if (rc) | |
2603 | return rc; | |
2604 | ||
2605 | cpu = all_cpu_data[policy->cpu]; | |
2606 | ||
1a4fe38a SP |
2607 | cpu->max_perf_ratio = 0xFF; |
2608 | cpu->min_perf_ratio = 0; | |
93f0822d | 2609 | |
93f0822d | 2610 | /* cpuinfo and default policy values */ |
eb3693f0 | 2611 | policy->cpuinfo.min_freq = cpu->pstate.min_freq; |
983e600e | 2612 | update_turbo_state(); |
9083e498 | 2613 | global.turbo_disabled_mf = global.turbo_disabled; |
7de32556 | 2614 | policy->cpuinfo.max_freq = global.turbo_disabled ? |
eea033d0 | 2615 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
de5bcf40 RW |
2616 | |
2617 | policy->min = policy->cpuinfo.min_freq; | |
2618 | policy->max = policy->cpuinfo.max_freq; | |
eea033d0 | 2619 | |
9522a2ff | 2620 | intel_pstate_init_acpi_perf_limits(policy); |
93f0822d | 2621 | |
001c76f0 RW |
2622 | policy->fast_switch_possible = true; |
2623 | ||
93f0822d DB |
2624 | return 0; |
2625 | } | |
2626 | ||
001c76f0 | 2627 | static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
9522a2ff | 2628 | { |
001c76f0 RW |
2629 | int ret = __intel_pstate_cpu_init(policy); |
2630 | ||
2631 | if (ret) | |
2632 | return ret; | |
2633 | ||
5ac54113 RW |
2634 | /* |
2635 | * Set the policy to powersave to provide a valid fallback value in case | |
2636 | * the default cpufreq governor is neither powersave nor performance. | |
2637 | */ | |
2638 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | |
9522a2ff | 2639 | |
c27a0ccc RW |
2640 | if (hwp_active) { |
2641 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2642 | ||
2643 | cpu->epp_cached = intel_pstate_get_epp(cpu, 0); | |
2644 | } | |
2645 | ||
9522a2ff SP |
2646 | return 0; |
2647 | } | |
2648 | ||
001c76f0 | 2649 | static struct cpufreq_driver intel_pstate = { |
93f0822d DB |
2650 | .flags = CPUFREQ_CONST_LOOPS, |
2651 | .verify = intel_pstate_verify_policy, | |
2652 | .setpolicy = intel_pstate_set_policy, | |
4adcf2e5 | 2653 | .suspend = intel_pstate_suspend, |
8442885f | 2654 | .resume = intel_pstate_resume, |
93f0822d | 2655 | .init = intel_pstate_cpu_init, |
9522a2ff | 2656 | .exit = intel_pstate_cpu_exit, |
4adcf2e5 RW |
2657 | .offline = intel_pstate_cpu_offline, |
2658 | .online = intel_pstate_cpu_online, | |
5a25e3f7 | 2659 | .update_limits = intel_pstate_update_limits, |
93f0822d | 2660 | .name = "intel_pstate", |
93f0822d DB |
2661 | }; |
2662 | ||
1e4f63ae | 2663 | static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) |
001c76f0 RW |
2664 | { |
2665 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
001c76f0 | 2666 | |
d5a2a6bb | 2667 | intel_pstate_verify_cpu_policy(cpu, policy); |
1e4f63ae | 2668 | intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
c5a2ee7d | 2669 | |
001c76f0 RW |
2670 | return 0; |
2671 | } | |
2672 | ||
50e9ffab DS |
2673 | /* Use of trace in passive mode: |
2674 | * | |
2675 | * In passive mode the trace core_busy field (also known as the | |
2676 | * performance field, and labelled as such on the graphs; also known as | |
2677 | * core_avg_perf) is not needed and so is re-assigned to indicate if the | |
2678 | * driver call was via the normal or fast switch path. Various graphs | |
2679 | * output from the intel_pstate_tracer.py utility that include core_busy | |
2680 | * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, | |
75a8d877 | 2681 | * so we use 10 to indicate the normal path through the driver, and |
50e9ffab DS |
2682 | * 90 to indicate the fast switch path through the driver. |
2683 | * The scaled_busy field is not used, and is set to 0. | |
2684 | */ | |
2685 | ||
2686 | #define INTEL_PSTATE_TRACE_TARGET 10 | |
2687 | #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 | |
2688 | ||
2689 | static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) | |
2690 | { | |
2691 | struct sample *sample; | |
2692 | ||
2693 | if (!trace_pstate_sample_enabled()) | |
2694 | return; | |
2695 | ||
2696 | if (!intel_pstate_sample(cpu, ktime_get())) | |
2697 | return; | |
2698 | ||
2699 | sample = &cpu->sample; | |
2700 | trace_pstate_sample(trace_type, | |
2701 | 0, | |
2702 | old_pstate, | |
2703 | cpu->pstate.current_pstate, | |
2704 | sample->mperf, | |
2705 | sample->aperf, | |
2706 | sample->tsc, | |
2707 | get_avg_frequency(cpu), | |
2708 | fp_toint(cpu->iowait_boost * 100)); | |
2709 | } | |
2710 | ||
597ffbc8 | 2711 | static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, |
a365ab6b | 2712 | u32 desired, bool fast_switch) |
f6ebbcf0 RW |
2713 | { |
2714 | u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; | |
2715 | ||
2716 | value &= ~HWP_MIN_PERF(~0L); | |
a365ab6b | 2717 | value |= HWP_MIN_PERF(min); |
f6ebbcf0 | 2718 | |
f6ebbcf0 | 2719 | value &= ~HWP_MAX_PERF(~0L); |
a365ab6b RW |
2720 | value |= HWP_MAX_PERF(max); |
2721 | ||
2722 | value &= ~HWP_DESIRED_PERF(~0L); | |
2723 | value |= HWP_DESIRED_PERF(desired); | |
f6ebbcf0 RW |
2724 | |
2725 | if (value == prev) | |
2726 | return; | |
2727 | ||
2728 | WRITE_ONCE(cpu->hwp_req_cached, value); | |
2729 | if (fast_switch) | |
2730 | wrmsrl(MSR_HWP_REQUEST, value); | |
2731 | else | |
2732 | wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); | |
2733 | } | |
2734 | ||
597ffbc8 | 2735 | static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, |
f6ebbcf0 RW |
2736 | u32 target_pstate, bool fast_switch) |
2737 | { | |
2738 | if (fast_switch) | |
2739 | wrmsrl(MSR_IA32_PERF_CTL, | |
2740 | pstate_funcs.get_val(cpu, target_pstate)); | |
2741 | else | |
2742 | wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, | |
2743 | pstate_funcs.get_val(cpu, target_pstate)); | |
2744 | } | |
2745 | ||
fcb3a1ab RW |
2746 | static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, |
2747 | int target_pstate, bool fast_switch) | |
f6ebbcf0 | 2748 | { |
fcb3a1ab | 2749 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
f6ebbcf0 RW |
2750 | int old_pstate = cpu->pstate.current_pstate; |
2751 | ||
2752 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | |
a365ab6b RW |
2753 | if (hwp_active) { |
2754 | int max_pstate = policy->strict_target ? | |
2755 | target_pstate : cpu->max_perf_ratio; | |
2756 | ||
597ffbc8 | 2757 | intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, |
a365ab6b RW |
2758 | fast_switch); |
2759 | } else if (target_pstate != old_pstate) { | |
597ffbc8 | 2760 | intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); |
a365ab6b | 2761 | } |
2554c32f RW |
2762 | |
2763 | cpu->pstate.current_pstate = target_pstate; | |
f6ebbcf0 RW |
2764 | |
2765 | intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : | |
2766 | INTEL_PSTATE_TRACE_TARGET, old_pstate); | |
2767 | ||
2768 | return target_pstate; | |
2769 | } | |
2770 | ||
001c76f0 RW |
2771 | static int intel_cpufreq_target(struct cpufreq_policy *policy, |
2772 | unsigned int target_freq, | |
2773 | unsigned int relation) | |
2774 | { | |
2775 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2776 | struct cpufreq_freqs freqs; | |
f6ebbcf0 | 2777 | int target_pstate; |
001c76f0 | 2778 | |
64897b20 RW |
2779 | update_turbo_state(); |
2780 | ||
001c76f0 | 2781 | freqs.old = policy->cur; |
64897b20 | 2782 | freqs.new = target_freq; |
001c76f0 RW |
2783 | |
2784 | cpufreq_freq_transition_begin(policy, &freqs); | |
f6ebbcf0 | 2785 | |
001c76f0 RW |
2786 | switch (relation) { |
2787 | case CPUFREQ_RELATION_L: | |
2788 | target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); | |
2789 | break; | |
2790 | case CPUFREQ_RELATION_H: | |
2791 | target_pstate = freqs.new / cpu->pstate.scaling; | |
2792 | break; | |
2793 | default: | |
2794 | target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); | |
2795 | break; | |
2796 | } | |
f6ebbcf0 | 2797 | |
fcb3a1ab | 2798 | target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); |
f6ebbcf0 | 2799 | |
64078299 | 2800 | freqs.new = target_pstate * cpu->pstate.scaling; |
f6ebbcf0 | 2801 | |
001c76f0 RW |
2802 | cpufreq_freq_transition_end(policy, &freqs, false); |
2803 | ||
2804 | return 0; | |
2805 | } | |
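
/*
 * Illustrative sketch (not part of the driver): how the cpufreq relation
 * chooses the rounding direction when the target frequency above is converted
 * to a P-state. The enum stands in for the CPUFREQ_RELATION_* constants and
 * scaling (kHz per ratio step) is an example value.
 */
enum example_relation { EX_REL_LOW, EX_REL_HIGH, EX_REL_CLOSEST };

static int example_freq_to_pstate(unsigned int freq_khz, enum example_relation rel)
{
	unsigned int scaling = 100000;		/* example: 100 MHz per step */

	switch (rel) {
	case EX_REL_LOW:	/* lowest frequency at or above the target */
		return (freq_khz + scaling - 1) / scaling;
	case EX_REL_HIGH:	/* highest frequency at or below the target */
		return freq_khz / scaling;
	default:		/* closest frequency */
		return (freq_khz + scaling / 2) / scaling;
	}
}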
2806 | ||
2807 | static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |
2808 | unsigned int target_freq) | |
2809 | { | |
2810 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
f6ebbcf0 | 2811 | int target_pstate; |
001c76f0 | 2812 | |
64897b20 RW |
2813 | update_turbo_state(); |
2814 | ||
001c76f0 | 2815 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
f6ebbcf0 | 2816 | |
fcb3a1ab | 2817 | target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); |
f6ebbcf0 | 2818 | |
64078299 | 2819 | return target_pstate * cpu->pstate.scaling; |
001c76f0 RW |
2820 | } |
2821 | ||
a365ab6b RW |
2822 | static void intel_cpufreq_adjust_perf(unsigned int cpunum, |
2823 | unsigned long min_perf, | |
2824 | unsigned long target_perf, | |
2825 | unsigned long capacity) | |
2826 | { | |
2827 | struct cpudata *cpu = all_cpu_data[cpunum]; | |
17ffd358 | 2828 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
a365ab6b RW |
2829 | int old_pstate = cpu->pstate.current_pstate; |
2830 | int cap_pstate, min_pstate, max_pstate, target_pstate; | |
2831 | ||
2832 | update_turbo_state(); | |
17ffd358 RW |
2833 | cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : |
2834 | HWP_HIGHEST_PERF(hwp_cap); | |
a365ab6b RW |
2835 | |
2836 | /* Optimization: Avoid unnecessary divisions. */ | |
2837 | ||
2838 | target_pstate = cap_pstate; | |
2839 | if (target_perf < capacity) | |
2840 | target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); | |
2841 | ||
2842 | min_pstate = cap_pstate; | |
2843 | if (min_perf < capacity) | |
2844 | min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); | |
2845 | ||
2846 | if (min_pstate < cpu->pstate.min_pstate) | |
2847 | min_pstate = cpu->pstate.min_pstate; | |
2848 | ||
2849 | if (min_pstate < cpu->min_perf_ratio) | |
2850 | min_pstate = cpu->min_perf_ratio; | |
2851 | ||
2852 | max_pstate = min(cap_pstate, cpu->max_perf_ratio); | |
2853 | if (max_pstate < min_pstate) | |
2854 | max_pstate = min_pstate; | |
2855 | ||
2856 | target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); | |
2857 | ||
597ffbc8 | 2858 | intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); |
a365ab6b RW |
2859 | |
2860 | cpu->pstate.current_pstate = target_pstate; | |
2861 | intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); | |
2862 | } | |
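
/*
 * Illustrative sketch (not part of the driver): the schedutil-style mapping
 * used by intel_cpufreq_adjust_perf() above, where target_perf/capacity
 * scales the highest available P-state. Example: cap_pstate = 40,
 * target_perf = 512, capacity = 1024 gives a target ratio of 20.
 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int example_adjust_perf(unsigned long target_perf, unsigned long capacity,
			       int cap_pstate, int min_pstate, int max_pstate)
{
	int target = cap_pstate;

	/* Avoid the division when running at full capacity. */
	if (target_perf < capacity)
		target = DIV_ROUND_UP(cap_pstate * target_perf, capacity);

	if (target < min_pstate)
		target = min_pstate;
	if (target > max_pstate)
		target = max_pstate;

	return target;
}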
2863 | ||
001c76f0 RW |
2864 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
2865 | { | |
3000ce3c | 2866 | struct freq_qos_request *req; |
da5c504c VK |
2867 | struct cpudata *cpu; |
2868 | struct device *dev; | |
de5bcf40 | 2869 | int ret, freq; |
da5c504c VK |
2870 | |
2871 | dev = get_cpu_device(policy->cpu); | |
2872 | if (!dev) | |
2873 | return -ENODEV; | |
001c76f0 | 2874 | |
da5c504c | 2875 | ret = __intel_pstate_cpu_init(policy); |
001c76f0 RW |
2876 | if (ret) |
2877 | return ret; | |
2878 | ||
2879 | policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; | |
2880 | /* This reflects the intel_pstate_get_cpu_pstates() setting. */ | |
2881 | policy->cur = policy->cpuinfo.min_freq; | |
2882 | ||
da5c504c VK |
2883 | req = kcalloc(2, sizeof(*req), GFP_KERNEL); |
2884 | if (!req) { | |
2885 | ret = -ENOMEM; | |
2886 | goto pstate_exit; | |
2887 | } | |
2888 | ||
2889 | cpu = all_cpu_data[policy->cpu]; | |
2890 | ||
f6ebbcf0 RW |
2891 | if (hwp_active) { |
2892 | u64 value; | |
2893 | ||
f6ebbcf0 | 2894 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; |
de5bcf40 RW |
2895 | |
2896 | intel_pstate_get_hwp_cap(cpu); | |
2897 | ||
f6ebbcf0 RW |
2898 | rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); |
2899 | WRITE_ONCE(cpu->hwp_req_cached, value); | |
de5bcf40 | 2900 | |
c27a0ccc | 2901 | cpu->epp_cached = intel_pstate_get_epp(cpu, value); |
f6ebbcf0 | 2902 | } else { |
f6ebbcf0 RW |
2903 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; |
2904 | } | |
da5c504c | 2905 | |
de5bcf40 | 2906 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); |
da5c504c | 2907 | |
3000ce3c | 2908 | ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, |
de5bcf40 | 2909 | freq); |
da5c504c VK |
2910 | if (ret < 0) { |
2911 | dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); | |
2912 | goto free_req; | |
2913 | } | |
2914 | ||
de5bcf40 RW |
2915 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); |
2916 | ||
3000ce3c | 2917 | ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, |
de5bcf40 | 2918 | freq); |
da5c504c VK |
2919 | if (ret < 0) { |
2920 | dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); | |
2921 | goto remove_min_req; | |
2922 | } | |
2923 | ||
2924 | policy->driver_data = req; | |
2925 | ||
001c76f0 | 2926 | return 0; |
da5c504c VK |
2927 | |
2928 | remove_min_req: | |
3000ce3c | 2929 | freq_qos_remove_request(req); |
da5c504c VK |
2930 | free_req: |
2931 | kfree(req); | |
2932 | pstate_exit: | |
2933 | intel_pstate_exit_perf_limits(policy); | |
2934 | ||
2935 | return ret; | |
2936 | } | |
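/*
 * Illustration (not part of the driver): the two freq_qos requests set
 * up above turn the global min_perf_pct/max_perf_pct percentages into
 * kHz constraints relative to the turbo frequency.  Assuming, say,
 * cpu->pstate.turbo_freq = 3600000 kHz and global.min_perf_pct = 25:
 *
 *	freq = DIV_ROUND_UP(3600000 * 25, 100);		yields 900000 kHz
 *
 * DIV_ROUND_UP() is used so the constraint never drops below the
 * requested percentage.  policy->driver_data keeps the two-entry
 * request array (FREQ_QOS_MIN at req[0], FREQ_QOS_MAX at req[1]), which
 * intel_cpufreq_cpu_exit() below removes again before freeing it.
 */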
2937 | ||
2938 | static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
2939 | { | |
3000ce3c | 2940 | struct freq_qos_request *req; |
da5c504c VK |
2941 | |
2942 | req = policy->driver_data; | |
2943 | ||
3000ce3c RW |
2944 | freq_qos_remove_request(req + 1); |
2945 | freq_qos_remove_request(req); | |
da5c504c VK |
2946 | kfree(req); |
2947 | ||
2948 | return intel_pstate_cpu_exit(policy); | |
001c76f0 RW |
2949 | } |
2950 | ||
2951 | static struct cpufreq_driver intel_cpufreq = { | |
2952 | .flags = CPUFREQ_CONST_LOOPS, | |
2953 | .verify = intel_cpufreq_verify_policy, | |
2954 | .target = intel_cpufreq_target, | |
2955 | .fast_switch = intel_cpufreq_fast_switch, | |
2956 | .init = intel_cpufreq_cpu_init, | |
da5c504c | 2957 | .exit = intel_cpufreq_cpu_exit, |
49d6feef | 2958 | .offline = intel_cpufreq_cpu_offline, |
4adcf2e5 RW |
2959 | .online = intel_pstate_cpu_online, |
2960 | .suspend = intel_pstate_suspend, | |
2961 | .resume = intel_pstate_resume, | |
5a25e3f7 | 2962 | .update_limits = intel_pstate_update_limits, |
001c76f0 RW |
2963 | .name = "intel_cpufreq", |
2964 | }; | |
2965 | ||
39a188b8 | 2966 | static struct cpufreq_driver *default_driver; |
001c76f0 | 2967 | |
fb1fe104 RW |
2968 | static void intel_pstate_driver_cleanup(void) |
2969 | { | |
2970 | unsigned int cpu; | |
2971 | ||
2972 | get_online_cpus(); | |
2973 | for_each_online_cpu(cpu) { | |
2974 | if (all_cpu_data[cpu]) { | |
2975 | if (intel_pstate_driver == &intel_pstate) | |
2976 | intel_pstate_clear_update_util_hook(cpu); | |
2977 | ||
2978 | kfree(all_cpu_data[cpu]); | |
2979 | all_cpu_data[cpu] = NULL; | |
2980 | } | |
2981 | } | |
2982 | put_online_cpus(); | |
f6ebbcf0 | 2983 | |
ee8df89a | 2984 | intel_pstate_driver = NULL; |
fb1fe104 RW |
2985 | } |
2986 | ||
ee8df89a | 2987 | static int intel_pstate_register_driver(struct cpufreq_driver *driver) |
fb1fe104 RW |
2988 | { |
2989 | int ret; | |
2990 | ||
f6ebbcf0 RW |
2991 | if (driver == &intel_pstate) |
2992 | intel_pstate_sysfs_expose_hwp_dynamic_boost(); | |
2993 | ||
c5a2ee7d RW |
2994 | memset(&global, 0, sizeof(global)); |
2995 | global.max_perf_pct = 100; | |
c3a49c89 | 2996 | |
ee8df89a | 2997 | intel_pstate_driver = driver; |
fb1fe104 RW |
2998 | ret = cpufreq_register_driver(intel_pstate_driver); |
2999 | if (ret) { | |
3000 | intel_pstate_driver_cleanup(); | |
3001 | return ret; | |
3002 | } | |
3003 | ||
c5a2ee7d RW |
3004 | global.min_perf_pct = min_perf_pct_min(); |
3005 | ||
fb1fe104 RW |
3006 | return 0; |
3007 | } | |
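/*
 * Note on the registration path above: every (re)registration starts
 * from known defaults - the global limits are zeroed and max_perf_pct
 * is reset to 100 before cpufreq_register_driver() runs, and
 * min_perf_pct is re-derived via min_perf_pct_min() only after
 * registration has succeeded.  On failure the path unwinds through
 * intel_pstate_driver_cleanup(), which frees the per-CPU data and
 * clears intel_pstate_driver so the status interface reads "off".
 */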
3008 | ||
fb1fe104 RW |
3009 | static ssize_t intel_pstate_show_status(char *buf) |
3010 | { | |
ee8df89a | 3011 | if (!intel_pstate_driver) |
fb1fe104 RW |
3012 | return sprintf(buf, "off\n"); |
3013 | ||
3014 | return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? | |
3015 | "active" : "passive"); | |
3016 | } | |
3017 | ||
3018 | static int intel_pstate_update_status(const char *buf, size_t size) | |
3019 | { | |
43298db3 RW |
3020 | if (size == 3 && !strncmp(buf, "off", size)) { |
3021 | if (!intel_pstate_driver) | |
3022 | return -EINVAL; | |
3023 | ||
3024 | if (hwp_active) | |
3025 | return -EBUSY; | |
3026 | ||
55671ea3 RW |
3027 | cpufreq_unregister_driver(intel_pstate_driver); |
3028 | intel_pstate_driver_cleanup(); | |
fc7d1755 | 3029 | return 0; |
43298db3 | 3030 | } |
fb1fe104 RW |
3031 | |
3032 | if (size == 6 && !strncmp(buf, "active", size)) { | |
ee8df89a | 3033 | if (intel_pstate_driver) { |
fb1fe104 RW |
3034 | if (intel_pstate_driver == &intel_pstate) |
3035 | return 0; | |
3036 | ||
55671ea3 | 3037 | cpufreq_unregister_driver(intel_pstate_driver); |
fb1fe104 RW |
3038 | } |
3039 | ||
ee8df89a | 3040 | return intel_pstate_register_driver(&intel_pstate); |
fb1fe104 RW |
3041 | } |
3042 | ||
3043 | if (size == 7 && !strncmp(buf, "passive", size)) { | |
ee8df89a | 3044 | if (intel_pstate_driver) { |
0042b2c0 | 3045 | if (intel_pstate_driver == &intel_cpufreq) |
fb1fe104 RW |
3046 | return 0; |
3047 | ||
55671ea3 RW |
3048 | cpufreq_unregister_driver(intel_pstate_driver); |
3049 | intel_pstate_sysfs_hide_hwp_dynamic_boost(); | |
fb1fe104 RW |
3050 | } |
3051 | ||
ee8df89a | 3052 | return intel_pstate_register_driver(&intel_cpufreq); |
fb1fe104 RW |
3053 | } |
3054 | ||
3055 | return -EINVAL; | |
3056 | } | |
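/*
 * The two helpers above back the intel_pstate "status" attribute
 * (normally /sys/devices/system/cpu/intel_pstate/status).  Only the
 * exact strings "off", "active" and "passive" are accepted; writing the
 * mode that is already in effect is a no-op, switching to "off" is
 * rejected with -EBUSY while HWP is active, and any other input returns
 * -EINVAL.  For example, writing "passive" to that attribute at run
 * time unregisters the current driver (if any) and re-registers
 * intel_cpufreq.
 */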
3057 | ||
eed43609 JZ |
3058 | static int no_load __initdata; |
3059 | static int no_hwp __initdata; | |
3060 | static int hwp_only __initdata; | |
29327c84 | 3061 | static unsigned int force_load __initdata; |
6be26498 | 3062 | |
29327c84 | 3063 | static int __init intel_pstate_msrs_not_valid(void) |
b563b4e3 | 3064 | { |
016c8150 | 3065 | if (!pstate_funcs.get_max() || |
c410833a SK |
3066 | !pstate_funcs.get_min() || |
3067 | !pstate_funcs.get_turbo()) | |
b563b4e3 DB |
3068 | return -ENODEV; |
3069 | ||
b563b4e3 DB |
3070 | return 0; |
3071 | } | |
016c8150 | 3072 | |
29327c84 | 3073 | static void __init copy_cpu_funcs(struct pstate_funcs *funcs) |
016c8150 DB |
3074 | { |
3075 | pstate_funcs.get_max = funcs->get_max; | |
3bcc6fa9 | 3076 | pstate_funcs.get_max_physical = funcs->get_max_physical; |
016c8150 DB |
3077 | pstate_funcs.get_min = funcs->get_min; |
3078 | pstate_funcs.get_turbo = funcs->get_turbo; | |
b27580b0 | 3079 | pstate_funcs.get_scaling = funcs->get_scaling; |
fdfdb2b1 | 3080 | pstate_funcs.get_val = funcs->get_val; |
007bea09 | 3081 | pstate_funcs.get_vid = funcs->get_vid; |
6e34e1f2 | 3082 | pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; |
016c8150 DB |
3083 | } |
3084 | ||
9522a2ff | 3085 | #ifdef CONFIG_ACPI |
fbbcdc07 | 3086 | |
29327c84 | 3087 | static bool __init intel_pstate_no_acpi_pss(void) |
fbbcdc07 AH |
3088 | { |
3089 | int i; | |
3090 | ||
3091 | for_each_possible_cpu(i) { | |
3092 | acpi_status status; | |
3093 | union acpi_object *pss; | |
3094 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | |
3095 | struct acpi_processor *pr = per_cpu(processors, i); | |
3096 | ||
3097 | if (!pr) | |
3098 | continue; | |
3099 | ||
3100 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); | |
3101 | if (ACPI_FAILURE(status)) | |
3102 | continue; | |
3103 | ||
3104 | pss = buffer.pointer; | |
3105 | if (pss && pss->type == ACPI_TYPE_PACKAGE) { | |
3106 | kfree(pss); | |
3107 | return false; | |
3108 | } | |
3109 | ||
3110 | kfree(pss); | |
3111 | } | |
3112 | ||
076b862c | 3113 | pr_debug("ACPI _PSS not found\n"); |
fbbcdc07 AH |
3114 | return true; |
3115 | } | |
3116 | ||
95d6c085 RW |
3117 | static bool __init intel_pstate_no_acpi_pcch(void) |
3118 | { | |
3119 | acpi_status status; | |
3120 | acpi_handle handle; | |
3121 | ||
3122 | status = acpi_get_handle(NULL, "\\_SB", &handle); | |
3123 | if (ACPI_FAILURE(status)) | |
076b862c EV |
3124 | goto not_found; |
3125 | ||
3126 | if (acpi_has_method(handle, "PCCH")) | |
3127 | return false; | |
95d6c085 | 3128 | |
076b862c EV |
3129 | not_found: |
3130 | pr_debug("ACPI PCCH not found\n"); | |
3131 | return true; | |
95d6c085 RW |
3132 | } |
3133 | ||
29327c84 | 3134 | static bool __init intel_pstate_has_acpi_ppc(void) |
966916ea | 3135 | { |
3136 | int i; | |
3137 | ||
3138 | for_each_possible_cpu(i) { | |
3139 | struct acpi_processor *pr = per_cpu(processors, i); | |
3140 | ||
3141 | if (!pr) | |
3142 | continue; | |
3143 | if (acpi_has_method(pr->handle, "_PPC")) | |
3144 | return true; | |
3145 | } | |
076b862c | 3146 | pr_debug("ACPI _PPC not found\n"); |
966916ea | 3147 | return false; |
3148 | } | |
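/*
 * Background on the three ACPI probes above: _PSS is the per-CPU object
 * enumerating firmware-provided P-states (consumed by acpi-cpufreq),
 * PCCH is the interface used by the pcc-cpufreq driver, and _PPC is the
 * object through which firmware caps the highest allowed performance
 * state.  Their presence or absence is combined with the vendor list
 * below when deciding whether intel_pstate should yield to platform
 * power management.
 */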
3149 | ||
3150 | enum { | |
3151 | PSS, | |
3152 | PPC, | |
3153 | }; | |
3154 | ||
fbbcdc07 | 3155 | /* Hardware vendor-specific info that has its own power management modes */ |
5e932321 | 3156 | static struct acpi_platform_list plat_info[] __initdata = { |
8d2eecea JS |
3157 | {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, |
3158 | {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3159 | {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3160 | {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3161 | {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3162 | {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3163 | {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3164 | {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3165 | {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3166 | {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3167 | {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3168 | {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3169 | {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3170 | {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
3171 | {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
5e932321 | 3172 | { } /* End */ |
fbbcdc07 AH |
3173 | }; |
3174 | ||
589bab6b SP |
3175 | #define BITMASK_OOB (BIT(8) | BIT(18)) |
3176 | ||
29327c84 | 3177 | static bool __init intel_pstate_platform_pwr_mgmt_exists(void) |
fbbcdc07 | 3178 | { |
2f86dc4c DB |
3179 | const struct x86_cpu_id *id; |
3180 | u64 misc_pwr; | |
5e932321 | 3181 | int idx; |
2f86dc4c DB |
3182 | |
3183 | id = x86_match_cpu(intel_pstate_cpu_oob_ids); | |
3184 | if (id) { | |
3185 | rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); | |
589bab6b SP |
3186 | if (misc_pwr & BITMASK_OOB) { |
3187 | pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); | |
3188 | pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); | |
2f86dc4c | 3189 | return true; |
076b862c | 3190 | } |
2f86dc4c | 3191 | } |
fbbcdc07 | 3192 | |
5e932321 TK |
3193 | idx = acpi_match_platform_list(plat_info); |
3194 | if (idx < 0) | |
fbbcdc07 AH |
3195 | return false; |
3196 | ||
5e932321 TK |
3197 | switch (plat_info[idx].data) { |
3198 | case PSS: | |
95d6c085 RW |
3199 | if (!intel_pstate_no_acpi_pss()) |
3200 | return false; | |
3201 | ||
3202 | return intel_pstate_no_acpi_pcch(); | |
5e932321 TK |
3203 | case PPC: |
3204 | return intel_pstate_has_acpi_ppc() && !force_load; | |
fbbcdc07 AH |
3205 | } |
3206 | ||
3207 | return false; | |
3208 | } | |
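/*
 * Decision summary for intel_pstate_platform_pwr_mgmt_exists(): the
 * driver backs off if the OOB bits are set in MSR_MISC_PWR_MGMT on a
 * matching CPU (firmware controls P-states out of band), or if the
 * platform is on the vendor list above and either provides neither
 * _PSS nor PCCH (PSS entries) or provides _PPC without the user having
 * passed "force" (PPC entries).
 */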
d0ea59e1 RW |
3209 | |
3210 | static void intel_pstate_request_control_from_smm(void) | |
3211 | { | |
3212 | /* | |
3213 | * It may be unsafe to request P-states control from SMM if _PPC support | |
3214 | * has not been enabled. | |
3215 | */ | |
3216 | if (acpi_ppc) | |
3217 | acpi_processor_pstate_control(); | |
3218 | } | |
fbbcdc07 AH |
3219 | #else /* CONFIG_ACPI not enabled */ |
3220 | static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } | |
966916ea | 3221 | static inline bool intel_pstate_has_acpi_ppc(void) { return false; } |
d0ea59e1 | 3222 | static inline void intel_pstate_request_control_from_smm(void) {} |
fbbcdc07 AH |
3223 | #endif /* CONFIG_ACPI */ |
3224 | ||
ff7c9917 SP |
3225 | #define INTEL_PSTATE_HWP_BROADWELL 0x01 |
3226 | ||
b11d77fa TG |
3227 | #define X86_MATCH_HWP(model, hwp_mode) \ |
3228 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ | |
d9782807 | 3229 | X86_FEATURE_HWP, hwp_mode) |
ff7c9917 | 3230 | |
7791e4aa | 3231 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { |
b11d77fa TG |
3232 | X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
3233 | X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), | |
3234 | X86_MATCH_HWP(ANY, 0), | |
7791e4aa SP |
3235 | {} |
3236 | }; | |
3237 | ||
e5af36b2 RW |
3238 | static bool intel_pstate_hwp_is_enabled(void) |
3239 | { | |
3240 | u64 value; | |
3241 | ||
3242 | rdmsrl(MSR_PM_ENABLE, value); | |
3243 | return !!(value & 0x1); | |
3244 | } | |
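/*
 * Note: bit 0 of MSR_PM_ENABLE is the HWP enable bit and, per Intel's
 * documentation, it is sticky - once set it can only be cleared by a
 * reset.  That is why intel_pstate_init() below keeps hwp_active set
 * when the firmware has already enabled HWP, even if EPP is not
 * supported or "no_hwp" was requested on the command line.
 */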
3245 | ||
93f0822d DB |
3246 | static int __init intel_pstate_init(void) |
3247 | { | |
ff7c9917 | 3248 | const struct x86_cpu_id *id; |
eb5139d1 | 3249 | int rc; |
93f0822d | 3250 | |
4ab52646 BP |
3251 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
3252 | return -ENODEV; | |
3253 | ||
6be26498 DB |
3254 | if (no_load) |
3255 | return -ENODEV; | |
3256 | ||
ff7c9917 SP |
3257 | id = x86_match_cpu(hwp_support_ids); |
3258 | if (id) { | |
2f49afc2 | 3259 | copy_cpu_funcs(&core_funcs); |
7aa10312 RW |
3260 | /* |
3261 | * Avoid enabling HWP for processors without EPP support, | |
3262 | * because that means incomplete HWP implementation which is a | |
3263 | * corner case and supporting it is generally problematic. | |
e5af36b2 RW |
3264 | * |
3265 | * If HWP is enabled already, though, there is no choice but to | |
3266 | * deal with it. | |
7aa10312 | 3267 | */ |
e5af36b2 RW |
3268 | if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || |
3269 | intel_pstate_hwp_is_enabled()) { | |
eb5139d1 | 3270 | hwp_active++; |
ff7c9917 | 3271 | hwp_mode_bdw = id->driver_data; |
eb5139d1 | 3272 | intel_pstate.attr = hwp_cpufreq_attrs; |
f6ebbcf0 | 3273 | intel_cpufreq.attr = hwp_cpufreq_attrs; |
e0be38ed | 3274 | intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; |
a365ab6b | 3275 | intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; |
f6ebbcf0 RW |
3276 | if (!default_driver) |
3277 | default_driver = &intel_pstate; | |
3278 | ||
eb5139d1 RW |
3279 | goto hwp_cpu_matched; |
3280 | } | |
3281 | } else { | |
eb5139d1 | 3282 | id = x86_match_cpu(intel_pstate_cpu_ids); |
076b862c | 3283 | if (!id) { |
4ab52646 | 3284 | pr_info("CPU model not supported\n"); |
eb5139d1 | 3285 | return -ENODEV; |
076b862c | 3286 | } |
93f0822d | 3287 | |
2f49afc2 | 3288 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); |
eb5139d1 | 3289 | } |
016c8150 | 3290 | |
076b862c EV |
3291 | if (intel_pstate_msrs_not_valid()) { |
3292 | pr_info("Invalid MSRs\n"); | |
b563b4e3 | 3293 | return -ENODEV; |
076b862c | 3294 | } |
33aa46f2 | 3295 | /* Without HWP, start in the passive mode. */ |
39a188b8 RW |
3296 | if (!default_driver) |
3297 | default_driver = &intel_cpufreq; | |
b563b4e3 | 3298 | |
7791e4aa SP |
3299 | hwp_cpu_matched: |
3300 | /* | |
3301 | * The Intel pstate driver will be ignored if the platform | |
3302 | * firmware has its own power management modes. | |
3303 | */ | |
076b862c EV |
3304 | if (intel_pstate_platform_pwr_mgmt_exists()) { |
3305 | pr_info("P-states controlled by the platform\n"); | |
7791e4aa | 3306 | return -ENODEV; |
076b862c | 3307 | } |
7791e4aa | 3308 | |
fb1fe104 RW |
3309 | if (!hwp_active && hwp_only) |
3310 | return -ENOTSUPP; | |
3311 | ||
4836df17 | 3312 | pr_info("Intel P-state driver initializing\n"); |
93f0822d | 3313 | |
fad953ce | 3314 | all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); |
93f0822d DB |
3315 | if (!all_cpu_data) |
3316 | return -ENOMEM; | |
93f0822d | 3317 | |
d0ea59e1 RW |
3318 | intel_pstate_request_control_from_smm(); |
3319 | ||
93f0822d | 3320 | intel_pstate_sysfs_expose_params(); |
b69880f9 | 3321 | |
0c30b65b | 3322 | mutex_lock(&intel_pstate_driver_lock); |
ee8df89a | 3323 | rc = intel_pstate_register_driver(default_driver); |
0c30b65b | 3324 | mutex_unlock(&intel_pstate_driver_lock); |
cdc1719c CY |
3325 | if (rc) { |
3326 | intel_pstate_sysfs_remove(); | |
fb1fe104 | 3327 | return rc; |
cdc1719c | 3328 | } |
366430b5 | 3329 | |
ed7bde7a SP |
3330 | if (hwp_active) { |
3331 | const struct x86_cpu_id *id; | |
3332 | ||
3333 | id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); | |
3334 | if (id) { | |
3335 | set_power_ctl_ee_state(false); | |
3336 | pr_info("Disabling energy efficiency optimization\n"); | |
3337 | } | |
3338 | ||
4836df17 | 3339 | pr_info("HWP enabled\n"); |
eb3693f0 RW |
3340 | } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { |
3341 | pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); | |
ed7bde7a | 3342 | } |
7791e4aa | 3343 | |
fb1fe104 | 3344 | return 0; |
93f0822d DB |
3345 | } |
3346 | device_initcall(intel_pstate_init); | |
3347 | ||
6be26498 DB |
3348 | static int __init intel_pstate_setup(char *str) |
3349 | { | |
3350 | if (!str) | |
3351 | return -EINVAL; | |
3352 | ||
f6ebbcf0 | 3353 | if (!strcmp(str, "disable")) |
6be26498 | 3354 | no_load = 1; |
f6ebbcf0 | 3355 | else if (!strcmp(str, "active")) |
39a188b8 | 3356 | default_driver = &intel_pstate; |
f6ebbcf0 | 3357 | else if (!strcmp(str, "passive")) |
ee8df89a | 3358 | default_driver = &intel_cpufreq; |
f6ebbcf0 | 3359 | |
539342f6 | 3360 | if (!strcmp(str, "no_hwp")) { |
4836df17 | 3361 | pr_info("HWP disabled\n"); |
2f86dc4c | 3362 | no_hwp = 1; |
539342f6 | 3363 | } |
aa4ea34d EZ |
3364 | if (!strcmp(str, "force")) |
3365 | force_load = 1; | |
d64c3b0b KCA |
3366 | if (!strcmp(str, "hwp_only")) |
3367 | hwp_only = 1; | |
eae48f04 SP |
3368 | if (!strcmp(str, "per_cpu_perf_limits")) |
3369 | per_cpu_limits = true; | |
9522a2ff SP |
3370 | |
3371 | #ifdef CONFIG_ACPI | |
3372 | if (!strcmp(str, "support_acpi_ppc")) | |
3373 | acpi_ppc = true; | |
3374 | #endif | |
3375 | ||
6be26498 DB |
3376 | return 0; |
3377 | } | |
3378 | early_param("intel_pstate", intel_pstate_setup); | |
3379 | ||
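/*
 * Kernel command line usage handled by intel_pstate_setup() above, for
 * example "intel_pstate=passive" or "intel_pstate=disable":
 *
 *	disable			- do not load the driver
 *	active			- start in active mode (built-in governor)
 *	passive			- start in passive mode (intel_cpufreq)
 *	no_hwp			- do not enable HWP
 *	force			- load on vendor-listed platforms even if
 *				  ACPI _PPC is present
 *	hwp_only		- load only when HWP can be used
 *	per_cpu_perf_limits	- allow per-CPU performance limits
 *	support_acpi_ppc	- honour ACPI _PPC limits (CONFIG_ACPI only)
 */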
93f0822d DB |
3380 | MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); |
3381 | MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors"); |
3382 | MODULE_LICENSE("GPL"); |