// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
        return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
        int mask, ret;

        ret = fp_toint(x);
        mask = (1 << FRAC_BITS) - 1;
        if (x & mask)
                ret += 1;
        return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
        return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
        return div64_u64(x << EXT_FRAC_BITS, y);
}

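/*
 * Worked example (illustrative only, not part of the driver): with
 * FRAC_BITS == 8, values carry 8 fractional bits, so int_tofp(3) == 768
 * (0x300).  Then mul_fp(int_tofp(3), int_tofp(2)) computes
 * (768 * 512) >> 8 == 1536 == int_tofp(6), and div_fp(int_tofp(1),
 * int_tofp(2)) computes (256 << 8) / 512 == 128, i.e. 0.5 in fixed point,
 * which ceiling_fp() then rounds up to 1.
 */
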
/**
 * struct sample - Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during the last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P state. This can be different from core_avg_perf
 *			to account for CPU idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P state.
 */
struct sample {
        int32_t core_avg_perf;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
        u64 tsc;
        u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Physical max P state for a processor. This can be
 *			higher than max_pstate, which can be limited by
 *			platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per CPU model P state limits and current P state.
 */
struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int max_pstate_physical;
        int scaling;
        int turbo_pstate;
        unsigned int max_freq;
        unsigned int turbo_freq;
};

/**
 * struct vid_data - Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P state.
 * @turbo:		VID data for the turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
        int min;
        int max;
        int turbo;
        int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
        bool no_turbo;
        bool turbo_disabled;
        bool turbo_disabled_mf;
        int max_perf_pct;
        int min_perf_pct;
};

/**
 * struct cpudata - Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for storing last sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power-on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
        int cpu;

        unsigned int policy;
        struct update_util_data update_util;
        bool update_util_set;

        struct pstate_data pstate;
        struct vid_data vid;

        u64 last_update;
        u64 last_sample_time;
        u64 aperf_mperf_shift;
        u64 prev_aperf;
        u64 prev_mperf;
        u64 prev_tsc;
        u64 prev_cummulative_iowait;
        struct sample sample;
        int32_t min_perf_ratio;
        int32_t max_perf_ratio;
#ifdef CONFIG_ACPI
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
#endif
        unsigned int iowait_boost;
        s16 epp_powersave;
        s16 epp_policy;
        s16 epp_default;
        s16 epp_cached;
        u64 hwp_req_cached;
        u64 hwp_cap_cached;
        u64 last_io_update;
        unsigned int sched_flags;
        u32 hwp_boost_min;
        bool suspended;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);
        int (*get_aperf_mperf_shift)(void);
        u64 (*get_val)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
        if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
            acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
                return true;

        return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
        if (intel_pstate_acpi_pm_profile_server())
                return true;

        return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{
        sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
        struct cppc_perf_caps cppc_perf;
        static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
        int ret;

        ret = cppc_get_perf_caps(cpu, &cppc_perf);
        if (ret)
                return;

        /*
         * The priorities can be set regardless of whether or not
         * sched_set_itmt_support(true) has been called and it is valid to
         * update them at any time after it has been called.
         */
        sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

        if (max_highest_perf <= min_highest_perf) {
                if (cppc_perf.highest_perf > max_highest_perf)
                        max_highest_perf = cppc_perf.highest_perf;

                if (cppc_perf.highest_perf < min_highest_perf)
                        min_highest_perf = cppc_perf.highest_perf;

                if (max_highest_perf > min_highest_perf) {
                        /*
                         * This code can be run during CPU online under the
                         * CPU hotplug locks, so sched_set_itmt_support()
                         * cannot be called from here.  Queue up a work item
                         * to invoke it.
                         */
                        schedule_work(&sched_itmt_work);
                }
        }
}

static int intel_pstate_get_cppc_guranteed(int cpu)
{
        struct cppc_perf_caps cppc_perf;
        int ret;

        ret = cppc_get_perf_caps(cpu, &cppc_perf);
        if (ret)
                return ret;

        if (cppc_perf.guaranteed_perf)
                return cppc_perf.guaranteed_perf;

        return cppc_perf.nominal_perf;
}

#else /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int ret;
        int i;

        if (hwp_active) {
                intel_pstate_set_itmt_prio(policy->cpu);
                return;
        }

        if (!intel_pstate_get_ppc_enable_status())
                return;

        cpu = all_cpu_data[policy->cpu];

        ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
                                                  policy->cpu);
        if (ret)
                return;

        /*
         * Check if the control value in _PSS is for PERF_CTL MSR, which should
         * guarantee that the states returned by it map to the states in our
         * list directly.
         */
        if (cpu->acpi_perf_data.control_register.space_id !=
                                                ACPI_ADR_SPACE_FIXED_HARDWARE)
                goto err;

        /*
         * If there is only one entry in _PSS, simply ignore _PSS and continue
         * as usual without taking _PSS into account.
         */
        if (cpu->acpi_perf_data.state_count < 2)
                goto err;

        pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
        for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
                pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
                         (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
                         (u32) cpu->acpi_perf_data.states[i].core_frequency,
                         (u32) cpu->acpi_perf_data.states[i].power,
                         (u32) cpu->acpi_perf_data.states[i].control);
        }

        /*
         * The _PSS table doesn't contain the whole turbo frequency range.
         * It just contains +1 MHz above the max non-turbo frequency, with a
         * control value corresponding to the max turbo ratio. But when
         * cpufreq set_policy is called, it will be called with this max
         * frequency, which will cause reduced performance, as this driver
         * uses the real max turbo frequency as the max frequency. So correct
         * this frequency in the _PSS table to the correct max turbo frequency
         * based on the turbo state. Also need to convert to MHz, as _PSS
         * frequencies are in MHz.
         */
        if (!global.turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
        pr_debug("_PPC limits will be enforced\n");

        return;

 err:
        cpu->valid_pss_table = false;
        acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];
        if (!cpu->valid_pss_table)
                return;

        acpi_processor_unregister_performance(policy->cpu);
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
        return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static int intel_pstate_get_cppc_guranteed(int cpu)
{
        return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static inline void update_turbo_state(void)
{
        u64 misc_en;
        struct cpudata *cpu;

        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
        global.turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

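/*
 * Illustrative note: turbo ends up reported as unavailable here in two
 * cases - when the BIOS has set the turbo-disable bit in
 * MSR_IA32_MISC_ENABLE (bit 38 on SDM-documented parts), or when the
 * maximum turbo ratio equals the maximum non-turbo ratio, i.e. there is no
 * turbo headroom at all.
 */
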
static int min_perf_pct_min(void)
{
        struct cpudata *cpu = all_cpu_data[0];
        int turbo_pstate = cpu->pstate.turbo_pstate;

        return turbo_pstate ?
                (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}

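/*
 * Worked example (illustrative): with min_pstate == 8 and
 * turbo_pstate == 32, the lowest permissible min_perf_pct is
 * 8 * 100 / 32 == 25, i.e. 25% of the maximum turbo P-state.
 */
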
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
        u64 epb;
        int ret;

        if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
        if (ret)
                return (s16)ret;

        return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
        s16 epp;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * When hwp_req_data is 0, the caller didn't read
                 * MSR_HWP_REQUEST, so read it here to get the EPP.
                 */
                if (!hwp_req_data) {
                        epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
                                            &hwp_req_data);
                        if (epp)
                                return epp;
                }
                epp = (hwp_req_data >> 24) & 0xff;
        } else {
                /* When there is no EPP present, HWP uses EPB settings */
                epp = intel_pstate_get_epb(cpu_data);
        }

        return epp;
}

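/*
 * Example (illustrative): the EPP field occupies bits 31:24 of
 * MSR_HWP_REQUEST, so a raw register value of 0x80001808 yields
 * (0x80001808 >> 24) & 0xff == 0x80, which matches
 * HWP_EPP_BALANCE_PERFORMANCE (0x80 in current msr-index.h).
 */
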
static int intel_pstate_set_epb(int cpu, s16 pref)
{
        u64 epb;
        int ret;

        if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
        if (ret)
                return ret;

        epb = (epb & ~0x0f) | pref;
        wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

        return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
        "default",
        "performance",
        "balance_performance",
        "balance_power",
        "power",
        NULL
};
static const unsigned int epp_values[] = {
        HWP_EPP_PERFORMANCE,
        HWP_EPP_BALANCE_PERFORMANCE,
        HWP_EPP_BALANCE_POWERSAVE,
        HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
        s16 epp;
        int index = -EINVAL;

        *raw_epp = 0;
        epp = intel_pstate_get_epp(cpu_data, 0);
        if (epp < 0)
                return epp;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (epp == HWP_EPP_PERFORMANCE)
                        return 1;
                if (epp == HWP_EPP_BALANCE_PERFORMANCE)
                        return 2;
                if (epp == HWP_EPP_BALANCE_POWERSAVE)
                        return 3;
                if (epp == HWP_EPP_POWERSAVE)
                        return 4;
                *raw_epp = epp;
                return 0;
        } else if (boot_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
                 *	0x00-0x03	:	Performance
                 *	0x04-0x07	:	Balance performance
                 *	0x08-0x0B	:	Balance power
                 *	0x0C-0x0F	:	Power
                 * The EPB is a 4 bit value, but our ranges restrict the
                 * values that can be set, so effectively only the top two
                 * bits are used here.
                 */
                index = (epp >> 2) + 1;
        }

        return index;
}

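/*
 * Worked example (illustrative): on an EPB-only CPU with EPB == 0x06, the
 * mapping above gives (0x06 >> 2) + 1 == 2, i.e. "balance_performance" in
 * energy_perf_strings[].
 */
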
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
        int ret;

        /*
         * Use the cached HWP Request MSR value, because in the active mode the
         * register itself may be updated by intel_pstate_hwp_boost_up() or
         * intel_pstate_hwp_boost_down() at any time.
         */
        u64 value = READ_ONCE(cpu->hwp_req_cached);

        value &= ~GENMASK_ULL(31, 24);
        value |= (u64)epp << 24;
        /*
         * The only other updater of hwp_req_cached in the active mode,
         * intel_pstate_hwp_set(), is called under the same lock as this
         * function, so it cannot run in parallel with the update below.
         */
        WRITE_ONCE(cpu->hwp_req_cached, value);
        ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
        if (!ret)
                cpu->epp_cached = epp;

        return ret;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                                              int pref_index, bool use_raw,
                                              u32 raw_epp)
{
        int epp = -EINVAL;
        int ret;

        if (!pref_index)
                epp = cpu_data->epp_default;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (use_raw)
                        epp = raw_epp;
                else if (epp == -EINVAL)
                        epp = epp_values[pref_index - 1];

                /*
                 * To avoid confusion, refuse to set EPP to any values different
                 * from 0 (performance) if the current policy is "performance",
                 * because those values would be overridden.
                 */
                if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
                        return -EBUSY;

                ret = intel_pstate_set_epp(cpu_data, epp);
        } else {
                if (epp == -EINVAL)
                        epp = (pref_index - 1) << 2;
                ret = intel_pstate_set_epb(cpu_data->cpu, epp);
        }

        return ret;
}

static ssize_t show_energy_performance_available_preferences(
                                struct cpufreq_policy *policy, char *buf)
{
        int i = 0;
        int ret = 0;

        while (energy_perf_strings[i] != NULL)
                ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

        ret += sprintf(&buf[ret], "\n");

        return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
                struct cpufreq_policy *policy, const char *buf, size_t count)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        char str_preference[21];
        bool raw = false;
        ssize_t ret;
        u32 epp = 0;

        ret = sscanf(buf, "%20s", str_preference);
        if (ret != 1)
                return -EINVAL;

        ret = match_string(energy_perf_strings, -1, str_preference);
        if (ret < 0) {
                if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
                        return ret;

                ret = kstrtouint(buf, 10, &epp);
                if (ret)
                        return ret;

                if (epp > 255)
                        return -EINVAL;

                raw = true;
        }

        /*
         * This function runs with the policy R/W semaphore held, which
         * guarantees that the driver pointer will not change while it is
         * running.
         */
        if (!intel_pstate_driver)
                return -EAGAIN;

        mutex_lock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate) {
                ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
        } else {
                /*
                 * In the passive mode the governor needs to be stopped on the
                 * target CPU before the EPP update and restarted after it,
                 * which is super-heavy-weight, so make sure it is worth doing
                 * upfront.
                 */
                if (!raw)
                        epp = ret ? epp_values[ret - 1] : cpu->epp_default;

                if (cpu->epp_cached != epp) {
                        int err;

                        cpufreq_stop_governor(policy);
                        ret = intel_pstate_set_epp(cpu, epp);
                        err = cpufreq_start_governor(policy);
                        if (!ret)
                                ret = err;
                }
        }

        mutex_unlock(&intel_pstate_limits_lock);

        return ret ?: count;
}

static ssize_t show_energy_performance_preference(
                                struct cpufreq_policy *policy, char *buf)
{
        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
        int preference, raw_epp;

        preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
        if (preference < 0)
                return preference;

        if (raw_epp)
                return sprintf(buf, "%d\n", raw_epp);
        else
                return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
        struct cpudata *cpu;
        u64 cap;
        int ratio;

        ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
        if (ratio <= 0) {
                rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
                ratio = HWP_GUARANTEED_PERF(cap);
        }

        cpu = all_cpu_data[policy->cpu];

        return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
        &energy_performance_preference,
        &energy_performance_available_preferences,
        &base_frequency,
        NULL,
};

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
        u64 cap;

        rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
        WRITE_ONCE(cpu->hwp_cap_cached, cap);
        cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
        cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
        __intel_pstate_get_hwp_cap(cpu);
        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
        cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
}

static void intel_pstate_hwp_set(unsigned int cpu)
{
        struct cpudata *cpu_data = all_cpu_data[cpu];
        int max, min;
        u64 value;
        s16 epp;

        max = cpu_data->max_perf_ratio;
        min = cpu_data->min_perf_ratio;

        if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
                min = max;

        rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

        value &= ~HWP_MIN_PERF(~0L);
        value |= HWP_MIN_PERF(min);

        value &= ~HWP_MAX_PERF(~0L);
        value |= HWP_MAX_PERF(max);

        if (cpu_data->epp_policy == cpu_data->policy)
                goto skip_epp;

        cpu_data->epp_policy = cpu_data->policy;

        if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
                epp = intel_pstate_get_epp(cpu_data, value);
                cpu_data->epp_powersave = epp;
                /* If the EPP read failed, don't try to write */
                if (epp < 0)
                        goto skip_epp;

                epp = 0;
        } else {
                /* Skip setting EPP when the saved value is invalid */
                if (cpu_data->epp_powersave < 0)
                        goto skip_epp;

                /*
                 * No need to restore EPP when it is not zero. This
                 * means one of:
                 *  - the policy has not changed,
                 *  - the user has changed it manually,
                 *  - there was an error reading EPB.
                 */
                epp = intel_pstate_get_epp(cpu_data, value);
                if (epp)
                        goto skip_epp;

                epp = cpu_data->epp_powersave;
        }
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                value &= ~GENMASK_ULL(31, 24);
                value |= (u64)epp << 24;
        } else {
                intel_pstate_set_epb(cpu, epp);
        }
skip_epp:
        WRITE_ONCE(cpu_data->hwp_req_cached, value);
        wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
        u64 value = READ_ONCE(cpu->hwp_req_cached);
        int min_perf;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * In case the EPP has been set to "performance" by the
                 * active mode "performance" scaling algorithm, replace that
                 * temporary value with the cached EPP one.
                 */
                value &= ~GENMASK_ULL(31, 24);
                value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
                WRITE_ONCE(cpu->hwp_req_cached, value);
        }

        value &= ~GENMASK_ULL(31, 0);
        min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

        /* Set hwp_max = hwp_min */
        value |= HWP_MAX_PERF(min_perf);
        value |= HWP_MIN_PERF(min_perf);

        /* Set EPP to min */
        if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
        u64 power_ctl;

        mutex_lock(&intel_pstate_driver_lock);
        rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
        if (input) {
                power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
                power_ctl_ee_state = POWER_CTL_EE_ENABLE;
        } else {
                power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
                power_ctl_ee_state = POWER_CTL_EE_DISABLE;
        }
        wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
        mutex_unlock(&intel_pstate_driver_lock);
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
        intel_pstate_hwp_enable(cpu);
        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        pr_debug("CPU %d suspending\n", cpu->cpu);

        cpu->suspended = true;

        return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        pr_debug("CPU %d resuming\n", cpu->cpu);

        /* Only restore if the system default is changed */
        if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
                set_power_ctl_ee_state(true);
        else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
                set_power_ctl_ee_state(false);

        if (cpu->suspended && hwp_active) {
                mutex_lock(&intel_pstate_limits_lock);

                /* Re-enable HWP, because "online" has not done that. */
                intel_pstate_hwp_reenable(cpu);

                mutex_unlock(&intel_pstate_limits_lock);
        }

        cpu->suspended = false;

        return 0;
}

static void intel_pstate_update_policies(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
        struct cpudata *cpudata;

        if (!policy)
                return;

        cpudata = all_cpu_data[cpu];
        policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

        refresh_frequency_limits(policy);

        cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
        mutex_lock(&intel_pstate_driver_lock);

        update_turbo_state();
        /*
         * If turbo has been turned on or off globally, policy limits for
         * all CPUs need to be updated to reflect that.
         */
        if (global.turbo_disabled_mf != global.turbo_disabled) {
                global.turbo_disabled_mf = global.turbo_disabled;
                arch_set_max_freq_ratio(global.turbo_disabled);
                for_each_possible_cpu(cpu)
                        intel_pstate_update_max_freq(cpu);
        } else {
                cpufreq_update_policy(cpu);
        }

        mutex_unlock(&intel_pstate_driver_lock);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        ssize_t ret;

        mutex_lock(&intel_pstate_driver_lock);
        ret = intel_pstate_show_status(buf);
        mutex_unlock(&intel_pstate_driver_lock);

        return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
                            const char *buf, size_t count)
{
        char *p = memchr(buf, '\n', count);
        int ret;

        mutex_lock(&intel_pstate_driver_lock);
        ret = intel_pstate_update_status(buf, p ? p - buf : count);
        mutex_unlock(&intel_pstate_driver_lock);

        return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total, no_turbo, turbo_pct;
        uint32_t turbo_fp;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        cpu = all_cpu_data[0];

        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
        turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

        mutex_unlock(&intel_pstate_driver_lock);

        return sprintf(buf, "%u\n", turbo_pct);
}

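/*
 * Worked example (illustrative): with min_pstate == 8, max_pstate == 24 and
 * turbo_pstate == 36, total == 29 and no_turbo == 17, so turbo_pct ==
 * 100 - fp_toint(mul_fp(div_fp(17, 29), int_tofp(100))) == 100 - 58 == 42,
 * i.e. roughly 42% of the available P-states are turbo states.
 */
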
static ssize_t show_num_pstates(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        cpu = all_cpu_data[0];
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

        mutex_unlock(&intel_pstate_driver_lock);

        return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
                             struct kobj_attribute *attr, char *buf)
{
        ssize_t ret;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        update_turbo_state();
        if (global.turbo_disabled)
                ret = sprintf(buf, "%u\n", global.turbo_disabled);
        else
                ret = sprintf(buf, "%u\n", global.no_turbo);

        mutex_unlock(&intel_pstate_driver_lock);

        return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        update_turbo_state();
        if (global.turbo_disabled) {
                pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }

        global.no_turbo = clamp_t(int, input, 0, 1);

        if (global.no_turbo) {
                struct cpudata *cpu = all_cpu_data[0];
                int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

                /* Squash the global minimum into the permitted range. */
                if (global.min_perf_pct > pct)
                        global.min_perf_pct = pct;
        }

        mutex_unlock(&intel_pstate_limits_lock);

        intel_pstate_update_policies();

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

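/*
 * Usage example (illustrative): writing
 *	echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 * lands in store_no_turbo() above, sets global.no_turbo, and re-evaluates
 * every CPU's policy limits via intel_pstate_update_policies().
 */
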
static void update_qos_request(enum freq_qos_req_type type)
{
        struct freq_qos_request *req;
        struct cpufreq_policy *policy;
        int i;

        for_each_possible_cpu(i) {
                struct cpudata *cpu = all_cpu_data[i];
                unsigned int freq, perf_pct;

                policy = cpufreq_cpu_get(i);
                if (!policy)
                        continue;

                req = policy->driver_data;
                cpufreq_cpu_put(policy);

                if (!req)
                        continue;

                if (hwp_active)
                        intel_pstate_get_hwp_cap(cpu);

                if (type == FREQ_QOS_MIN) {
                        perf_pct = global.min_perf_pct;
                } else {
                        req++;
                        perf_pct = global.max_perf_pct;
                }

                freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);

                if (freq_qos_update_request(req, freq) < 0)
                        pr_warn("Failed to update freq constraint: CPU%d\n", i);
        }
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

        mutex_unlock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
                update_qos_request(FREQ_QOS_MAX);

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        global.min_perf_pct = clamp_t(int, input,
                                      min_perf_pct_min(), global.max_perf_pct);

        mutex_unlock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
                update_qos_request(FREQ_QOS_MIN);

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
                                       struct kobj_attribute *b,
                                       const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = kstrtouint(buf, 10, &input);
        if (ret)
                return ret;

        mutex_lock(&intel_pstate_driver_lock);
        hwp_boost = !!input;
        intel_pstate_update_policies();
        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
                                      char *buf)
{
        u64 power_ctl;
        int enable;

        rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
        enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
        return sprintf(buf, "%d\n", !enable);
}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
                                       const char *buf, size_t count)
{
        bool input;
        int ret;

        ret = kstrtobool(buf, &input);
        if (ret)
                return ret;

        set_power_ctl_ee_state(input);

        return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] = {
        &status.attr,
        &no_turbo.attr,
        &turbo_pct.attr,
        &num_pstates.attr,
        NULL
};

static const struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;

static void __init intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        if (WARN_ON(!intel_pstate_kobject))
                return;

        rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
        if (WARN_ON(rc))
                return;

        /*
         * If per CPU limits are enforced there are no global limits, so
         * return without creating max/min_perf_pct attributes.
         */
        if (per_cpu_limits)
                return;

        rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
        WARN_ON(rc);

        rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
        WARN_ON(rc);

        if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
                rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
                WARN_ON(rc);
        }
}

static void __init intel_pstate_sysfs_remove(void)
{
        if (!intel_pstate_kobject)
                return;

        sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

        if (!per_cpu_limits) {
                sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
                sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

                if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
                        sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
        }

        kobject_put(intel_pstate_kobject);
}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
        int rc;

        if (!hwp_active)
                return;

        rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
        WARN_ON_ONCE(rc);
}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
        if (!hwp_active)
                return;

        sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
        /* First disable HWP notification interrupt as we don't process them */
        if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
        if (cpudata->epp_default == -EINVAL)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
        return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;
        int32_t vid_fp;
        u32 vid;

        val = (u64)pstate << 8;
        if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;

        vid_fp = cpudata->vid.min + mul_fp(
                int_tofp(pstate - cpudata->pstate.min_pstate),
                cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = ceiling_fp(vid_fp);

        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;

        return val | vid;
}

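/*
 * Worked example (illustrative, with made-up register contents): with
 * vid.min == int_tofp(0x30), vid.ratio == int_tofp(1) and min_pstate == 8,
 * requesting pstate == 10 gives vid_fp == int_tofp(0x30) +
 * mul_fp(int_tofp(2), int_tofp(1)) == int_tofp(0x32), so ceiling_fp()
 * yields VID 0x32, packed into the low byte below the ratio in bits 15:8.
 */
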
1421df63 | 1510 | static int silvermont_get_scaling(void) |
b27580b0 DB |
1511 | { |
1512 | u64 value; | |
1513 | int i; | |
1421df63 PL |
1514 | /* Defined in Table 35-6 from SDM (Sept 2015) */ |
1515 | static int silvermont_freq_table[] = { | |
1516 | 83300, 100000, 133300, 116700, 80000}; | |
b27580b0 DB |
1517 | |
1518 | rdmsrl(MSR_FSB_FREQ, value); | |
1421df63 PL |
1519 | i = value & 0x7; |
1520 | WARN_ON(i > 4); | |
b27580b0 | 1521 | |
1421df63 PL |
1522 | return silvermont_freq_table[i]; |
1523 | } | |
b27580b0 | 1524 | |
1421df63 PL |
1525 | static int airmont_get_scaling(void) |
1526 | { | |
1527 | u64 value; | |
1528 | int i; | |
1529 | /* Defined in Table 35-10 from SDM (Sept 2015) */ | |
1530 | static int airmont_freq_table[] = { | |
1531 | 83300, 100000, 133300, 116700, 80000, | |
1532 | 93300, 90000, 88900, 87500}; | |
1533 | ||
1534 | rdmsrl(MSR_FSB_FREQ, value); | |
1535 | i = value & 0xF; | |
1536 | WARN_ON(i > 8); | |
1537 | ||
1538 | return airmont_freq_table[i]; | |
b27580b0 DB |
1539 | } |
1540 | ||
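/*
 * Illustrative reading (hypothetical MSR contents): if MSR_FSB_FREQ
 * reads 1, both tables above select 100000, i.e. 100 MHz per ratio
 * unit, so a P-state ratio of 12 corresponds to 1.2 GHz. The core
 * variant below simply hard-codes the same 100000 bus-clock scaling.
 */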
938d21a2 | 1541 | static void atom_get_vid(struct cpudata *cpudata) |
007bea09 DB |
1542 | { |
1543 | u64 value; | |
1544 | ||
92134bdb | 1545 | rdmsrl(MSR_ATOM_CORE_VIDS, value); |
c16ed060 DB |
1546 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
1547 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); | |
007bea09 DB |
1548 | cpudata->vid.ratio = div_fp( |
1549 | cpudata->vid.max - cpudata->vid.min, | |
1550 | int_tofp(cpudata->pstate.max_pstate - | |
1551 | cpudata->pstate.min_pstate)); | |
21855ff5 | 1552 | |
92134bdb | 1553 | rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); |
21855ff5 | 1554 | cpudata->vid.turbo = value & 0x7f; |
007bea09 DB |
1555 | } |
1556 | ||
016c8150 | 1557 | static int core_get_min_pstate(void) |
93f0822d DB |
1558 | { |
1559 | u64 value; | |
845c1cbe | 1560 | |
05e99c8c | 1561 | rdmsrl(MSR_PLATFORM_INFO, value); |
93f0822d DB |
1562 | return (value >> 40) & 0xFF; |
1563 | } | |
1564 | ||
3bcc6fa9 | 1565 | static int core_get_max_pstate_physical(void) |
93f0822d DB |
1566 | { |
1567 | u64 value; | |
845c1cbe | 1568 | |
05e99c8c | 1569 | rdmsrl(MSR_PLATFORM_INFO, value); |
93f0822d DB |
1570 | return (value >> 8) & 0xFF; |
1571 | } | |
1572 | ||
8fc7554a SP |
1573 | static int core_get_tdp_ratio(u64 plat_info) |
1574 | { | |
1575 | /* Check how many TDP levels are present */ | |
1576 | if (plat_info & 0x600000000) { | |
1577 | u64 tdp_ctrl; | |
1578 | u64 tdp_ratio; | |
1579 | int tdp_msr; | |
1580 | int err; | |
1581 | ||
1582 | /* Get the TDP level (0, 1, 2) to get ratios */ | |
1583 | err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); | |
1584 | if (err) | |
1585 | return err; | |
1586 | ||
1587 | /* TDP MSRs are contiguous, starting at 0x648 */ | |
1588 | tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); | |
1589 | err = rdmsrl_safe(tdp_msr, &tdp_ratio); | |
1590 | if (err) | |
1591 | return err; | |
1592 | ||
1593 | /* For level 1 and 2, bits[23:16] contain the ratio */ | |
1594 | if (tdp_ctrl & 0x03) | |
1595 | tdp_ratio >>= 16; | |
1596 | ||
1597 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ | |
1598 | pr_debug("tdp_ratio %x\n", (int)tdp_ratio); | |
1599 | ||
1600 | return (int)tdp_ratio; | |
1601 | } | |
1602 | ||
1603 | return -ENXIO; | |
1604 | } | |
1605 | ||
016c8150 | 1606 | static int core_get_max_pstate(void) |
93f0822d | 1607 | { |
6a35fc2d SP |
1608 | u64 tar; |
1609 | u64 plat_info; | |
1610 | int max_pstate; | |
8fc7554a | 1611 | int tdp_ratio; |
6a35fc2d SP |
1612 | int err; |
1613 | ||
1614 | rdmsrl(MSR_PLATFORM_INFO, plat_info); | |
1615 | max_pstate = (plat_info >> 8) & 0xFF; | |
1616 | ||
8fc7554a SP |
1617 | tdp_ratio = core_get_tdp_ratio(plat_info); |
1618 | if (tdp_ratio <= 0) | |
1619 | return max_pstate; | |
1620 | ||
1621 | if (hwp_active) { | |
1622 | /* Turbo activation ratio is not used on HWP platforms */ | |
1623 | return tdp_ratio; | |
1624 | } | |
1625 | ||
6a35fc2d SP |
1626 | err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); |
1627 | if (!err) { | |
8fc7554a SP |
1628 | int tar_levels; |
1629 | ||
6a35fc2d | 1630 | /* Do some sanity checking for safety */ |
8fc7554a SP |
1631 | tar_levels = tar & 0xff; |
1632 | if (tdp_ratio - 1 == tar_levels) { | |
1633 | max_pstate = tar_levels; | |
1634 | pr_debug("max_pstate=TAC %x\n", max_pstate); | |
6a35fc2d SP |
1635 | } |
1636 | } | |
845c1cbe | 1637 | |
6a35fc2d | 1638 | return max_pstate; |
93f0822d DB |
1639 | } |
1640 | ||
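/*
 * Worked example (hypothetical values): if MSR_PLATFORM_INFO reports a
 * maximum ratio of 24 and the active config-TDP level has ratio 20,
 * core_get_tdp_ratio() returns 20; when MSR_TURBO_ACTIVATION_RATIO
 * then reads 19 (tdp_ratio - 1), the TAR value is trusted and
 * max_pstate becomes 19, otherwise the MSR_PLATFORM_INFO ratio of 24
 * is kept. On HWP systems the TDP ratio is returned directly.
 */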
016c8150 | 1641 | static int core_get_turbo_pstate(void) |
93f0822d DB |
1642 | { |
1643 | u64 value; | |
1644 | int nont, ret; | |
845c1cbe | 1645 | |
100cf6f2 | 1646 | rdmsrl(MSR_TURBO_RATIO_LIMIT, value); |
016c8150 | 1647 | nont = core_get_max_pstate(); |
285cb990 | 1648 | ret = (value) & 255; |
93f0822d DB |
1649 | if (ret <= nont) |
1650 | ret = nont; | |
1651 | return ret; | |
1652 | } | |
1653 | ||
b27580b0 DB |
1654 | static inline int core_get_scaling(void) |
1655 | { | |
1656 | return 100000; | |
1657 | } | |
1658 | ||
fdfdb2b1 | 1659 | static u64 core_get_val(struct cpudata *cpudata, int pstate) |
016c8150 DB |
1660 | { |
1661 | u64 val; | |
1662 | ||
144c8e17 | 1663 | val = (u64)pstate << 8; |
7de32556 | 1664 | if (global.no_turbo && !global.turbo_disabled) |
016c8150 DB |
1665 | val |= (u64)1 << 32; |
1666 | ||
fdfdb2b1 | 1667 | return val; |
016c8150 DB |
1668 | } |
1669 | ||
6e34e1f2 SP |
1670 | static int knl_get_aperf_mperf_shift(void) |
1671 | { | |
1672 | return 10; | |
1673 | } | |
1674 | ||
b34ef932 DC |
1675 | static int knl_get_turbo_pstate(void) |
1676 | { | |
1677 | u64 value; | |
1678 | int nont, ret; | |
1679 | ||
100cf6f2 | 1680 | rdmsrl(MSR_TURBO_RATIO_LIMIT, value); |
b34ef932 DC |
1681 | nont = core_get_max_pstate(); |
1682 | ret = (((value) >> 8) & 0xFF); | |
1683 | if (ret <= nont) | |
1684 | ret = nont; | |
1685 | return ret; | |
1686 | } | |
1687 | ||
a6c6ead1 | 1688 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
fdfdb2b1 | 1689 | { |
bc95a454 RW |
1690 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); |
1691 | cpu->pstate.current_pstate = pstate; | |
fdfdb2b1 RW |
1692 | /* |
1693 | * Generally, there is no guarantee that this code will always run on | |
1694 | * the CPU being updated, so force the register update to run on the | |
1695 | * right CPU. | |
1696 | */ | |
1697 | wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, | |
1698 | pstate_funcs.get_val(cpu, pstate)); | |
93f0822d DB |
1699 | } |
1700 | ||
a6c6ead1 RW |
1701 | static void intel_pstate_set_min_pstate(struct cpudata *cpu) |
1702 | { | |
1703 | intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); | |
1704 | } | |
1705 | ||
1706 | static void intel_pstate_max_within_limits(struct cpudata *cpu) | |
1707 | { | |
fa93b51c | 1708 | int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
a6c6ead1 RW |
1709 | |
1710 | update_turbo_state(); | |
b02aabe8 | 1711 | intel_pstate_set_pstate(cpu, pstate); |
a6c6ead1 RW |
1712 | } |
1713 | ||
93f0822d DB |
1714 | static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) |
1715 | { | |
016c8150 | 1716 | cpu->pstate.min_pstate = pstate_funcs.get_min(); |
3bcc6fa9 | 1717 | cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); |
b27580b0 | 1718 | cpu->pstate.scaling = pstate_funcs.get_scaling(); |
ff7c9917 SP |
1719 | |
1720 | if (hwp_active && !hwp_mode_bdw) { | |
de5bcf40 | 1721 | __intel_pstate_get_hwp_cap(cpu); |
ff7c9917 | 1722 | } else { |
6f67e060 | 1723 | cpu->pstate.max_pstate = pstate_funcs.get_max(); |
de5bcf40 | 1724 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); |
ff7c9917 | 1725 | } |
de5bcf40 | 1726 | |
6f67e060 | 1727 | cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; |
de5bcf40 | 1728 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; |
93f0822d | 1729 | |
6e34e1f2 SP |
1730 | if (pstate_funcs.get_aperf_mperf_shift) |
1731 | cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); | |
1732 | ||
007bea09 DB |
1733 | if (pstate_funcs.get_vid) |
1734 | pstate_funcs.get_vid(cpu); | |
fdfdb2b1 RW |
1735 | |
1736 | intel_pstate_set_min_pstate(cpu); | |
93f0822d DB |
1737 | } |
1738 | ||
e0efd5be SP |
1739 | /* |
1740 | * A long hold time will keep the high perf limits in place for a | |
1741 | * long time, which negatively impacts perf/watt for some workloads, | |
1742 | * like specpower. 3ms is based on experiments on some | |
1743 | * workloads. | |
1744 | */ | |
1745 | static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC; | |
1746 | ||
1747 | static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) | |
1748 | { | |
1749 | u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); | |
9dd04ec6 | 1750 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
e0efd5be SP |
1751 | u32 max_limit = (hwp_req & 0xff00) >> 8; |
1752 | u32 min_limit = (hwp_req & 0xff); | |
1753 | u32 boost_level1; | |
1754 | ||
1755 | /* | |
1756 | * Cases to consider (user changes via sysfs or at boot time): | |
1757 | * If, P0 (Turbo max) = P1 (Guaranteed max) = min: | |
1758 | * No boost, return. | |
1759 | * If, P0 (Turbo max) > P1 (Guaranteed max) = min: | |
1760 | * Should result in one level boost only for P0. | |
1761 | * If, P0 (Turbo max) = P1 (Guaranteed max) > min: | |
1762 | * Should result in two level boost: | |
1763 | * (min + p1)/2 and P1. | |
1764 | * If, P0 (Turbo max) > P1 (Guaranteed max) > min: | |
1765 | * Should result in three level boost: | |
1766 | * (min + p1)/2, P1 and P0. | |
1767 | */ | |
1768 | ||
1769 | /* If max and min are equal or already at max, nothing to boost */ | |
1770 | if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) | |
1771 | return; | |
1772 | ||
1773 | if (!cpu->hwp_boost_min) | |
1774 | cpu->hwp_boost_min = min_limit; | |
1775 | ||
1776 | /* level at the halfway mark between min and guaranteed */ | |
9dd04ec6 | 1777 | boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1; |
e0efd5be SP |
1778 | |
1779 | if (cpu->hwp_boost_min < boost_level1) | |
1780 | cpu->hwp_boost_min = boost_level1; | |
9dd04ec6 RW |
1781 | else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) |
1782 | cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); | |
1783 | else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && | |
1784 | max_limit != HWP_GUARANTEED_PERF(hwp_cap)) | |
e0efd5be SP |
1785 | cpu->hwp_boost_min = max_limit; |
1786 | else | |
1787 | return; | |
1788 | ||
1789 | hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; | |
1790 | wrmsrl(MSR_HWP_REQUEST, hwp_req); | |
1791 | cpu->last_update = cpu->sample.time; | |
1792 | } | |
1793 | ||
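/*
 * Illustrative walk-through of the three-level case above
 * (hypothetical limits): with min = 8, HWP_GUARANTEED_PERF(hwp_cap)
 * = 24 and max = 32, successive qualifying I/O wakeups move
 * hwp_boost_min through (8 + 24) / 2 = 16, then 24, then 32, and each
 * step rewrites only the low byte (minimum performance) of the cached
 * HWP_REQUEST value.
 */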
1794 | static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) | |
1795 | { | |
1796 | if (cpu->hwp_boost_min) { | |
1797 | bool expired; | |
1798 | ||
1799 | /* Check if we are idle for hold time to boost down */ | |
1800 | expired = time_after64(cpu->sample.time, cpu->last_update + | |
1801 | hwp_boost_hold_time_ns); | |
1802 | if (expired) { | |
1803 | wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); | |
1804 | cpu->hwp_boost_min = 0; | |
1805 | } | |
1806 | } | |
1807 | cpu->last_update = cpu->sample.time; | |
1808 | } | |
1809 | ||
52ccc431 SP |
1810 | static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, |
1811 | u64 time) | |
1812 | { | |
1813 | cpu->sample.time = time; | |
1814 | ||
1815 | if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { | |
1816 | bool do_io = false; | |
1817 | ||
1818 | cpu->sched_flags = 0; | |
1819 | /* | |
1820 | * Set the iowait_boost flag and update the time. Since the IO WAIT | |
1821 | * flag is set all the time, we can't just conclude that there is | |
1822 | * some IO-bound activity scheduled on this CPU from just | |
1823 | * one occurrence. If we receive at least two in two | |
1824 | * consecutive ticks, then we treat it as a boost candidate. | |
1825 | */ | |
1826 | if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) | |
1827 | do_io = true; | |
1828 | ||
1829 | cpu->last_io_update = time; | |
1830 | ||
1831 | if (do_io) | |
1832 | intel_pstate_hwp_boost_up(cpu); | |
1833 | ||
1834 | } else { | |
1835 | intel_pstate_hwp_boost_down(cpu); | |
1836 | } | |
1837 | } | |
1838 | ||
e0efd5be SP |
1839 | static inline void intel_pstate_update_util_hwp(struct update_util_data *data, |
1840 | u64 time, unsigned int flags) | |
1841 | { | |
52ccc431 SP |
1842 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
1843 | ||
1844 | cpu->sched_flags |= flags; | |
1845 | ||
1846 | if (smp_processor_id() == cpu->cpu) | |
1847 | intel_pstate_update_util_hwp_local(cpu, time); | |
e0efd5be SP |
1848 | } |
1849 | ||
a1c9787d | 1850 | static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) |
93f0822d | 1851 | { |
6b17ddb2 | 1852 | struct sample *sample = &cpu->sample; |
e66c1768 | 1853 | |
a1c9787d | 1854 | sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); |
93f0822d DB |
1855 | } |
1856 | ||
4fec7ad5 | 1857 | static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) |
93f0822d | 1858 | { |
93f0822d | 1859 | u64 aperf, mperf; |
4ab60c3f | 1860 | unsigned long flags; |
4055fad3 | 1861 | u64 tsc; |
93f0822d | 1862 | |
4ab60c3f | 1863 | local_irq_save(flags); |
93f0822d DB |
1864 | rdmsrl(MSR_IA32_APERF, aperf); |
1865 | rdmsrl(MSR_IA32_MPERF, mperf); | |
e70eed2b | 1866 | tsc = rdtsc(); |
4fec7ad5 | 1867 | if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { |
8e601a9f | 1868 | local_irq_restore(flags); |
4fec7ad5 | 1869 | return false; |
8e601a9f | 1870 | } |
4ab60c3f | 1871 | local_irq_restore(flags); |
b69880f9 | 1872 | |
c4ee841f | 1873 | cpu->last_sample_time = cpu->sample.time; |
a4675fbc | 1874 | cpu->sample.time = time; |
d37e2b76 DB |
1875 | cpu->sample.aperf = aperf; |
1876 | cpu->sample.mperf = mperf; | |
4055fad3 | 1877 | cpu->sample.tsc = tsc; |
d37e2b76 DB |
1878 | cpu->sample.aperf -= cpu->prev_aperf; |
1879 | cpu->sample.mperf -= cpu->prev_mperf; | |
4055fad3 | 1880 | cpu->sample.tsc -= cpu->prev_tsc; |
1abc4b20 | 1881 | |
93f0822d DB |
1882 | cpu->prev_aperf = aperf; |
1883 | cpu->prev_mperf = mperf; | |
4055fad3 | 1884 | cpu->prev_tsc = tsc; |
febce40f RW |
1885 | /* |
1886 | * The first time this function is invoked in a given cycle, all of the | |
1887 | * previous sample data fields are equal to zero or stale and they must | |
1888 | * be populated with meaningful numbers for things to work, so assume | |
1889 | * that sample.time will always be reset before setting the utilization | |
1890 | * update hook, and make the caller skip the sample in that case. | |
1891 | */ | |
eabd22c6 RW |
1892 | if (cpu->last_sample_time) { |
1893 | intel_pstate_calc_avg_perf(cpu); | |
1894 | return true; | |
1895 | } | |
1896 | return false; | |
93f0822d DB |
1897 | } |
1898 | ||
8fa520af PL |
1899 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
1900 | { | |
c587c79f | 1901 | return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); |
8fa520af PL |
1902 | } |
1903 | ||
bdcaa23f PL |
1904 | static inline int32_t get_avg_pstate(struct cpudata *cpu) |
1905 | { | |
8edb0a6e RW |
1906 | return mul_ext_fp(cpu->pstate.max_pstate_physical, |
1907 | cpu->sample.core_avg_perf); | |
bdcaa23f PL |
1908 | } |
1909 | ||
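/*
 * Illustrative numbers (hypothetical counters): if APERF advanced by
 * 1,800,000 and MPERF by 2,000,000 over the sample window,
 * core_avg_perf is 0.9 in extended fixed point; with cpu_khz = 2400000
 * get_avg_frequency() reports 2.16 GHz, and with max_pstate_physical
 * = 24 get_avg_pstate() reports 21 (24 * 0.9, truncated by the
 * fixed-point multiply).
 */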
d77d4888 | 1910 | static inline int32_t get_target_pstate(struct cpudata *cpu) |
e70eed2b PL |
1911 | { |
1912 | struct sample *sample = &cpu->sample; | |
b8bd1581 | 1913 | int32_t busy_frac; |
0843e83c | 1914 | int target, avg_pstate; |
e70eed2b | 1915 | |
6e34e1f2 SP |
1916 | busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, |
1917 | sample->tsc); | |
63d1d656 | 1918 | |
b8bd1581 RW |
1919 | if (busy_frac < cpu->iowait_boost) |
1920 | busy_frac = cpu->iowait_boost; | |
63d1d656 | 1921 | |
09c448d3 | 1922 | sample->busy_scaled = busy_frac * 100; |
0843e83c | 1923 | |
7de32556 | 1924 | target = global.no_turbo || global.turbo_disabled ? |
0843e83c RW |
1925 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
1926 | target += target >> 2; | |
1927 | target = mul_fp(target, busy_frac); | |
1928 | if (target < cpu->pstate.min_pstate) | |
1929 | target = cpu->pstate.min_pstate; | |
1930 | ||
1931 | /* | |
1932 | * If the average P-state during the previous cycle was higher than the | |
1933 | * current target, add 50% of the difference to the target to reduce | |
1934 | * possible performance oscillations and offset possible performance | |
1935 | * loss related to moving the workload from one CPU to another within | |
1936 | * a package/module. | |
1937 | */ | |
1938 | avg_pstate = get_avg_pstate(cpu); | |
1939 | if (avg_pstate > target) | |
1940 | target += (avg_pstate - target) >> 1; | |
1941 | ||
1942 | return target; | |
e70eed2b PL |
1943 | } |
1944 | ||
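/*
 * Worked example (hypothetical values): with turbo enabled,
 * turbo_pstate = 32 and busy_frac = 0.5, the base target is
 * (32 + 32 / 4) * 0.5 = 20; if the average P-state of the previous
 * cycle was 24, half the difference is added, for a final target of
 * 20 + (24 - 20) / 2 = 22, before clamping in
 * intel_pstate_prepare_request() below.
 */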
001c76f0 | 1945 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) |
fdfdb2b1 | 1946 | { |
fa93b51c RW |
1947 | int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
1948 | int max_pstate = max(min_pstate, cpu->max_perf_ratio); | |
fdfdb2b1 | 1949 | |
b02aabe8 | 1950 | return clamp_t(int, pstate, min_pstate, max_pstate); |
001c76f0 RW |
1951 | } |
1952 | ||
1953 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) | |
1954 | { | |
fdfdb2b1 RW |
1955 | if (pstate == cpu->pstate.current_pstate) |
1956 | return; | |
1957 | ||
bc95a454 | 1958 | cpu->pstate.current_pstate = pstate; |
fdfdb2b1 RW |
1959 | wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); |
1960 | } | |
1961 | ||
a891283e | 1962 | static void intel_pstate_adjust_pstate(struct cpudata *cpu) |
93f0822d | 1963 | { |
67dd9bf4 | 1964 | int from = cpu->pstate.current_pstate; |
4055fad3 | 1965 | struct sample *sample; |
a891283e | 1966 | int target_pstate; |
4055fad3 | 1967 | |
001c76f0 RW |
1968 | update_turbo_state(); |
1969 | ||
d77d4888 | 1970 | target_pstate = get_target_pstate(cpu); |
64078299 RW |
1971 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
1972 | trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); | |
fdfdb2b1 | 1973 | intel_pstate_update_pstate(cpu, target_pstate); |
4055fad3 DS |
1974 | |
1975 | sample = &cpu->sample; | |
a1c9787d | 1976 | trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), |
157386b6 | 1977 | fp_toint(sample->busy_scaled), |
4055fad3 DS |
1978 | from, |
1979 | cpu->pstate.current_pstate, | |
1980 | sample->mperf, | |
1981 | sample->aperf, | |
1982 | sample->tsc, | |
3ba7bcaa SP |
1983 | get_avg_frequency(cpu), |
1984 | fp_toint(cpu->iowait_boost * 100)); | |
93f0822d DB |
1985 | } |
1986 | ||
a4675fbc | 1987 | static void intel_pstate_update_util(struct update_util_data *data, u64 time, |
58919e83 | 1988 | unsigned int flags) |
93f0822d | 1989 | { |
a4675fbc | 1990 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
09c448d3 RW |
1991 | u64 delta_ns; |
1992 | ||
674e7541 VK |
1993 | /* Don't allow remote callbacks */ |
1994 | if (smp_processor_id() != cpu->cpu) | |
1995 | return; | |
1996 | ||
b8bd1581 | 1997 | delta_ns = time - cpu->last_update; |
eabd22c6 | 1998 | if (flags & SCHED_CPUFREQ_IOWAIT) { |
b8bd1581 RW |
1999 | /* Start over if the CPU may have been idle. */ |
2000 | if (delta_ns > TICK_NSEC) { | |
2001 | cpu->iowait_boost = ONE_EIGHTH_FP; | |
8e3b4039 | 2002 | } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { |
b8bd1581 RW |
2003 | cpu->iowait_boost <<= 1; |
2004 | if (cpu->iowait_boost > int_tofp(1)) | |
2005 | cpu->iowait_boost = int_tofp(1); | |
2006 | } else { | |
2007 | cpu->iowait_boost = ONE_EIGHTH_FP; | |
2008 | } | |
eabd22c6 RW |
2009 | } else if (cpu->iowait_boost) { |
2010 | /* Clear iowait_boost if the CPU may have been idle. */ | |
eabd22c6 RW |
2011 | if (delta_ns > TICK_NSEC) |
2012 | cpu->iowait_boost = 0; | |
b8bd1581 RW |
2013 | else |
2014 | cpu->iowait_boost >>= 1; | |
09c448d3 | 2015 | } |
eabd22c6 | 2016 | cpu->last_update = time; |
09c448d3 | 2017 | delta_ns = time - cpu->sample.time; |
d77d4888 | 2018 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) |
eabd22c6 | 2019 | return; |
4fec7ad5 | 2020 | |
a891283e RW |
2021 | if (intel_pstate_sample(cpu, time)) |
2022 | intel_pstate_adjust_pstate(cpu); | |
67dd9bf4 | 2023 | } |
eabd22c6 | 2024 | |
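/*
 * Illustrative boost sequence for the code above: starting from
 * ONE_EIGHTH_FP (0.125), back-to-back SCHED_CPUFREQ_IOWAIT updates
 * within a tick double the boost through 0.25, 0.5 and 1.0 (the cap);
 * once the I/O burst ends, the boost halves on each update and is
 * cleared outright after an idle gap longer than TICK_NSEC.
 */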
2f49afc2 RW |
2025 | static struct pstate_funcs core_funcs = { |
2026 | .get_max = core_get_max_pstate, | |
2027 | .get_max_physical = core_get_max_pstate_physical, | |
2028 | .get_min = core_get_min_pstate, | |
2029 | .get_turbo = core_get_turbo_pstate, | |
2030 | .get_scaling = core_get_scaling, | |
2031 | .get_val = core_get_val, | |
de4a76cb RW |
2032 | }; |
2033 | ||
2f49afc2 RW |
2034 | static const struct pstate_funcs silvermont_funcs = { |
2035 | .get_max = atom_get_max_pstate, | |
2036 | .get_max_physical = atom_get_max_pstate, | |
2037 | .get_min = atom_get_min_pstate, | |
2038 | .get_turbo = atom_get_turbo_pstate, | |
2039 | .get_val = atom_get_val, | |
2040 | .get_scaling = silvermont_get_scaling, | |
2041 | .get_vid = atom_get_vid, | |
de4a76cb RW |
2042 | }; |
2043 | ||
2f49afc2 RW |
2044 | static const struct pstate_funcs airmont_funcs = { |
2045 | .get_max = atom_get_max_pstate, | |
2046 | .get_max_physical = atom_get_max_pstate, | |
2047 | .get_min = atom_get_min_pstate, | |
2048 | .get_turbo = atom_get_turbo_pstate, | |
2049 | .get_val = atom_get_val, | |
2050 | .get_scaling = airmont_get_scaling, | |
2051 | .get_vid = atom_get_vid, | |
de4a76cb RW |
2052 | }; |
2053 | ||
2f49afc2 RW |
2054 | static const struct pstate_funcs knl_funcs = { |
2055 | .get_max = core_get_max_pstate, | |
2056 | .get_max_physical = core_get_max_pstate_physical, | |
2057 | .get_min = core_get_min_pstate, | |
2058 | .get_turbo = knl_get_turbo_pstate, | |
6e34e1f2 | 2059 | .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, |
2f49afc2 RW |
2060 | .get_scaling = core_get_scaling, |
2061 | .get_val = core_get_val, | |
de4a76cb RW |
2062 | }; |
2063 | ||
b11d77fa TG |
2064 | #define X86_MATCH(model, policy) \ |
2065 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ | |
2066 | X86_FEATURE_APERFMPERF, &policy) | |
93f0822d DB |
2067 | |
2068 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | |
b11d77fa TG |
2069 | X86_MATCH(SANDYBRIDGE, core_funcs), |
2070 | X86_MATCH(SANDYBRIDGE_X, core_funcs), | |
2071 | X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), | |
2072 | X86_MATCH(IVYBRIDGE, core_funcs), | |
2073 | X86_MATCH(HASWELL, core_funcs), | |
2074 | X86_MATCH(BROADWELL, core_funcs), | |
2075 | X86_MATCH(IVYBRIDGE_X, core_funcs), | |
2076 | X86_MATCH(HASWELL_X, core_funcs), | |
2077 | X86_MATCH(HASWELL_L, core_funcs), | |
2078 | X86_MATCH(HASWELL_G, core_funcs), | |
2079 | X86_MATCH(BROADWELL_G, core_funcs), | |
2080 | X86_MATCH(ATOM_AIRMONT, airmont_funcs), | |
2081 | X86_MATCH(SKYLAKE_L, core_funcs), | |
2082 | X86_MATCH(BROADWELL_X, core_funcs), | |
2083 | X86_MATCH(SKYLAKE, core_funcs), | |
2084 | X86_MATCH(BROADWELL_D, core_funcs), | |
2085 | X86_MATCH(XEON_PHI_KNL, knl_funcs), | |
2086 | X86_MATCH(XEON_PHI_KNM, knl_funcs), | |
2087 | X86_MATCH(ATOM_GOLDMONT, core_funcs), | |
2088 | X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), | |
2089 | X86_MATCH(SKYLAKE_X, core_funcs), | |
93f0822d DB |
2090 | {} |
2091 | }; | |
2092 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); | |
2093 | ||
29327c84 | 2094 | static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { |
b11d77fa TG |
2095 | X86_MATCH(BROADWELL_D, core_funcs), |
2096 | X86_MATCH(BROADWELL_X, core_funcs), | |
2097 | X86_MATCH(SKYLAKE_X, core_funcs), | |
2f86dc4c DB |
2098 | {} |
2099 | }; | |
2100 | ||
6e978b22 | 2101 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { |
b11d77fa | 2102 | X86_MATCH(KABYLAKE, core_funcs), |
6e978b22 SP |
2103 | {} |
2104 | }; | |
2105 | ||
41ab43c9 | 2106 | static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { |
b11d77fa TG |
2107 | X86_MATCH(SKYLAKE_X, core_funcs), |
2108 | X86_MATCH(SKYLAKE, core_funcs), | |
41ab43c9 SP |
2109 | {} |
2110 | }; | |
2111 | ||
93f0822d DB |
2112 | static int intel_pstate_init_cpu(unsigned int cpunum) |
2113 | { | |
93f0822d DB |
2114 | struct cpudata *cpu; |
2115 | ||
eae48f04 SP |
2116 | cpu = all_cpu_data[cpunum]; |
2117 | ||
2118 | if (!cpu) { | |
c5a2ee7d | 2119 | cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); |
eae48f04 SP |
2120 | if (!cpu) |
2121 | return -ENOMEM; | |
2122 | ||
2123 | all_cpu_data[cpunum] = cpu; | |
eae48f04 | 2124 | |
55671ea3 | 2125 | cpu->cpu = cpunum; |
93f0822d | 2126 | |
55671ea3 | 2127 | cpu->epp_default = -EINVAL; |
ba88d433 | 2128 | |
55671ea3 RW |
2129 | if (hwp_active) { |
2130 | const struct x86_cpu_id *id; | |
6e978b22 | 2131 | |
55671ea3 | 2132 | intel_pstate_hwp_enable(cpu); |
41ab43c9 | 2133 | |
55671ea3 RW |
2134 | id = x86_match_cpu(intel_pstate_hwp_boost_ids); |
2135 | if (id && intel_pstate_acpi_pm_profile_server()) | |
2136 | hwp_boost = true; | |
2137 | } | |
2138 | } else if (hwp_active) { | |
2139 | /* | |
2140 | * Re-enable HWP in case this happens after a resume from ACPI | |
2141 | * S3 if the CPU was offline during the whole system/resume | |
2142 | * cycle. | |
2143 | */ | |
2144 | intel_pstate_hwp_reenable(cpu); | |
a4675fbc | 2145 | } |
ba88d433 | 2146 | |
55671ea3 RW |
2147 | cpu->epp_powersave = -EINVAL; |
2148 | cpu->epp_policy = 0; | |
2149 | ||
179e8471 | 2150 | intel_pstate_get_cpu_pstates(cpu); |
016c8150 | 2151 | |
4836df17 | 2152 | pr_debug("controlling: cpu %d\n", cpunum); |
93f0822d DB |
2153 | |
2154 | return 0; | |
2155 | } | |
2156 | ||
febce40f | 2157 | static void intel_pstate_set_update_util_hook(unsigned int cpu_num) |
bb6ab52f | 2158 | { |
febce40f RW |
2159 | struct cpudata *cpu = all_cpu_data[cpu_num]; |
2160 | ||
e0efd5be | 2161 | if (hwp_active && !hwp_boost) |
62611cb9 LB |
2162 | return; |
2163 | ||
5ab666e0 RW |
2164 | if (cpu->update_util_set) |
2165 | return; | |
2166 | ||
febce40f RW |
2167 | /* Prevent intel_pstate_update_util() from using stale data. */ |
2168 | cpu->sample.time = 0; | |
67dd9bf4 | 2169 | cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, |
e0efd5be SP |
2170 | (hwp_active ? |
2171 | intel_pstate_update_util_hwp : | |
2172 | intel_pstate_update_util)); | |
4578ee7e | 2173 | cpu->update_util_set = true; |
bb6ab52f RW |
2174 | } |
2175 | ||
2176 | static void intel_pstate_clear_update_util_hook(unsigned int cpu) | |
2177 | { | |
4578ee7e CY |
2178 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
2179 | ||
2180 | if (!cpu_data->update_util_set) | |
2181 | return; | |
2182 | ||
0bed612b | 2183 | cpufreq_remove_update_util_hook(cpu); |
4578ee7e | 2184 | cpu_data->update_util_set = false; |
09659af3 | 2185 | synchronize_rcu(); |
bb6ab52f RW |
2186 | } |
2187 | ||
80b120ca RW |
2188 | static int intel_pstate_get_max_freq(struct cpudata *cpu) |
2189 | { | |
2190 | return global.turbo_disabled || global.no_turbo ? | |
2191 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | |
2192 | } | |
2193 | ||
1e4f63ae RW |
2194 | static void intel_pstate_update_perf_limits(struct cpudata *cpu, |
2195 | unsigned int policy_min, | |
2196 | unsigned int policy_max) | |
eae48f04 | 2197 | { |
e4c204ce | 2198 | int32_t max_policy_perf, min_policy_perf; |
1a4fe38a | 2199 | int max_state, turbo_max; |
e40ad84c | 2200 | int max_freq; |
a410c03d | 2201 | |
1a4fe38a | 2202 | /* |
de5bcf40 RW |
2203 | * HWP needs some special consideration, because HWP_REQUEST uses |
2204 | * abstract values to represent performance rather than pure ratios. | |
1a4fe38a | 2205 | */ |
de5bcf40 RW |
2206 | if (hwp_active) |
2207 | intel_pstate_get_hwp_cap(cpu); | |
2208 | ||
2209 | if (global.no_turbo || global.turbo_disabled) { | |
2210 | max_state = cpu->pstate.max_pstate; | |
2211 | max_freq = cpu->pstate.max_freq; | |
1a4fe38a | 2212 | } else { |
de5bcf40 RW |
2213 | max_state = cpu->pstate.turbo_pstate; |
2214 | max_freq = cpu->pstate.turbo_freq; | |
1a4fe38a | 2215 | } |
de5bcf40 RW |
2216 | |
2217 | turbo_max = cpu->pstate.turbo_pstate; | |
1a4fe38a | 2218 | |
1e4f63ae RW |
2219 | max_policy_perf = max_state * policy_max / max_freq; |
2220 | if (policy_max == policy_min) { | |
e4c204ce | 2221 | min_policy_perf = max_policy_perf; |
5879f877 | 2222 | } else { |
1e4f63ae | 2223 | min_policy_perf = max_state * policy_min / max_freq; |
e4c204ce RW |
2224 | min_policy_perf = clamp_t(int32_t, min_policy_perf, |
2225 | 0, max_policy_perf); | |
5879f877 | 2226 | } |
eae48f04 | 2227 | |
1a4fe38a | 2228 | pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", |
1e4f63ae | 2229 | cpu->cpu, max_state, min_policy_perf, max_policy_perf); |
1a4fe38a | 2230 | |
e4c204ce | 2231 | /* Normalize user input to [min_perf, max_perf] */ |
c5a2ee7d | 2232 | if (per_cpu_limits) { |
1a4fe38a SP |
2233 | cpu->min_perf_ratio = min_policy_perf; |
2234 | cpu->max_perf_ratio = max_policy_perf; | |
c5a2ee7d RW |
2235 | } else { |
2236 | int32_t global_min, global_max; | |
2237 | ||
2238 | /* Global limits are in percent of the maximum turbo P-state. */ | |
1a4fe38a SP |
2239 | global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); |
2240 | global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); | |
c5a2ee7d | 2241 | global_min = clamp_t(int32_t, global_min, 0, global_max); |
eae48f04 | 2242 | |
1e4f63ae | 2243 | pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, |
1a4fe38a | 2244 | global_min, global_max); |
c5a2ee7d | 2245 | |
1a4fe38a SP |
2246 | cpu->min_perf_ratio = max(min_policy_perf, global_min); |
2247 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); | |
2248 | cpu->max_perf_ratio = min(max_policy_perf, global_max); | |
2249 | cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); | |
eae48f04 | 2250 | |
1a4fe38a SP |
2251 | /* Make sure min_perf <= max_perf */ |
2252 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, | |
2253 | cpu->max_perf_ratio); | |
eae48f04 | 2254 | |
1a4fe38a | 2255 | } |
1e4f63ae | 2256 | pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, |
1a4fe38a SP |
2257 | cpu->max_perf_ratio, |
2258 | cpu->min_perf_ratio); | |
eae48f04 SP |
2259 | } |
2260 | ||
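/*
 * Worked example (hypothetical, per_cpu_limits off): turbo_max = 40
 * and a 4 GHz turbo max_freq with a 0.8-3.2 GHz policy give
 * min_policy_perf = 8 and max_policy_perf = 32; global limits of
 * 25%/75% give global_min = 10 and global_max = 30, so the effective
 * ratios become min_perf_ratio = max(8, 10) = 10 and
 * max_perf_ratio = min(32, 30) = 30.
 */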
93f0822d DB |
2261 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
2262 | { | |
3be9200d SP |
2263 | struct cpudata *cpu; |
2264 | ||
d3929b83 DB |
2265 | if (!policy->cpuinfo.max_freq) |
2266 | return -ENODEV; | |
2267 | ||
2c2c1af4 SP |
2268 | pr_debug("set_policy cpuinfo.max %u policy->max %u\n", |
2269 | policy->cpuinfo.max_freq, policy->max); | |
2270 | ||
a6c6ead1 | 2271 | cpu = all_cpu_data[policy->cpu]; |
2f1d407a RW |
2272 | cpu->policy = policy->policy; |
2273 | ||
b59fe540 SP |
2274 | mutex_lock(&intel_pstate_limits_lock); |
2275 | ||
1e4f63ae | 2276 | intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
a240c4aa | 2277 | |
2f1d407a | 2278 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
a6c6ead1 RW |
2279 | /* |
2280 | * NOHZ_FULL CPUs need this as the governor callback may not | |
2281 | * be invoked on them. | |
2282 | */ | |
2283 | intel_pstate_clear_update_util_hook(policy->cpu); | |
2284 | intel_pstate_max_within_limits(cpu); | |
82b4e03e LB |
2285 | } else { |
2286 | intel_pstate_set_update_util_hook(policy->cpu); | |
a6c6ead1 RW |
2287 | } |
2288 | ||
e0efd5be SP |
2289 | if (hwp_active) { |
2290 | /* | |
2291 | * If hwp_boost was active before and has been turned | |
2292 | * off dynamically, we need to clear the | |
2293 | * update util hook. | |
2294 | */ | |
2295 | if (!hwp_boost) | |
2296 | intel_pstate_clear_update_util_hook(policy->cpu); | |
2bfc4cbb | 2297 | intel_pstate_hwp_set(policy->cpu); |
e0efd5be | 2298 | } |
2f86dc4c | 2299 | |
b59fe540 SP |
2300 | mutex_unlock(&intel_pstate_limits_lock); |
2301 | ||
93f0822d DB |
2302 | return 0; |
2303 | } | |
2304 | ||
1e4f63ae RW |
2305 | static void intel_pstate_adjust_policy_max(struct cpudata *cpu, |
2306 | struct cpufreq_policy_data *policy) | |
80b120ca | 2307 | { |
d3264f75 SP |
2308 | if (!hwp_active && |
2309 | cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && | |
80b120ca RW |
2310 | policy->max < policy->cpuinfo.max_freq && |
2311 | policy->max > cpu->pstate.max_freq) { | |
2312 | pr_debug("policy->max > max non turbo frequency\n"); | |
2313 | policy->max = policy->cpuinfo.max_freq; | |
2314 | } | |
2315 | } | |
2316 | ||
d5a2a6bb RW |
2317 | static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, |
2318 | struct cpufreq_policy_data *policy) | |
93f0822d | 2319 | { |
e40ad84c RW |
2320 | int max_freq; |
2321 | ||
7d9a8a9f | 2322 | update_turbo_state(); |
e40ad84c | 2323 | if (hwp_active) { |
de5bcf40 RW |
2324 | intel_pstate_get_hwp_cap(cpu); |
2325 | max_freq = global.no_turbo || global.turbo_disabled ? | |
2326 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | |
e40ad84c RW |
2327 | } else { |
2328 | max_freq = intel_pstate_get_max_freq(cpu); | |
2329 | } | |
2330 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); | |
93f0822d | 2331 | |
1e4f63ae | 2332 | intel_pstate_adjust_policy_max(cpu, policy); |
d5a2a6bb RW |
2333 | } |
2334 | ||
2335 | static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) | |
2336 | { | |
2337 | intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); | |
80b120ca | 2338 | |
93f0822d DB |
2339 | return 0; |
2340 | } | |
2341 | ||
4adcf2e5 | 2342 | static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) |
001c76f0 | 2343 | { |
4adcf2e5 RW |
2344 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2345 | ||
2346 | pr_debug("CPU %d going offline\n", cpu->cpu); | |
2347 | ||
2348 | if (cpu->suspended) | |
2349 | return 0; | |
2350 | ||
2351 | /* | |
2352 | * If the CPU is an SMT thread and it goes offline with the performance | |
2353 | * settings different from the minimum, it will prevent its sibling | |
2354 | * from getting to lower performance levels, so force the minimum | |
2355 | * performance on CPU offline to prevent that from happening. | |
2356 | */ | |
f6ebbcf0 | 2357 | if (hwp_active) |
4adcf2e5 | 2358 | intel_pstate_hwp_offline(cpu); |
f6ebbcf0 | 2359 | else |
4adcf2e5 RW |
2360 | intel_pstate_set_min_pstate(cpu); |
2361 | ||
2362 | intel_pstate_exit_perf_limits(policy); | |
2363 | ||
2364 | return 0; | |
2365 | } | |
2366 | ||
2367 | static int intel_pstate_cpu_online(struct cpufreq_policy *policy) | |
2368 | { | |
2369 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2370 | ||
2371 | pr_debug("CPU %d going online\n", cpu->cpu); | |
2372 | ||
2373 | intel_pstate_init_acpi_perf_limits(policy); | |
2374 | ||
2375 | if (hwp_active) { | |
2376 | /* | |
2377 | * Re-enable HWP and clear the "suspended" flag to let "resume" | |
2378 | * know that it need not do that. | |
2379 | */ | |
2380 | intel_pstate_hwp_reenable(cpu); | |
2381 | cpu->suspended = false; | |
2382 | } | |
2383 | ||
2384 | return 0; | |
001c76f0 RW |
2385 | } |
2386 | ||
bb18008f | 2387 | static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) |
93f0822d | 2388 | { |
4adcf2e5 | 2389 | pr_debug("CPU %d stopping\n", policy->cpu); |
93f0822d | 2390 | |
001c76f0 | 2391 | intel_pstate_clear_update_util_hook(policy->cpu); |
001c76f0 | 2392 | } |
bb18008f | 2393 | |
001c76f0 RW |
2394 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) |
2395 | { | |
4adcf2e5 | 2396 | pr_debug("CPU %d exiting\n", policy->cpu); |
a4675fbc | 2397 | |
001c76f0 | 2398 | policy->fast_switch_possible = false; |
2f86dc4c | 2399 | |
001c76f0 | 2400 | return 0; |
93f0822d DB |
2401 | } |
2402 | ||
001c76f0 | 2403 | static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) |
93f0822d | 2404 | { |
93f0822d | 2405 | struct cpudata *cpu; |
52e0a509 | 2406 | int rc; |
93f0822d DB |
2407 | |
2408 | rc = intel_pstate_init_cpu(policy->cpu); | |
2409 | if (rc) | |
2410 | return rc; | |
2411 | ||
2412 | cpu = all_cpu_data[policy->cpu]; | |
2413 | ||
1a4fe38a SP |
2414 | cpu->max_perf_ratio = 0xFF; |
2415 | cpu->min_perf_ratio = 0; | |
93f0822d | 2416 | |
93f0822d | 2417 | /* cpuinfo and default policy values */ |
b27580b0 | 2418 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
983e600e | 2419 | update_turbo_state(); |
9083e498 | 2420 | global.turbo_disabled_mf = global.turbo_disabled; |
7de32556 | 2421 | policy->cpuinfo.max_freq = global.turbo_disabled ? |
eea033d0 | 2422 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
de5bcf40 RW |
2423 | |
2424 | policy->min = policy->cpuinfo.min_freq; | |
2425 | policy->max = policy->cpuinfo.max_freq; | |
eea033d0 | 2426 | |
9522a2ff | 2427 | intel_pstate_init_acpi_perf_limits(policy); |
93f0822d | 2428 | |
001c76f0 RW |
2429 | policy->fast_switch_possible = true; |
2430 | ||
93f0822d DB |
2431 | return 0; |
2432 | } | |
2433 | ||
001c76f0 | 2434 | static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
9522a2ff | 2435 | { |
001c76f0 RW |
2436 | int ret = __intel_pstate_cpu_init(policy); |
2437 | ||
2438 | if (ret) | |
2439 | return ret; | |
2440 | ||
5ac54113 RW |
2441 | /* |
2442 | * Set the policy to powersave to provide a valid fallback value in case | |
2443 | * the default cpufreq governor is neither powersave nor performance. | |
2444 | */ | |
2445 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | |
9522a2ff | 2446 | |
c27a0ccc RW |
2447 | if (hwp_active) { |
2448 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2449 | ||
2450 | cpu->epp_cached = intel_pstate_get_epp(cpu, 0); | |
2451 | } | |
2452 | ||
9522a2ff SP |
2453 | return 0; |
2454 | } | |
2455 | ||
001c76f0 | 2456 | static struct cpufreq_driver intel_pstate = { |
93f0822d DB |
2457 | .flags = CPUFREQ_CONST_LOOPS, |
2458 | .verify = intel_pstate_verify_policy, | |
2459 | .setpolicy = intel_pstate_set_policy, | |
4adcf2e5 | 2460 | .suspend = intel_pstate_suspend, |
8442885f | 2461 | .resume = intel_pstate_resume, |
93f0822d | 2462 | .init = intel_pstate_cpu_init, |
9522a2ff | 2463 | .exit = intel_pstate_cpu_exit, |
bb18008f | 2464 | .stop_cpu = intel_pstate_stop_cpu, |
4adcf2e5 RW |
2465 | .offline = intel_pstate_cpu_offline, |
2466 | .online = intel_pstate_cpu_online, | |
5a25e3f7 | 2467 | .update_limits = intel_pstate_update_limits, |
93f0822d | 2468 | .name = "intel_pstate", |
93f0822d DB |
2469 | }; |
2470 | ||
1e4f63ae | 2471 | static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) |
001c76f0 RW |
2472 | { |
2473 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
001c76f0 | 2474 | |
d5a2a6bb | 2475 | intel_pstate_verify_cpu_policy(cpu, policy); |
1e4f63ae | 2476 | intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
c5a2ee7d | 2477 | |
001c76f0 RW |
2478 | return 0; |
2479 | } | |
2480 | ||
50e9ffab DS |
2481 | /* Use of trace in passive mode: |
2482 | * | |
2483 | * In passive mode the trace core_busy field (also known as the | |
2484 | * performance field, and labelled as such on the graphs; also known as | |
2485 | * core_avg_perf) is not needed and so is re-assigned to indicate if the | |
2486 | * driver call was via the normal or fast switch path. Various graphs | |
2487 | * output from the intel_pstate_tracer.py utility that include core_busy | |
2488 | * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, | |
75a8d877 | 2489 | * so we use 10 to indicate the normal path through the driver, and |
50e9ffab DS |
2490 | * 90 to indicate the fast switch path through the driver. |
2491 | * The scaled_busy field is not used, and is set to 0. | |
2492 | */ | |
2493 | ||
2494 | #define INTEL_PSTATE_TRACE_TARGET 10 | |
2495 | #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 | |
2496 | ||
2497 | static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) | |
2498 | { | |
2499 | struct sample *sample; | |
2500 | ||
2501 | if (!trace_pstate_sample_enabled()) | |
2502 | return; | |
2503 | ||
2504 | if (!intel_pstate_sample(cpu, ktime_get())) | |
2505 | return; | |
2506 | ||
2507 | sample = &cpu->sample; | |
2508 | trace_pstate_sample(trace_type, | |
2509 | 0, | |
2510 | old_pstate, | |
2511 | cpu->pstate.current_pstate, | |
2512 | sample->mperf, | |
2513 | sample->aperf, | |
2514 | sample->tsc, | |
2515 | get_avg_frequency(cpu), | |
2516 | fp_toint(cpu->iowait_boost * 100)); | |
2517 | } | |
2518 | ||
597ffbc8 | 2519 | static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, |
a365ab6b | 2520 | u32 desired, bool fast_switch) |
f6ebbcf0 RW |
2521 | { |
2522 | u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; | |
2523 | ||
2524 | value &= ~HWP_MIN_PERF(~0L); | |
a365ab6b | 2525 | value |= HWP_MIN_PERF(min); |
f6ebbcf0 | 2526 | |
f6ebbcf0 | 2527 | value &= ~HWP_MAX_PERF(~0L); |
a365ab6b RW |
2528 | value |= HWP_MAX_PERF(max); |
2529 | ||
2530 | value &= ~HWP_DESIRED_PERF(~0L); | |
2531 | value |= HWP_DESIRED_PERF(desired); | |
f6ebbcf0 RW |
2532 | |
2533 | if (value == prev) | |
2534 | return; | |
2535 | ||
2536 | WRITE_ONCE(cpu->hwp_req_cached, value); | |
2537 | if (fast_switch) | |
2538 | wrmsrl(MSR_HWP_REQUEST, value); | |
2539 | else | |
2540 | wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); | |
2541 | } | |
2542 | ||
597ffbc8 | 2543 | static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, |
f6ebbcf0 RW |
2544 | u32 target_pstate, bool fast_switch) |
2545 | { | |
2546 | if (fast_switch) | |
2547 | wrmsrl(MSR_IA32_PERF_CTL, | |
2548 | pstate_funcs.get_val(cpu, target_pstate)); | |
2549 | else | |
2550 | wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, | |
2551 | pstate_funcs.get_val(cpu, target_pstate)); | |
2552 | } | |
2553 | ||
fcb3a1ab RW |
2554 | static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, |
2555 | int target_pstate, bool fast_switch) | |
f6ebbcf0 | 2556 | { |
fcb3a1ab | 2557 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
f6ebbcf0 RW |
2558 | int old_pstate = cpu->pstate.current_pstate; |
2559 | ||
2560 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | |
a365ab6b RW |
2561 | if (hwp_active) { |
2562 | int max_pstate = policy->strict_target ? | |
2563 | target_pstate : cpu->max_perf_ratio; | |
2564 | ||
597ffbc8 | 2565 | intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, |
a365ab6b RW |
2566 | fast_switch); |
2567 | } else if (target_pstate != old_pstate) { | |
597ffbc8 | 2568 | intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); |
a365ab6b | 2569 | } |
2554c32f RW |
2570 | |
2571 | cpu->pstate.current_pstate = target_pstate; | |
f6ebbcf0 RW |
2572 | |
2573 | intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : | |
2574 | INTEL_PSTATE_TRACE_TARGET, old_pstate); | |
2575 | ||
2576 | return target_pstate; | |
2577 | } | |
2578 | ||
001c76f0 RW |
2579 | static int intel_cpufreq_target(struct cpufreq_policy *policy, |
2580 | unsigned int target_freq, | |
2581 | unsigned int relation) | |
2582 | { | |
2583 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
2584 | struct cpufreq_freqs freqs; | |
f6ebbcf0 | 2585 | int target_pstate; |
001c76f0 | 2586 | |
64897b20 RW |
2587 | update_turbo_state(); |
2588 | ||
001c76f0 | 2589 | freqs.old = policy->cur; |
64897b20 | 2590 | freqs.new = target_freq; |
001c76f0 RW |
2591 | |
2592 | cpufreq_freq_transition_begin(policy, &freqs); | |
f6ebbcf0 | 2593 | |
001c76f0 RW |
2594 | switch (relation) { |
2595 | case CPUFREQ_RELATION_L: | |
2596 | target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); | |
2597 | break; | |
2598 | case CPUFREQ_RELATION_H: | |
2599 | target_pstate = freqs.new / cpu->pstate.scaling; | |
2600 | break; | |
2601 | default: | |
2602 | target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); | |
2603 | break; | |
2604 | } | |
f6ebbcf0 | 2605 | |
fcb3a1ab | 2606 | target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); |
f6ebbcf0 | 2607 | |
64078299 | 2608 | freqs.new = target_pstate * cpu->pstate.scaling; |
f6ebbcf0 | 2609 | |
001c76f0 RW |
2610 | cpufreq_freq_transition_end(policy, &freqs, false); |
2611 | ||
2612 | return 0; | |
2613 | } | |
2614 | ||
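/*
 * Rounding example (hypothetical request): with scaling = 100000 kHz
 * per ratio unit, a request for 1250001 kHz maps to P-state 13 under
 * CPUFREQ_RELATION_L (lowest frequency at or above the target), 12
 * under CPUFREQ_RELATION_H (highest at or below it), and 13 via
 * DIV_ROUND_CLOSEST in the default case.
 */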
2615 | static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |
2616 | unsigned int target_freq) | |
2617 | { | |
2618 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
f6ebbcf0 | 2619 | int target_pstate; |
001c76f0 | 2620 | |
64897b20 RW |
2621 | update_turbo_state(); |
2622 | ||
001c76f0 | 2623 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
f6ebbcf0 | 2624 | |
fcb3a1ab | 2625 | target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); |
f6ebbcf0 | 2626 | |
64078299 | 2627 | return target_pstate * cpu->pstate.scaling; |
001c76f0 RW |
2628 | } |
2629 | ||
a365ab6b RW |
2630 | static void intel_cpufreq_adjust_perf(unsigned int cpunum, |
2631 | unsigned long min_perf, | |
2632 | unsigned long target_perf, | |
2633 | unsigned long capacity) | |
2634 | { | |
2635 | struct cpudata *cpu = all_cpu_data[cpunum]; | |
17ffd358 | 2636 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
a365ab6b RW |
2637 | int old_pstate = cpu->pstate.current_pstate; |
2638 | int cap_pstate, min_pstate, max_pstate, target_pstate; | |
2639 | ||
2640 | update_turbo_state(); | |
17ffd358 RW |
2641 | cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : |
2642 | HWP_HIGHEST_PERF(hwp_cap); | |
a365ab6b RW |
2643 | |
2644 | /* Optimization: Avoid unnecessary divisions. */ | |
2645 | ||
2646 | target_pstate = cap_pstate; | |
2647 | if (target_perf < capacity) | |
2648 | target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); | |
2649 | ||
2650 | min_pstate = cap_pstate; | |
2651 | if (min_perf < capacity) | |
2652 | min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); | |
2653 | ||
2654 | if (min_pstate < cpu->pstate.min_pstate) | |
2655 | min_pstate = cpu->pstate.min_pstate; | |
2656 | ||
2657 | if (min_pstate < cpu->min_perf_ratio) | |
2658 | min_pstate = cpu->min_perf_ratio; | |
2659 | ||
2660 | max_pstate = min(cap_pstate, cpu->max_perf_ratio); | |
2661 | if (max_pstate < min_pstate) | |
2662 | max_pstate = min_pstate; | |
2663 | ||
2664 | target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); | |
2665 | ||
597ffbc8 | 2666 | intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); |
a365ab6b RW |
2667 | |
2668 | cpu->pstate.current_pstate = target_pstate; | |
2669 | intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); | |
2670 | } | |
2671 | ||
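/*
 * Mapping example (hypothetical values): with turbo available and
 * HWP_HIGHEST_PERF(hwp_cap) = 40, a schedutil request of target_perf
 * = 512 against capacity = 1024 yields DIV_ROUND_UP(40 * 512, 1024)
 * = 20, and min_perf = 256 yields a floor of 10; both are then
 * clamped to the user's min/max ratio limits before being written via
 * intel_cpufreq_hwp_update().
 */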
001c76f0 RW |
2672 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
2673 | { | |
3000ce3c | 2674 | struct freq_qos_request *req; |
da5c504c VK |
2675 | struct cpudata *cpu; |
2676 | struct device *dev; | |
de5bcf40 | 2677 | int ret, freq; |
da5c504c VK |
2678 | |
2679 | dev = get_cpu_device(policy->cpu); | |
2680 | if (!dev) | |
2681 | return -ENODEV; | |
001c76f0 | 2682 | |
da5c504c | 2683 | ret = __intel_pstate_cpu_init(policy); |
001c76f0 RW |
2684 | if (ret) |
2685 | return ret; | |
2686 | ||
2687 | policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; | |
2688 | /* This reflects the intel_pstate_get_cpu_pstates() setting. */ | |
2689 | policy->cur = policy->cpuinfo.min_freq; | |
2690 | ||
da5c504c VK |
2691 | req = kcalloc(2, sizeof(*req), GFP_KERNEL); |
2692 | if (!req) { | |
2693 | ret = -ENOMEM; | |
2694 | goto pstate_exit; | |
2695 | } | |
2696 | ||
2697 | cpu = all_cpu_data[policy->cpu]; | |
2698 | ||
f6ebbcf0 RW |
2699 | if (hwp_active) { |
2700 | u64 value; | |
2701 | ||
f6ebbcf0 | 2702 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; |
de5bcf40 RW |
2703 | |
2704 | intel_pstate_get_hwp_cap(cpu); | |
2705 | ||
f6ebbcf0 RW |
2706 | rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); |
2707 | WRITE_ONCE(cpu->hwp_req_cached, value); | |
de5bcf40 | 2708 | |
c27a0ccc | 2709 | cpu->epp_cached = intel_pstate_get_epp(cpu, value); |
f6ebbcf0 | 2710 | } else { |
f6ebbcf0 RW |
2711 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; |
2712 | } | |
da5c504c | 2713 | |
de5bcf40 | 2714 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); |
da5c504c | 2715 | |
3000ce3c | 2716 | ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, |
de5bcf40 | 2717 | freq); |
da5c504c VK |
2718 | if (ret < 0) { |
2719 | dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); | |
2720 | goto free_req; | |
2721 | } | |
2722 | ||
de5bcf40 RW |
2723 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); |
2724 | ||
3000ce3c | 2725 | ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, |
de5bcf40 | 2726 | freq); |
da5c504c VK |
2727 | if (ret < 0) { |
2728 | dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); | |
2729 | goto remove_min_req; | |
2730 | } | |
2731 | ||
2732 | policy->driver_data = req; | |
2733 | ||
001c76f0 | 2734 | return 0; |
da5c504c VK |
2735 | |
2736 | remove_min_req: | |
3000ce3c | 2737 | freq_qos_remove_request(req); |
da5c504c VK |
2738 | free_req: |
2739 | kfree(req); | |
2740 | pstate_exit: | |
2741 | intel_pstate_exit_perf_limits(policy); | |
2742 | ||
2743 | return ret; | |
2744 | } | |
2745 | ||
2746 | static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
2747 | { | |
3000ce3c | 2748 | struct freq_qos_request *req; |
da5c504c VK |
2749 | |
2750 | req = policy->driver_data; | |
2751 | ||
3000ce3c RW |
2752 | freq_qos_remove_request(req + 1); |
2753 | freq_qos_remove_request(req); | |
da5c504c VK |
2754 | kfree(req); |
2755 | ||
2756 | return intel_pstate_cpu_exit(policy); | |
001c76f0 RW |
2757 | } |
2758 | ||
2759 | static struct cpufreq_driver intel_cpufreq = { | |
2760 | .flags = CPUFREQ_CONST_LOOPS, | |
2761 | .verify = intel_cpufreq_verify_policy, | |
2762 | .target = intel_cpufreq_target, | |
2763 | .fast_switch = intel_cpufreq_fast_switch, | |
2764 | .init = intel_cpufreq_cpu_init, | |
da5c504c | 2765 | .exit = intel_cpufreq_cpu_exit, |
4adcf2e5 RW |
2766 | .offline = intel_pstate_cpu_offline, |
2767 | .online = intel_pstate_cpu_online, | |
2768 | .suspend = intel_pstate_suspend, | |
2769 | .resume = intel_pstate_resume, | |
5a25e3f7 | 2770 | .update_limits = intel_pstate_update_limits, |
001c76f0 RW |
2771 | .name = "intel_cpufreq", |
2772 | }; | |
2773 | ||
39a188b8 | 2774 | static struct cpufreq_driver *default_driver; |
001c76f0 | 2775 | |
fb1fe104 RW |
2776 | static void intel_pstate_driver_cleanup(void) |
2777 | { | |
2778 | unsigned int cpu; | |
2779 | ||
2780 | get_online_cpus(); | |
2781 | for_each_online_cpu(cpu) { | |
2782 | if (all_cpu_data[cpu]) { | |
2783 | if (intel_pstate_driver == &intel_pstate) | |
2784 | intel_pstate_clear_update_util_hook(cpu); | |
2785 | ||
2786 | kfree(all_cpu_data[cpu]); | |
2787 | all_cpu_data[cpu] = NULL; | |
2788 | } | |
2789 | } | |
2790 | put_online_cpus(); | |
f6ebbcf0 | 2791 | |
ee8df89a | 2792 | intel_pstate_driver = NULL; |
fb1fe104 RW |
2793 | } |
2794 | ||
ee8df89a | 2795 | static int intel_pstate_register_driver(struct cpufreq_driver *driver) |
fb1fe104 RW |
2796 | { |
2797 | int ret; | |
2798 | ||
f6ebbcf0 RW |
2799 | if (driver == &intel_pstate) |
2800 | intel_pstate_sysfs_expose_hwp_dynamic_boost(); | |
2801 | ||
c5a2ee7d RW |
2802 | memset(&global, 0, sizeof(global)); |
2803 | global.max_perf_pct = 100; | |
c3a49c89 | 2804 | |
ee8df89a | 2805 | intel_pstate_driver = driver; |
fb1fe104 RW |
2806 | ret = cpufreq_register_driver(intel_pstate_driver); |
2807 | if (ret) { | |
2808 | intel_pstate_driver_cleanup(); | |
2809 | return ret; | |
2810 | } | |
2811 | ||
c5a2ee7d RW |
2812 | global.min_perf_pct = min_perf_pct_min(); |
2813 | ||
fb1fe104 RW |
2814 | return 0; |
2815 | } | |
2816 | ||
fb1fe104 RW |
2817 | static ssize_t intel_pstate_show_status(char *buf) |
2818 | { | |
ee8df89a | 2819 | if (!intel_pstate_driver) |
fb1fe104 RW |
2820 | return sprintf(buf, "off\n"); |
2821 | ||
2822 | return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? | |
2823 | "active" : "passive"); | |
2824 | } | |
2825 | ||
2826 | static int intel_pstate_update_status(const char *buf, size_t size) | |
2827 | { | |
43298db3 RW |
2828 | if (size == 3 && !strncmp(buf, "off", size)) { |
2829 | if (!intel_pstate_driver) | |
2830 | return -EINVAL; | |
2831 | ||
2832 | if (hwp_active) | |
2833 | return -EBUSY; | |
2834 | ||
55671ea3 RW |
2835 | cpufreq_unregister_driver(intel_pstate_driver); |
2836 | intel_pstate_driver_cleanup(); | |
fc7d1755 | 2837 | return 0; |
43298db3 | 2838 | } |
fb1fe104 RW |
2839 | |
2840 | if (size == 6 && !strncmp(buf, "active", size)) { | |
ee8df89a | 2841 | if (intel_pstate_driver) { |
fb1fe104 RW |
2842 | if (intel_pstate_driver == &intel_pstate) |
2843 | return 0; | |
2844 | ||
55671ea3 | 2845 | cpufreq_unregister_driver(intel_pstate_driver); |
fb1fe104 RW |
2846 | } |
2847 | ||
ee8df89a | 2848 | return intel_pstate_register_driver(&intel_pstate); |
fb1fe104 RW |
2849 | } |
2850 | ||
2851 | if (size == 7 && !strncmp(buf, "passive", size)) { | |
ee8df89a | 2852 | if (intel_pstate_driver) { |
0042b2c0 | 2853 | if (intel_pstate_driver == &intel_cpufreq) |
fb1fe104 RW |
2854 | return 0; |
2855 | ||
55671ea3 RW |
2856 | cpufreq_unregister_driver(intel_pstate_driver); |
2857 | intel_pstate_sysfs_hide_hwp_dynamic_boost(); | |
fb1fe104 RW |
2858 | } |
2859 | ||
ee8df89a | 2860 | return intel_pstate_register_driver(&intel_cpufreq); |
fb1fe104 RW |
2861 | } |
2862 | ||
2863 | return -EINVAL; | |
2864 | } | |
2865 | ||
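/*
 * Example of exercising this interface from user space (assuming the
 * standard sysfs location):
 *
 *   # cat /sys/devices/system/cpu/intel_pstate/status
 *   active
 *   # echo passive > /sys/devices/system/cpu/intel_pstate/status
 *
 * Note that switching to "off" is rejected with -EBUSY while HWP is
 * in use, since MSR_PM_ENABLE cannot be cleared without a reset.
 */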
eed43609 JZ |
2866 | static int no_load __initdata; |
2867 | static int no_hwp __initdata; | |
2868 | static int hwp_only __initdata; | |
29327c84 | 2869 | static unsigned int force_load __initdata; |
6be26498 | 2870 | |
29327c84 | 2871 | static int __init intel_pstate_msrs_not_valid(void) |
b563b4e3 | 2872 | { |
016c8150 | 2873 | if (!pstate_funcs.get_max() || |
c410833a SK |
2874 | !pstate_funcs.get_min() || |
2875 | !pstate_funcs.get_turbo()) | |
b563b4e3 DB |
2876 | return -ENODEV; |
2877 | ||
b563b4e3 DB |
2878 | return 0; |
2879 | } | |
016c8150 | 2880 | |
29327c84 | 2881 | static void __init copy_cpu_funcs(struct pstate_funcs *funcs) |
016c8150 DB |
2882 | { |
2883 | pstate_funcs.get_max = funcs->get_max; | |
3bcc6fa9 | 2884 | pstate_funcs.get_max_physical = funcs->get_max_physical; |
016c8150 DB |
2885 | pstate_funcs.get_min = funcs->get_min; |
2886 | pstate_funcs.get_turbo = funcs->get_turbo; | |
b27580b0 | 2887 | pstate_funcs.get_scaling = funcs->get_scaling; |
fdfdb2b1 | 2888 | pstate_funcs.get_val = funcs->get_val; |
007bea09 | 2889 | pstate_funcs.get_vid = funcs->get_vid; |
6e34e1f2 | 2890 | pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; |
016c8150 DB |
2891 | } |
2892 | ||
9522a2ff | 2893 | #ifdef CONFIG_ACPI |
fbbcdc07 | 2894 | |
29327c84 | 2895 | static bool __init intel_pstate_no_acpi_pss(void) |
fbbcdc07 AH |
2896 | { |
2897 | int i; | |
2898 | ||
2899 | for_each_possible_cpu(i) { | |
2900 | acpi_status status; | |
2901 | union acpi_object *pss; | |
2902 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | |
2903 | struct acpi_processor *pr = per_cpu(processors, i); | |
2904 | ||
2905 | if (!pr) | |
2906 | continue; | |
2907 | ||
2908 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); | |
2909 | if (ACPI_FAILURE(status)) | |
2910 | continue; | |
2911 | ||
2912 | pss = buffer.pointer; | |
2913 | if (pss && pss->type == ACPI_TYPE_PACKAGE) { | |
2914 | kfree(pss); | |
2915 | return false; | |
2916 | } | |
2917 | ||
2918 | kfree(pss); | |
2919 | } | |
2920 | ||
076b862c | 2921 | pr_debug("ACPI _PSS not found\n"); |
fbbcdc07 AH |
2922 | return true; |
2923 | } | |
2924 | ||
95d6c085 RW |
2925 | static bool __init intel_pstate_no_acpi_pcch(void) |
2926 | { | |
2927 | acpi_status status; | |
2928 | acpi_handle handle; | |
2929 | ||
2930 | status = acpi_get_handle(NULL, "\\_SB", &handle); | |
2931 | if (ACPI_FAILURE(status)) | |
076b862c EV |
2932 | goto not_found; |
2933 | ||
2934 | if (acpi_has_method(handle, "PCCH")) | |
2935 | return false; | |
95d6c085 | 2936 | |
076b862c EV |
2937 | not_found: |
2938 | pr_debug("ACPI PCCH not found\n"); | |
2939 | return true; | |
95d6c085 RW |
2940 | } |
2941 | ||
29327c84 | 2942 | static bool __init intel_pstate_has_acpi_ppc(void) |
966916ea | 2943 | { |
2944 | int i; | |
2945 | ||
2946 | for_each_possible_cpu(i) { | |
2947 | struct acpi_processor *pr = per_cpu(processors, i); | |
2948 | ||
2949 | if (!pr) | |
2950 | continue; | |
2951 | if (acpi_has_method(pr->handle, "_PPC")) | |
2952 | return true; | |
2953 | } | |
076b862c | 2954 | pr_debug("ACPI _PPC not found\n"); |
966916ea | 2955 | return false; |
2956 | } | |
2957 | ||
2958 | enum { | |
2959 | PSS, | |
2960 | PPC, | |
2961 | }; | |
2962 | ||
fbbcdc07 | 2963 | /* Hardware vendor-specific info that has its own power management modes */ |
5e932321 | 2964 | static struct acpi_platform_list plat_info[] __initdata = { |
8d2eecea JS |
2965 | {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, |
2966 | {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2967 | {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2968 | {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2969 | {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2970 | {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2971 | {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2972 | {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2973 | {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2974 | {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2975 | {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2976 | {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2977 | {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2978 | {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
2979 | {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, | |
5e932321 | 2980 | { } /* End */ |
fbbcdc07 AH |
2981 | }; |
2982 | ||
589bab6b SP |
2983 | #define BITMASK_OOB (BIT(8) | BIT(18)) |
2984 | ||
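/*
 * Decide whether the platform, rather than the OS, owns P-state
 * control: either the CPU advertises Out-of-Band control through
 * MSR_MISC_PWR_MGMT (the BITMASK_OOB bits above), or the board matches
 * an entry in plat_info and the PSS/PPC check for that entry says the
 * platform wins.
 */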
29327c84 | 2985 | static bool __init intel_pstate_platform_pwr_mgmt_exists(void) |
fbbcdc07 | 2986 | { |
2f86dc4c DB |
2987 | const struct x86_cpu_id *id; |
2988 | u64 misc_pwr; | |
5e932321 | 2989 | int idx; |
2f86dc4c DB |
2990 | |
2991 | id = x86_match_cpu(intel_pstate_cpu_oob_ids); | |
2992 | if (id) { | |
2993 | rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); | |
589bab6b SP |
2994 | if (misc_pwr & BITMASK_OOB) { |
2995 | pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); | |
2996 | pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); | |
2f86dc4c | 2997 | return true; |
076b862c | 2998 | } |
2f86dc4c | 2999 | } |
fbbcdc07 | 3000 | |
5e932321 TK |
3001 | idx = acpi_match_platform_list(plat_info); |
3002 | if (idx < 0) | |
fbbcdc07 AH |
3003 | return false; |
3004 | ||
5e932321 TK |
3005 | switch (plat_info[idx].data) { |
3006 | case PSS: | |
95d6c085 RW |
3007 | if (!intel_pstate_no_acpi_pss()) |
3008 | return false; | |
3009 | ||
3010 | return intel_pstate_no_acpi_pcch(); | |
5e932321 TK |
3011 | case PPC: |
3012 | return intel_pstate_has_acpi_ppc() && !force_load; | |
fbbcdc07 AH |
3013 | } |
3014 | ||
3015 | return false; | |
3016 | } | |
d0ea59e1 RW |
3017 | |
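/*
 * Ask the firmware, through the ACPI-defined SMI command, to hand
 * P-state control over to the OS; this is only attempted when _PPC
 * handling was explicitly requested via acpi_ppc.
 */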
3018 | static void intel_pstate_request_control_from_smm(void) | |
3019 | { | |
3020 | /* | |
3021 | * It may be unsafe to request P-state control from SMM if _PPC
3022 | * support has not been enabled.
3023 | */ | |
3024 | if (acpi_ppc) | |
3025 | acpi_processor_pstate_control(); | |
3026 | } | |
fbbcdc07 AH |
3027 | #else /* CONFIG_ACPI not enabled */ |
3028 | static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } | |
966916ea | 3029 | static inline bool intel_pstate_has_acpi_ppc(void) { return false; } |
d0ea59e1 | 3030 | static inline void intel_pstate_request_control_from_smm(void) {} |
fbbcdc07 AH |
3031 | #endif /* CONFIG_ACPI */ |
3032 | ||
ff7c9917 SP |
3033 | #define INTEL_PSTATE_HWP_BROADWELL 0x01 |
3034 | ||
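/*
 * HWP match table: driver_data carries a per-model quirk flag.
 * Broadwell server parts are flagged with INTEL_PSTATE_HWP_BROADWELL
 * for special handling, while the wildcard entry covers every other
 * CPU that enumerates X86_FEATURE_HWP.
 */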
b11d77fa TG |
3035 | #define X86_MATCH_HWP(model, hwp_mode) \ |
3036 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ | |
d9782807 | 3037 | X86_FEATURE_HWP, hwp_mode) |
ff7c9917 | 3038 | |
7791e4aa | 3039 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { |
b11d77fa TG |
3040 | X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
3041 | X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), | |
3042 | X86_MATCH_HWP(ANY, 0), | |
7791e4aa SP |
3043 | {} |
3044 | }; | |
3045 | ||
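/*
 * Driver entry point: select HWP (active) or non-HWP operation, bail
 * out if the platform firmware owns P-state control, and then register
 * either intel_pstate or intel_cpufreq as the default cpufreq driver.
 */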
93f0822d DB |
3046 | static int __init intel_pstate_init(void) |
3047 | { | |
ff7c9917 | 3048 | const struct x86_cpu_id *id; |
eb5139d1 | 3049 | int rc; |
93f0822d | 3050 | |
4ab52646 BP |
3051 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
3052 | return -ENODEV; | |
3053 | ||
6be26498 DB |
3054 | if (no_load) |
3055 | return -ENODEV; | |
3056 | ||
ff7c9917 SP |
3057 | id = x86_match_cpu(hwp_support_ids); |
3058 | if (id) { | |
2f49afc2 | 3059 | copy_cpu_funcs(&core_funcs); |
7aa10312 RW |
3060 | /* |
3061 | * Avoid enabling HWP for processors without EPP support, | |
3062 | * because that means incomplete HWP implementation which is a | |
3063 | * corner case and supporting it is generally problematic. | |
3064 | */ | |
3065 | if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) { | |
eb5139d1 | 3066 | hwp_active++; |
ff7c9917 | 3067 | hwp_mode_bdw = id->driver_data; |
eb5139d1 | 3068 | intel_pstate.attr = hwp_cpufreq_attrs; |
f6ebbcf0 | 3069 | intel_cpufreq.attr = hwp_cpufreq_attrs; |
e0be38ed | 3070 | intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; |
a365ab6b | 3071 | intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; |
f6ebbcf0 RW |
3072 | if (!default_driver) |
3073 | default_driver = &intel_pstate; | |
3074 | ||
eb5139d1 RW |
3075 | goto hwp_cpu_matched; |
3076 | } | |
3077 | } else { | |
eb5139d1 | 3078 | id = x86_match_cpu(intel_pstate_cpu_ids); |
076b862c | 3079 | if (!id) { |
4ab52646 | 3080 | pr_info("CPU model not supported\n"); |
eb5139d1 | 3081 | return -ENODEV; |
076b862c | 3082 | } |
93f0822d | 3083 | |
2f49afc2 | 3084 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); |
eb5139d1 | 3085 | } |
016c8150 | 3086 | |
076b862c EV |
3087 | if (intel_pstate_msrs_not_valid()) { |
3088 | pr_info("Invalid MSRs\n"); | |
b563b4e3 | 3089 | return -ENODEV; |
076b862c | 3090 | } |
33aa46f2 | 3091 | /* Without HWP, start in passive mode. */
39a188b8 RW |
3092 | if (!default_driver) |
3093 | default_driver = &intel_cpufreq; | |
b563b4e3 | 3094 | |
7791e4aa SP |
3095 | hwp_cpu_matched: |
3096 | /* | |
3097 | * The Intel pstate driver will be ignored if the platform | |
3098 | * firmware has its own power management modes. | |
3099 | */ | |
076b862c EV |
3100 | if (intel_pstate_platform_pwr_mgmt_exists()) { |
3101 | pr_info("P-states controlled by the platform\n"); | |
7791e4aa | 3102 | return -ENODEV; |
076b862c | 3103 | } |
7791e4aa | 3104 | |
fb1fe104 RW |
3105 | if (!hwp_active && hwp_only) |
3106 | return -ENOTSUPP; | |
3107 | ||
4836df17 | 3108 | pr_info("Intel P-state driver initializing\n"); |
93f0822d | 3109 | |
fad953ce | 3110 | all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); |
93f0822d DB |
3111 | if (!all_cpu_data) |
3112 | return -ENOMEM; | |
93f0822d | 3113 | |
d0ea59e1 RW |
3114 | intel_pstate_request_control_from_smm(); |
3115 | ||
93f0822d | 3116 | intel_pstate_sysfs_expose_params(); |
b69880f9 | 3117 | |
0c30b65b | 3118 | mutex_lock(&intel_pstate_driver_lock); |
ee8df89a | 3119 | rc = intel_pstate_register_driver(default_driver); |
0c30b65b | 3120 | mutex_unlock(&intel_pstate_driver_lock); |
cdc1719c CY |
3121 | if (rc) { |
3122 | intel_pstate_sysfs_remove(); | |
fb1fe104 | 3123 | return rc; |
cdc1719c | 3124 | } |
366430b5 | 3125 | |
ed7bde7a SP |
3126 | if (hwp_active) { |
3127 | const struct x86_cpu_id *id; | |
3128 | ||
3129 | id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); | |
3130 | if (id) { | |
3131 | set_power_ctl_ee_state(false); | |
3132 | pr_info("Disabling energy efficiency optimization\n"); | |
3133 | } | |
3134 | ||
4836df17 | 3135 | pr_info("HWP enabled\n"); |
ed7bde7a | 3136 | } |
7791e4aa | 3137 | |
fb1fe104 | 3138 | return 0; |
93f0822d DB |
3139 | } |
3140 | device_initcall(intel_pstate_init); | |
3141 | ||
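/*
 * Parse the "intel_pstate=" early kernel parameter. Each value is
 * matched with a full strcmp(), so exactly one keyword is recognized
 * per invocation.
 */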
6be26498 DB |
3142 | static int __init intel_pstate_setup(char *str) |
3143 | { | |
3144 | if (!str) | |
3145 | return -EINVAL; | |
3146 | ||
f6ebbcf0 | 3147 | if (!strcmp(str, "disable")) |
6be26498 | 3148 | no_load = 1; |
f6ebbcf0 | 3149 | else if (!strcmp(str, "active")) |
39a188b8 | 3150 | default_driver = &intel_pstate; |
f6ebbcf0 | 3151 | else if (!strcmp(str, "passive")) |
ee8df89a | 3152 | default_driver = &intel_cpufreq; |
f6ebbcf0 | 3153 | |
539342f6 | 3154 | if (!strcmp(str, "no_hwp")) { |
4836df17 | 3155 | pr_info("HWP disabled\n"); |
2f86dc4c | 3156 | no_hwp = 1; |
539342f6 | 3157 | } |
aa4ea34d EZ |
3158 | if (!strcmp(str, "force")) |
3159 | force_load = 1; | |
d64c3b0b KCA |
3160 | if (!strcmp(str, "hwp_only")) |
3161 | hwp_only = 1; | |
eae48f04 SP |
3162 | if (!strcmp(str, "per_cpu_perf_limits")) |
3163 | per_cpu_limits = true; | |
9522a2ff SP |
3164 | |
3165 | #ifdef CONFIG_ACPI | |
3166 | if (!strcmp(str, "support_acpi_ppc")) | |
3167 | acpi_ppc = true; | |
3168 | #endif | |
3169 | ||
6be26498 DB |
3170 | return 0; |
3171 | } | |
3172 | early_param("intel_pstate", intel_pstate_setup); | |
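/*
 * Illustrative boot command lines, derived from the keywords parsed
 * above (one keyword per "intel_pstate=" instance):
 *
 *   intel_pstate=disable              do not load this driver
 *   intel_pstate=active               start in active mode
 *   intel_pstate=passive              register intel_cpufreq instead
 *   intel_pstate=no_hwp               do not enable hardware P-states
 *   intel_pstate=force                load even on blacklisted platforms
 *   intel_pstate=hwp_only             load only on HWP-capable CPUs
 *   intel_pstate=per_cpu_perf_limits  allow per-CPU performance limits
 *   intel_pstate=support_acpi_ppc     honor ACPI _PPC (CONFIG_ACPI only)
 */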
3173 | ||
93f0822d DB |
3174 | MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); |
3175 | MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
3176 | MODULE_LICENSE("GPL"); |