]>
Commit | Line | Data |
---|---|---|
b886d83c | 1 | // SPDX-License-Identifier: GPL-2.0-only |
5477fb3b AC |
2 | /* |
3 | * CPPC (Collaborative Processor Performance Control) driver for | |
4 | * interfacing with the CPUfreq layer and governors. See | |
5 | * cppc_acpi.c for CPPC specific methods. | |
6 | * | |
7 | * (C) Copyright 2014, 2015 Linaro Ltd. | |
8 | * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> | |
5477fb3b AC |
9 | */ |
10 | ||
11 | #define pr_fmt(fmt) "CPPC Cpufreq:" fmt | |
12 | ||
13 | #include <linux/kernel.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/delay.h> | |
16 | #include <linux/cpu.h> | |
17 | #include <linux/cpufreq.h> | |
ad38677d | 18 | #include <linux/dmi.h> |
3d41386d | 19 | #include <linux/time.h> |
5477fb3b AC |
20 | #include <linux/vmalloc.h> |
21 | ||
ad38677d AS |
22 | #include <asm/unaligned.h> |
23 | ||
5477fb3b AC |
24 | #include <acpi/cppc_acpi.h> |
25 | ||
/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14
/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

/* Set at policy init when the firmware reports highest_perf > nominal_perf */
static bool boost_supported;

/*
 * OEM identification for platforms whose firmware needs the HiSilicon
 * get-rate workaround; matched against the PCCT table header in
 * cppc_check_hisi_workaround().
 */
struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

/*
 * Platforms known to need the workaround: HiSilicon HIP07 and HIP08.
 * NOTE(review): the trailing space padding of these OEM strings may have
 * been collapsed in this copy — verify against the platform's ACPI tables
 * (oem_id is ACPI_OEM_ID_SIZE = 6 chars, oem_table_id is 8 chars).
 */
static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id = "HISI ",
		.oem_table_id = "HIP07 ",
		.oem_revision = 0,
	}, {
		.oem_id = "HISI ",
		.oem_table_id = "HIP08 ",
		.oem_revision = 0,
	}
};
59 | ||
ad38677d AS |
60 | /* Callback function used to retrieve the max frequency from DMI */ |
61 | static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) | |
62 | { | |
63 | const u8 *dmi_data = (const u8 *)dm; | |
64 | u16 *mhz = (u16 *)private; | |
65 | ||
66 | if (dm->type == DMI_ENTRY_PROCESSOR && | |
67 | dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { | |
68 | u16 val = (u16)get_unaligned((const u16 *) | |
69 | (dmi_data + DMI_PROCESSOR_MAX_SPEED)); | |
70 | *mhz = val > *mhz ? val : *mhz; | |
71 | } | |
72 | } | |
73 | ||
74 | /* Look up the max frequency in DMI */ | |
75 | static u64 cppc_get_dmi_max_khz(void) | |
76 | { | |
77 | u16 mhz = 0; | |
78 | ||
79 | dmi_walk(cppc_find_dmi_mhz, &mhz); | |
80 | ||
81 | /* | |
82 | * Real stupid fallback value, just in case there is no | |
83 | * actual value set. | |
84 | */ | |
85 | mhz = mhz ? mhz : 1; | |
86 | ||
87 | return (1000 * mhz); | |
88 | } | |
89 | ||
256f19d2 PP |
90 | /* |
91 | * If CPPC lowest_freq and nominal_freq registers are exposed then we can | |
92 | * use them to convert perf to freq and vice versa | |
93 | * | |
94 | * If the perf/freq point lies between Nominal and Lowest, we can treat | |
95 | * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line | |
96 | * and extrapolate the rest | |
97 | * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion | |
98 | */ | |
48ad8dc9 | 99 | static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, |
63087265 | 100 | unsigned int perf) |
256f19d2 | 101 | { |
48ad8dc9 | 102 | struct cppc_perf_caps *caps = &cpu_data->perf_caps; |
63087265 | 103 | static u64 max_khz; |
256f19d2 PP |
104 | u64 mul, div; |
105 | ||
106 | if (caps->lowest_freq && caps->nominal_freq) { | |
107 | if (perf >= caps->nominal_perf) { | |
108 | mul = caps->nominal_freq; | |
109 | div = caps->nominal_perf; | |
110 | } else { | |
111 | mul = caps->nominal_freq - caps->lowest_freq; | |
112 | div = caps->nominal_perf - caps->lowest_perf; | |
113 | } | |
114 | } else { | |
115 | if (!max_khz) | |
116 | max_khz = cppc_get_dmi_max_khz(); | |
117 | mul = max_khz; | |
4264e02d | 118 | div = caps->highest_perf; |
256f19d2 PP |
119 | } |
120 | return (u64)perf * mul / div; | |
121 | } | |
122 | ||
48ad8dc9 | 123 | static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, |
63087265 | 124 | unsigned int freq) |
256f19d2 | 125 | { |
48ad8dc9 | 126 | struct cppc_perf_caps *caps = &cpu_data->perf_caps; |
63087265 | 127 | static u64 max_khz; |
256f19d2 PP |
128 | u64 mul, div; |
129 | ||
130 | if (caps->lowest_freq && caps->nominal_freq) { | |
131 | if (freq >= caps->nominal_freq) { | |
132 | mul = caps->nominal_perf; | |
133 | div = caps->nominal_freq; | |
134 | } else { | |
135 | mul = caps->lowest_perf; | |
136 | div = caps->lowest_freq; | |
137 | } | |
138 | } else { | |
139 | if (!max_khz) | |
140 | max_khz = cppc_get_dmi_max_khz(); | |
4264e02d | 141 | mul = caps->highest_perf; |
256f19d2 PP |
142 | div = max_khz; |
143 | } | |
144 | ||
145 | return (u64)freq * mul / div; | |
146 | } | |
147 | ||
/*
 * cpufreq ->target(): convert the requested kHz to a CPPC performance
 * level and program it through the desired-performance register.
 * Returns 0 on success or if the perf level is already set.
 */
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	/* Notify the cpufreq core around the actual platform request */
	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}
178 | ||
/* cpufreq ->verify(): clamp requested limits to the CPU's known range */
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}
184 | ||
/*
 * The PCC subspace describes the rate at which platform can accept commands
 * on the shared PCC channel (including READs which do not count towards freq
 * transition requests), so ideally we need to use the PCC values as a fallback
 * if we don't have a platform specific transition_delay_us
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			/* Qualcomm Falkor: fixed 10 ms transition delay */
			return 10000;
		}
	}
	/* Default: derive the delay from the PCC transition latency (ns) */
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

/* Non-arm64: always fall back to the PCC-derived transition latency */
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif
/*
 * Allocate a cppc_cpudata for @cpu, populate it from the CPU's ACPI _PSD
 * (domain map) and _CPC (performance capabilities) objects, and link it
 * on cpu_data_list. Returns the new structure, or NULL on any failure
 * (allocation or ACPI parsing); nothing is left allocated on failure.
 */
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}
258 | ||
fe2535a4 VK |
259 | static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy) |
260 | { | |
261 | struct cppc_cpudata *cpu_data = policy->driver_data; | |
262 | ||
263 | list_del(&cpu_data->node); | |
264 | free_cpumask_var(cpu_data->shared_cpu_map); | |
265 | kfree(cpu_data); | |
266 | policy->driver_data = NULL; | |
267 | } | |
268 | ||
/*
 * cpufreq ->init(): build the per-policy CPPC data, derive the policy's
 * frequency limits from the _CPC capabilities, detect boost support, and
 * start the CPU at its highest performance level (governors adjust later).
 * On failure the per-policy data is released before returning.
 */
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}
350 | ||
/*
 * cpufreq ->exit(): drop the CPU to its lowest performance level (best
 * effort — failure is only logged), then release the per-policy data.
 */
static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}
368 | ||
33477d84 GC |
369 | static inline u64 get_delta(u64 t1, u64 t0) |
370 | { | |
371 | if (t1 > t0 || t0 > ~(u32)0) | |
372 | return t1 - t0; | |
373 | ||
374 | return (u32)t1 - (u32)t0; | |
375 | } | |
376 | ||
771fac5e VK |
377 | static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, |
378 | struct cppc_perf_fb_ctrs fb_ctrs_t0, | |
379 | struct cppc_perf_fb_ctrs fb_ctrs_t1) | |
33477d84 GC |
380 | { |
381 | u64 delta_reference, delta_delivered; | |
771fac5e | 382 | u64 reference_perf, delivered_perf; |
33477d84 GC |
383 | |
384 | reference_perf = fb_ctrs_t0.reference_perf; | |
385 | ||
386 | delta_reference = get_delta(fb_ctrs_t1.reference, | |
387 | fb_ctrs_t0.reference); | |
388 | delta_delivered = get_delta(fb_ctrs_t1.delivered, | |
389 | fb_ctrs_t0.delivered); | |
390 | ||
771fac5e VK |
391 | /* Check to avoid divide-by zero */ |
392 | if (delta_reference || delta_delivered) | |
393 | delivered_perf = (reference_perf * delta_delivered) / | |
394 | delta_reference; | |
395 | else | |
396 | delivered_perf = cpu_data->perf_ctrls.desired_perf; | |
33477d84 | 397 | |
48ad8dc9 | 398 | return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); |
33477d84 GC |
399 | } |
400 | ||
48ad8dc9 | 401 | static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) |
33477d84 GC |
402 | { |
403 | struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; | |
a28b2bfc IV |
404 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
405 | struct cppc_cpudata *cpu_data = policy->driver_data; | |
33477d84 GC |
406 | int ret; |
407 | ||
a28b2bfc IV |
408 | cpufreq_cpu_put(policy); |
409 | ||
48ad8dc9 | 410 | ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); |
33477d84 GC |
411 | if (ret) |
412 | return ret; | |
413 | ||
414 | udelay(2); /* 2usec delay between sampling */ | |
415 | ||
48ad8dc9 | 416 | ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); |
33477d84 GC |
417 | if (ret) |
418 | return ret; | |
419 | ||
48ad8dc9 | 420 | return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1); |
33477d84 GC |
421 | } |
422 | ||
/*
 * cpufreq ->set_boost(): raise policy->max to the highest perf level
 * when @state is enabled, or restore it to nominal when disabled, and
 * propagate the new limit through the freq-QoS request. Only valid when
 * boost support was detected at init (highest_perf > nominal_perf).
 */
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
448 | ||
/* sysfs attribute: list the CPUs sharing this policy's frequency domain */
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

/* Extra sysfs attributes exported by this driver */
static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};
461 | ||
/*
 * cpufreq driver callbacks. Note that .get may be overridden with the
 * HiSilicon variant by cppc_check_hisi_workaround() before registration.
 */
static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};
473 | ||
d88b0f0e VK |
474 | /* |
475 | * HISI platform does not support delivered performance counter and | |
476 | * reference performance counter. It can calculate the performance using the | |
477 | * platform specific mechanism. We reuse the desired performance register to | |
478 | * store the real performance calculated by the platform. | |
479 | */ | |
48ad8dc9 | 480 | static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) |
d88b0f0e | 481 | { |
a28b2bfc IV |
482 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
483 | struct cppc_cpudata *cpu_data = policy->driver_data; | |
d88b0f0e VK |
484 | u64 desired_perf; |
485 | int ret; | |
486 | ||
a28b2bfc IV |
487 | cpufreq_cpu_put(policy); |
488 | ||
48ad8dc9 | 489 | ret = cppc_get_desired_perf(cpu, &desired_perf); |
d88b0f0e VK |
490 | if (ret < 0) |
491 | return -EIO; | |
492 | ||
48ad8dc9 | 493 | return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf); |
d88b0f0e VK |
494 | } |
495 | ||
/*
 * Detect affected HiSilicon platforms by matching the PCCT table's OEM
 * id, table id and revision against wa_info[]; on a match, switch the
 * driver's ->get() callback to the desired-perf based implementation.
 */
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}
518 | ||
5477fb3b AC |
519 | static int __init cppc_cpufreq_init(void) |
520 | { | |
a28b2bfc | 521 | if ((acpi_disabled) || !acpi_cpc_valid()) |
5477fb3b AC |
522 | return -ENODEV; |
523 | ||
a28b2bfc | 524 | INIT_LIST_HEAD(&cpu_data_list); |
5477fb3b | 525 | |
6c8d750f XW |
526 | cppc_check_hisi_workaround(); |
527 | ||
771fac5e | 528 | return cpufreq_register_driver(&cppc_cpufreq_driver); |
a28b2bfc | 529 | } |
5477fb3b | 530 | |
a28b2bfc IV |
531 | static inline void free_cpu_data(void) |
532 | { | |
533 | struct cppc_cpudata *iter, *tmp; | |
5477fb3b | 534 | |
a28b2bfc IV |
535 | list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) { |
536 | free_cpumask_var(iter->shared_cpu_map); | |
537 | list_del(&iter->node); | |
538 | kfree(iter); | |
55b55abc | 539 | } |
5477fb3b | 540 | |
5477fb3b AC |
541 | } |
542 | ||
/* Module teardown: unregister the driver, then free all per-CPU data */
static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	free_cpu_data();
}
549 | ||
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

/* ACPI processor device IDs: enable module autoloading via modalias */
static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);