/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may
 * also include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI
 * v5.1 and above specifications.
 */
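
/*
 * Example usage (an illustrative sketch only, not part of this file's
 * logic): a cpufreq driver built on CPPC would typically query the
 * performance capabilities once and then request a desired performance
 * level within those bounds. Error handling is elided and the surrounding
 * driver glue is assumed:
 *
 *      struct cppc_perf_caps caps;
 *      struct cppc_perf_ctrls ctrls = {0};
 *
 *      cppc_get_perf_caps(cpu, &caps);
 *      ctrls.desired_perf = caps.nominal_perf;
 *      cppc_set_perf(cpu, &ctrls);
 */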

#define pr_fmt(fmt)     "ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
        struct mbox_chan *pcc_channel;
        void __iomem *pcc_comm_addr;
        int pcc_subspace_idx;
        bool pcc_channel_acquired;
        ktime_t deadline;
        unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

        bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
        bool platform_owns_pcc;         /* Ownership of PCC subspace */
        unsigned int pcc_write_cnt;     /* Running count of PCC write commands */

        /*
         * Lock to provide controlled access to the PCC channel.
         *
         * For performance-critical use cases (currently cppc_set_perf),
         * we need to take the read lock and check that the channel belongs
         * to the OSPM before reading or writing to the PCC subspace.
         * We need to take the write lock before transferring the channel
         * ownership to the platform via a doorbell. This allows us to
         * batch a number of CPPC requests if they happen to originate at
         * about the same time.
         *
         * For non-performance-critical use cases (init), take the write
         * lock for all purposes, which gives exclusive access.
         */
        struct rw_semaphore pcc_lock;

        /* Wait queue for CPUs whose requests were batched */
        wait_queue_head_t pcc_write_wait_q;
};

/* Structure to represent the single PCC channel */
static struct cppc_pcc_data pcc_data = {
        .pcc_subspace_idx = -1,
        .platform_owns_pcc = true,
};

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs)     (pcc_data.pcc_comm_addr + 0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc)         ((cpc)->type == ACPI_TYPE_BUFFER &&     \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg)        ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
                                (reg)->address == 0 &&                  \
                                (reg)->bit_width == 0 &&                \
                                (reg)->bit_offset == 0 &&               \
                                (reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc)      ((cpc)->type == ACPI_TYPE_INTEGER ?     \
                                !!(cpc)->cpc_entry.int_value :          \
                                !IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES             500

struct cppc_attr {
        struct attribute attr;
        ssize_t (*show)(struct kobject *kobj,
                        struct attribute *attr, char *buf);
        ssize_t (*store)(struct kobject *kobj,
                        struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)       \
static struct cppc_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)             \
        static ssize_t show_##member_name(struct kobject *kobj,        \
                                struct attribute *attr, char *buf)     \
        {                                                               \
                struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
                struct struct_name st_name = {0};                       \
                int ret;                                                \
                                                                        \
                ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
                if (ret)                                                \
                        return ret;                                     \
                                                                        \
                return scnprintf(buf, PAGE_SIZE, "%llu\n",              \
                                (u64)st_name.member_name);              \
        }                                                               \
        define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
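
/*
 * For reference, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) above expands to roughly:
 *
 *      static ssize_t show_highest_perf(struct kobject *kobj,
 *                                       struct attribute *attr, char *buf)
 *      {
 *              struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *              struct cppc_perf_caps st_name = {0};
 *              ...
 *      }
 *
 * The resulting attributes are exposed per CPU via sysfs, typically under
 * /sys/devices/system/cpu/cpuN/acpi_cppc/ (see the kobject_init_and_add()
 * call in acpi_cppc_processor_probe() below).
 */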

static ssize_t show_feedback_ctrs(struct kobject *kobj,
                struct attribute *attr, char *buf)
{
        struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
        struct cppc_perf_fb_ctrs fb_ctrs = {0};
        int ret;

        ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
        if (ret)
                return ret;

        return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
                        fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
        &feedback_ctrs.attr,
        &reference_perf.attr,
        &wraparound_time.attr,
        &highest_perf.attr,
        &lowest_perf.attr,
        &lowest_nonlinear_perf.attr,
        &nominal_perf.attr,
        NULL
};

static struct kobj_type cppc_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = cppc_attrs,
};

static int check_pcc_chan(bool chk_err_bit)
{
        int ret = -EIO, status = 0;
        struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
        ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);

        if (!pcc_data.platform_owns_pcc)
                return 0;

        /* Retry in case the remote processor was too slow to catch up. */
        while (!ktime_after(ktime_get(), next_deadline)) {
                /*
                 * Per spec, prior to boot the PCC space will be initialized
                 * by the platform and should have set the command completion
                 * bit when PCC can be used by OSPM.
                 */
                status = readw_relaxed(&generic_comm_base->status);
                if (status & PCC_CMD_COMPLETE_MASK) {
                        ret = 0;
                        if (chk_err_bit && (status & PCC_ERROR_MASK))
                                ret = -EIO;
                        break;
                }
                /*
                 * Reducing the bus traffic in case this loop takes longer
                 * than a few retries.
                 */
                udelay(3);
        }

        if (likely(!ret))
                pcc_data.platform_owns_pcc = false;
        else
                pr_err("PCC check channel failed. Status=%x\n", status);

        return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(u16 cmd)
{
        int ret = -EIO, i;
        struct acpi_pcct_shared_memory *generic_comm_base =
                (struct acpi_pcct_shared_memory *)pcc_data.pcc_comm_addr;
        static ktime_t last_cmd_cmpl_time, last_mpar_reset;
        static int mpar_count;
        unsigned int time_delta;

        /*
         * For CMD_WRITE we know for a fact the caller should have checked
         * the channel before writing to PCC space.
         */
        if (cmd == CMD_READ) {
                /*
                 * If there are pending cpc_writes, then we stole the channel
                 * before write completion, so first send a WRITE command to
                 * the platform.
                 */
                if (pcc_data.pending_pcc_write_cmd)
                        send_pcc_cmd(CMD_WRITE);

                ret = check_pcc_chan(false);
                if (ret)
                        goto end;
        } else /* CMD_WRITE */
                pcc_data.pending_pcc_write_cmd = FALSE;

        /*
         * Handle the Minimum Request Turnaround Time (MRTT):
         * "The minimum amount of time that OSPM must wait after the
         * completion of a command before issuing the next command,
         * in microseconds."
         */
        if (pcc_data.pcc_mrtt) {
                time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
                if (pcc_data.pcc_mrtt > time_delta)
                        udelay(pcc_data.pcc_mrtt - time_delta);
        }
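
        /*
         * For example (illustrative numbers): with pcc_mrtt == 100 us and
         * only 40 us elapsed since the last command completed, the udelay()
         * above spins for the remaining 60 us before issuing the new
         * command.
         */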

        /*
         * Handle a non-zero Maximum Periodic Access Rate (MPAR):
         * "The maximum number of periodic requests that the subspace
         * channel can support, reported in commands per minute. 0 indicates
         * no limitation."
         *
         * This parameter should ideally be zero or large enough to handle
         * the maximum number of requests that all the cores in the system
         * can collectively generate. If it is not, we will follow the spec
         * and just not send the request to the platform after hitting the
         * MPAR limit in any 60 s window.
         */
        if (pcc_data.pcc_mpar) {
                if (mpar_count == 0) {
                        time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
                        if (time_delta < 60 * MSEC_PER_SEC) {
                                pr_debug("PCC cmd not sent due to MPAR limit\n");
                                ret = -EIO;
                                goto end;
                        }
                        last_mpar_reset = ktime_get();
                        mpar_count = pcc_data.pcc_mpar;
                }
                mpar_count--;
        }
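
        /*
         * For example (illustrative numbers): with pcc_mpar == 600, at most
         * 600 commands are sent in any 60 s window. Once mpar_count reaches
         * zero before the window expires, further requests fail with -EIO
         * until 60 s have passed since last_mpar_reset.
         */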

        /* Write to the shared comm region. */
        writew_relaxed(cmd, &generic_comm_base->command);

        /* Flip CMD COMPLETE bit */
        writew_relaxed(0, &generic_comm_base->status);

        pcc_data.platform_owns_pcc = true;

        /* Ring doorbell */
        ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
        if (ret < 0) {
                pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
                                cmd, ret);
                goto end;
        }

        /* Wait for completion and check for the PCC error bit */
        ret = check_pcc_chan(true);

        if (pcc_data.pcc_mrtt)
                last_cmd_cmpl_time = ktime_get();

        if (pcc_data.pcc_channel->mbox->txdone_irq)
                mbox_chan_txdone(pcc_data.pcc_channel, ret);
        else
                mbox_client_txdone(pcc_data.pcc_channel, ret);

end:
        if (cmd == CMD_WRITE) {
                if (unlikely(ret)) {
                        for_each_possible_cpu(i) {
                                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

                                if (!desc)
                                        continue;

                                if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
                                        desc->write_cmd_status = ret;
                        }
                }
                pcc_data.pcc_write_cnt++;
                wake_up_all(&pcc_data.pcc_write_wait_q);
        }

        return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
        if (ret < 0)
                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
        else
                pr_debug("TX completed. CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
        .tx_done = cppc_chan_tx_done,
        .knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
        int result = -EFAULT;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
                        ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        psd = buffer.pointer;
        if (!psd || psd->package.count != 1) {
                pr_debug("Invalid _PSD data\n");
                goto end;
        }

        pdomain = &(cpc_ptr->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                        &format, &state);
        if (ACPI_FAILURE(status)) {
                pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        result = 0;
end:
        kfree(buffer.pointer);
        return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
        int count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_var_t covered_cpus;
        struct cppc_cpudata *pr, *match_pr;
        struct acpi_psd_package *pdomain;
        struct acpi_psd_package *match_pdomain;
        struct cpc_desc *cpc_ptr, *match_cpc_ptr;

        if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = all_cpu_data[i];
                if (!pr)
                        continue;

                if (cpumask_test_cpu(i, covered_cpus))
                        continue;

                cpc_ptr = per_cpu(cpc_desc_ptr, i);
                if (!cpc_ptr) {
                        retval = -EFAULT;
                        goto err_ret;
                }

                pdomain = &(cpc_ptr->domain_info);
                cpumask_set_cpu(i, pr->shared_cpu_map);
                cpumask_set_cpu(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
                        if (!match_cpc_ptr) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        match_pdomain = &(match_cpc_ptr->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */
                        if (match_pdomain->num_processors != count_target) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        cpumask_set_cpu(j, covered_cpus);
                        cpumask_set_cpu(j, pr->shared_cpu_map);
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = all_cpu_data[j];
                        if (!match_pr)
                                continue;

                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
                        if (!match_cpc_ptr) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        match_pdomain = &(match_cpc_ptr->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->shared_type = pr->shared_type;
                        cpumask_copy(match_pr->shared_cpu_map,
                                     pr->shared_cpu_map);
                }
        }

err_ret:
        for_each_possible_cpu(i) {
                pr = all_cpu_data[i];
                if (!pr)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpumask_clear(pr->shared_cpu_map);
                        cpumask_set_cpu(i, pr->shared_cpu_map);
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
        }

        free_cpumask_var(covered_cpus);
        return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
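
/*
 * For example (a hypothetical topology): on a four-CPU system whose _PSD
 * objects place CPUs {0,1} in domain 0 and CPUs {2,3} in domain 1, both
 * with SW_ANY coordination, acpi_get_psd_map() leaves each CPU with
 * shared_type == CPUFREQ_SHARED_TYPE_ANY and a shared_cpu_map covering
 * exactly its own two-CPU domain.
 */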

static int register_pcc_channel(int pcc_subspace_idx)
{
        struct acpi_pcct_hw_reduced *cppc_ss;
        u64 usecs_lat;

        if (pcc_subspace_idx >= 0) {
                pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
                                pcc_subspace_idx);

                if (IS_ERR(pcc_data.pcc_channel)) {
                        pr_err("Failed to find PCC communication channel\n");
                        return -ENODEV;
                }

                /*
                 * The PCC mailbox controller driver should
                 * have parsed the PCCT (global table of all
                 * PCC channels) and stored pointers to the
                 * subspace communication region in con_priv.
                 */
                cppc_ss = (pcc_data.pcc_channel)->con_priv;

                if (!cppc_ss) {
                        pr_err("No PCC subspace found for CPPC\n");
                        return -ENODEV;
                }

                /*
                 * cppc_ss->latency is just a nominal value. In reality
                 * the remote processor could be much slower to reply,
                 * so add an arbitrary amount of wait on top of nominal.
                 */
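                /*
                 * For example (illustrative numbers): a PCCT nominal latency
                 * of 40 us gives a polling deadline of NUM_RETRIES * 40 us
                 * = 500 * 40 us = 20 ms.
                 */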
                usecs_lat = NUM_RETRIES * cppc_ss->latency;
                pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
                pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
                pcc_data.pcc_mpar = cppc_ss->max_access_rate;
                pcc_data.pcc_nominal = cppc_ss->latency;

                pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address,
                                cppc_ss->length);
                if (!pcc_data.pcc_comm_addr) {
                        pr_err("Failed to ioremap PCC comm region mem\n");
                        return -ENOMEM;
                }

                /* Set flag so that we don't come here for each CPU. */
                pcc_data.pcc_channel_acquired = true;
        }

        return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading is supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
        return false;
}

/*
 * An example CPC table looks like the following.
 *
 *      Name(_CPC, Package() {
 *          17,                                                     // NumEntries
 *          1,                                                      // Revision
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},     // Highest Performance
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},     // Nominal Performance
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},     // Lowest Nonlinear Performance
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},     // Lowest Performance
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},     // Guaranteed Performance Register
 *          ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},     // Desired Performance Register
 *          ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *          ...
 *          ...
 *      })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *      Register (
 *          PCC,        // AddressSpaceKeyword
 *          8,          // RegisterBitWidth
 *          8,          // RegisterBitOffset
 *          0x30,       // RegisterAddress
 *          9           // AccessSize (subspace ID)
 *      )
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *out_obj, *cpc_obj;
        struct cpc_desc *cpc_ptr;
        struct cpc_reg *gas_t;
        struct device *cpu_dev;
        acpi_handle handle = pr->handle;
        unsigned int num_ent, i, cpc_rev;
        acpi_status status;
        int ret = -EFAULT;

        /* Parse the ACPI _CPC table for this CPU. */
        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
                        ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status)) {
                ret = -ENODEV;
                goto out_buf_free;
        }

        out_obj = (union acpi_object *)output.pointer;

        cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
        if (!cpc_ptr) {
                ret = -ENOMEM;
                goto out_buf_free;
        }

        /* First entry is NumEntries. */
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for NumEntries\n",
                                cpc_obj->type);
                goto out_free;
        }

        /* Only support CPPCv2. Bail otherwise. */
        if (num_ent != CPPC_NUM_ENT) {
                pr_debug("Firmware exports %d entries. Expected: %d\n",
                                num_ent, CPPC_NUM_ENT);
                goto out_free;
        }

        cpc_ptr->num_entries = num_ent;

        /* Second entry should be revision. */
        cpc_obj = &out_obj->package.elements[1];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for Revision\n",
                                cpc_obj->type);
                goto out_free;
        }

        if (cpc_rev != CPPC_REV) {
                pr_debug("Firmware exports revision:%d. Expected:%d\n",
                                cpc_rev, CPPC_REV);
                goto out_free;
        }

        /* Iterate through remaining entries in _CPC */
        for (i = 2; i < num_ent; i++) {
                cpc_obj = &out_obj->package.elements[i];

                if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
                        cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
                } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
                        gas_t = (struct cpc_reg *)cpc_obj->buffer.pointer;

                        /*
                         * The PCC Subspace index is encoded inside
                         * the CPC table entries. The same PCC index
                         * will be used for all the PCC entries,
                         * so extract it only once.
                         */
                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
                                if (pcc_data.pcc_subspace_idx < 0)
                                        pcc_data.pcc_subspace_idx = gas_t->access_width;
                                else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
                                        pr_debug("Mismatched PCC ids.\n");
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                                if (gas_t->address) {
                                        void __iomem *addr;

                                        addr = ioremap(gas_t->address, gas_t->bit_width / 8);
                                        if (!addr)
                                                goto out_free;
                                        cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory and FFH type regs */
                                        pr_debug("Unsupported register type: %d\n", gas_t->space_id);
                                        goto out_free;
                                }
                        }

                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
                        pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
                        goto out_free;
                }
        }
        /* Store CPU Logical ID */
        cpc_ptr->cpu_id = pr->id;

        /* Parse PSD data for this CPU */
        ret = acpi_get_psd(cpc_ptr, handle);
        if (ret)
                goto out_free;

        /* Register PCC channel once for all CPUs. */
        if (!pcc_data.pcc_channel_acquired) {
                ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
                if (ret)
                        goto out_free;

                init_rwsem(&pcc_data.pcc_lock);
                init_waitqueue_head(&pcc_data.pcc_write_wait_q);
        }

        /* Everything looks okay */
        pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

        /* Add per logical CPU nodes for reading its feedback counters. */
        cpu_dev = get_cpu_device(pr->id);
        if (!cpu_dev) {
                ret = -EINVAL;
                goto out_free;
        }

        /* Plug PSD data into this CPU's CPC descriptor. */
        per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

        ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
                        "acpi_cppc");
        if (ret) {
                per_cpu(cpc_desc_ptr, pr->id) = NULL;
                goto out_free;
        }

        kfree(output.pointer);
        return 0;

out_free:
        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

                if (addr)
                        iounmap(addr);
        }
        kfree(cpc_ptr);

out_buf_free:
        kfree(output.pointer);
        return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Clean up CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
        struct cpc_desc *cpc_ptr;
        unsigned int i;
        void __iomem *addr;

        cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
        if (!cpc_ptr)
                return;

        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
                if (addr)
                        iounmap(addr);
        }

        kobject_put(&cpc_ptr->kobj);
        kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:     CPU number to read
 * @reg:        cppc register information
 * @val:        place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
        return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:     CPU number to write
 * @reg:        cppc register information
 * @val:        value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -ENOTSUPP;
}

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
 * they should be as fast as possible. We have already mapped the PCC
 * subspace during init, so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg_res->type == ACPI_TYPE_INTEGER) {
                *val = reg_res->cpc_entry.int_value;
                return ret_val;
        }

        *val = 0;
        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
                vaddr = GET_PCC_VADDR(reg->address);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_read_ffh(cpu, reg, val);
        else
                return acpi_os_read_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                *val = readb_relaxed(vaddr);
                break;
        case 16:
                *val = readw_relaxed(vaddr);
                break;
        case 32:
                *val = readl_relaxed(vaddr);
                break;
        case 64:
                *val = readq_relaxed(vaddr);
                break;
        default:
                pr_debug("Error: Cannot read %u bit width from PCC\n",
                                reg->bit_width);
                ret_val = -EFAULT;
        }

        return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
                vaddr = GET_PCC_VADDR(reg->address);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_write_ffh(cpu, reg, val);
        else
                return acpi_os_write_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                writeb_relaxed(val, vaddr);
                break;
        case 16:
                writew_relaxed(val, vaddr);
                break;
        case 32:
                writel_relaxed(val, vaddr);
                break;
        case 64:
                writeq_relaxed(val, vaddr);
                break;
        default:
                pr_debug("Error: Cannot write %u bit width to PCC\n",
                                reg->bit_width);
                ret_val = -EFAULT;
                break;
        }

        return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *highest_reg, *lowest_reg,
                *lowest_non_linear_reg, *nominal_reg;
        u64 high, low, nom, min_nonlinear;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
        nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
            CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
                regs_in_pcc = 1;
                down_write(&pcc_data.pcc_lock);
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, highest_reg, &high);
        perf_caps->highest_perf = high;

        cpc_read(cpunum, lowest_reg, &low);
        perf_caps->lowest_perf = low;

        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;

        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;

        if (!high || !low || !nom || !min_nonlinear)
                ret = -EFAULT;

out_err:
        if (regs_in_pcc)
                up_write(&pcc_data.pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *delivered_reg, *reference_reg,
                *ref_perf_reg, *ctr_wrap_reg;
        u64 delivered, reference, ref_perf, ctr_wrap_time;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
        ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

        /*
         * If the reference perf register is not supported then we should
         * use the nominal perf value.
         */
        if (!CPC_SUPPORTED(ref_perf_reg))
                ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
            CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
                down_write(&pcc_data.pcc_lock);
                regs_in_pcc = 1;
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, delivered_reg, &delivered);
        cpc_read(cpunum, reference_reg, &reference);
        cpc_read(cpunum, ref_perf_reg, &ref_perf);

        /*
         * Per spec, if the ctr_wrap_time optional register is unsupported,
         * then the performance counters are assumed to never wrap during
         * the lifetime of the platform.
         */
        ctr_wrap_time = (u64)(~((u64)0));
        if (CPC_SUPPORTED(ctr_wrap_reg))
                cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

        if (!delivered || !reference || !ref_perf) {
                ret = -EFAULT;
                goto out_err;
        }

        perf_fb_ctrs->delivered = delivered;
        perf_fb_ctrs->reference = reference;
        perf_fb_ctrs->reference_perf = ref_perf;
        perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
        if (regs_in_pcc)
                up_write(&pcc_data.pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
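
/*
 * A consumer can estimate average delivered performance over a window by
 * sampling the feedback counters twice (a sketch with made-up variable
 * names; counter wraparound handling is elided):
 *
 *      struct cppc_perf_fb_ctrs t0, t1;
 *      u64 delivered_perf;
 *
 *      cppc_get_perf_ctrs(cpu, &t0);
 *      ...                             // let some time pass
 *      cppc_get_perf_ctrs(cpu, &t1);
 *      delivered_perf = t0.reference_perf *
 *                       (t1.delivered - t0.delivered) /
 *                       (t1.reference - t0.reference);
 */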

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int ret = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

        /*
         * This is Phase-I, where we want to write to the CPC registers.
         * -> We want all CPUs to be able to execute this phase in parallel.
         *
         * Since the read lock can be acquired by multiple CPUs
         * simultaneously, we achieve that goal here.
         */
        if (CPC_IN_PCC(desired_reg)) {
                down_read(&pcc_data.pcc_lock);  /* BEGIN Phase-I */
                if (pcc_data.platform_owns_pcc) {
                        ret = check_pcc_chan(false);
                        if (ret) {
                                up_read(&pcc_data.pcc_lock);
                                return ret;
                        }
                }
                /*
                 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
                 * will not arrive and steal the channel during the switch
                 * to the write lock.
                 */
                pcc_data.pending_pcc_write_cmd = true;
                cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
                cpc_desc->write_cmd_status = 0;
        }

        /*
         * Skip writing MIN/MAX until Linux knows how to come up with
         * useful values.
         */
        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

        if (CPC_IN_PCC(desired_reg))
                up_read(&pcc_data.pcc_lock);    /* END Phase-I */
        /*
         * This is Phase-II, where we transfer the ownership of PCC to
         * the platform.
         *
         * Short summary: think of a group of cppc_set_perf requests that
         * happened in a short overlapping interval. The last CPU to come
         * out of Phase-I will enter Phase-II and ring the doorbell.
         *
         * We have the following requirements for Phase-II:
         *     1. We want to execute Phase-II only when there are no CPUs
         * currently executing in Phase-I.
         *     2. Once we start Phase-II we want to avoid all other CPUs
         * from entering Phase-I.
         *     3. We want only one CPU among all those who went through
         * Phase-I to run Phase-II.
         *
         * If write_trylock fails to get the lock and doesn't transfer the
         * PCC ownership to the platform, then one of the following will be
         * TRUE:
         *     1. There is at least one CPU in Phase-I which will later
         * execute write_trylock, so the CPUs in Phase-I will be responsible
         * for executing the Phase-II.
         *     2. Some other CPU has beaten this CPU to successfully execute
         * the write_trylock and has already acquired the write_lock. We
         * know for a fact it (the other CPU acquiring the write_lock)
         * couldn't have happened before this CPU's Phase-I, as we held the
         * read_lock.
         *     3. Some other CPU executing a PCC CMD_READ has stolen the
         * down_write, in which case send_pcc_cmd will check for pending
         * CMD_WRITE commands by checking pending_pcc_write_cmd.
         * So in all cases, this CPU knows that its request will be
         * delivered by another CPU and can return.
         *
         * After getting the down_write we still need to check for
         * pending_pcc_write_cmd to take care of the following scenario:
         * the thread running this code could be scheduled out between
         * Phase-I and Phase-II. Before it is scheduled back on, another CPU
         * could have delivered the request to the platform by triggering
         * the doorbell and transferred the ownership of PCC to the
         * platform. So this avoids triggering an unnecessary doorbell and,
         * more importantly, before triggering the doorbell it makes sure
         * that the PCC channel ownership is still with OSPM.
         * pending_pcc_write_cmd can also be cleared by a different CPU if
         * there was a PCC CMD_READ waiting on down_write and it stole the
         * lock before the PCC CMD_WRITE was completed. send_pcc_cmd checks
         * for this case during a CMD_READ and, if there are pending writes,
         * it delivers the write command before servicing the read command.
         */
        if (CPC_IN_PCC(desired_reg)) {
                if (down_write_trylock(&pcc_data.pcc_lock)) {   /* BEGIN Phase-II */
                        /* Update only if there are pending write commands */
                        if (pcc_data.pending_pcc_write_cmd)
                                send_pcc_cmd(CMD_WRITE);
                        up_write(&pcc_data.pcc_lock);           /* END Phase-II */
                } else
                        /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
                        wait_event(pcc_data.pcc_write_wait_q,
                                   cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);

                /* send_pcc_cmd updates the status in case of failure */
                ret = cpc_desc->write_cmd_status;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
        /*
         * Expected transition latency is based on the PCCT timing values.
         * Below are definitions from the ACPI spec:
         * pcc_nominal - Expected latency to process a command, in
         *               microseconds.
         * pcc_mpar    - The maximum number of periodic requests that the
         *               subspace channel can support, reported in commands
         *               per minute. 0 indicates no limitation.
         * pcc_mrtt    - The minimum amount of time that OSPM must wait
         *               after the completion of a command before issuing
         *               the next command, in microseconds.
         */
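        /*
         * For example (illustrative numbers): pcc_mpar == 6000 commands/min
         * yields a floor of 60 * (10^9 / 6000) ns, roughly 10 ms, which is
         * then raised to at least pcc_nominal and pcc_mrtt (both converted
         * from microseconds to nanoseconds below).
         */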
        unsigned int latency_ns = 0;
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;

        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
                return CPUFREQ_ETERNAL;

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
        if (!CPC_IN_PCC(desired_reg))
                return CPUFREQ_ETERNAL;

        if (pcc_data.pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);

        latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
        latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);

        return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);