/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may
 * also include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
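
/*
 * A minimal sketch of the flow described above, as a CPUfreq driver might
 * use the interfaces exported below (error handling elided; "cpu" and the
 * chosen performance level are illustrative):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);		// query performance capabilities
 *	ctrls.desired_perf = caps.nominal_perf;	// pick a target level
 *	cppc_set_perf(cpu, &ctrls);		// convey the request to the platform
 */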

#define pr_fmt(fmt) "ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include <acpi/cppc_acpi.h>
/*
 * Lock to provide mutually exclusive access to the PCC
 * channel. e.g. when the remote updates the shared region
 * with new data, the reader needs to be protected from
 * other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses, which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static bool pcc_channel_acquired;
static ktime_t deadline;

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keep it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500

static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	return ret;
}

static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so that the
	 * ensuing read()s can proceed. For WRITEs we don't care
	 * because the actual write()s are done before coming here
	 * and the next READ or WRITE will check if the channel
	 * is busy/free at the entry of this call.
	 */
	if (cmd == CMD_READ)
		ret = check_pcc_chan();

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

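/*
 * This driver polls the PCC status register itself (see check_pcc_chan()
 * above) and reports completion via mbox_client_txdone(), so it registers
 * with the mailbox framework as a client that knows when TX is done.
 */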
struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

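/*
 * For reference, a _PSD object that passes the checks above could look
 * like the following (illustrative values; 0xFD is the SW_ANY
 * coordination type):
 *
 *	Name(_PSD, Package()
 *	{
 *		Package()
 *		{
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain
 *			0xFD,	// Coordination type (SW_ANY)
 *			4,	// Number of processors sharing the domain
 *		}
 *	})
 */
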
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Pointers to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr)
			continue;

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
					pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
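		/*
		 * e.g. a platform reporting a nominal latency of 40us
		 * gives a worst-case deadline of 500 * 40us = 20ms here.
		 */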

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *	{
 *		17,	// NumEntries
 *		1,	// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Plug it into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

static u64 get_phys_addr(struct cpc_reg *reg)
{
	/*
	 * The PCC communication address space begins at byte offset 0x8,
	 * past the generic PCC header (signature, command and status).
	 */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		return (u64)comm_base_addr + 0x8 + reg->address;
	else
		return reg->address;
}

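/*
 * Note: callers of the raw accessors below hold pcc_lock and, for PCC
 * registers, have already synced the channel via send_pcc_cmd() or
 * check_pcc_chan(), since those registers live in the region shared
 * with the platform.
 */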
static void cpc_read(struct cpc_reg *reg, u64 *val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_read_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

static void cpc_write(struct cpc_reg *reg, u64 val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_write_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
		*nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Return deltas since the previous reading, and save the raw values. */
	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
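
/*
 * The deltas returned above let a caller estimate the average delivered
 * performance over the sampling window. A sketch (assuming caps were read
 * earlier with cppc_get_perf_caps()):
 *
 *	delivered_perf = (caps.reference_perf * fb_ctrs.delivered) /
 *				fb_ctrs.reference;
 */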

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/* If this is a PCC reg, check if the channel is free before writing */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		ret = check_pcc_chan();
		if (ret)
			goto busy_channel;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so the remote can get our perf request. */
		if (send_pcc_cmd(CMD_WRITE) < 0)
			ret = -EIO;
	}
busy_channel:
	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);