/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI v5.1
 * and above specifications.
 */
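
/*
 * A minimal sketch of how a CPUfreq driver is expected to use the interface
 * below (hypothetical call site, not part of this file; "cpu" is assumed to
 * hold a logical CPU number):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		// Request nominal performance; the platform may pick any
 *		// point within its power/thermal budgets.
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */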

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide mutually exclusive access to the PCC channel. e.g. when
 * the remote updates the shared region with new data, the reader needs to
 * be protected from other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands.
 */
#define NUM_RETRIES 500

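/*
 * For reference, the fixed header at the start of the PCC shared-memory
 * region written by send_pcc_cmd() below is (a sketch of ACPICA's
 * struct acpi_pcct_shared_memory, showing only the fields consumed here):
 *
 *	u32 signature;	// PCC signature, set by firmware
 *	u16 command;	// command code, written by the OS
 *	u16 status;	// holds PCC_CMD_COMPLETE, owned by the platform
 *
 * CPC registers in PCC space live past this 8-byte header, which is why
 * get_phys_addr() below adds an offset of 0x8.
 */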
static int send_pcc_cmd(u16 cmd)
{
	int retries, result = -EIO;
	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	u32 cmd_latency = pcct_ss->latency;

	/* Min time OS should wait before sending next command. */
	udelay(pcc_cmd_delay);

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Clear the CMD COMPLETE bit in the status field. */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	result = mbox_send_message(pcc_channel, &cmd);
	if (result < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, result);
		return result;
	}

	/* Wait for a nominal time to let platform process command. */
	udelay(cmd_latency);

	/* Retry in case the remote processor was too slow to catch up. */
	for (retries = NUM_RETRIES; retries > 0; retries--) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			result = 0;
			break;
		}
	}

	mbox_client_txdone(pcc_channel, result);
	return result;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
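
/*
 * Note: with .knows_txdone set, the mailbox framework does not poll or wait
 * for an ACK on its own; this client signals transmission completion itself
 * via mbox_client_txdone() in send_pcc_cmd() above.
 */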

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
			&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

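/*
 * For reference, a _PSD object that passes the checks above looks roughly
 * like the following in ASL (illustrative values; 0xFC is
 * DOMAIN_COORD_TYPE_SW_ALL):
 *
 *	Name(_PSD, Package()
 *	{
 *		Package() {5,		// NumEntries
 *			   0,		// Revision
 *			   0,		// Domain
 *			   0xFC,	// Coordination type
 *			   2}		// Processors in this domain
 *	})
 */
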
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

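/*
 * A minimal sketch of how a CPUfreq driver might consume the map above
 * (hypothetical init code; "all_cpu_data" and its allocation are the
 * caller's responsibility):
 *
 *	ret = acpi_get_psd_map(all_cpu_data);
 *	if (ret)
 *		return ret;
 *	// On success, all_cpu_data[i]->shared_cpu_map holds every CPU that
 *	// must coordinate with CPU i, and ->shared_type says how.
 */
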
static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;
		pcc_cmd_delay = cppc_ss->min_turnaround_time;

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *	{
 *	17,	// NumEntries
 *	1,	// Revision
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *	..
 *	..
 *	..
 *	}
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */
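
/*
 * For orientation: each Register() buffer above is parsed below into a
 * struct cpc_reg (see include/acpi/cppc_acpi.h), whose space_id, bit_width,
 * bit_offset, access_width and address fields mirror the Register()
 * arguments in order. For PCC registers, firmware reuses the AccessSize
 * field to carry the PCC subspace ID, which is why the probe reads
 * gas_t->access_width.
 */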

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store CPU logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Plug this CPU's CPC descriptor (with its PSD data) into the per-CPU slot. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);

	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

static u64 get_phys_addr(struct cpc_reg *reg)
{
	/* PCC communication addr space begins at byte offset 0x8. */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		return (u64)comm_base_addr + 0x8 + reg->address;
	else
		return reg->address;
}

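/*
 * Worked example, using the sample _CPC above: the Highest Performance
 * register is in PCC space at offset 0x120, so its reads resolve to
 * comm_base_addr + 0x8 (shared-region header) + 0x120.
 */
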
static void cpc_read(struct cpc_reg *reg, u64 *val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_read_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

static void cpc_write(struct cpc_reg *reg, u64 val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_write_memory((acpi_physical_address)addr,
			val, reg->bit_width);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
				     *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ)) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

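/*
 * A worked example of consuming these counters (hypothetical governor code;
 * per the CPPC spec, average delivered performance over a sampling window
 * is reference_perf * delta(delivered) / delta(reference)):
 *
 *	struct cppc_perf_fb_ctrs fb_ctrs = {0};
 *
 *	cppc_get_perf_ctrs(cpu, &fb_ctrs);	// Primes the prev_* values.
 *	...					// Let the workload run.
 *	cppc_get_perf_ctrs(cpu, &fb_ctrs);	// Now holds the deltas.
 *	perf = caps.reference_perf * fb_ctrs.delivered / fb_ctrs.reference;
 */
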
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so Remote can get our perf request. */
		if (send_pcc_cmd(CMD_WRITE))
			ret = -EIO;
	}

	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);