/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * must take the read_lock and check if the channel belongs to OSPM
	 * before reading or writing to the PCC subspace, and take the
	 * write_lock before transferring channel ownership to the platform
	 * via a doorbell. This allows us to batch a number of CPPC requests
	 * if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the write_lock
	 * for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains the per-CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)
/* Evaluates to True if an optional CPC field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
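/*
 * Illustrative examples (entries assumed): an optional _CPC field may be
 * supplied either as a static integer or as a register descriptor, so:
 *
 *	CPC_SUPPORTED(integer entry with value 0)          evaluates to 0
 *	CPC_SUPPORTED(buffer entry matching IS_NULL_REG()) evaluates to 0
 *	CPC_SUPPORTED(buffer entry with a real register)   evaluates to 1
 */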
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500
struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			 struct attribute *attr, const char *c, ssize_t count);
};
#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
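/*
 * For instance, the first invocation above expands (roughly) to a
 * show_highest_perf() routine that calls cppc_get_perf_caps() and prints
 * cppc_perf_caps.highest_perf, plus a read-only sysfs attribute:
 *
 *	static struct cppc_attr highest_perf =
 *		__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */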
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	NULL
};
static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}
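	/*
	 * Illustrative example (values assumed, not from any real PCCT): with
	 * pcc_mrtt = 60us and the last command completed 20us ago, time_delta
	 * is 20 and the block above busy-waits udelay(40), so the platform
	 * sees at least 60us between back-to-back commands.
	 */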
	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the
	 * system can collectively generate. If it is not, we will follow the
	 * spec and just not send the request to the platform after hitting
	 * the MPAR limit in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}
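	/*
	 * Illustrative example (pcc_mpar value assumed): with pcc_mpar = 600,
	 * mpar_count is refilled to 600 at the start of each 60s window, so
	 * at most 600 commands reach the platform per minute; any further
	 * request in that window fails with -EIO instead of being sent.
	 */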
	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
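/*
 * Illustrative example (topology assumed): if CPUs 0-3 all report _PSD
 * domain 5 with num_processors = 4 and coord_type = SW_ANY, the loops above
 * leave each CPU's shared_cpu_map as {0,1,2,3} with shared_type
 * CPUFREQ_SHARED_TYPE_ANY. Any parsing error instead collapses every CPU
 * into its own single-CPU domain with CPUFREQ_SHARED_TYPE_ALL.
 */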
static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
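/*
 * Example deadline computation (40us figure assumed, not from any
 * particular platform): a subspace advertising a nominal latency of 40us
 * (cppc_ss->latency) yields deadline_us = 500 * 40 = 20000, i.e.
 * check_pcc_chan() polls the status word for up to 20ms before giving up.
 */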
/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}
/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}
/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *			{
 *			17,
 *			// NumEntries
 *			1,
 *			// Revision
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *			// Highest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *			// Nominal Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *			// Lowest Nonlinear Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *			// Lowest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *			// Guaranteed Performance Register
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *			// Desired Performance Register
 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *			..
 *			..
 *			..
 *		}
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register(
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */
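/*
 * Note on the mapping used by acpi_cppc_processor_probe() below: package
 * element 0 is NumEntries and element 1 is the Revision, so _CPC element i
 * is stored in cpc_ptr->cpc_regs[i - 2]; e.g. the Highest Performance
 * register in the sample table above lands in cpc_regs[HIGHEST_PERF].
 */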
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
			 num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
			 cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace IDs. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				pcc_data[pcc_ss_id]->pcc_channel_acquired = false;
				kfree(pcc_data[pcc_ss_id]);
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code on failure
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}
/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code on failure
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}
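/*
 * Illustrative dispatch (register values assumed): a register described as
 * Register(PCC, 32, 0, 0x110, 2) resolves above to vaddr =
 * GET_PCC_VADDR(0x110, pcc_ss_id) followed by a 32-bit readl_relaxed(),
 * while SystemMemory registers use the sys_mem_vaddr mapped at probe time
 * and FixedHardware registers are handed off to cpc_read_ffh().
 */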
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg;
	u64 high, low, nom, min_nonlinear;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
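/*
 * Illustrative use (this formula is how cpufreq clients typically consume
 * the counters, not something this file enforces): sampling the feedback
 * counters twice and computing
 *
 *	delivered_perf = reference_perf * delta(delivered) / delta(reference)
 *
 * gives the average performance over the interval; e.g. reference_perf =
 * 100 with the delivered counter advancing twice as fast as the reference
 * counter implies an average delivered performance of 200.
 */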
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: if we think of a group of cppc_set_perf requests
	 * that happened in a short overlapping interval, the last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this avoids triggering an unnecessary doorbell and, more
	 * importantly, before triggering the doorbell it makes sure that the
	 * PCC channel ownership is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a PCC CMD_READ waiting on down_write and it steals the
	 * lock before the PCC CMD_WRITE is completed. send_pcc_cmd checks for
	 * this case during a CMD_READ and, if there are pending writes, it
	 * delivers the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
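/*
 * Illustrative timeline (CPU numbering assumed): CPUs 0-3 call
 * cppc_set_perf() at roughly the same time. All four take the read_lock and
 * write their Desired Performance registers in parallel (Phase-I). The
 * first one to win down_write_trylock() sees pending_pcc_write_cmd set and
 * rings the doorbell once on behalf of the whole batch (Phase-II); the CPUs
 * that lose the trylock sleep on pcc_write_wait_q until send_pcc_cmd()
 * advances pcc_write_cnt.
 */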
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which the latency is requested.
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
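/*
 * Worked example (PCCT values assumed): pcc_mpar = 600 commands/min gives
 * 60 * (10^9 / 600) = 100,000,000 ns between requests; with pcc_nominal =
 * 40us (40,000 ns) and pcc_mrtt = 60us (60,000 ns) the MPAR term dominates,
 * so the reported transition latency is 100ms.
 */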