// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * rule of bargaining is as below:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */
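/*
 * Illustrative sketch (not part of this file's logic): a cpufreq-style
 * consumer of this API would typically query capabilities once and then
 * issue performance requests, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * See drivers/cpufreq/cppc_cpufreq.c for the real consumer.
 */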
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>

#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take read_lock and check if the channel belongs to OSPM
	 * before reading or writing to the PCC subspace. We need to take
	 * write_lock before transferring the channel ownership to the
	 * platform via a doorbell. This allows us to batch a number of CPPC
	 * requests if they happen to originate at about the same time (see
	 * the sketch following this struct definition).
	 *
	 * For non-performance-critical use cases (init), take write_lock for
	 * all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
					ACPI_ADR_SPACE_PLATFORM_COMM)
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)
/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
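/*
 * Example: an optional _CPC field is deemed unsupported either when it is a
 * static integer with value 0, or when its register descriptor is the
 * all-zero SystemMemory Register() entry (see the example _CPC table comment
 * further below).
 */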
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL
#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct kobj_attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
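/*
 * For example, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) below defines show_highest_perf(), which fills a
 * struct cppc_perf_caps via cppc_get_perf_caps() and prints its
 * highest_perf member, then declares the matching read-only
 * kobj_attribute via define_one_cppc_ro(highest_perf).
 */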
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);
/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}
/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}
/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}
/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,						// NumEntries
 *		1,						// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9,		// AccessSize (subspace ID)
 *	)
 */
#ifndef init_freq_invariance_cppc
static inline void init_freq_invariance_cppc(void) { }
#endif
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register\n",
						 gas_t->address);
					goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	init_freq_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 width = 8 << (reg->access_width - 1);
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 width = 8 << (reg->access_width - 1);
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}
/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_nominal_perf);
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {

		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Basically if we think of a group of cppc_set_perf
	 * requests that happened in short overlapping interval. The last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which the latency is queried.
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
);