// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/cpumask.h>
8 #include <linux/export.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/qcom_scm.h>
14 #include <linux/of_address.h>
15 #include <linux/of_platform.h>
16 #include <linux/clk.h>
17 #include <linux/reset-controller.h>
18 #include <linux/arm-smccc.h>
/*
 * When true, "download mode" is armed at probe so a warm reboot drops into
 * the boot ROM's crash-dump path; cleared again on clean shutdown.
 * Exposed as an unreadable/unwritable module parameter (perms 0).
 */
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
/*
 * Per-SoC clock requirements, carried as of_device_id match data and
 * tested in qcom_scm_probe() to decide which missing clocks are fatal.
 */
#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)
32 struct clk
*iface_clk
;
34 struct reset_controller_dev reset
;
39 struct qcom_scm_current_perm_info
{
47 struct qcom_scm_mem_map_info
{
/*
 * Flag bits passed in args[0] of the BOOT_SET_ADDR call, selecting which
 * CPU's cold/warm boot entry address is being programmed.  Note the bit
 * assignment is not in CPU order — these values are defined by the secure
 * world interface.
 */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
62 struct qcom_scm_wb_entry
{
67 static struct qcom_scm_wb_entry qcom_scm_wb
[] = {
68 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU0
},
69 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU1
},
70 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU2
},
71 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU3
},
74 static const char *qcom_scm_convention_names
[] = {
75 [SMC_CONVENTION_UNKNOWN
] = "unknown",
76 [SMC_CONVENTION_ARM_32
] = "smc arm 32",
77 [SMC_CONVENTION_ARM_64
] = "smc arm 64",
78 [SMC_CONVENTION_LEGACY
] = "smc legacy",
/*
 * Singleton driver state; NULL until the platform device has probed
 * (callers that may run pre-probe test it explicitly, see the
 * "__scm ? __scm->dev : NULL" uses below).
 */
static struct qcom_scm *__scm;
83 static int qcom_scm_clk_enable(void)
87 ret
= clk_prepare_enable(__scm
->core_clk
);
91 ret
= clk_prepare_enable(__scm
->iface_clk
);
95 ret
= clk_prepare_enable(__scm
->bus_clk
);
102 clk_disable_unprepare(__scm
->iface_clk
);
104 clk_disable_unprepare(__scm
->core_clk
);
109 static void qcom_scm_clk_disable(void)
111 clk_disable_unprepare(__scm
->core_clk
);
112 clk_disable_unprepare(__scm
->iface_clk
);
113 clk_disable_unprepare(__scm
->bus_clk
);
/*
 * Calling convention detected by __query_convention(); resolved lazily on
 * first SCM call.  has_queried flags completion and query_lock serializes
 * the (atomic-context capable) probing sequence.
 */
enum qcom_scm_convention qcom_scm_convention;
static bool has_queried __read_mostly;
static DEFINE_SPINLOCK(query_lock);
120 static void __query_convention(void)
123 struct qcom_scm_desc desc
= {
124 .svc
= QCOM_SCM_SVC_INFO
,
125 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
126 .args
[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO
,
127 QCOM_SCM_INFO_IS_CALL_AVAIL
) |
128 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
),
129 .arginfo
= QCOM_SCM_ARGS(1),
130 .owner
= ARM_SMCCC_OWNER_SIP
,
132 struct qcom_scm_res res
;
135 spin_lock_irqsave(&query_lock
, flags
);
139 qcom_scm_convention
= SMC_CONVENTION_ARM_64
;
140 // Device isn't required as there is only one argument - no device
141 // needed to dma_map_single to secure world
142 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
143 if (!ret
&& res
.result
[0] == 1)
146 qcom_scm_convention
= SMC_CONVENTION_ARM_32
;
147 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
148 if (!ret
&& res
.result
[0] == 1)
151 qcom_scm_convention
= SMC_CONVENTION_LEGACY
;
154 spin_unlock_irqrestore(&query_lock
, flags
);
155 pr_info("qcom_scm: convention: %s\n",
156 qcom_scm_convention_names
[qcom_scm_convention
]);
159 static inline enum qcom_scm_convention
__get_convention(void)
161 if (unlikely(!has_queried
))
162 __query_convention();
163 return qcom_scm_convention
;
167 * qcom_scm_call() - Invoke a syscall in the secure world
169 * @svc_id: service identifier
170 * @cmd_id: command identifier
171 * @desc: Descriptor structure containing arguments and return values
173 * Sends a command to the SCM and waits for the command to finish processing.
174 * This should *only* be called in pre-emptible context.
176 static int qcom_scm_call(struct device
*dev
, const struct qcom_scm_desc
*desc
,
177 struct qcom_scm_res
*res
)
180 switch (__get_convention()) {
181 case SMC_CONVENTION_ARM_32
:
182 case SMC_CONVENTION_ARM_64
:
183 return scm_smc_call(dev
, desc
, res
, false);
184 case SMC_CONVENTION_LEGACY
:
185 return scm_legacy_call(dev
, desc
, res
);
187 pr_err("Unknown current SCM calling convention.\n");
193 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
195 * @svc_id: service identifier
196 * @cmd_id: command identifier
197 * @desc: Descriptor structure containing arguments and return values
198 * @res: Structure containing results from SMC/HVC call
200 * Sends a command to the SCM and waits for the command to finish processing.
201 * This can be called in atomic context.
203 static int qcom_scm_call_atomic(struct device
*dev
,
204 const struct qcom_scm_desc
*desc
,
205 struct qcom_scm_res
*res
)
207 switch (__get_convention()) {
208 case SMC_CONVENTION_ARM_32
:
209 case SMC_CONVENTION_ARM_64
:
210 return scm_smc_call(dev
, desc
, res
, true);
211 case SMC_CONVENTION_LEGACY
:
212 return scm_legacy_call_atomic(dev
, desc
, res
);
214 pr_err("Unknown current SCM calling convention.\n");
219 static bool __qcom_scm_is_call_available(struct device
*dev
, u32 svc_id
,
223 struct qcom_scm_desc desc
= {
224 .svc
= QCOM_SCM_SVC_INFO
,
225 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
226 .owner
= ARM_SMCCC_OWNER_SIP
,
228 struct qcom_scm_res res
;
230 desc
.arginfo
= QCOM_SCM_ARGS(1);
231 switch (__get_convention()) {
232 case SMC_CONVENTION_ARM_32
:
233 case SMC_CONVENTION_ARM_64
:
234 desc
.args
[0] = SCM_SMC_FNID(svc_id
, cmd_id
) |
235 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
);
237 case SMC_CONVENTION_LEGACY
:
238 desc
.args
[0] = SCM_LEGACY_FNID(svc_id
, cmd_id
);
241 pr_err("Unknown SMC convention being used\n");
245 ret
= qcom_scm_call(dev
, &desc
, &res
);
247 return ret
? false : !!res
.result
[0];
251 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
252 * @entry: Entry point function for the cpus
253 * @cpus: The cpumask of cpus that will use the entry point
255 * Set the Linux entry point for the SCM to transfer control to when coming
256 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
258 int qcom_scm_set_warm_boot_addr(void *entry
, const cpumask_t
*cpus
)
263 struct qcom_scm_desc desc
= {
264 .svc
= QCOM_SCM_SVC_BOOT
,
265 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
266 .arginfo
= QCOM_SCM_ARGS(2),
270 * Reassign only if we are switching from hotplug entry point
271 * to cpuidle entry point or vice versa.
273 for_each_cpu(cpu
, cpus
) {
274 if (entry
== qcom_scm_wb
[cpu
].entry
)
276 flags
|= qcom_scm_wb
[cpu
].flag
;
279 /* No change in entry function */
283 desc
.args
[0] = flags
;
284 desc
.args
[1] = virt_to_phys(entry
);
286 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
288 for_each_cpu(cpu
, cpus
)
289 qcom_scm_wb
[cpu
].entry
= entry
;
294 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr
);
297 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
298 * @entry: Entry point function for the cpus
299 * @cpus: The cpumask of cpus that will use the entry point
301 * Set the cold boot address of the cpus. Any cpu outside the supported
302 * range would be removed from the cpu present mask.
304 int qcom_scm_set_cold_boot_addr(void *entry
, const cpumask_t
*cpus
)
308 int scm_cb_flags
[] = {
309 QCOM_SCM_FLAG_COLDBOOT_CPU0
,
310 QCOM_SCM_FLAG_COLDBOOT_CPU1
,
311 QCOM_SCM_FLAG_COLDBOOT_CPU2
,
312 QCOM_SCM_FLAG_COLDBOOT_CPU3
,
314 struct qcom_scm_desc desc
= {
315 .svc
= QCOM_SCM_SVC_BOOT
,
316 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
317 .arginfo
= QCOM_SCM_ARGS(2),
318 .owner
= ARM_SMCCC_OWNER_SIP
,
321 if (!cpus
|| (cpus
&& cpumask_empty(cpus
)))
324 for_each_cpu(cpu
, cpus
) {
325 if (cpu
< ARRAY_SIZE(scm_cb_flags
))
326 flags
|= scm_cb_flags
[cpu
];
328 set_cpu_present(cpu
, false);
331 desc
.args
[0] = flags
;
332 desc
.args
[1] = virt_to_phys(entry
);
334 return qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
336 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr
);
339 * qcom_scm_cpu_power_down() - Power down the cpu
340 * @flags - Flags to flush cache
342 * This is an end point to power down cpu. If there was a pending interrupt,
343 * the control would return from this function, otherwise, the cpu jumps to the
344 * warm boot entry point set for this cpu upon reset.
346 void qcom_scm_cpu_power_down(u32 flags
)
348 struct qcom_scm_desc desc
= {
349 .svc
= QCOM_SCM_SVC_BOOT
,
350 .cmd
= QCOM_SCM_BOOT_TERMINATE_PC
,
351 .args
[0] = flags
& QCOM_SCM_FLUSH_FLAG_MASK
,
352 .arginfo
= QCOM_SCM_ARGS(1),
353 .owner
= ARM_SMCCC_OWNER_SIP
,
356 qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
358 EXPORT_SYMBOL(qcom_scm_cpu_power_down
);
360 int qcom_scm_set_remote_state(u32 state
, u32 id
)
362 struct qcom_scm_desc desc
= {
363 .svc
= QCOM_SCM_SVC_BOOT
,
364 .cmd
= QCOM_SCM_BOOT_SET_REMOTE_STATE
,
365 .arginfo
= QCOM_SCM_ARGS(2),
368 .owner
= ARM_SMCCC_OWNER_SIP
,
370 struct qcom_scm_res res
;
373 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
375 return ret
? : res
.result
[0];
377 EXPORT_SYMBOL(qcom_scm_set_remote_state
);
379 static int __qcom_scm_set_dload_mode(struct device
*dev
, bool enable
)
381 struct qcom_scm_desc desc
= {
382 .svc
= QCOM_SCM_SVC_BOOT
,
383 .cmd
= QCOM_SCM_BOOT_SET_DLOAD_MODE
,
384 .arginfo
= QCOM_SCM_ARGS(2),
385 .args
[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE
,
386 .owner
= ARM_SMCCC_OWNER_SIP
,
389 desc
.args
[1] = enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0;
391 return qcom_scm_call_atomic(__scm
->dev
, &desc
, NULL
);
394 static void qcom_scm_set_download_mode(bool enable
)
399 avail
= __qcom_scm_is_call_available(__scm
->dev
,
401 QCOM_SCM_BOOT_SET_DLOAD_MODE
);
403 ret
= __qcom_scm_set_dload_mode(__scm
->dev
, enable
);
404 } else if (__scm
->dload_mode_addr
) {
405 ret
= qcom_scm_io_writel(__scm
->dload_mode_addr
,
406 enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0);
409 "No available mechanism for setting download mode\n");
413 dev_err(__scm
->dev
, "failed to set download mode: %d\n", ret
);
417 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
418 * state machine for a given peripheral, using the
420 * @peripheral: peripheral id
421 * @metadata: pointer to memory containing ELF header, program header table
422 * and optional blob of data used for authenticating the metadata
423 * and the rest of the firmware
424 * @size: size of the metadata
426 * Returns 0 on success.
428 int qcom_scm_pas_init_image(u32 peripheral
, const void *metadata
, size_t size
)
430 dma_addr_t mdata_phys
;
433 struct qcom_scm_desc desc
= {
434 .svc
= QCOM_SCM_SVC_PIL
,
435 .cmd
= QCOM_SCM_PIL_PAS_INIT_IMAGE
,
436 .arginfo
= QCOM_SCM_ARGS(2, QCOM_SCM_VAL
, QCOM_SCM_RW
),
437 .args
[0] = peripheral
,
438 .owner
= ARM_SMCCC_OWNER_SIP
,
440 struct qcom_scm_res res
;
443 * During the scm call memory protection will be enabled for the meta
444 * data blob, so make sure it's physically contiguous, 4K aligned and
445 * non-cachable to avoid XPU violations.
447 mdata_buf
= dma_alloc_coherent(__scm
->dev
, size
, &mdata_phys
,
450 dev_err(__scm
->dev
, "Allocation of metadata buffer failed.\n");
453 memcpy(mdata_buf
, metadata
, size
);
455 ret
= qcom_scm_clk_enable();
459 desc
.args
[1] = mdata_phys
;
461 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
463 qcom_scm_clk_disable();
466 dma_free_coherent(__scm
->dev
, size
, mdata_buf
, mdata_phys
);
468 return ret
? : res
.result
[0];
470 EXPORT_SYMBOL(qcom_scm_pas_init_image
);
473 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
474 * for firmware loading
475 * @peripheral: peripheral id
476 * @addr: start address of memory area to prepare
477 * @size: size of the memory area to prepare
479 * Returns 0 on success.
481 int qcom_scm_pas_mem_setup(u32 peripheral
, phys_addr_t addr
, phys_addr_t size
)
484 struct qcom_scm_desc desc
= {
485 .svc
= QCOM_SCM_SVC_PIL
,
486 .cmd
= QCOM_SCM_PIL_PAS_MEM_SETUP
,
487 .arginfo
= QCOM_SCM_ARGS(3),
488 .args
[0] = peripheral
,
491 .owner
= ARM_SMCCC_OWNER_SIP
,
493 struct qcom_scm_res res
;
495 ret
= qcom_scm_clk_enable();
499 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
500 qcom_scm_clk_disable();
502 return ret
? : res
.result
[0];
504 EXPORT_SYMBOL(qcom_scm_pas_mem_setup
);
507 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
508 * and reset the remote processor
509 * @peripheral: peripheral id
511 * Return 0 on success.
513 int qcom_scm_pas_auth_and_reset(u32 peripheral
)
516 struct qcom_scm_desc desc
= {
517 .svc
= QCOM_SCM_SVC_PIL
,
518 .cmd
= QCOM_SCM_PIL_PAS_AUTH_AND_RESET
,
519 .arginfo
= QCOM_SCM_ARGS(1),
520 .args
[0] = peripheral
,
521 .owner
= ARM_SMCCC_OWNER_SIP
,
523 struct qcom_scm_res res
;
525 ret
= qcom_scm_clk_enable();
529 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
530 qcom_scm_clk_disable();
532 return ret
? : res
.result
[0];
534 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset
);
537 * qcom_scm_pas_shutdown() - Shut down the remote processor
538 * @peripheral: peripheral id
540 * Returns 0 on success.
542 int qcom_scm_pas_shutdown(u32 peripheral
)
545 struct qcom_scm_desc desc
= {
546 .svc
= QCOM_SCM_SVC_PIL
,
547 .cmd
= QCOM_SCM_PIL_PAS_SHUTDOWN
,
548 .arginfo
= QCOM_SCM_ARGS(1),
549 .args
[0] = peripheral
,
550 .owner
= ARM_SMCCC_OWNER_SIP
,
552 struct qcom_scm_res res
;
554 ret
= qcom_scm_clk_enable();
558 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
560 qcom_scm_clk_disable();
562 return ret
? : res
.result
[0];
564 EXPORT_SYMBOL(qcom_scm_pas_shutdown
);
567 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
568 * available for the given peripherial
569 * @peripheral: peripheral id
571 * Returns true if PAS is supported for this peripheral, otherwise false.
573 bool qcom_scm_pas_supported(u32 peripheral
)
576 struct qcom_scm_desc desc
= {
577 .svc
= QCOM_SCM_SVC_PIL
,
578 .cmd
= QCOM_SCM_PIL_PAS_IS_SUPPORTED
,
579 .arginfo
= QCOM_SCM_ARGS(1),
580 .args
[0] = peripheral
,
581 .owner
= ARM_SMCCC_OWNER_SIP
,
583 struct qcom_scm_res res
;
585 if (!__qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_PIL
,
586 QCOM_SCM_PIL_PAS_IS_SUPPORTED
))
589 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
591 return ret
? false : !!res
.result
[0];
593 EXPORT_SYMBOL(qcom_scm_pas_supported
);
595 static int __qcom_scm_pas_mss_reset(struct device
*dev
, bool reset
)
597 struct qcom_scm_desc desc
= {
598 .svc
= QCOM_SCM_SVC_PIL
,
599 .cmd
= QCOM_SCM_PIL_PAS_MSS_RESET
,
600 .arginfo
= QCOM_SCM_ARGS(2),
603 .owner
= ARM_SMCCC_OWNER_SIP
,
605 struct qcom_scm_res res
;
608 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
610 return ret
? : res
.result
[0];
613 static int qcom_scm_pas_reset_assert(struct reset_controller_dev
*rcdev
,
619 return __qcom_scm_pas_mss_reset(__scm
->dev
, 1);
622 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev
*rcdev
,
628 return __qcom_scm_pas_mss_reset(__scm
->dev
, 0);
631 static const struct reset_control_ops qcom_scm_pas_reset_ops
= {
632 .assert = qcom_scm_pas_reset_assert
,
633 .deassert
= qcom_scm_pas_reset_deassert
,
636 int qcom_scm_io_readl(phys_addr_t addr
, unsigned int *val
)
638 struct qcom_scm_desc desc
= {
639 .svc
= QCOM_SCM_SVC_IO
,
640 .cmd
= QCOM_SCM_IO_READ
,
641 .arginfo
= QCOM_SCM_ARGS(1),
643 .owner
= ARM_SMCCC_OWNER_SIP
,
645 struct qcom_scm_res res
;
649 ret
= qcom_scm_call_atomic(__scm
->dev
, &desc
, &res
);
651 *val
= res
.result
[0];
653 return ret
< 0 ? ret
: 0;
655 EXPORT_SYMBOL(qcom_scm_io_readl
);
657 int qcom_scm_io_writel(phys_addr_t addr
, unsigned int val
)
659 struct qcom_scm_desc desc
= {
660 .svc
= QCOM_SCM_SVC_IO
,
661 .cmd
= QCOM_SCM_IO_WRITE
,
662 .arginfo
= QCOM_SCM_ARGS(2),
665 .owner
= ARM_SMCCC_OWNER_SIP
,
668 return qcom_scm_call_atomic(__scm
->dev
, &desc
, NULL
);
670 EXPORT_SYMBOL(qcom_scm_io_writel
);
673 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
674 * supports restore security config interface.
676 * Return true if restore-cfg interface is supported, false if not.
678 bool qcom_scm_restore_sec_cfg_available(void)
680 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_MP
,
681 QCOM_SCM_MP_RESTORE_SEC_CFG
);
683 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available
);
685 int qcom_scm_restore_sec_cfg(u32 device_id
, u32 spare
)
687 struct qcom_scm_desc desc
= {
688 .svc
= QCOM_SCM_SVC_MP
,
689 .cmd
= QCOM_SCM_MP_RESTORE_SEC_CFG
,
690 .arginfo
= QCOM_SCM_ARGS(2),
691 .args
[0] = device_id
,
693 .owner
= ARM_SMCCC_OWNER_SIP
,
695 struct qcom_scm_res res
;
698 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
700 return ret
? : res
.result
[0];
702 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg
);
704 int qcom_scm_iommu_secure_ptbl_size(u32 spare
, size_t *size
)
706 struct qcom_scm_desc desc
= {
707 .svc
= QCOM_SCM_SVC_MP
,
708 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE
,
709 .arginfo
= QCOM_SCM_ARGS(1),
711 .owner
= ARM_SMCCC_OWNER_SIP
,
713 struct qcom_scm_res res
;
716 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
719 *size
= res
.result
[0];
721 return ret
? : res
.result
[1];
723 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size
);
725 int qcom_scm_iommu_secure_ptbl_init(u64 addr
, u32 size
, u32 spare
)
727 struct qcom_scm_desc desc
= {
728 .svc
= QCOM_SCM_SVC_MP
,
729 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT
,
730 .arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
735 .owner
= ARM_SMCCC_OWNER_SIP
,
741 desc
.args
[2] = spare
;
742 desc
.arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
745 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
747 /* the pg table has been initialized already, ignore the error */
753 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init
);
755 int qcom_scm_mem_protect_video_var(u32 cp_start
, u32 cp_size
,
756 u32 cp_nonpixel_start
,
757 u32 cp_nonpixel_size
)
760 struct qcom_scm_desc desc
= {
761 .svc
= QCOM_SCM_SVC_MP
,
762 .cmd
= QCOM_SCM_MP_VIDEO_VAR
,
763 .arginfo
= QCOM_SCM_ARGS(4, QCOM_SCM_VAL
, QCOM_SCM_VAL
,
764 QCOM_SCM_VAL
, QCOM_SCM_VAL
),
767 .args
[2] = cp_nonpixel_start
,
768 .args
[3] = cp_nonpixel_size
,
769 .owner
= ARM_SMCCC_OWNER_SIP
,
771 struct qcom_scm_res res
;
773 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
775 return ret
? : res
.result
[0];
777 EXPORT_SYMBOL(qcom_scm_mem_protect_video_var
);
779 static int __qcom_scm_assign_mem(struct device
*dev
, phys_addr_t mem_region
,
780 size_t mem_sz
, phys_addr_t src
, size_t src_sz
,
781 phys_addr_t dest
, size_t dest_sz
)
784 struct qcom_scm_desc desc
= {
785 .svc
= QCOM_SCM_SVC_MP
,
786 .cmd
= QCOM_SCM_MP_ASSIGN
,
787 .arginfo
= QCOM_SCM_ARGS(7, QCOM_SCM_RO
, QCOM_SCM_VAL
,
788 QCOM_SCM_RO
, QCOM_SCM_VAL
, QCOM_SCM_RO
,
789 QCOM_SCM_VAL
, QCOM_SCM_VAL
),
790 .args
[0] = mem_region
,
797 .owner
= ARM_SMCCC_OWNER_SIP
,
799 struct qcom_scm_res res
;
801 ret
= qcom_scm_call(dev
, &desc
, &res
);
803 return ret
? : res
.result
[0];
807 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
808 * @mem_addr: mem region whose ownership need to be reassigned
809 * @mem_sz: size of the region.
810 * @srcvm: vmid for current set of owners, each set bit in
811 * flag indicate a unique owner
812 * @newvm: array having new owners and corresponding permission
814 * @dest_cnt: number of owners in next set.
816 * Return negative errno on failure or 0 on success with @srcvm updated.
818 int qcom_scm_assign_mem(phys_addr_t mem_addr
, size_t mem_sz
,
820 const struct qcom_scm_vmperm
*newvm
,
821 unsigned int dest_cnt
)
823 struct qcom_scm_current_perm_info
*destvm
;
824 struct qcom_scm_mem_map_info
*mem_to_map
;
825 phys_addr_t mem_to_map_phys
;
826 phys_addr_t dest_phys
;
828 size_t mem_to_map_sz
;
836 unsigned long srcvm_bits
= *srcvm
;
838 src_sz
= hweight_long(srcvm_bits
) * sizeof(*src
);
839 mem_to_map_sz
= sizeof(*mem_to_map
);
840 dest_sz
= dest_cnt
* sizeof(*destvm
);
841 ptr_sz
= ALIGN(src_sz
, SZ_64
) + ALIGN(mem_to_map_sz
, SZ_64
) +
842 ALIGN(dest_sz
, SZ_64
);
844 ptr
= dma_alloc_coherent(__scm
->dev
, ptr_sz
, &ptr_phys
, GFP_KERNEL
);
848 /* Fill source vmid detail */
851 for_each_set_bit(b
, &srcvm_bits
, BITS_PER_LONG
)
852 src
[i
++] = cpu_to_le32(b
);
854 /* Fill details of mem buff to map */
855 mem_to_map
= ptr
+ ALIGN(src_sz
, SZ_64
);
856 mem_to_map_phys
= ptr_phys
+ ALIGN(src_sz
, SZ_64
);
857 mem_to_map
->mem_addr
= cpu_to_le64(mem_addr
);
858 mem_to_map
->mem_size
= cpu_to_le64(mem_sz
);
861 /* Fill details of next vmid detail */
862 destvm
= ptr
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
863 dest_phys
= ptr_phys
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
864 for (i
= 0; i
< dest_cnt
; i
++, destvm
++, newvm
++) {
865 destvm
->vmid
= cpu_to_le32(newvm
->vmid
);
866 destvm
->perm
= cpu_to_le32(newvm
->perm
);
868 destvm
->ctx_size
= 0;
869 next_vm
|= BIT(newvm
->vmid
);
872 ret
= __qcom_scm_assign_mem(__scm
->dev
, mem_to_map_phys
, mem_to_map_sz
,
873 ptr_phys
, src_sz
, dest_phys
, dest_sz
);
874 dma_free_coherent(__scm
->dev
, ptr_sz
, ptr
, ptr_phys
);
877 "Assign memory protection call failed %d\n", ret
);
884 EXPORT_SYMBOL(qcom_scm_assign_mem
);
887 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
889 bool qcom_scm_ocmem_lock_available(void)
891 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_OCMEM
,
892 QCOM_SCM_OCMEM_LOCK_CMD
);
894 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available
);
897 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
898 * region to the specified initiator
900 * @id: tz initiator id
901 * @offset: OCMEM offset
903 * @mode: access mode (WIDE/NARROW)
905 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
,
908 struct qcom_scm_desc desc
= {
909 .svc
= QCOM_SCM_SVC_OCMEM
,
910 .cmd
= QCOM_SCM_OCMEM_LOCK_CMD
,
915 .arginfo
= QCOM_SCM_ARGS(4),
918 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
920 EXPORT_SYMBOL(qcom_scm_ocmem_lock
);
923 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
924 * region from the specified initiator
926 * @id: tz initiator id
927 * @offset: OCMEM offset
930 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
)
932 struct qcom_scm_desc desc
= {
933 .svc
= QCOM_SCM_SVC_OCMEM
,
934 .cmd
= QCOM_SCM_OCMEM_UNLOCK_CMD
,
938 .arginfo
= QCOM_SCM_ARGS(3),
941 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
943 EXPORT_SYMBOL(qcom_scm_ocmem_unlock
);
946 * qcom_scm_ice_available() - Is the ICE key programming interface available?
948 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
949 * qcom_scm_ice_set_key() are available.
951 bool qcom_scm_ice_available(void)
953 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_ES
,
954 QCOM_SCM_ES_INVALIDATE_ICE_KEY
) &&
955 __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_ES
,
956 QCOM_SCM_ES_CONFIG_SET_ICE_KEY
);
958 EXPORT_SYMBOL(qcom_scm_ice_available
);
961 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
962 * @index: the keyslot to invalidate
964 * The UFSHCI and eMMC standards define a standard way to do this, but it
965 * doesn't work on these SoCs; only this SCM call does.
967 * It is assumed that the SoC has only one ICE instance being used, as this SCM
968 * call doesn't specify which ICE instance the keyslot belongs to.
970 * Return: 0 on success; -errno on failure.
972 int qcom_scm_ice_invalidate_key(u32 index
)
974 struct qcom_scm_desc desc
= {
975 .svc
= QCOM_SCM_SVC_ES
,
976 .cmd
= QCOM_SCM_ES_INVALIDATE_ICE_KEY
,
977 .arginfo
= QCOM_SCM_ARGS(1),
979 .owner
= ARM_SMCCC_OWNER_SIP
,
982 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
984 EXPORT_SYMBOL(qcom_scm_ice_invalidate_key
);
987 * qcom_scm_ice_set_key() - Set an inline encryption key
988 * @index: the keyslot into which to set the key
989 * @key: the key to program
990 * @key_size: the size of the key in bytes
991 * @cipher: the encryption algorithm the key is for
992 * @data_unit_size: the encryption data unit size, i.e. the size of each
993 * individual plaintext and ciphertext. Given in 512-byte
994 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
996 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
997 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
999 * The UFSHCI and eMMC standards define a standard way to do this, but it
1000 * doesn't work on these SoCs; only this SCM call does.
1002 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1003 * call doesn't specify which ICE instance the keyslot belongs to.
1005 * Return: 0 on success; -errno on failure.
1007 int qcom_scm_ice_set_key(u32 index
, const u8
*key
, u32 key_size
,
1008 enum qcom_scm_ice_cipher cipher
, u32 data_unit_size
)
1010 struct qcom_scm_desc desc
= {
1011 .svc
= QCOM_SCM_SVC_ES
,
1012 .cmd
= QCOM_SCM_ES_CONFIG_SET_ICE_KEY
,
1013 .arginfo
= QCOM_SCM_ARGS(5, QCOM_SCM_VAL
, QCOM_SCM_RW
,
1014 QCOM_SCM_VAL
, QCOM_SCM_VAL
,
1017 .args
[2] = key_size
,
1019 .args
[4] = data_unit_size
,
1020 .owner
= ARM_SMCCC_OWNER_SIP
,
1023 dma_addr_t key_phys
;
1027 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1028 * physical address that's been properly flushed. The sanctioned way to
1029 * do this is by using the DMA API. But as is best practice for crypto
1030 * keys, we also must wipe the key after use. This makes kmemdup() +
1031 * dma_map_single() not clearly correct, since the DMA API can use
1032 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
1033 * keys is normally rare and thus not performance-critical.
1036 keybuf
= dma_alloc_coherent(__scm
->dev
, key_size
, &key_phys
,
1040 memcpy(keybuf
, key
, key_size
);
1041 desc
.args
[1] = key_phys
;
1043 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
1045 memzero_explicit(keybuf
, key_size
);
1047 dma_free_coherent(__scm
->dev
, key_size
, keybuf
, key_phys
);
1050 EXPORT_SYMBOL(qcom_scm_ice_set_key
);
1053 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1055 * Return true if HDCP is supported, false if not.
1057 bool qcom_scm_hdcp_available(void)
1060 int ret
= qcom_scm_clk_enable();
1065 avail
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_HDCP
,
1066 QCOM_SCM_HDCP_INVOKE
);
1068 qcom_scm_clk_disable();
1072 EXPORT_SYMBOL(qcom_scm_hdcp_available
);
1075 * qcom_scm_hdcp_req() - Send HDCP request.
1076 * @req: HDCP request array
1077 * @req_cnt: HDCP request array count
1078 * @resp: response buffer passed to SCM
1080 * Write HDCP register(s) through SCM.
1082 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req
*req
, u32 req_cnt
, u32
*resp
)
1085 struct qcom_scm_desc desc
= {
1086 .svc
= QCOM_SCM_SVC_HDCP
,
1087 .cmd
= QCOM_SCM_HDCP_INVOKE
,
1088 .arginfo
= QCOM_SCM_ARGS(10),
1101 .owner
= ARM_SMCCC_OWNER_SIP
,
1103 struct qcom_scm_res res
;
1105 if (req_cnt
> QCOM_SCM_HDCP_MAX_REQ_CNT
)
1108 ret
= qcom_scm_clk_enable();
1112 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
1113 *resp
= res
.result
[0];
1115 qcom_scm_clk_disable();
1119 EXPORT_SYMBOL(qcom_scm_hdcp_req
);
1121 int qcom_scm_qsmmu500_wait_safe_toggle(bool en
)
1123 struct qcom_scm_desc desc
= {
1124 .svc
= QCOM_SCM_SVC_SMMU_PROGRAM
,
1125 .cmd
= QCOM_SCM_SMMU_CONFIG_ERRATA1
,
1126 .arginfo
= QCOM_SCM_ARGS(2),
1127 .args
[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL
,
1129 .owner
= ARM_SMCCC_OWNER_SIP
,
1133 return qcom_scm_call_atomic(__scm
->dev
, &desc
, NULL
);
1135 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle
);
1137 static int qcom_scm_find_dload_address(struct device
*dev
, u64
*addr
)
1139 struct device_node
*tcsr
;
1140 struct device_node
*np
= dev
->of_node
;
1141 struct resource res
;
1145 tcsr
= of_parse_phandle(np
, "qcom,dload-mode", 0);
1149 ret
= of_address_to_resource(tcsr
, 0, &res
);
1154 ret
= of_property_read_u32_index(np
, "qcom,dload-mode", 1, &offset
);
1158 *addr
= res
.start
+ offset
;
1164 * qcom_scm_is_available() - Checks if SCM is available
1166 bool qcom_scm_is_available(void)
1170 EXPORT_SYMBOL(qcom_scm_is_available
);
1172 static int qcom_scm_probe(struct platform_device
*pdev
)
1174 struct qcom_scm
*scm
;
1178 scm
= devm_kzalloc(&pdev
->dev
, sizeof(*scm
), GFP_KERNEL
);
1182 ret
= qcom_scm_find_dload_address(&pdev
->dev
, &scm
->dload_mode_addr
);
1186 clks
= (unsigned long)of_device_get_match_data(&pdev
->dev
);
1188 scm
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
1189 if (IS_ERR(scm
->core_clk
)) {
1190 if (PTR_ERR(scm
->core_clk
) == -EPROBE_DEFER
)
1191 return PTR_ERR(scm
->core_clk
);
1193 if (clks
& SCM_HAS_CORE_CLK
) {
1194 dev_err(&pdev
->dev
, "failed to acquire core clk\n");
1195 return PTR_ERR(scm
->core_clk
);
1198 scm
->core_clk
= NULL
;
1201 scm
->iface_clk
= devm_clk_get(&pdev
->dev
, "iface");
1202 if (IS_ERR(scm
->iface_clk
)) {
1203 if (PTR_ERR(scm
->iface_clk
) == -EPROBE_DEFER
)
1204 return PTR_ERR(scm
->iface_clk
);
1206 if (clks
& SCM_HAS_IFACE_CLK
) {
1207 dev_err(&pdev
->dev
, "failed to acquire iface clk\n");
1208 return PTR_ERR(scm
->iface_clk
);
1211 scm
->iface_clk
= NULL
;
1214 scm
->bus_clk
= devm_clk_get(&pdev
->dev
, "bus");
1215 if (IS_ERR(scm
->bus_clk
)) {
1216 if (PTR_ERR(scm
->bus_clk
) == -EPROBE_DEFER
)
1217 return PTR_ERR(scm
->bus_clk
);
1219 if (clks
& SCM_HAS_BUS_CLK
) {
1220 dev_err(&pdev
->dev
, "failed to acquire bus clk\n");
1221 return PTR_ERR(scm
->bus_clk
);
1224 scm
->bus_clk
= NULL
;
1227 scm
->reset
.ops
= &qcom_scm_pas_reset_ops
;
1228 scm
->reset
.nr_resets
= 1;
1229 scm
->reset
.of_node
= pdev
->dev
.of_node
;
1230 ret
= devm_reset_controller_register(&pdev
->dev
, &scm
->reset
);
1234 /* vote for max clk rate for highest performance */
1235 ret
= clk_set_rate(scm
->core_clk
, INT_MAX
);
1240 __scm
->dev
= &pdev
->dev
;
1242 __query_convention();
1245 * If requested enable "download mode", from this point on warmboot
1246 * will cause the the boot stages to enter download mode, unless
1247 * disabled below by a clean shutdown/reboot.
1250 qcom_scm_set_download_mode(true);
1255 static void qcom_scm_shutdown(struct platform_device
*pdev
)
1257 /* Clean shutdown, disable download mode to allow normal restart */
1259 qcom_scm_set_download_mode(false);
1262 static const struct of_device_id qcom_scm_dt_match
[] = {
1263 { .compatible
= "qcom,scm-apq8064",
1264 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1266 { .compatible
= "qcom,scm-apq8084", .data
= (void *)(SCM_HAS_CORE_CLK
|
1270 { .compatible
= "qcom,scm-ipq4019" },
1271 { .compatible
= "qcom,scm-msm8660", .data
= (void *) SCM_HAS_CORE_CLK
},
1272 { .compatible
= "qcom,scm-msm8960", .data
= (void *) SCM_HAS_CORE_CLK
},
1273 { .compatible
= "qcom,scm-msm8916", .data
= (void *)(SCM_HAS_CORE_CLK
|
1277 { .compatible
= "qcom,scm-msm8974", .data
= (void *)(SCM_HAS_CORE_CLK
|
1281 { .compatible
= "qcom,scm-msm8994" },
1282 { .compatible
= "qcom,scm-msm8996" },
1283 { .compatible
= "qcom,scm" },
1287 static struct platform_driver qcom_scm_driver
= {
1290 .of_match_table
= qcom_scm_dt_match
,
1292 .probe
= qcom_scm_probe
,
1293 .shutdown
= qcom_scm_shutdown
,
1296 static int __init
qcom_scm_init(void)
1298 return platform_driver_register(&qcom_scm_driver
);
1300 subsys_initcall(qcom_scm_init
);