1 // SPDX-License-Identifier: GPL-2.0-only
3 * Qualcomm self-authenticating modem subsystem remoteproc driver
5 * Copyright (C) 2016 Linaro Ltd.
6 * Copyright (C) 2014 Sony Mobile Communications AB
7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
33 #include <linux/qcom_scm.h>
35 #define MPSS_CRASH_REASON_SMEM 421
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS 0x1
40 #define RMB_MBA_XPU_UNLOCKED 0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
43 #define RMB_MBA_AUTH_COMPLETE 0x4
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG 0x00
47 #define RMB_PBL_STATUS_REG 0x04
48 #define RMB_MBA_COMMAND_REG 0x08
49 #define RMB_MBA_STATUS_REG 0x0C
50 #define RMB_PMI_META_DATA_REG 0x10
51 #define RMB_PMI_CODE_START_REG 0x14
52 #define RMB_PMI_CODE_LENGTH_REG 0x18
53 #define RMB_MBA_MSS_STATUS 0x40
54 #define RMB_MBA_ALT_RESET 0x44
56 #define RMB_CMD_META_DATA_READY 0x1
57 #define RMB_CMD_LOAD_READY 0x2
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG 0x014
61 #define QDSP6SS_GFMUX_CTL_REG 0x020
62 #define QDSP6SS_PWR_CTL_REG 0x030
63 #define QDSP6SS_MEM_PWR_CTL 0x0B0
64 #define QDSP6V6SS_MEM_PWR_CTL 0x034
65 #define QDSP6SS_STRAP_ACC 0x110
67 /* AXI Halt Register Offsets */
68 #define AXI_HALTREQ_REG 0x0
69 #define AXI_HALTACK_REG 0x4
70 #define AXI_IDLE_REG 0x8
71 #define NAV_AXI_HALTREQ_BIT BIT(0)
72 #define NAV_AXI_HALTACK_BIT BIT(1)
73 #define NAV_AXI_IDLE_BIT BIT(2)
74 #define AXI_GATING_VALID_OVERRIDE BIT(0)
76 #define HALT_ACK_TIMEOUT_US 100000
77 #define NAV_HALT_ACK_TIMEOUT_US 200
80 #define Q6SS_STOP_CORE BIT(0)
81 #define Q6SS_CORE_ARES BIT(1)
82 #define Q6SS_BUS_ARES_ENABLE BIT(2)
85 #define Q6SS_CBCR_CLKEN BIT(0)
86 #define Q6SS_CBCR_CLKOFF BIT(31)
87 #define Q6SS_CBCR_TIMEOUT_US 200
89 /* QDSP6SS_GFMUX_CTL */
90 #define Q6SS_CLK_ENABLE BIT(1)
93 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
94 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
95 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
96 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
97 #define Q6SS_ETB_SLP_NRET_N BIT(17)
98 #define Q6SS_L2DATA_STBY_N BIT(18)
99 #define Q6SS_SLP_RET_N BIT(19)
100 #define Q6SS_CLAMP_IO BIT(20)
101 #define QDSS_BHS_ON BIT(21)
102 #define QDSS_LDO_BYP BIT(22)
104 /* QDSP6v56 parameters */
105 #define QDSP6v56_LDO_BYP BIT(25)
106 #define QDSP6v56_BHS_ON BIT(24)
107 #define QDSP6v56_CLAMP_WL BIT(21)
108 #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
109 #define QDSP6SS_XO_CBCR 0x0038
110 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
112 /* QDSP6v65 parameters */
113 #define QDSP6SS_CORE_CBCR 0x20
114 #define QDSP6SS_SLEEP 0x3C
115 #define QDSP6SS_BOOT_CORE_START 0x400
116 #define QDSP6SS_BOOT_CMD 0x404
117 #define QDSP6SS_BOOT_STATUS 0x408
118 #define BOOT_STATUS_TIMEOUT_US 200
119 #define BOOT_FSM_TIMEOUT 10000
122 struct regulator
*reg
;
127 struct qcom_mss_reg_res
{
133 struct rproc_hexagon_res
{
134 const char *hexagon_mba_image
;
135 struct qcom_mss_reg_res
*proxy_supply
;
136 struct qcom_mss_reg_res
*active_supply
;
137 char **proxy_clk_names
;
138 char **reset_clk_names
;
139 char **active_clk_names
;
140 char **active_pd_names
;
141 char **proxy_pd_names
;
143 bool need_mem_protection
;
152 void __iomem
*reg_base
;
153 void __iomem
*rmb_base
;
155 struct regmap
*halt_map
;
156 struct regmap
*halt_nav_map
;
157 struct regmap
*conn_map
;
165 struct reset_control
*mss_restart
;
166 struct reset_control
*pdc_reset
;
168 struct qcom_q6v5 q6v5
;
170 struct clk
*active_clks
[8];
171 struct clk
*reset_clks
[4];
172 struct clk
*proxy_clks
[4];
173 struct device
*active_pds
[1];
174 struct device
*proxy_pds
[3];
175 int active_clk_count
;
181 struct reg_info active_regs
[1];
182 struct reg_info proxy_regs
[3];
183 int active_reg_count
;
188 bool dump_mba_loaded
;
189 unsigned long dump_segment_mask
;
190 unsigned long dump_complete_mask
;
192 phys_addr_t mba_phys
;
196 phys_addr_t mpss_phys
;
197 phys_addr_t mpss_reloc
;
201 struct qcom_rproc_glink glink_subdev
;
202 struct qcom_rproc_subdev smd_subdev
;
203 struct qcom_rproc_ssr ssr_subdev
;
204 struct qcom_sysmon
*sysmon
;
205 bool need_mem_protection
;
210 const char *hexagon_mdt_image
;
223 static int q6v5_regulator_init(struct device
*dev
, struct reg_info
*regs
,
224 const struct qcom_mss_reg_res
*reg_res
)
232 for (i
= 0; reg_res
[i
].supply
; i
++) {
233 regs
[i
].reg
= devm_regulator_get(dev
, reg_res
[i
].supply
);
234 if (IS_ERR(regs
[i
].reg
)) {
235 rc
= PTR_ERR(regs
[i
].reg
);
236 if (rc
!= -EPROBE_DEFER
)
237 dev_err(dev
, "Failed to get %s\n regulator",
242 regs
[i
].uV
= reg_res
[i
].uV
;
243 regs
[i
].uA
= reg_res
[i
].uA
;
249 static int q6v5_regulator_enable(struct q6v5
*qproc
,
250 struct reg_info
*regs
, int count
)
255 for (i
= 0; i
< count
; i
++) {
256 if (regs
[i
].uV
> 0) {
257 ret
= regulator_set_voltage(regs
[i
].reg
,
258 regs
[i
].uV
, INT_MAX
);
261 "Failed to request voltage for %d.\n",
267 if (regs
[i
].uA
> 0) {
268 ret
= regulator_set_load(regs
[i
].reg
,
272 "Failed to set regulator mode\n");
277 ret
= regulator_enable(regs
[i
].reg
);
279 dev_err(qproc
->dev
, "Regulator enable failed\n");
286 for (; i
>= 0; i
--) {
288 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
291 regulator_set_load(regs
[i
].reg
, 0);
293 regulator_disable(regs
[i
].reg
);
299 static void q6v5_regulator_disable(struct q6v5
*qproc
,
300 struct reg_info
*regs
, int count
)
304 for (i
= 0; i
< count
; i
++) {
306 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
309 regulator_set_load(regs
[i
].reg
, 0);
311 regulator_disable(regs
[i
].reg
);
/*
 * Prepare and enable @count clocks from @clks; on failure, disable the
 * ones already enabled and return the error.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* i points at the clock that failed; unwind the earlier ones */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}
/* Disable and unprepare @count clocks from @clks. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}
346 static int q6v5_pds_enable(struct q6v5
*qproc
, struct device
**pds
,
352 for (i
= 0; i
< pd_count
; i
++) {
353 dev_pm_genpd_set_performance_state(pds
[i
], INT_MAX
);
354 ret
= pm_runtime_get_sync(pds
[i
]);
356 goto unroll_pd_votes
;
362 for (i
--; i
>= 0; i
--) {
363 dev_pm_genpd_set_performance_state(pds
[i
], 0);
364 pm_runtime_put(pds
[i
]);
370 static void q6v5_pds_disable(struct q6v5
*qproc
, struct device
**pds
,
375 for (i
= 0; i
< pd_count
; i
++) {
376 dev_pm_genpd_set_performance_state(pds
[i
], 0);
377 pm_runtime_put(pds
[i
]);
381 static int q6v5_xfer_mem_ownership(struct q6v5
*qproc
, int *current_perm
,
382 bool remote_owner
, phys_addr_t addr
,
385 struct qcom_scm_vmperm next
;
387 if (!qproc
->need_mem_protection
)
389 if (remote_owner
&& *current_perm
== BIT(QCOM_SCM_VMID_MSS_MSA
))
391 if (!remote_owner
&& *current_perm
== BIT(QCOM_SCM_VMID_HLOS
))
394 next
.vmid
= remote_owner
? QCOM_SCM_VMID_MSS_MSA
: QCOM_SCM_VMID_HLOS
;
395 next
.perm
= remote_owner
? QCOM_SCM_PERM_RW
: QCOM_SCM_PERM_RWX
;
397 return qcom_scm_assign_mem(addr
, ALIGN(size
, SZ_4K
),
398 current_perm
, &next
, 1);
401 static int q6v5_load(struct rproc
*rproc
, const struct firmware
*fw
)
403 struct q6v5
*qproc
= rproc
->priv
;
405 memcpy(qproc
->mba_region
, fw
->data
, fw
->size
);
410 static int q6v5_reset_assert(struct q6v5
*qproc
)
414 if (qproc
->has_alt_reset
) {
415 reset_control_assert(qproc
->pdc_reset
);
416 ret
= reset_control_reset(qproc
->mss_restart
);
417 reset_control_deassert(qproc
->pdc_reset
);
418 } else if (qproc
->has_halt_nav
) {
420 * When the AXI pipeline is being reset with the Q6 modem partly
421 * operational there is possibility of AXI valid signal to
422 * glitch, leading to spurious transactions and Q6 hangs. A work
423 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
424 * BIT before triggering Q6 MSS reset. Both the HALTREQ and
425 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
426 * followed by a MSS deassert, while holding the PDC reset.
428 reset_control_assert(qproc
->pdc_reset
);
429 regmap_update_bits(qproc
->conn_map
, qproc
->conn_box
,
430 AXI_GATING_VALID_OVERRIDE
, 1);
431 regmap_update_bits(qproc
->halt_nav_map
, qproc
->halt_nav
,
432 NAV_AXI_HALTREQ_BIT
, 0);
433 reset_control_assert(qproc
->mss_restart
);
434 reset_control_deassert(qproc
->pdc_reset
);
435 regmap_update_bits(qproc
->conn_map
, qproc
->conn_box
,
436 AXI_GATING_VALID_OVERRIDE
, 0);
437 ret
= reset_control_deassert(qproc
->mss_restart
);
439 ret
= reset_control_assert(qproc
->mss_restart
);
445 static int q6v5_reset_deassert(struct q6v5
*qproc
)
449 if (qproc
->has_alt_reset
) {
450 reset_control_assert(qproc
->pdc_reset
);
451 writel(1, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
452 ret
= reset_control_reset(qproc
->mss_restart
);
453 writel(0, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
454 reset_control_deassert(qproc
->pdc_reset
);
455 } else if (qproc
->has_halt_nav
) {
456 ret
= reset_control_reset(qproc
->mss_restart
);
458 ret
= reset_control_deassert(qproc
->mss_restart
);
464 static int q6v5_rmb_pbl_wait(struct q6v5
*qproc
, int ms
)
466 unsigned long timeout
;
469 timeout
= jiffies
+ msecs_to_jiffies(ms
);
471 val
= readl(qproc
->rmb_base
+ RMB_PBL_STATUS_REG
);
475 if (time_after(jiffies
, timeout
))
484 static int q6v5_rmb_mba_wait(struct q6v5
*qproc
, u32 status
, int ms
)
487 unsigned long timeout
;
490 timeout
= jiffies
+ msecs_to_jiffies(ms
);
492 val
= readl(qproc
->rmb_base
+ RMB_MBA_STATUS_REG
);
498 else if (status
&& val
== status
)
501 if (time_after(jiffies
, timeout
))
510 static int q6v5proc_reset(struct q6v5
*qproc
)
516 if (qproc
->version
== MSS_SDM845
) {
517 val
= readl(qproc
->reg_base
+ QDSP6SS_SLEEP
);
518 val
|= Q6SS_CBCR_CLKEN
;
519 writel(val
, qproc
->reg_base
+ QDSP6SS_SLEEP
);
521 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_SLEEP
,
522 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
523 Q6SS_CBCR_TIMEOUT_US
);
525 dev_err(qproc
->dev
, "QDSP6SS Sleep clock timed out\n");
529 /* De-assert QDSP6 stop core */
530 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CORE_START
);
531 /* Trigger boot FSM */
532 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CMD
);
534 ret
= readl_poll_timeout(qproc
->rmb_base
+ RMB_MBA_MSS_STATUS
,
535 val
, (val
& BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT
);
537 dev_err(qproc
->dev
, "Boot FSM failed to complete.\n");
538 /* Reset the modem so that boot FSM is in reset state */
539 q6v5_reset_deassert(qproc
);
544 } else if (qproc
->version
== MSS_SC7180
) {
545 val
= readl(qproc
->reg_base
+ QDSP6SS_SLEEP
);
546 val
|= Q6SS_CBCR_CLKEN
;
547 writel(val
, qproc
->reg_base
+ QDSP6SS_SLEEP
);
549 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_SLEEP
,
550 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
551 Q6SS_CBCR_TIMEOUT_US
);
553 dev_err(qproc
->dev
, "QDSP6SS Sleep clock timed out\n");
557 /* Turn on the XO clock needed for PLL setup */
558 val
= readl(qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
559 val
|= Q6SS_CBCR_CLKEN
;
560 writel(val
, qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
562 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_XO_CBCR
,
563 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
564 Q6SS_CBCR_TIMEOUT_US
);
566 dev_err(qproc
->dev
, "QDSP6SS XO clock timed out\n");
570 /* Configure Q6 core CBCR to auto-enable after reset sequence */
571 val
= readl(qproc
->reg_base
+ QDSP6SS_CORE_CBCR
);
572 val
|= Q6SS_CBCR_CLKEN
;
573 writel(val
, qproc
->reg_base
+ QDSP6SS_CORE_CBCR
);
575 /* De-assert the Q6 stop core signal */
576 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CORE_START
);
578 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
579 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CMD
);
581 /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
582 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_BOOT_STATUS
,
583 val
, (val
& BIT(0)) != 0, 1,
584 BOOT_STATUS_TIMEOUT_US
);
586 dev_err(qproc
->dev
, "Boot FSM failed to complete.\n");
587 /* Reset the modem so that boot FSM is in reset state */
588 q6v5_reset_deassert(qproc
);
592 } else if (qproc
->version
== MSS_MSM8996
||
593 qproc
->version
== MSS_MSM8998
) {
596 /* Override the ACC value if required */
597 writel(QDSP6SS_ACC_OVERRIDE_VAL
,
598 qproc
->reg_base
+ QDSP6SS_STRAP_ACC
);
600 /* Assert resets, stop core */
601 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
602 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
603 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
605 /* BHS require xo cbcr to be enabled */
606 val
= readl(qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
607 val
|= Q6SS_CBCR_CLKEN
;
608 writel(val
, qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
610 /* Read CLKOFF bit to go low indicating CLK is enabled */
611 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_XO_CBCR
,
612 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
613 Q6SS_CBCR_TIMEOUT_US
);
616 "xo cbcr enabling timed out (rc:%d)\n", ret
);
619 /* Enable power block headswitch and wait for it to stabilize */
620 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
621 val
|= QDSP6v56_BHS_ON
;
622 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
623 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
626 /* Put LDO in bypass mode */
627 val
|= QDSP6v56_LDO_BYP
;
628 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
630 /* Deassert QDSP6 compiler memory clamp */
631 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
632 val
&= ~QDSP6v56_CLAMP_QMC_MEM
;
633 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
635 /* Deassert memory peripheral sleep and L2 memory standby */
636 val
|= Q6SS_L2DATA_STBY_N
| Q6SS_SLP_RET_N
;
637 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
639 /* Turn on L1, L2, ETB and JU memories 1 at a time */
640 if (qproc
->version
== MSS_MSM8996
) {
641 mem_pwr_ctl
= QDSP6SS_MEM_PWR_CTL
;
645 mem_pwr_ctl
= QDSP6V6SS_MEM_PWR_CTL
;
648 val
= readl(qproc
->reg_base
+ mem_pwr_ctl
);
649 for (; i
>= 0; i
--) {
651 writel(val
, qproc
->reg_base
+ mem_pwr_ctl
);
653 * Read back value to ensure the write is done then
654 * wait for 1us for both memory peripheral and data
657 val
|= readl(qproc
->reg_base
+ mem_pwr_ctl
);
660 /* Remove word line clamp */
661 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
662 val
&= ~QDSP6v56_CLAMP_WL
;
663 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
665 /* Assert resets, stop core */
666 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
667 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
668 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
670 /* Enable power block headswitch and wait for it to stabilize */
671 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
672 val
|= QDSS_BHS_ON
| QDSS_LDO_BYP
;
673 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
674 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
677 * Turn on memories. L2 banks should be done individually
678 * to minimize inrush current.
680 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
681 val
|= Q6SS_SLP_RET_N
| Q6SS_L2TAG_SLP_NRET_N
|
682 Q6SS_ETB_SLP_NRET_N
| Q6SS_L2DATA_STBY_N
;
683 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
684 val
|= Q6SS_L2DATA_SLP_NRET_N_2
;
685 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
686 val
|= Q6SS_L2DATA_SLP_NRET_N_1
;
687 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
688 val
|= Q6SS_L2DATA_SLP_NRET_N_0
;
689 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
691 /* Remove IO clamp */
692 val
&= ~Q6SS_CLAMP_IO
;
693 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
695 /* Bring core out of reset */
696 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
697 val
&= ~Q6SS_CORE_ARES
;
698 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
700 /* Turn on core clock */
701 val
= readl(qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
702 val
|= Q6SS_CLK_ENABLE
;
703 writel(val
, qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
705 /* Start core execution */
706 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
707 val
&= ~Q6SS_STOP_CORE
;
708 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
711 /* Wait for PBL status */
712 ret
= q6v5_rmb_pbl_wait(qproc
, 1000);
713 if (ret
== -ETIMEDOUT
) {
714 dev_err(qproc
->dev
, "PBL boot timed out\n");
715 } else if (ret
!= RMB_PBL_SUCCESS
) {
716 dev_err(qproc
->dev
, "PBL returned unexpected status %d\n", ret
);
725 static void q6v5proc_halt_axi_port(struct q6v5
*qproc
,
726 struct regmap
*halt_map
,
732 /* Check if we're already idle */
733 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
737 /* Assert halt request */
738 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 1);
741 regmap_read_poll_timeout(halt_map
, offset
+ AXI_HALTACK_REG
, val
,
742 val
, 1000, HALT_ACK_TIMEOUT_US
);
744 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
746 dev_err(qproc
->dev
, "port failed halt\n");
748 /* Clear halt request (port will remain halted until reset) */
749 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 0);
752 static void q6v5proc_halt_nav_axi_port(struct q6v5
*qproc
,
753 struct regmap
*halt_map
,
759 /* Check if we're already idle */
760 ret
= regmap_read(halt_map
, offset
, &val
);
761 if (!ret
&& (val
& NAV_AXI_IDLE_BIT
))
764 /* Assert halt request */
765 regmap_update_bits(halt_map
, offset
, NAV_AXI_HALTREQ_BIT
,
766 NAV_AXI_HALTREQ_BIT
);
768 /* Wait for halt ack*/
769 regmap_read_poll_timeout(halt_map
, offset
, val
,
770 (val
& NAV_AXI_HALTACK_BIT
),
771 5, NAV_HALT_ACK_TIMEOUT_US
);
773 ret
= regmap_read(halt_map
, offset
, &val
);
774 if (ret
|| !(val
& NAV_AXI_IDLE_BIT
))
775 dev_err(qproc
->dev
, "port failed halt\n");
778 static int q6v5_mpss_init_image(struct q6v5
*qproc
, const struct firmware
*fw
)
780 unsigned long dma_attrs
= DMA_ATTR_FORCE_CONTIGUOUS
;
789 metadata
= qcom_mdt_read_metadata(fw
, &size
);
790 if (IS_ERR(metadata
))
791 return PTR_ERR(metadata
);
793 ptr
= dma_alloc_attrs(qproc
->dev
, size
, &phys
, GFP_KERNEL
, dma_attrs
);
796 dev_err(qproc
->dev
, "failed to allocate mdt buffer\n");
800 memcpy(ptr
, metadata
, size
);
802 /* Hypervisor mapping to access metadata by modem */
803 mdata_perm
= BIT(QCOM_SCM_VMID_HLOS
);
804 ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, true, phys
, size
);
807 "assigning Q6 access to metadata failed: %d\n", ret
);
812 writel(phys
, qproc
->rmb_base
+ RMB_PMI_META_DATA_REG
);
813 writel(RMB_CMD_META_DATA_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
815 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_META_DATA_AUTH_SUCCESS
, 1000);
816 if (ret
== -ETIMEDOUT
)
817 dev_err(qproc
->dev
, "MPSS header authentication timed out\n");
819 dev_err(qproc
->dev
, "MPSS header authentication failed: %d\n", ret
);
821 /* Metadata authentication done, remove modem access */
822 xferop_ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, false, phys
, size
);
825 "mdt buffer not reclaimed system may become unstable\n");
828 dma_free_attrs(qproc
->dev
, size
, ptr
, phys
, dma_attrs
);
831 return ret
< 0 ? ret
: 0;
834 static bool q6v5_phdr_valid(const struct elf32_phdr
*phdr
)
836 if (phdr
->p_type
!= PT_LOAD
)
839 if ((phdr
->p_flags
& QCOM_MDT_TYPE_MASK
) == QCOM_MDT_TYPE_HASH
)
848 static int q6v5_mba_load(struct q6v5
*qproc
)
853 qcom_q6v5_prepare(&qproc
->q6v5
);
855 ret
= q6v5_pds_enable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
857 dev_err(qproc
->dev
, "failed to enable active power domains\n");
861 ret
= q6v5_pds_enable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
863 dev_err(qproc
->dev
, "failed to enable proxy power domains\n");
864 goto disable_active_pds
;
867 ret
= q6v5_regulator_enable(qproc
, qproc
->proxy_regs
,
868 qproc
->proxy_reg_count
);
870 dev_err(qproc
->dev
, "failed to enable proxy supplies\n");
871 goto disable_proxy_pds
;
874 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->proxy_clks
,
875 qproc
->proxy_clk_count
);
877 dev_err(qproc
->dev
, "failed to enable proxy clocks\n");
878 goto disable_proxy_reg
;
881 ret
= q6v5_regulator_enable(qproc
, qproc
->active_regs
,
882 qproc
->active_reg_count
);
884 dev_err(qproc
->dev
, "failed to enable supplies\n");
885 goto disable_proxy_clk
;
888 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->reset_clks
,
889 qproc
->reset_clk_count
);
891 dev_err(qproc
->dev
, "failed to enable reset clocks\n");
895 ret
= q6v5_reset_deassert(qproc
);
897 dev_err(qproc
->dev
, "failed to deassert mss restart\n");
898 goto disable_reset_clks
;
901 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->active_clks
,
902 qproc
->active_clk_count
);
904 dev_err(qproc
->dev
, "failed to enable clocks\n");
908 /* Assign MBA image access in DDR to q6 */
909 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, true,
910 qproc
->mba_phys
, qproc
->mba_size
);
913 "assigning Q6 access to mba memory failed: %d\n", ret
);
914 goto disable_active_clks
;
917 writel(qproc
->mba_phys
, qproc
->rmb_base
+ RMB_MBA_IMAGE_REG
);
919 ret
= q6v5proc_reset(qproc
);
923 ret
= q6v5_rmb_mba_wait(qproc
, 0, 5000);
924 if (ret
== -ETIMEDOUT
) {
925 dev_err(qproc
->dev
, "MBA boot timed out\n");
927 } else if (ret
!= RMB_MBA_XPU_UNLOCKED
&&
928 ret
!= RMB_MBA_XPU_UNLOCKED_SCRIBBLED
) {
929 dev_err(qproc
->dev
, "MBA returned unexpected status %d\n", ret
);
934 qproc
->dump_mba_loaded
= true;
938 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
939 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
940 if (qproc
->has_halt_nav
)
941 q6v5proc_halt_nav_axi_port(qproc
, qproc
->halt_nav_map
,
943 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
946 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
951 "Failed to reclaim mba buffer, system may become unstable\n");
955 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
956 qproc
->active_clk_count
);
958 q6v5_reset_assert(qproc
);
960 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
961 qproc
->reset_clk_count
);
963 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
964 qproc
->active_reg_count
);
966 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
967 qproc
->proxy_clk_count
);
969 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
970 qproc
->proxy_reg_count
);
972 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
974 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
976 qcom_q6v5_unprepare(&qproc
->q6v5
);
981 static void q6v5_mba_reclaim(struct q6v5
*qproc
)
986 qproc
->dump_mba_loaded
= false;
988 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
989 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
990 if (qproc
->has_halt_nav
)
991 q6v5proc_halt_nav_axi_port(qproc
, qproc
->halt_nav_map
,
993 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
994 if (qproc
->version
== MSS_MSM8996
) {
996 * To avoid high MX current during LPASS/MSS restart.
998 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
999 val
|= Q6SS_CLAMP_IO
| QDSP6v56_CLAMP_WL
|
1000 QDSP6v56_CLAMP_QMC_MEM
;
1001 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
1004 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
1005 false, qproc
->mpss_phys
,
1009 q6v5_reset_assert(qproc
);
1011 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
1012 qproc
->reset_clk_count
);
1013 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
1014 qproc
->active_clk_count
);
1015 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
1016 qproc
->active_reg_count
);
1017 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1019 /* In case of failure or coredump scenario where reclaiming MBA memory
1020 * could not happen reclaim it here.
1022 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
1027 ret
= qcom_q6v5_unprepare(&qproc
->q6v5
);
1029 q6v5_pds_disable(qproc
, qproc
->proxy_pds
,
1030 qproc
->proxy_pd_count
);
1031 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
1032 qproc
->proxy_clk_count
);
1033 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
1034 qproc
->proxy_reg_count
);
1038 static int q6v5_mpss_load(struct q6v5
*qproc
)
1040 const struct elf32_phdr
*phdrs
;
1041 const struct elf32_phdr
*phdr
;
1042 const struct firmware
*seg_fw
;
1043 const struct firmware
*fw
;
1044 struct elf32_hdr
*ehdr
;
1045 phys_addr_t mpss_reloc
;
1046 phys_addr_t boot_addr
;
1047 phys_addr_t min_addr
= PHYS_ADDR_MAX
;
1048 phys_addr_t max_addr
= 0;
1049 bool relocate
= false;
1058 fw_name_len
= strlen(qproc
->hexagon_mdt_image
);
1059 if (fw_name_len
<= 4)
1062 fw_name
= kstrdup(qproc
->hexagon_mdt_image
, GFP_KERNEL
);
1066 ret
= request_firmware(&fw
, fw_name
, qproc
->dev
);
1068 dev_err(qproc
->dev
, "unable to load %s\n", fw_name
);
1072 /* Initialize the RMB validator */
1073 writel(0, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1075 ret
= q6v5_mpss_init_image(qproc
, fw
);
1077 goto release_firmware
;
1079 ehdr
= (struct elf32_hdr
*)fw
->data
;
1080 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
1082 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1085 if (!q6v5_phdr_valid(phdr
))
1088 if (phdr
->p_flags
& QCOM_MDT_RELOCATABLE
)
1091 if (phdr
->p_paddr
< min_addr
)
1092 min_addr
= phdr
->p_paddr
;
1094 if (phdr
->p_paddr
+ phdr
->p_memsz
> max_addr
)
1095 max_addr
= ALIGN(phdr
->p_paddr
+ phdr
->p_memsz
, SZ_4K
);
1098 mpss_reloc
= relocate
? min_addr
: qproc
->mpss_phys
;
1099 qproc
->mpss_reloc
= mpss_reloc
;
1100 /* Load firmware segments */
1101 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1104 if (!q6v5_phdr_valid(phdr
))
1107 offset
= phdr
->p_paddr
- mpss_reloc
;
1108 if (offset
< 0 || offset
+ phdr
->p_memsz
> qproc
->mpss_size
) {
1109 dev_err(qproc
->dev
, "segment outside memory range\n");
1111 goto release_firmware
;
1114 ptr
= qproc
->mpss_region
+ offset
;
1116 if (phdr
->p_filesz
&& phdr
->p_offset
< fw
->size
) {
1117 /* Firmware is large enough to be non-split */
1118 if (phdr
->p_offset
+ phdr
->p_filesz
> fw
->size
) {
1120 "failed to load segment %d from truncated file %s\n",
1123 goto release_firmware
;
1126 memcpy(ptr
, fw
->data
+ phdr
->p_offset
, phdr
->p_filesz
);
1127 } else if (phdr
->p_filesz
) {
1128 /* Replace "xxx.xxx" with "xxx.bxx" */
1129 sprintf(fw_name
+ fw_name_len
- 3, "b%02d", i
);
1130 ret
= request_firmware(&seg_fw
, fw_name
, qproc
->dev
);
1132 dev_err(qproc
->dev
, "failed to load %s\n", fw_name
);
1133 goto release_firmware
;
1136 memcpy(ptr
, seg_fw
->data
, seg_fw
->size
);
1138 release_firmware(seg_fw
);
1141 if (phdr
->p_memsz
> phdr
->p_filesz
) {
1142 memset(ptr
+ phdr
->p_filesz
, 0,
1143 phdr
->p_memsz
- phdr
->p_filesz
);
1145 size
+= phdr
->p_memsz
;
1148 /* Transfer ownership of modem ddr region to q6 */
1149 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
, true,
1150 qproc
->mpss_phys
, qproc
->mpss_size
);
1153 "assigning Q6 access to mpss memory failed: %d\n", ret
);
1155 goto release_firmware
;
1158 boot_addr
= relocate
? qproc
->mpss_phys
: min_addr
;
1159 writel(boot_addr
, qproc
->rmb_base
+ RMB_PMI_CODE_START_REG
);
1160 writel(RMB_CMD_LOAD_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
1161 writel(size
, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1163 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_AUTH_COMPLETE
, 10000);
1164 if (ret
== -ETIMEDOUT
)
1165 dev_err(qproc
->dev
, "MPSS authentication timed out\n");
1167 dev_err(qproc
->dev
, "MPSS authentication failed: %d\n", ret
);
1170 release_firmware(fw
);
1174 return ret
< 0 ? ret
: 0;
1177 static void qcom_q6v5_dump_segment(struct rproc
*rproc
,
1178 struct rproc_dump_segment
*segment
,
1182 struct q6v5
*qproc
= rproc
->priv
;
1183 unsigned long mask
= BIT((unsigned long)segment
->priv
);
1184 void *ptr
= rproc_da_to_va(rproc
, segment
->da
, segment
->size
);
1186 /* Unlock mba before copying segments */
1187 if (!qproc
->dump_mba_loaded
)
1188 ret
= q6v5_mba_load(qproc
);
1191 memset(dest
, 0xff, segment
->size
);
1193 memcpy(dest
, ptr
, segment
->size
);
1195 qproc
->dump_segment_mask
|= mask
;
1197 /* Reclaim mba after copying segments */
1198 if (qproc
->dump_segment_mask
== qproc
->dump_complete_mask
) {
1199 if (qproc
->dump_mba_loaded
)
1200 q6v5_mba_reclaim(qproc
);
1204 static int q6v5_start(struct rproc
*rproc
)
1206 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1210 ret
= q6v5_mba_load(qproc
);
1214 dev_info(qproc
->dev
, "MBA booted, loading mpss\n");
1216 ret
= q6v5_mpss_load(qproc
);
1220 ret
= qcom_q6v5_wait_for_start(&qproc
->q6v5
, msecs_to_jiffies(5000));
1221 if (ret
== -ETIMEDOUT
) {
1222 dev_err(qproc
->dev
, "start timed out\n");
1226 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
1231 "Failed to reclaim mba buffer system may become unstable\n");
1233 /* Reset Dump Segment Mask */
1234 qproc
->dump_segment_mask
= 0;
1235 qproc
->running
= true;
1240 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
1241 false, qproc
->mpss_phys
,
1243 WARN_ON(xfermemop_ret
);
1244 q6v5_mba_reclaim(qproc
);
1249 static int q6v5_stop(struct rproc
*rproc
)
1251 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1254 qproc
->running
= false;
1256 ret
= qcom_q6v5_request_stop(&qproc
->q6v5
);
1257 if (ret
== -ETIMEDOUT
)
1258 dev_err(qproc
->dev
, "timed out on wait\n");
1260 q6v5_mba_reclaim(qproc
);
1265 static void *q6v5_da_to_va(struct rproc
*rproc
, u64 da
, int len
)
1267 struct q6v5
*qproc
= rproc
->priv
;
1270 offset
= da
- qproc
->mpss_reloc
;
1271 if (offset
< 0 || offset
+ len
> qproc
->mpss_size
)
1274 return qproc
->mpss_region
+ offset
;
1277 static int qcom_q6v5_register_dump_segments(struct rproc
*rproc
,
1278 const struct firmware
*mba_fw
)
1280 const struct firmware
*fw
;
1281 const struct elf32_phdr
*phdrs
;
1282 const struct elf32_phdr
*phdr
;
1283 const struct elf32_hdr
*ehdr
;
1284 struct q6v5
*qproc
= rproc
->priv
;
1288 ret
= request_firmware(&fw
, qproc
->hexagon_mdt_image
, qproc
->dev
);
1290 dev_err(qproc
->dev
, "unable to load %s\n",
1291 qproc
->hexagon_mdt_image
);
1295 ehdr
= (struct elf32_hdr
*)fw
->data
;
1296 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
1297 qproc
->dump_complete_mask
= 0;
1299 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1302 if (!q6v5_phdr_valid(phdr
))
1305 ret
= rproc_coredump_add_custom_segment(rproc
, phdr
->p_paddr
,
1307 qcom_q6v5_dump_segment
,
1312 qproc
->dump_complete_mask
|= BIT(i
);
1315 release_firmware(fw
);
1319 static const struct rproc_ops q6v5_ops
= {
1320 .start
= q6v5_start
,
1322 .da_to_va
= q6v5_da_to_va
,
1323 .parse_fw
= qcom_q6v5_register_dump_segments
,
1327 static void qcom_msa_handover(struct qcom_q6v5
*q6v5
)
1329 struct q6v5
*qproc
= container_of(q6v5
, struct q6v5
, q6v5
);
1331 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
1332 qproc
->proxy_clk_count
);
1333 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
1334 qproc
->proxy_reg_count
);
1335 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1338 static int q6v5_init_mem(struct q6v5
*qproc
, struct platform_device
*pdev
)
1340 struct of_phandle_args args
;
1341 struct resource
*res
;
1344 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qdsp6");
1345 qproc
->reg_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1346 if (IS_ERR(qproc
->reg_base
))
1347 return PTR_ERR(qproc
->reg_base
);
1349 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "rmb");
1350 qproc
->rmb_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1351 if (IS_ERR(qproc
->rmb_base
))
1352 return PTR_ERR(qproc
->rmb_base
);
1354 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1355 "qcom,halt-regs", 3, 0, &args
);
1357 dev_err(&pdev
->dev
, "failed to parse qcom,halt-regs\n");
1361 qproc
->halt_map
= syscon_node_to_regmap(args
.np
);
1362 of_node_put(args
.np
);
1363 if (IS_ERR(qproc
->halt_map
))
1364 return PTR_ERR(qproc
->halt_map
);
1366 qproc
->halt_q6
= args
.args
[0];
1367 qproc
->halt_modem
= args
.args
[1];
1368 qproc
->halt_nc
= args
.args
[2];
1370 if (qproc
->has_halt_nav
) {
1371 struct platform_device
*nav_pdev
;
1373 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1374 "qcom,halt-nav-regs",
1377 dev_err(&pdev
->dev
, "failed to parse halt-nav-regs\n");
1381 nav_pdev
= of_find_device_by_node(args
.np
);
1382 of_node_put(args
.np
);
1384 dev_err(&pdev
->dev
, "failed to get mss clock device\n");
1385 return -EPROBE_DEFER
;
1388 qproc
->halt_nav_map
= dev_get_regmap(&nav_pdev
->dev
, NULL
);
1389 if (!qproc
->halt_nav_map
) {
1390 dev_err(&pdev
->dev
, "failed to get map from device\n");
1393 qproc
->halt_nav
= args
.args
[0];
1395 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1396 "qcom,halt-nav-regs",
1399 dev_err(&pdev
->dev
, "failed to parse halt-nav-regs\n");
1403 qproc
->conn_map
= syscon_node_to_regmap(args
.np
);
1404 of_node_put(args
.np
);
1405 if (IS_ERR(qproc
->conn_map
))
1406 return PTR_ERR(qproc
->conn_map
);
1408 qproc
->conn_box
= args
.args
[0];
1414 static int q6v5_init_clocks(struct device
*dev
, struct clk
**clks
,
1422 for (i
= 0; clk_names
[i
]; i
++) {
1423 clks
[i
] = devm_clk_get(dev
, clk_names
[i
]);
1424 if (IS_ERR(clks
[i
])) {
1425 int rc
= PTR_ERR(clks
[i
]);
1427 if (rc
!= -EPROBE_DEFER
)
1428 dev_err(dev
, "Failed to get %s clock\n",
1437 static int q6v5_pds_attach(struct device
*dev
, struct device
**devs
,
1447 while (pd_names
[num_pds
])
1450 for (i
= 0; i
< num_pds
; i
++) {
1451 devs
[i
] = dev_pm_domain_attach_by_name(dev
, pd_names
[i
]);
1452 if (IS_ERR_OR_NULL(devs
[i
])) {
1453 ret
= PTR_ERR(devs
[i
]) ? : -ENODATA
;
1461 for (i
--; i
>= 0; i
--)
1462 dev_pm_domain_detach(devs
[i
], false);
1467 static void q6v5_pds_detach(struct q6v5
*qproc
, struct device
**pds
,
1472 for (i
= 0; i
< pd_count
; i
++)
1473 dev_pm_domain_detach(pds
[i
], false);
1476 static int q6v5_init_reset(struct q6v5
*qproc
)
1478 qproc
->mss_restart
= devm_reset_control_get_exclusive(qproc
->dev
,
1480 if (IS_ERR(qproc
->mss_restart
)) {
1481 dev_err(qproc
->dev
, "failed to acquire mss restart\n");
1482 return PTR_ERR(qproc
->mss_restart
);
1485 if (qproc
->has_alt_reset
|| qproc
->has_halt_nav
) {
1486 qproc
->pdc_reset
= devm_reset_control_get_exclusive(qproc
->dev
,
1488 if (IS_ERR(qproc
->pdc_reset
)) {
1489 dev_err(qproc
->dev
, "failed to acquire pdc reset\n");
1490 return PTR_ERR(qproc
->pdc_reset
);
1497 static int q6v5_alloc_memory_region(struct q6v5
*qproc
)
1499 struct device_node
*child
;
1500 struct device_node
*node
;
1504 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mba");
1505 node
= of_parse_phandle(child
, "memory-region", 0);
1506 ret
= of_address_to_resource(node
, 0, &r
);
1508 dev_err(qproc
->dev
, "unable to resolve mba region\n");
1513 qproc
->mba_phys
= r
.start
;
1514 qproc
->mba_size
= resource_size(&r
);
1515 qproc
->mba_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mba_phys
, qproc
->mba_size
);
1516 if (!qproc
->mba_region
) {
1517 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1518 &r
.start
, qproc
->mba_size
);
1522 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mpss");
1523 node
= of_parse_phandle(child
, "memory-region", 0);
1524 ret
= of_address_to_resource(node
, 0, &r
);
1526 dev_err(qproc
->dev
, "unable to resolve mpss region\n");
1531 qproc
->mpss_phys
= qproc
->mpss_reloc
= r
.start
;
1532 qproc
->mpss_size
= resource_size(&r
);
1533 qproc
->mpss_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mpss_phys
, qproc
->mpss_size
);
1534 if (!qproc
->mpss_region
) {
1535 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1536 &r
.start
, qproc
->mpss_size
);
1543 static int q6v5_probe(struct platform_device
*pdev
)
1545 const struct rproc_hexagon_res
*desc
;
1547 struct rproc
*rproc
;
1548 const char *mba_image
;
1551 desc
= of_device_get_match_data(&pdev
->dev
);
1555 if (desc
->need_mem_protection
&& !qcom_scm_is_available())
1556 return -EPROBE_DEFER
;
1558 mba_image
= desc
->hexagon_mba_image
;
1559 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1561 if (ret
< 0 && ret
!= -EINVAL
)
1564 rproc
= rproc_alloc(&pdev
->dev
, pdev
->name
, &q6v5_ops
,
1565 mba_image
, sizeof(*qproc
));
1567 dev_err(&pdev
->dev
, "failed to allocate rproc\n");
1571 rproc
->auto_boot
= false;
1573 qproc
= (struct q6v5
*)rproc
->priv
;
1574 qproc
->dev
= &pdev
->dev
;
1575 qproc
->rproc
= rproc
;
1576 qproc
->hexagon_mdt_image
= "modem.mdt";
1577 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1578 1, &qproc
->hexagon_mdt_image
);
1579 if (ret
< 0 && ret
!= -EINVAL
)
1582 platform_set_drvdata(pdev
, qproc
);
1584 qproc
->has_halt_nav
= desc
->has_halt_nav
;
1585 ret
= q6v5_init_mem(qproc
, pdev
);
1589 ret
= q6v5_alloc_memory_region(qproc
);
1593 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->proxy_clks
,
1594 desc
->proxy_clk_names
);
1596 dev_err(&pdev
->dev
, "Failed to get proxy clocks.\n");
1599 qproc
->proxy_clk_count
= ret
;
1601 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->reset_clks
,
1602 desc
->reset_clk_names
);
1604 dev_err(&pdev
->dev
, "Failed to get reset clocks.\n");
1607 qproc
->reset_clk_count
= ret
;
1609 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->active_clks
,
1610 desc
->active_clk_names
);
1612 dev_err(&pdev
->dev
, "Failed to get active clocks.\n");
1615 qproc
->active_clk_count
= ret
;
1617 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->proxy_regs
,
1618 desc
->proxy_supply
);
1620 dev_err(&pdev
->dev
, "Failed to get proxy regulators.\n");
1623 qproc
->proxy_reg_count
= ret
;
1625 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->active_regs
,
1626 desc
->active_supply
);
1628 dev_err(&pdev
->dev
, "Failed to get active regulators.\n");
1631 qproc
->active_reg_count
= ret
;
1633 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->active_pds
,
1634 desc
->active_pd_names
);
1636 dev_err(&pdev
->dev
, "Failed to attach active power domains\n");
1639 qproc
->active_pd_count
= ret
;
1641 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->proxy_pds
,
1642 desc
->proxy_pd_names
);
1644 dev_err(&pdev
->dev
, "Failed to init power domains\n");
1645 goto detach_active_pds
;
1647 qproc
->proxy_pd_count
= ret
;
1649 qproc
->has_alt_reset
= desc
->has_alt_reset
;
1650 ret
= q6v5_init_reset(qproc
);
1652 goto detach_proxy_pds
;
1654 qproc
->version
= desc
->version
;
1655 qproc
->need_mem_protection
= desc
->need_mem_protection
;
1657 ret
= qcom_q6v5_init(&qproc
->q6v5
, pdev
, rproc
, MPSS_CRASH_REASON_SMEM
,
1660 goto detach_proxy_pds
;
1662 qproc
->mpss_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1663 qproc
->mba_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1664 qcom_add_glink_subdev(rproc
, &qproc
->glink_subdev
);
1665 qcom_add_smd_subdev(rproc
, &qproc
->smd_subdev
);
1666 qcom_add_ssr_subdev(rproc
, &qproc
->ssr_subdev
, "mpss");
1667 qproc
->sysmon
= qcom_add_sysmon_subdev(rproc
, "modem", 0x12);
1668 if (IS_ERR(qproc
->sysmon
)) {
1669 ret
= PTR_ERR(qproc
->sysmon
);
1670 goto detach_proxy_pds
;
1673 ret
= rproc_add(rproc
);
1675 goto detach_proxy_pds
;
1680 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1682 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1689 static int q6v5_remove(struct platform_device
*pdev
)
1691 struct q6v5
*qproc
= platform_get_drvdata(pdev
);
1693 rproc_del(qproc
->rproc
);
1695 qcom_remove_sysmon_subdev(qproc
->sysmon
);
1696 qcom_remove_glink_subdev(qproc
->rproc
, &qproc
->glink_subdev
);
1697 qcom_remove_smd_subdev(qproc
->rproc
, &qproc
->smd_subdev
);
1698 qcom_remove_ssr_subdev(qproc
->rproc
, &qproc
->ssr_subdev
);
1700 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1701 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1703 rproc_free(qproc
->rproc
);
1708 static const struct rproc_hexagon_res sc7180_mss
= {
1709 .hexagon_mba_image
= "mba.mbn",
1710 .proxy_clk_names
= (char*[]){
1714 .reset_clk_names
= (char*[]){
1720 .active_clk_names
= (char*[]){
1727 .active_pd_names
= (char*[]){
1731 .proxy_pd_names
= (char*[]){
1737 .need_mem_protection
= true,
1738 .has_alt_reset
= false,
1739 .has_halt_nav
= true,
1740 .version
= MSS_SC7180
,
1743 static const struct rproc_hexagon_res sdm845_mss
= {
1744 .hexagon_mba_image
= "mba.mbn",
1745 .proxy_clk_names
= (char*[]){
1750 .reset_clk_names
= (char*[]){
1755 .active_clk_names
= (char*[]){
1762 .active_pd_names
= (char*[]){
1766 .proxy_pd_names
= (char*[]){
1772 .need_mem_protection
= true,
1773 .has_alt_reset
= true,
1774 .has_halt_nav
= false,
1775 .version
= MSS_SDM845
,
1778 static const struct rproc_hexagon_res msm8998_mss
= {
1779 .hexagon_mba_image
= "mba.mbn",
1780 .proxy_clk_names
= (char*[]){
1786 .active_clk_names
= (char*[]){
1794 .proxy_pd_names
= (char*[]){
1799 .need_mem_protection
= true,
1800 .has_alt_reset
= false,
1801 .has_halt_nav
= false,
1802 .version
= MSS_MSM8998
,
1805 static const struct rproc_hexagon_res msm8996_mss
= {
1806 .hexagon_mba_image
= "mba.mbn",
1807 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1814 .proxy_clk_names
= (char*[]){
1820 .active_clk_names
= (char*[]){
1829 .need_mem_protection
= true,
1830 .has_alt_reset
= false,
1831 .has_halt_nav
= false,
1832 .version
= MSS_MSM8996
,
1835 static const struct rproc_hexagon_res msm8916_mss
= {
1836 .hexagon_mba_image
= "mba.mbn",
1837 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1852 .proxy_clk_names
= (char*[]){
1856 .active_clk_names
= (char*[]){
1862 .need_mem_protection
= false,
1863 .has_alt_reset
= false,
1864 .has_halt_nav
= false,
1865 .version
= MSS_MSM8916
,
1868 static const struct rproc_hexagon_res msm8974_mss
= {
1869 .hexagon_mba_image
= "mba.b00",
1870 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1885 .active_supply
= (struct qcom_mss_reg_res
[]) {
1893 .proxy_clk_names
= (char*[]){
1897 .active_clk_names
= (char*[]){
1903 .need_mem_protection
= false,
1904 .has_alt_reset
= false,
1905 .has_halt_nav
= false,
1906 .version
= MSS_MSM8974
,
1909 static const struct of_device_id q6v5_of_match
[] = {
1910 { .compatible
= "qcom,q6v5-pil", .data
= &msm8916_mss
},
1911 { .compatible
= "qcom,msm8916-mss-pil", .data
= &msm8916_mss
},
1912 { .compatible
= "qcom,msm8974-mss-pil", .data
= &msm8974_mss
},
1913 { .compatible
= "qcom,msm8996-mss-pil", .data
= &msm8996_mss
},
1914 { .compatible
= "qcom,msm8998-mss-pil", .data
= &msm8998_mss
},
1915 { .compatible
= "qcom,sc7180-mss-pil", .data
= &sc7180_mss
},
1916 { .compatible
= "qcom,sdm845-mss-pil", .data
= &sdm845_mss
},
1919 MODULE_DEVICE_TABLE(of
, q6v5_of_match
);
1921 static struct platform_driver q6v5_driver
= {
1922 .probe
= q6v5_probe
,
1923 .remove
= q6v5_remove
,
1925 .name
= "qcom-q6v5-mss",
1926 .of_match_table
= q6v5_of_match
,
1929 module_platform_driver(q6v5_driver
);
1931 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1932 MODULE_LICENSE("GPL v2");