]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/remoteproc/qcom_q6v5_mss.c
Merge tag 'armsoc-dt' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
[mirror_ubuntu-jammy-kernel.git] / drivers / remoteproc / qcom_q6v5_mss.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Qualcomm self-authenticating modem subsystem remoteproc driver
4 *
5 * Copyright (C) 2016 Linaro Ltd.
6 * Copyright (C) 2014 Sony Mobile Communications AB
7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8 */
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
28
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
32
33 #include <linux/qcom_scm.h>
34
35 #define MPSS_CRASH_REASON_SMEM 421
36
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS 0x1
39
40 #define RMB_MBA_XPU_UNLOCKED 0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
43 #define RMB_MBA_AUTH_COMPLETE 0x4
44
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG 0x00
47 #define RMB_PBL_STATUS_REG 0x04
48 #define RMB_MBA_COMMAND_REG 0x08
49 #define RMB_MBA_STATUS_REG 0x0C
50 #define RMB_PMI_META_DATA_REG 0x10
51 #define RMB_PMI_CODE_START_REG 0x14
52 #define RMB_PMI_CODE_LENGTH_REG 0x18
53 #define RMB_MBA_MSS_STATUS 0x40
54 #define RMB_MBA_ALT_RESET 0x44
55
56 #define RMB_CMD_META_DATA_READY 0x1
57 #define RMB_CMD_LOAD_READY 0x2
58
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG 0x014
61 #define QDSP6SS_GFMUX_CTL_REG 0x020
62 #define QDSP6SS_PWR_CTL_REG 0x030
63 #define QDSP6SS_MEM_PWR_CTL 0x0B0
64 #define QDSP6V6SS_MEM_PWR_CTL 0x034
65 #define QDSP6SS_STRAP_ACC 0x110
66
67 /* AXI Halt Register Offsets */
68 #define AXI_HALTREQ_REG 0x0
69 #define AXI_HALTACK_REG 0x4
70 #define AXI_IDLE_REG 0x8
71 #define NAV_AXI_HALTREQ_BIT BIT(0)
72 #define NAV_AXI_HALTACK_BIT BIT(1)
73 #define NAV_AXI_IDLE_BIT BIT(2)
74 #define AXI_GATING_VALID_OVERRIDE BIT(0)
75
76 #define HALT_ACK_TIMEOUT_US 100000
77 #define NAV_HALT_ACK_TIMEOUT_US 200
78
79 /* QDSP6SS_RESET */
80 #define Q6SS_STOP_CORE BIT(0)
81 #define Q6SS_CORE_ARES BIT(1)
82 #define Q6SS_BUS_ARES_ENABLE BIT(2)
83
84 /* QDSP6SS CBCR */
85 #define Q6SS_CBCR_CLKEN BIT(0)
86 #define Q6SS_CBCR_CLKOFF BIT(31)
87 #define Q6SS_CBCR_TIMEOUT_US 200
88
89 /* QDSP6SS_GFMUX_CTL */
90 #define Q6SS_CLK_ENABLE BIT(1)
91
92 /* QDSP6SS_PWR_CTL */
93 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
94 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
95 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
96 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
97 #define Q6SS_ETB_SLP_NRET_N BIT(17)
98 #define Q6SS_L2DATA_STBY_N BIT(18)
99 #define Q6SS_SLP_RET_N BIT(19)
100 #define Q6SS_CLAMP_IO BIT(20)
101 #define QDSS_BHS_ON BIT(21)
102 #define QDSS_LDO_BYP BIT(22)
103
104 /* QDSP6v56 parameters */
105 #define QDSP6v56_LDO_BYP BIT(25)
106 #define QDSP6v56_BHS_ON BIT(24)
107 #define QDSP6v56_CLAMP_WL BIT(21)
108 #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
109 #define QDSP6SS_XO_CBCR 0x0038
110 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
111
112 /* QDSP6v65 parameters */
113 #define QDSP6SS_CORE_CBCR 0x20
114 #define QDSP6SS_SLEEP 0x3C
115 #define QDSP6SS_BOOT_CORE_START 0x400
116 #define QDSP6SS_BOOT_CMD 0x404
117 #define QDSP6SS_BOOT_STATUS 0x408
118 #define BOOT_STATUS_TIMEOUT_US 200
119 #define BOOT_FSM_TIMEOUT 10000
120
/* A live regulator handle plus the voltage/load votes to place on it. */
struct reg_info {
	struct regulator *reg;
	int uV;		/* voltage vote in microvolts; <= 0 means no vote */
	int uA;		/* load vote in microamps; <= 0 means no vote */
};
126
/* Static per-SoC description of one supply: name plus votes to request. */
struct qcom_mss_reg_res {
	const char *supply;	/* regulator name; NULL entry terminates tables */
	int uV;			/* voltage to request, in microvolts */
	int uA;			/* load to request, in microamps */
};
132
/*
 * struct rproc_hexagon_res - per-SoC match data
 * @hexagon_mba_image:	default filename of the MBA boot firmware
 * @proxy_supply:	"proxy" regulators (released early; presumably on
 *			handover from the modem — confirm against qcom_q6v5)
 * @active_supply:	regulators held while the modem runs
 * @proxy_clk_names:	clocks enabled only for the early boot phase
 * @reset_clk_names:	clocks required around the reset sequence
 * @active_clk_names:	clocks held while the modem runs
 * @active_pd_names:	power domains held while the modem runs
 * @proxy_pd_names:	power domains for the early boot phase
 * @version:		MSS_* SoC generation selector
 * @need_mem_protection: use SCM assign-mem calls for the carveouts
 * @has_alt_reset:	SoC uses the PDC-assisted alternate reset sequence
 * @has_halt_nav:	SoC needs the NAV AXI halt/glitch workaround
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
};
147
/*
 * struct q6v5 - driver state for one modem subsystem (MSS) instance
 *
 * Holds the MMIO bases, syscon handles, resets, clock/regulator/power-domain
 * bundles and the two physical carveouts (MBA loader and MPSS firmware)
 * manipulated over the remoteproc life cycle.
 */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* PBL/MBA relay message buffer regs */

	/* syscon regmaps used to halt the AXI ports on shutdown */
	struct regmap *halt_map;
	struct regmap *halt_nav_map;
	struct regmap *conn_map;

	/* register offsets into the regmaps above */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_nav;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;		/* shared q6v5 boot/shutdown helper */

	/* resource bundles; counts give the number of valid entries */
	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	/* coredump bookkeeping */
	bool dump_mba_loaded;		/* MBA is loaded for segment dumping */
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;		/* load base after optional relocation */
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;	/* route ownership through SCM calls */
	bool has_alt_reset;
	bool has_halt_nav;
	int mpss_perm;			/* current VMID bitmap of mpss region */
	int mba_perm;			/* current VMID bitmap of mba region */
	const char *hexagon_mdt_image;
	int version;			/* one of the MSS_* enum values */
};
213
/* SoC generations supported by this driver; selects the reset sequence. */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};
222
223 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
224 const struct qcom_mss_reg_res *reg_res)
225 {
226 int rc;
227 int i;
228
229 if (!reg_res)
230 return 0;
231
232 for (i = 0; reg_res[i].supply; i++) {
233 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
234 if (IS_ERR(regs[i].reg)) {
235 rc = PTR_ERR(regs[i].reg);
236 if (rc != -EPROBE_DEFER)
237 dev_err(dev, "Failed to get %s\n regulator",
238 reg_res[i].supply);
239 return rc;
240 }
241
242 regs[i].uV = reg_res[i].uV;
243 regs[i].uA = reg_res[i].uA;
244 }
245
246 return i;
247 }
248
249 static int q6v5_regulator_enable(struct q6v5 *qproc,
250 struct reg_info *regs, int count)
251 {
252 int ret;
253 int i;
254
255 for (i = 0; i < count; i++) {
256 if (regs[i].uV > 0) {
257 ret = regulator_set_voltage(regs[i].reg,
258 regs[i].uV, INT_MAX);
259 if (ret) {
260 dev_err(qproc->dev,
261 "Failed to request voltage for %d.\n",
262 i);
263 goto err;
264 }
265 }
266
267 if (regs[i].uA > 0) {
268 ret = regulator_set_load(regs[i].reg,
269 regs[i].uA);
270 if (ret < 0) {
271 dev_err(qproc->dev,
272 "Failed to set regulator mode\n");
273 goto err;
274 }
275 }
276
277 ret = regulator_enable(regs[i].reg);
278 if (ret) {
279 dev_err(qproc->dev, "Regulator enable failed\n");
280 goto err;
281 }
282 }
283
284 return 0;
285 err:
286 for (; i >= 0; i--) {
287 if (regs[i].uV > 0)
288 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
289
290 if (regs[i].uA > 0)
291 regulator_set_load(regs[i].reg, 0);
292
293 regulator_disable(regs[i].reg);
294 }
295
296 return ret;
297 }
298
299 static void q6v5_regulator_disable(struct q6v5 *qproc,
300 struct reg_info *regs, int count)
301 {
302 int i;
303
304 for (i = 0; i < count; i++) {
305 if (regs[i].uV > 0)
306 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
307
308 if (regs[i].uA > 0)
309 regulator_set_load(regs[i].reg, 0);
310
311 regulator_disable(regs[i].reg);
312 }
313 }
314
/*
 * q6v5_clk_enable() - prepare and enable @count clocks
 *
 * On failure, clocks already enabled are disabled again before returning
 * the error from clk_prepare_enable().
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int enabled;
	int rc = 0;

	for (enabled = 0; enabled < count; enabled++) {
		rc = clk_prepare_enable(clks[enabled]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			break;
		}
	}

	if (!rc)
		return 0;

	/* Unwind only the clocks that were successfully enabled */
	while (--enabled >= 0)
		clk_disable_unprepare(clks[enabled]);

	return rc;
}
336
/* Disable and unprepare the first @count clocks of @clks, in order. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		clk_disable_unprepare(clks[idx]);
}
345
346 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
347 size_t pd_count)
348 {
349 int ret;
350 int i;
351
352 for (i = 0; i < pd_count; i++) {
353 dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
354 ret = pm_runtime_get_sync(pds[i]);
355 if (ret < 0)
356 goto unroll_pd_votes;
357 }
358
359 return 0;
360
361 unroll_pd_votes:
362 for (i--; i >= 0; i--) {
363 dev_pm_genpd_set_performance_state(pds[i], 0);
364 pm_runtime_put(pds[i]);
365 }
366
367 return ret;
368 };
369
370 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
371 size_t pd_count)
372 {
373 int i;
374
375 for (i = 0; i < pd_count; i++) {
376 dev_pm_genpd_set_performance_state(pds[i], 0);
377 pm_runtime_put(pds[i]);
378 }
379 }
380
381 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
382 bool remote_owner, phys_addr_t addr,
383 size_t size)
384 {
385 struct qcom_scm_vmperm next;
386
387 if (!qproc->need_mem_protection)
388 return 0;
389 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
390 return 0;
391 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
392 return 0;
393
394 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
395 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
396
397 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
398 current_perm, &next, 1);
399 }
400
401 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
402 {
403 struct q6v5 *qproc = rproc->priv;
404
405 memcpy(qproc->mba_region, fw->data, fw->size);
406
407 return 0;
408 }
409
/*
 * q6v5_reset_assert() - put the modem subsystem into reset
 *
 * Three variants: PDC-assisted "alternate" reset, the NAV AXI glitch
 * workaround, or a plain assert of the MSS restart line.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse mss_restart while the PDC reset is held asserted */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. Both the HALTREQ and
		 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
		 * followed by a MSS deassert, while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		/* Withdraw any NAV halt request left from shutdown */
		regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
				   NAV_AXI_HALTREQ_BIT, 0);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
444
/*
 * q6v5_reset_deassert() - release the modem from reset, using the
 * variant-specific counterpart of q6v5_reset_assert().
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Flag the alternate reset to the MBA via its RMB register */
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
463
464 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
465 {
466 unsigned long timeout;
467 s32 val;
468
469 timeout = jiffies + msecs_to_jiffies(ms);
470 for (;;) {
471 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
472 if (val)
473 break;
474
475 if (time_after(jiffies, timeout))
476 return -ETIMEDOUT;
477
478 msleep(1);
479 }
480
481 return val;
482 }
483
484 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
485 {
486
487 unsigned long timeout;
488 s32 val;
489
490 timeout = jiffies + msecs_to_jiffies(ms);
491 for (;;) {
492 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
493 if (val < 0)
494 break;
495
496 if (!status && val)
497 break;
498 else if (status && val == status)
499 break;
500
501 if (time_after(jiffies, timeout))
502 return -ETIMEDOUT;
503
504 msleep(1);
505 }
506
507 return val;
508 }
509
/*
 * q6v5proc_reset() - bring the Hexagon core out of reset
 *
 * Per-SoC sequences: SDM845 and SC7180 use a hardware boot FSM; MSM8996 and
 * MSM8998 (QDSP6v56-class) need manual BHS/LDO and per-bank memory power
 * sequencing; older parts use the original QDSS headswitch sequence.  All
 * variants end by waiting for the PBL to report status via the RMB.
 *
 * Return: 0 when the PBL reports success, negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock branch */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		/* CLKOFF clears once the clock is actually running */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is reported via the RMB MSS status reg */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the QDSP6SS sleep clock branch */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back ensures the write posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
724
/*
 * q6v5proc_halt_axi_port() - quiesce one AXI port via its halt syscon
 * @halt_map: syscon regmap holding the halt registers
 * @offset:   base of this port's HALTREQ/HALTACK/IDLE register triple
 *
 * Failure to reach idle is only logged; the caller proceeds regardless.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
751
/*
 * q6v5proc_halt_nav_axi_port() - quiesce the NAV AXI port
 *
 * Unlike q6v5proc_halt_axi_port(), HALTREQ/HALTACK/IDLE are bitfields in a
 * single register, and the halt request is deliberately left asserted here;
 * it is withdrawn later in q6v5_reset_assert() while the PDC reset is held.
 */
static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
				       struct regmap *halt_map,
				       u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset, &val);
	if (!ret && (val & NAV_AXI_IDLE_BIT))
		return;

	/* Assert halt request */
	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
			   NAV_AXI_HALTREQ_BIT);

	/* Wait for halt ack*/
	regmap_read_poll_timeout(halt_map, offset, val,
				 (val & NAV_AXI_HALTACK_BIT),
				 5, NAV_HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset, &val);
	if (ret || !(val & NAV_AXI_IDLE_BIT))
		dev_err(qproc->dev, "port failed halt\n");
}
777
/*
 * q6v5_mpss_init_image() - pass the MPSS metadata (mdt headers + hashes) to
 * the MBA for authentication.
 *
 * The metadata is copied into a DMA buffer, ownership of that buffer is
 * handed to the modem via SCM, its address is announced through the RMB,
 * and the function waits for the MBA to report authentication. The buffer
 * is always reclaimed and freed before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	/* Contiguous buffer: the modem addresses it by physical address */
	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and kick authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	/* q6v5_rmb_mba_wait() returns a positive status on success */
	return ret < 0 ? ret : 0;
}
833
834 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
835 {
836 if (phdr->p_type != PT_LOAD)
837 return false;
838
839 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
840 return false;
841
842 if (!phdr->p_memsz)
843 return false;
844
845 return true;
846 }
847
/*
 * q6v5_mba_load() - power the subsystem up and boot the MBA image
 *
 * Enables power domains, regulators and clocks (proxy bundles first, then
 * active), releases the reset, hands the MBA carveout to the modem, runs
 * the core reset sequence and waits for the MBA to come up. On any failure
 * the acquired resources are released in reverse order via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the PBL where to find the MBA image */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* status==0: accept any non-zero MBA status value */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
980
/*
 * q6v5_mba_reclaim() - shut the subsystem down and reclaim the carveouts
 *
 * Counterpart of q6v5_mba_load(): halts the AXI ports, takes the MPSS and
 * MBA regions back from the modem, asserts reset and releases the active
 * resources. Proxy resources are released here only when
 * qcom_q6v5_unprepare() reports they are still held (non-zero return).
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Take the MPSS region back for HLOS */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1037
/*
 * q6v5_mpss_load() - load and authenticate the MPSS (modem) firmware
 *
 * Reads the split mdt image: first authenticates the metadata via
 * q6v5_mpss_init_image(), then copies each loadable ELF segment into the
 * MPSS carveout (fetching ".bNN" files for segments not contained in the
 * main image), hands the carveout to the modem and waits for the MBA to
 * complete authentication.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	/* Name must be long enough for the ".mdt" -> ".bNN" rewrite below */
	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	/* Writable copy, mutated in place for per-segment file names */
	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the firmware's address span and relocatability */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		/* Segment destination must stay inside the mpss carveout */
		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill any BSS-style tail beyond the file contents */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Announce the image to the MBA and trigger authentication */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	/* q6v5_rmb_mba_wait() returns a positive status on success */
	return ret < 0 ? ret : 0;
}
1176
/*
 * Copy one registered coredump segment into @dest.
 *
 * The modem memory is only accessible while the MBA is up, so the first
 * segment copied (dump_mba_loaded not yet set) loads the MBA, and once
 * every registered segment has been copied (dump_segment_mask catches up
 * with dump_complete_mask) the MBA is reclaimed again.  Segments that
 * cannot be mapped, or any copy attempted after a failed MBA load, are
 * filled with 0xff instead of real data.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv holds the ELF phdr index assigned in
	 * qcom_q6v5_register_dump_segments() */
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}
1203
/*
 * rproc .start handler: boot the MBA, feed it the modem firmware via
 * q6v5_mpss_load(), then wait (up to 5s) for the modem's start signal.
 *
 * On success the MBA buffer's memory ownership is transferred back from
 * the remote side (it was only needed during boot) and the coredump
 * segment mask is reset.  On failure both the mpss region ownership and
 * the MBA itself are rolled back.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* The MBA buffer is only needed for boot; reclaim it now. */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	/* Undo the mpss ownership transfer done in q6v5_mpss_load(). */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}
1248
1249 static int q6v5_stop(struct rproc *rproc)
1250 {
1251 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1252 int ret;
1253
1254 qproc->running = false;
1255
1256 ret = qcom_q6v5_request_stop(&qproc->q6v5);
1257 if (ret == -ETIMEDOUT)
1258 dev_err(qproc->dev, "timed out on wait\n");
1259
1260 q6v5_mba_reclaim(qproc);
1261
1262 return 0;
1263 }
1264
/*
 * rproc .da_to_va handler: translate a modem device address into the CPU
 * virtual address inside the mapped mpss region, or NULL if the requested
 * [da, da+len) window falls outside it.
 *
 * NOTE(review): @da is u64 but @offset is int, so a wildly out-of-range
 * da could truncate before the bounds check — presumably fine for the
 * address ranges in use here, but worth confirming.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}
1276
/*
 * rproc .parse_fw handler: register one custom coredump segment per valid
 * loadable program header of the modem metadata image.
 *
 * Note that @mba_fw (the image the core just loaded) is not what gets
 * parsed; the modem .mdt file is requested separately by name.  Each
 * segment stores its phdr index in the priv pointer and sets the matching
 * bit in dump_complete_mask, which qcom_q6v5_dump_segment() uses to know
 * when the last segment has been copied.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}
1318
/* Operations handed to the remoteproc core via rproc_alloc(). */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
1326
/*
 * Callback invoked by the qcom_q6v5 layer on MSA handover: release the
 * "proxy" clocks, regulators and power domains, which are only held on
 * the modem's behalf until it takes over.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
1337
/*
 * Map the QDSP6 and RMB register blocks and resolve the syscon-based halt
 * register offsets from DT; on variants with has_halt_nav, additionally
 * resolve the nav halt regmap and the conn syscon/offset.
 *
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER when
 * the nav clock device is not yet available).
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <syscon, q6 offset, modem offset, nc offset> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_halt_nav) {
		struct platform_device *nav_pdev;

		/* First "qcom,halt-nav-regs" entry: nav halt device + offset */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		nav_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (!nav_pdev) {
			dev_err(&pdev->dev, "failed to get mss clock device\n");
			return -EPROBE_DEFER;
		}

		/*
		 * NOTE(review): of_find_device_by_node() takes a reference to
		 * nav_pdev that is never dropped here — confirm whether
		 * pinning the device for the regmap's lifetime is intended.
		 */
		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
		if (!qproc->halt_nav_map) {
			dev_err(&pdev->dev, "failed to get map from device\n");
			return -EINVAL;
		}
		qproc->halt_nav = args.args[0];

		/* Second entry: conn syscon + box offset */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1413
1414 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1415 char **clk_names)
1416 {
1417 int i;
1418
1419 if (!clk_names)
1420 return 0;
1421
1422 for (i = 0; clk_names[i]; i++) {
1423 clks[i] = devm_clk_get(dev, clk_names[i]);
1424 if (IS_ERR(clks[i])) {
1425 int rc = PTR_ERR(clks[i]);
1426
1427 if (rc != -EPROBE_DEFER)
1428 dev_err(dev, "Failed to get %s clock\n",
1429 clk_names[i]);
1430 return rc;
1431 }
1432 }
1433
1434 return i;
1435 }
1436
1437 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1438 char **pd_names)
1439 {
1440 size_t num_pds = 0;
1441 int ret;
1442 int i;
1443
1444 if (!pd_names)
1445 return 0;
1446
1447 while (pd_names[num_pds])
1448 num_pds++;
1449
1450 for (i = 0; i < num_pds; i++) {
1451 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1452 if (IS_ERR_OR_NULL(devs[i])) {
1453 ret = PTR_ERR(devs[i]) ? : -ENODATA;
1454 goto unroll_attach;
1455 }
1456 }
1457
1458 return num_pds;
1459
1460 unroll_attach:
1461 for (i--; i >= 0; i--)
1462 dev_pm_domain_detach(devs[i], false);
1463
1464 return ret;
1465 };
1466
1467 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1468 size_t pd_count)
1469 {
1470 int i;
1471
1472 for (i = 0; i < pd_count; i++)
1473 dev_pm_domain_detach(pds[i], false);
1474 }
1475
1476 static int q6v5_init_reset(struct q6v5 *qproc)
1477 {
1478 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1479 "mss_restart");
1480 if (IS_ERR(qproc->mss_restart)) {
1481 dev_err(qproc->dev, "failed to acquire mss restart\n");
1482 return PTR_ERR(qproc->mss_restart);
1483 }
1484
1485 if (qproc->has_alt_reset || qproc->has_halt_nav) {
1486 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1487 "pdc_reset");
1488 if (IS_ERR(qproc->pdc_reset)) {
1489 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1490 return PTR_ERR(qproc->pdc_reset);
1491 }
1492 }
1493
1494 return 0;
1495 }
1496
1497 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1498 {
1499 struct device_node *child;
1500 struct device_node *node;
1501 struct resource r;
1502 int ret;
1503
1504 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1505 node = of_parse_phandle(child, "memory-region", 0);
1506 ret = of_address_to_resource(node, 0, &r);
1507 if (ret) {
1508 dev_err(qproc->dev, "unable to resolve mba region\n");
1509 return ret;
1510 }
1511 of_node_put(node);
1512
1513 qproc->mba_phys = r.start;
1514 qproc->mba_size = resource_size(&r);
1515 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1516 if (!qproc->mba_region) {
1517 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1518 &r.start, qproc->mba_size);
1519 return -EBUSY;
1520 }
1521
1522 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1523 node = of_parse_phandle(child, "memory-region", 0);
1524 ret = of_address_to_resource(node, 0, &r);
1525 if (ret) {
1526 dev_err(qproc->dev, "unable to resolve mpss region\n");
1527 return ret;
1528 }
1529 of_node_put(node);
1530
1531 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1532 qproc->mpss_size = resource_size(&r);
1533 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1534 if (!qproc->mpss_region) {
1535 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1536 &r.start, qproc->mpss_size);
1537 return -EBUSY;
1538 }
1539
1540 return 0;
1541 }
1542
1543 static int q6v5_probe(struct platform_device *pdev)
1544 {
1545 const struct rproc_hexagon_res *desc;
1546 struct q6v5 *qproc;
1547 struct rproc *rproc;
1548 const char *mba_image;
1549 int ret;
1550
1551 desc = of_device_get_match_data(&pdev->dev);
1552 if (!desc)
1553 return -EINVAL;
1554
1555 if (desc->need_mem_protection && !qcom_scm_is_available())
1556 return -EPROBE_DEFER;
1557
1558 mba_image = desc->hexagon_mba_image;
1559 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1560 0, &mba_image);
1561 if (ret < 0 && ret != -EINVAL)
1562 return ret;
1563
1564 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1565 mba_image, sizeof(*qproc));
1566 if (!rproc) {
1567 dev_err(&pdev->dev, "failed to allocate rproc\n");
1568 return -ENOMEM;
1569 }
1570
1571 rproc->auto_boot = false;
1572
1573 qproc = (struct q6v5 *)rproc->priv;
1574 qproc->dev = &pdev->dev;
1575 qproc->rproc = rproc;
1576 qproc->hexagon_mdt_image = "modem.mdt";
1577 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1578 1, &qproc->hexagon_mdt_image);
1579 if (ret < 0 && ret != -EINVAL)
1580 return ret;
1581
1582 platform_set_drvdata(pdev, qproc);
1583
1584 qproc->has_halt_nav = desc->has_halt_nav;
1585 ret = q6v5_init_mem(qproc, pdev);
1586 if (ret)
1587 goto free_rproc;
1588
1589 ret = q6v5_alloc_memory_region(qproc);
1590 if (ret)
1591 goto free_rproc;
1592
1593 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1594 desc->proxy_clk_names);
1595 if (ret < 0) {
1596 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1597 goto free_rproc;
1598 }
1599 qproc->proxy_clk_count = ret;
1600
1601 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1602 desc->reset_clk_names);
1603 if (ret < 0) {
1604 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1605 goto free_rproc;
1606 }
1607 qproc->reset_clk_count = ret;
1608
1609 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1610 desc->active_clk_names);
1611 if (ret < 0) {
1612 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1613 goto free_rproc;
1614 }
1615 qproc->active_clk_count = ret;
1616
1617 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1618 desc->proxy_supply);
1619 if (ret < 0) {
1620 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1621 goto free_rproc;
1622 }
1623 qproc->proxy_reg_count = ret;
1624
1625 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1626 desc->active_supply);
1627 if (ret < 0) {
1628 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1629 goto free_rproc;
1630 }
1631 qproc->active_reg_count = ret;
1632
1633 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1634 desc->active_pd_names);
1635 if (ret < 0) {
1636 dev_err(&pdev->dev, "Failed to attach active power domains\n");
1637 goto free_rproc;
1638 }
1639 qproc->active_pd_count = ret;
1640
1641 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1642 desc->proxy_pd_names);
1643 if (ret < 0) {
1644 dev_err(&pdev->dev, "Failed to init power domains\n");
1645 goto detach_active_pds;
1646 }
1647 qproc->proxy_pd_count = ret;
1648
1649 qproc->has_alt_reset = desc->has_alt_reset;
1650 ret = q6v5_init_reset(qproc);
1651 if (ret)
1652 goto detach_proxy_pds;
1653
1654 qproc->version = desc->version;
1655 qproc->need_mem_protection = desc->need_mem_protection;
1656
1657 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1658 qcom_msa_handover);
1659 if (ret)
1660 goto detach_proxy_pds;
1661
1662 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1663 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1664 qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
1665 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1666 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1667 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1668 if (IS_ERR(qproc->sysmon)) {
1669 ret = PTR_ERR(qproc->sysmon);
1670 goto detach_proxy_pds;
1671 }
1672
1673 ret = rproc_add(rproc);
1674 if (ret)
1675 goto detach_proxy_pds;
1676
1677 return 0;
1678
1679 detach_proxy_pds:
1680 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1681 detach_active_pds:
1682 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1683 free_rproc:
1684 rproc_free(rproc);
1685
1686 return ret;
1687 }
1688
1689 static int q6v5_remove(struct platform_device *pdev)
1690 {
1691 struct q6v5 *qproc = platform_get_drvdata(pdev);
1692
1693 rproc_del(qproc->rproc);
1694
1695 qcom_remove_sysmon_subdev(qproc->sysmon);
1696 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
1697 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
1698 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
1699
1700 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1701 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1702
1703 rproc_free(qproc->rproc);
1704
1705 return 0;
1706 }
1707
/* Resource description for the SC7180 modem: SCM memory protection and
 * the halt-nav/pdc-reset path, no alternate reset. */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		"mss_nav",
		"mss_crypto",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = true,
	.version = MSS_SC7180,
};
1742
/* Resource description for the SDM845 modem: SCM memory protection and
 * the alternate reset path. */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_halt_nav = false,
	.version = MSS_SDM845,
};
1777
/* Resource description for the MSM8998 modem: SCM memory protection,
 * no alternate reset or halt-nav. */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8998,
};
1804
/* Resource description for the MSM8996 modem: SCM memory protection plus
 * an explicit "pll" proxy supply. */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8996,
};
1834
/* Resource description for the MSM8916 modem: no SCM memory protection,
 * regulator supplies managed directly by the driver. */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8916,
};
1867
/* Resource description for the MSM8974 modem: no SCM memory protection,
 * "mba.b00" boot image, and both proxy and active supplies. */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8974,
};
1908
/* DT compatibles, each mapped to its per-SoC resource description;
 * "qcom,q6v5-pil" is the legacy alias for the MSM8916 variant. */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
1920
/* Platform driver registration and module metadata. */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");