// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;

        /* FIXME: add a banner here */
        gmu->hung = true;

        /* Turn off the hangcheck timer while we are resetting */
        del_timer(&gpu->hangcheck_timer);

        /* Queue the GPU handler because we need to treat this as a recovery */
        queue_work(priv->wq, &gpu->recover_work);
}
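/*
 * The GMU exposes two interrupt lines: "gmu" carries always-on events such as
 * the watchdog bite and AHB bus errors, while "hfi" carries HFI traffic and
 * CM3 firmware faults. Fatal conditions from both funnel into a6xx_gmu_fault().
 */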
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
        struct a6xx_gmu *gmu = data;
        u32 status;

        status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
                dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

                a6xx_gmu_fault(gmu);
        }

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
                dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
                dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

        return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
        struct a6xx_gmu *gmu = data;
        u32 status;

        status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

        if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
                dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

                a6xx_gmu_fault(gmu);
        }

        return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
        u32 val;

        /* This can be called from gpu state code so make sure GMU is valid */
        if (!gmu->initialized)
                return true;

        val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

        return !(val &
                (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
                A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
        u32 val;

        /* This can be called from gpu state code so make sure GMU is valid */
        if (!gmu->initialized)
                return true;

        val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

        return !(val &
                (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
                A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
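/*
 * Request a new GPU frequency. With newer (non-legacy) firmware this is a
 * single HFI perf index message; legacy firmware is instead driven through
 * the DCVS registers and a DCVS OOB handshake.
 */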
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        u32 perf_index;
        unsigned long gpu_freq;
        int ret = 0;

        gpu_freq = dev_pm_opp_get_freq(opp);

        if (gpu_freq == gmu->freq)
                return;

        for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
                if (gpu_freq == gmu->gpu_freqs[perf_index])
                        break;

        gmu->current_perf_index = perf_index;
        gmu->freq = gmu->gpu_freqs[perf_index];

        /*
         * This can get called from devfreq while the hardware is idle. Don't
         * bring up the power if it isn't already active
         */
        if (pm_runtime_get_if_in_use(gmu->dev) == 0)
                return;

        if (!gmu->legacy) {
                a6xx_hfi_set_freq(gmu, perf_index);
                icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
                pm_runtime_put(gmu->dev);
                return;
        }

        gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

        gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
                        ((3 & 0xf) << 28) | perf_index);

        /*
         * Send an invalid index as a vote for the bus bandwidth and let the
         * firmware decide on the right vote
         */
        gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

        /* Set and clear the OOB for DCVS to trigger the GMU */
        a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
        a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

        ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
        if (ret)
                dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

        /*
         * Eventually we will want to scale the path vote with the frequency but
         * for now leave it at max so that the performance is nominal.
         */
        icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
        pm_runtime_put(gmu->dev);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

        return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
        u32 val;
        int local = gmu->idle_level;

        /* SPTP and IFPC both report as IFPC */
        if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
                local = GMU_IDLE_STATE_IFPC;

        val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

        if (val == local) {
                if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
                        !a6xx_gmu_gx_is_on(gmu))
                        return true;
        }

        return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
        return spin_until(a6xx_gmu_check_idle_level(gmu));
}
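/* Take the CM3 core out of reset and wait for the 0xbabeface boot magic */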
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

        /* Set the log wptr index
         * note: downstream saves the value in poweroff and restores it here
         */
        gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
                val == 0xbabeface, 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

        return ret;
}
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
        u32 val;
        int ret;

        gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
                val & 1, 100, 10000);
        if (ret)
                DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

        return ret;
}
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
        int ret;
        u32 val;
        int request, ack;
        const char *name;

        switch (state) {
        case GMU_OOB_GPU_SET:
                if (gmu->legacy) {
                        request = GMU_OOB_GPU_SET_REQUEST;
                        ack = GMU_OOB_GPU_SET_ACK;
                } else {
                        request = GMU_OOB_GPU_SET_REQUEST_NEW;
                        ack = GMU_OOB_GPU_SET_ACK_NEW;
                }
                name = "GPU_SET";
                break;
        case GMU_OOB_BOOT_SLUMBER:
                request = GMU_OOB_BOOT_SLUMBER_REQUEST;
                ack = GMU_OOB_BOOT_SLUMBER_ACK;
                name = "BOOT_SLUMBER";
                break;
        case GMU_OOB_DCVS_SET:
                request = GMU_OOB_DCVS_REQUEST;
                ack = GMU_OOB_DCVS_ACK;
                name = "GPU_DCVS";
                break;
        default:
                return -EINVAL;
        }

        /* Trigger the requested OOB operation */
        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

        /* Wait for the acknowledge interrupt */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                val & (1 << ack), 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev,
                        "Timeout waiting for GMU OOB set %s: 0x%x\n",
                        name,
                        gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

        /* Clear the acknowledge interrupt */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

        return ret;
}
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
        if (!gmu->legacy) {
                WARN_ON(state != GMU_OOB_GPU_SET);
                gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
                        1 << GMU_OOB_GPU_SET_CLEAR_NEW);
                return;
        }

        switch (state) {
        case GMU_OOB_GPU_SET:
                gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
                        1 << GMU_OOB_GPU_SET_CLEAR);
                break;
        case GMU_OOB_BOOT_SLUMBER:
                gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
                        1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
                break;
        case GMU_OOB_DCVS_SET:
                gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
                        1 << GMU_OOB_DCVS_CLEAR);
                break;
        }
}
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        if (!gmu->legacy)
                return 0;

        gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
                (val & 0x38) == 0x28, 1, 100);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
        }

        return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
        u32 val;
        int ret;

        if (!gmu->legacy)
                return;

        /* Make sure retention is on */
        gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

        gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
                (val & 0x04), 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
        u32 vote;

        /* Let the GMU know we are getting ready for boot */
        gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

        /* Choose the "default" power level as the highest available */
        vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

        gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
        gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

        /* Let the GMU know the boot sequence has started */
        return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
        int ret;

        /* Disable the power counter so the GMU isn't busy */
        gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

        /* Disable SPTP_PC if the CPU is responsible for it */
        if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
                a6xx_sptprac_disable(gmu);

        if (!gmu->legacy) {
                ret = a6xx_hfi_send_prep_slumber(gmu);
                goto out;
        }

        /* Tell the GMU to get ready to slumber */
        gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

        ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
        a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

        if (!ret) {
                /* Check to see if the GMU really did slumber */
                if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
                        != 0x0f) {
                        DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
                        ret = -ETIMEDOUT;
                }
        }

out:
        /* Put fence into allow mode */
        gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
        return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
        /* Wait for the register to finish posting */
        wmb();

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
                val & (1 << 1), 100, 10000);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
                return ret;
        }

        ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
                !val, 100, 10000);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
                return ret;
        }

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

        /* Set up CX GMU counter 0 to count busy ticks */
        gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
        gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

        /* Enable the power counter */
        gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
        return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

        ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
                val, val & (1 << 16), 100, 10000);
        if (ret)
                DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
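/* The PDC and sequencer register files are indexed in dwords, hence offset << 2 */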
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
        return msm_writel(value, ptr + (offset << 2));
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
                const char *name);
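/*
 * One-time programming of the RSC and PDC sequencers with the sleep/wakeup
 * microcode and the TCS commands used for the low power states; see the
 * rpmh_init flag in a6xx_gmu_fw_start() for why this only happens on the
 * first cold boot.
 */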
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct platform_device *pdev = to_platform_device(gmu->dev);
        void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
        void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
        uint32_t pdc_address_offset;

        if (!pdcptr || !seqptr)
                goto err;

        if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
                pdc_address_offset = 0x30090;
        else if (adreno_is_a650(adreno_gpu))
                pdc_address_offset = 0x300a0;
        else
                pdc_address_offset = 0x30080;

        /* Disable SDE clock gating */
        gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

        /* Setup RSC PDC handshake for sleep and wakeup */
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

        /* Load RSC sequencer uCode for sleep and wakeup */
        if (adreno_is_a650(adreno_gpu)) {
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
        } else {
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
        }

        /* Load PDC sequencer uCode for power up and power down sequence */
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

        /* Set TCS commands used by PDC sequence for low power modes */
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
        if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
                pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
        else
                pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

        /* Setup GPU PDC */
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

        /* ensure no writes happen before the uCode is fully written */
        wmb();

err:
        if (!IS_ERR_OR_NULL(pdcptr))
                iounmap(pdcptr);
        if (!IS_ERR_OR_NULL(seqptr))
                iounmap(seqptr);
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
        /* Disable GMU WB/RB buffer */
        gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
        gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
        gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

        gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

        switch (gmu->idle_level) {
        case GMU_IDLE_STATE_IFPC:
                gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
                        GMU_PWR_COL_HYST);
                gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
                break;
        case GMU_IDLE_STATE_SPTP:
                gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
                        GMU_PWR_COL_HYST);
                gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
                break;
        }

        /* Enable RPMh GPU client */
        gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
                A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
                A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
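/*
 * Header for one block of the non-legacy GMU firmware image: a destination
 * address and payload size followed by the payload itself.
 */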
struct block_header {
        u32 addr;
        u32 size;
        u32 type;
        u32 value;
        u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
        return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
        if (!in_range(blk->addr, bo->iova, bo->size))
                return false;

        memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
        return true;
}
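/*
 * Load the GMU firmware. Legacy firmware is a flat image written straight
 * into the ITCM; newer firmware is parsed block by block and routed to the
 * ITCM, the DTCM or one of the preallocated GMU buffers.
 */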
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
        const struct block_header *blk;
        u32 reg_offset;

        u32 itcm_base = 0x00000000;
        u32 dtcm_base = 0x00040000;

        if (adreno_is_a650(adreno_gpu))
                dtcm_base = 0x10004000;

        if (gmu->legacy) {
                /* Sanity check the size of the firmware that was loaded */
                if (fw_image->size > 0x8000) {
                        DRM_DEV_ERROR(gmu->dev,
                                "GMU firmware is bigger than the available region\n");
                        return -EINVAL;
                }

                gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
                               (u32*) fw_image->data, fw_image->size);
                return 0;
        }

        for (blk = (const struct block_header *) fw_image->data;
             (const u8*) blk < fw_image->data + fw_image->size;
             blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
                if (blk->size == 0)
                        continue;

                if (in_range(blk->addr, itcm_base, SZ_16K)) {
                        reg_offset = (blk->addr - itcm_base) >> 2;
                        gmu_write_bulk(gmu,
                                REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
                                blk->data, blk->size);
                } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
                        reg_offset = (blk->addr - dtcm_base) >> 2;
                        gmu_write_bulk(gmu,
                                REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
                                blk->data, blk->size);
                } else if (!fw_block_mem(&gmu->icache, blk) &&
                           !fw_block_mem(&gmu->dcache, blk) &&
                           !fw_block_mem(&gmu->dummy, blk)) {
                        DRM_DEV_ERROR(gmu->dev,
                                "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
                                blk->addr, blk->size, blk->data[0]);
                }
        }

        return 0;
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
        static bool rpmh_init;
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        int ret;
        u32 chipid;

        if (adreno_is_a650(adreno_gpu))
                gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);

        if (state == GMU_WARM_BOOT) {
                ret = a6xx_rpmh_start(gmu);
                if (ret)
                        return ret;
        } else {
                if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
                        "GMU firmware is not loaded\n"))
                        return -ENOENT;

                /* Turn on register retention */
                gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

                /* We only need to load the RPMh microcode once */
                if (!rpmh_init) {
                        a6xx_gmu_rpmh_init(gmu);
                        rpmh_init = true;
                } else {
                        ret = a6xx_rpmh_start(gmu);
                        if (ret)
                                return ret;
                }

                ret = a6xx_gmu_fw_load(gmu);
                if (ret)
                        return ret;
        }

        gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
        gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

        /* Write the iova of the HFI table */
        gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
        gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

        gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
                (1 << 31) | (0xa << 18) | (0xa0));

        chipid = adreno_gpu->rev.core << 24;
        chipid |= adreno_gpu->rev.major << 16;
        chipid |= adreno_gpu->rev.minor << 12;
        chipid |= adreno_gpu->rev.patchid << 8;

        gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

        gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
                  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

        /* Set up the lowest idle level on the GMU */
        a6xx_gmu_power_config(gmu);

        ret = a6xx_gmu_start(gmu);
        if (ret)
                return ret;

        if (gmu->legacy) {
                ret = a6xx_gmu_gfx_rail_on(gmu);
                if (ret)
                        return ret;
        }

        /* Enable SPTP_PC if the CPU is responsible for it */
        if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
                ret = a6xx_sptprac_enable(gmu);
                if (ret)
                        return ret;
        }

        ret = a6xx_gmu_hfi_start(gmu);
        if (ret)
                return ret;

        /* FIXME: Do we need this wmb() here? */
        wmb();

        return 0;
}
#define A6XX_HFI_IRQ_MASK \
        (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
        (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
         A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
         A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
        disable_irq(gmu->gmu_irq);
        disable_irq(gmu->hfi_irq);

        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
        u32 val;

        /* Make sure there are no outstanding RPMh votes */
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
                (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
        /* Flush all the queues */
        a6xx_hfi_stop(gmu);

        /* Stop the interrupts */
        a6xx_gmu_irq_disable(gmu);

        /* Force off SPTP in case the GMU is managing it */
        a6xx_sptprac_disable(gmu);

        /* Make sure there are no outstanding RPMh votes */
        a6xx_gmu_rpmh_off(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
        struct dev_pm_opp *gpu_opp;
        unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

        gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
        if (IS_ERR_OR_NULL(gpu_opp))
                return;

        a6xx_gmu_set_freq(gpu, gpu_opp);
        dev_pm_opp_put(gpu_opp);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        int status, ret;

        if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
                return 0;

        gmu->hung = false;

        /* Turn on the resources */
        pm_runtime_get_sync(gmu->dev);

        /*
         * "enable" the GX power domain which won't actually do anything but it
         * will make sure that the refcounting is correct in case we need to
         * bring down the GX after a GMU failure
         */
        if (!IS_ERR_OR_NULL(gmu->gxpd))
                pm_runtime_get_sync(gmu->gxpd);

        /* Use a known rate to bring up the GMU */
        clk_set_rate(gmu->core_clk, 200000000);
        ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
        if (ret) {
                pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
                return ret;
        }

        /* Set the bus quota to a reasonable value for boot */
        icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

        /* Enable the GMU interrupt */
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
        enable_irq(gmu->gmu_irq);

        /* Check to see if we are doing a cold or warm boot */
        status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
                GMU_WARM_BOOT : GMU_COLD_BOOT;

        /*
         * Warm boot path does not work on newer GPUs
         * Presumably this is because icache/dcache regions must be restored
         */
        if (!gmu->legacy)
                status = GMU_COLD_BOOT;

        ret = a6xx_gmu_fw_start(gmu, status);
        if (ret)
                goto out;

        ret = a6xx_hfi_start(gmu, status);
        if (ret)
                goto out;

        /*
         * Turn on the GMU firmware fault interrupt after we know the boot
         * sequence is successful
         */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
        enable_irq(gmu->hfi_irq);

        /* Set the GPU to the current freq */
        a6xx_gmu_set_initial_freq(gpu, gmu);

out:
        /* On failure, shut down the GMU to leave it in a good state */
        if (ret) {
                disable_irq(gmu->gmu_irq);
                a6xx_rpmh_stop(gmu);
                pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
        }

        return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
        u32 reg;

        if (!gmu->initialized)
                return true;

        reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

        if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
                return false;

        return true;
}
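/*
 * Drain outstanding bus transactions before power collapse: pre-GBIF parts
 * halt the VBIF XIN ports, while GBIF parts halt new client traffic first and
 * then the AXI arbiter.
 */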
#define GBIF_CLIENT_HALT_MASK             BIT(0)
#define GBIF_ARB_HALT_MASK                BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
        struct msm_gpu *gpu = &adreno_gpu->base;

        if (!a6xx_has_gbif(adreno_gpu)) {
                gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
                spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
                                                                0xf) == 0xf);
                gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

                return;
        }

        /* Halt new client requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
        spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
                        (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

        /* Halt all AXI requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
        spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
                        (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

        /* The GBIF halt needs to be explicitly cleared */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        u32 val;

        /*
         * The GMU may still be in slumber unless the GPU started so check and
         * skip putting it back into slumber if so
         */
        val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

        if (val != 0xf) {
                int ret = a6xx_gmu_wait_for_idle(gmu);

                /* If the GMU isn't responding assume it is hung */
                if (ret) {
                        a6xx_gmu_force_off(gmu);
                        return;
                }

                a6xx_bus_clear_pending_transactions(adreno_gpu);

                /* tell the GMU we want to slumber */
                a6xx_gmu_notify_slumber(gmu);

                ret = gmu_poll_timeout(gmu,
                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
                        !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
                        100, 10000);

                /*
                 * Let the user know we failed to slumber but don't worry too
                 * much because we are powering down anyway
                 */
                if (ret)
                        DRM_DEV_ERROR(gmu->dev,
                                "Unable to slumber GMU: status = 0%x/0%x\n",
                                gmu_read(gmu,
                                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
                                gmu_read(gmu,
                                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
        }

        /* Turn off HFI */
        a6xx_hfi_stop(gmu);

        /* Stop the interrupts and mask the hardware */
        a6xx_gmu_irq_disable(gmu);

        /* Tell RPMh to power off the GPU */
        a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct msm_gpu *gpu = &a6xx_gpu->base.base;

        if (!pm_runtime_active(gmu->dev))
                return 0;

        /*
         * Force the GMU off if we detected a hang, otherwise try to shut it
         * down gracefully
         */
        if (gmu->hung)
                a6xx_gmu_force_off(gmu);
        else
                a6xx_gmu_shutdown(gmu);

        /* Remove the bus vote */
        icc_set_bw(gpu->icc_path, 0, 0);

        /*
         * Make sure the GX domain is off before turning off the GMU (CX)
         * domain. Usually the GMU does this but only if the shutdown sequence
         * was successful
         */
        if (!IS_ERR_OR_NULL(gmu->gxpd))
                pm_runtime_put_sync(gmu->gxpd);

        clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

        pm_runtime_put_sync(gmu->dev);

        return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
        msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);

        gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
        msm_gem_address_space_put(gmu->aspace);
}
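/*
 * GMU buffers are mapped either at a caller-supplied fixed iova (the dummy
 * page and the icache/dcache regions) or anywhere in the uncached
 * 0x60000000-0x80000000 window.
 */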
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
                size_t size, u64 iova)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct drm_device *dev = a6xx_gpu->base.base.dev;
        uint32_t flags = MSM_BO_WC;
        u64 range_start, range_end;
        int ret;

        size = PAGE_ALIGN(size);
        if (!iova) {
                /* no fixed address - use GMU's uncached range */
                range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
                range_end = 0x80000000;
        } else {
                /* range for fixed address */
                range_start = iova;
                range_end = iova + size;
                /* use IOMMU_PRIV for icache/dcache */
                flags |= MSM_BO_MAP_PRIV;
        }

        bo->obj = msm_gem_new(dev, size, flags);
        if (IS_ERR(bo->obj))
                return PTR_ERR(bo->obj);

        ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
                range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
        if (ret) {
                drm_gem_object_put(bo->obj);
                return ret;
        }

        bo->virt = msm_gem_get_vaddr(bo->obj);
        bo->size = size;

        return 0;
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
        struct iommu_domain *domain;
        struct msm_mmu *mmu;

        domain = iommu_domain_alloc(&platform_bus_type);
        if (!domain)
                return -ENODEV;

        mmu = msm_iommu_new(gmu->dev, domain);
        gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
        if (IS_ERR(gmu->aspace)) {
                iommu_domain_free(domain);
                return PTR_ERR(gmu->aspace);
        }

        return 0;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
                                           unsigned long freq)
{
        struct dev_pm_opp *opp;
        unsigned int val;

        if (!freq)
                return 0;

        opp = dev_pm_opp_find_freq_exact(dev, freq, true);
        if (IS_ERR(opp))
                return 0;

        val = dev_pm_opp_get_level(opp);

        dev_pm_opp_put(opp);

        return val;
}
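/*
 * Each vote packs the primary arc voltage level into the upper 16 bits, the
 * secondary (mx.lvl) table index into bits 15:8 and the primary table index
 * into bits 7:0; see the construction at the bottom of the loop below.
 */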
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
                unsigned long *freqs, int freqs_count, const char *id)
{
        int i, j;
        const u16 *pri, *sec;
        size_t pri_count, sec_count;

        pri = cmd_db_read_aux_data(id, &pri_count);
        if (IS_ERR(pri))
                return PTR_ERR(pri);
        /*
         * The data comes back as an array of unsigned shorts so adjust the
         * count accordingly
         */
        pri_count >>= 1;
        if (!pri_count)
                return -EINVAL;

        sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
        if (IS_ERR(sec))
                return PTR_ERR(sec);

        sec_count >>= 1;
        if (!sec_count)
                return -EINVAL;

        /* Construct a vote for each frequency */
        for (i = 0; i < freqs_count; i++) {
                u8 pindex = 0, sindex = 0;
                unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

                /* Get the primary index that matches the arc level */
                for (j = 0; j < pri_count; j++) {
                        if (pri[j] >= level) {
                                pindex = j;
                                break;
                        }
                }

                if (j == pri_count) {
                        DRM_DEV_ERROR(dev,
                                      "Level %u not found in the RPMh list\n",
                                      level);
                        DRM_DEV_ERROR(dev, "Available levels:\n");
                        for (j = 0; j < pri_count; j++)
                                DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

                        return -EINVAL;
                }

                /*
                 * Look for a level in the secondary list that matches. If
                 * nothing fits, use the maximum non zero vote
                 */
                for (j = 0; j < sec_count; j++) {
                        if (sec[j] >= level) {
                                sindex = j;
                                break;
                        } else if (sec[j]) {
                                sindex = j;
                        }
                }

                /* Construct the vote */
                votes[i] = ((pri[pindex] & 0xffff) << 16) |
                        (sindex << 8) | pindex;
        }

        return 0;
}
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        int ret;

        /* Build the GX votes */
        ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
                gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

        /* Build the CX votes */
        ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
                gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

        return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
                u32 size)
{
        int count = dev_pm_opp_get_opp_count(dev);
        struct dev_pm_opp *opp;
        int i, index = 0;
        unsigned long freq = 1;

        /*
         * The OPP table doesn't contain the "off" frequency level so we need to
         * add 1 to the table size to account for it
         */

        if (WARN(count + 1 > size,
                "The GMU frequency table is being truncated\n"))
                count = size - 1;

        /* Set the "off" frequency */
        freqs[index++] = 0;

        for (i = 0; i < count; i++) {
                opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;

                dev_pm_opp_put(opp);
                freqs[index++] = freq++;
        }

        return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        int ret = 0;

        /*
         * The GMU handles its own frequency switching so build a list of
         * available frequencies to send during initialization
         */
        ret = dev_pm_opp_of_add_table(gmu->dev);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
                return ret;
        }

        gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
                gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

        /*
         * The GMU also handles GPU frequency switching so build a list
         * from the GPU OPP table
         */
        gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
                gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

        gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

        /* Build the list of RPMh votes that we'll send to the GMU */
        return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
        int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

        if (ret < 1)
                return ret;

        gmu->nr_clocks = ret;

        gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
                gmu->nr_clocks, "gmu");

        return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
                const char *name)
{
        void __iomem *ret;
        struct resource *res = platform_get_resource_byname(pdev,
                        IORESOURCE_MEM, name);

        if (!res) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }

        ret = ioremap(res->start, resource_size(res));
        if (!ret) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }

        return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
                const char *name, irq_handler_t handler)
{
        int irq, ret;

        irq = platform_get_irq_byname(pdev, name);

        ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
                              name, ret);
                return ret;
        }

        disable_irq(irq);

        return irq;
}
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct platform_device *pdev = to_platform_device(gmu->dev);

        if (!gmu->initialized)
                return;

        pm_runtime_force_suspend(gmu->dev);

        if (!IS_ERR_OR_NULL(gmu->gxpd)) {
                pm_runtime_disable(gmu->gxpd);
                dev_pm_domain_detach(gmu->gxpd, false);
        }

        iounmap(gmu->mmio);
        if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
                iounmap(gmu->rscc);
        gmu->mmio = NULL;
        gmu->rscc = NULL;

        a6xx_gmu_memory_free(gmu);

        free_irq(gmu->gmu_irq, gmu);
        free_irq(gmu->hfi_irq, gmu);

        /* Drop reference taken in of_find_device_by_node */
        put_device(gmu->dev);

        gmu->initialized = false;
}
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct platform_device *pdev = of_find_device_by_node(node);
        int ret;

        if (!pdev)
                return -ENODEV;

        gmu->dev = &pdev->dev;

        of_dma_configure(gmu->dev, node, true);

        /* For now, don't do anything fancy until we get our feet under us */
        gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

        pm_runtime_enable(gmu->dev);

        /* Get the list of clocks */
        ret = a6xx_gmu_clocks_probe(gmu);
        if (ret)
                goto err_put_device;

        ret = a6xx_gmu_memory_probe(gmu);
        if (ret)
                goto err_put_device;

        /* Allocate memory for the GMU dummy page */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
        if (ret)
                goto err_memory;

        if (adreno_is_a650(adreno_gpu)) {
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
                        SZ_16M - SZ_16K, 0x04000);
                if (ret)
                        goto err_memory;
        } else if (adreno_is_a640(adreno_gpu)) {
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
                        SZ_256K - SZ_16K, 0x04000);
                if (ret)
                        goto err_memory;

                ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
                        SZ_256K - SZ_16K, 0x44000);
                if (ret)
                        goto err_memory;
        } else {
                /* HFI v1, has sptprac */
                gmu->legacy = true;

                /* Allocate memory for the GMU debug region */
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
                if (ret)
                        goto err_memory;
        }

        /* Allocate memory for the HFI queues */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
        if (ret)
                goto err_memory;

        /* Allocate memory for the GMU log region */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
        if (ret)
                goto err_memory;

        /* Map the GMU registers */
        gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
        if (IS_ERR(gmu->mmio)) {
                ret = PTR_ERR(gmu->mmio);
                goto err_memory;
        }

        if (adreno_is_a650(adreno_gpu)) {
                gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
                if (IS_ERR(gmu->rscc))
                        goto err_mmio;
        } else {
                gmu->rscc = gmu->mmio + 0x23000;
        }

        /* Get the HFI and GMU interrupts */
        gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
        gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

        if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
                goto err_mmio;

        /*
         * Get a link to the GX power domain to reset the GPU in case of GMU
         * crash
         */
        gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

        /* Get the power levels for the GMU and GPU */
        a6xx_gmu_pwrlevels_probe(gmu);

        /* Set up the HFI queues */
        a6xx_hfi_init(gmu);

        gmu->initialized = true;

        return 0;

err_mmio:
        iounmap(gmu->mmio);
        if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
                iounmap(gmu->rscc);
        free_irq(gmu->gmu_irq, gmu);
        free_irq(gmu->hfi_irq, gmu);

        ret = -ENODEV;

err_memory:
        a6xx_gmu_memory_free(gmu);
err_put_device:
        /* Drop reference taken in of_find_device_by_node */
        put_device(gmu->dev);

        return ret;
}