// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	queue_work(priv->wq, &gpu->recover_work);
}
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);
	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];

	/*
	 * Eventually we will want to scale the path vote with the frequency but
	 * for now leave it at max so that the performance is nominal.
	 */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}
/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
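	/*
	 * Assuming gmu_poll_timeout() is the readl_poll_timeout() wrapper from
	 * a6xx_gmu.h, the call below polls CM3_FW_INIT_RESULT every 100 us for
	 * up to 10 ms, waiting for the firmware to post its 0xbabeface boot
	 * cookie.
	 */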
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}
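/*
 * PDC registers are indexed by dword, hence the (offset << 2) above: offset 2,
 * for example, targets the 32-bit word at byte address ptr + 8.
 */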
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	devm_iounmap(gmu->dev, pdcptr);
	devm_iounmap(gmu->dev, seqptr);
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
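/*
 * Worked out from the comment above, assuming the usual 19.2 MHz XO on these
 * SoCs: 0x1680 = 5760 cycles / 19.2 MHz = 300 us of main hysteresis, and
 * 0xa = 10 cycles / 19.2 MHz ~= 0.5 us for the shorter window.
 */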
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;
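	/*
	 * Illustrative packing with made-up values: a rev of core 6, major 3,
	 * minor 0, patchid 1 yields (6 << 24) | (3 << 16) | (0 << 12) |
	 * (1 << 8) = 0x06030100.
	 */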
	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
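/*
 * Note that a6xx_gmu_resume() writes the bitwise NOT of these masks to the
 * interrupt mask registers, so the bits listed here are the interrupts that
 * stay enabled; everything else remains masked.
 */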
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;
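	/*
	 * a6xx_gmu_fw_start() sets REG_A6XX_GMU_GENERAL_7 to 1 when it turns
	 * on register retention during a cold boot, so reading it back as 1
	 * here means GMU state survived and a warm boot is sufficient.
	 */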
	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		/* Clear the VBIF pipe before shutting down */
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
			== 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	icc_set_bw(gpu->icc_path, 0, 0);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	/* Undo the pages that were mapped to the GMU IOMMU */
	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;
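	/*
	 * Allocations carve this range from the bottom and, per
	 * a6xx_gmu_memory_alloc(), each advances the base by a 1MB-aligned
	 * step: the 16K HFI buffer would land at 0x60000000 and the debug
	 * region allocated after it at 0x60100000.
	 */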
	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
		unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
					level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
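		/*
		 * Illustrative encoding with made-up values: if pri[2] = 64
		 * satisfies the level and sindex = 1, the packed vote is
		 * (64 << 16) | (1 << 8) | 2 = 0x00400102 - the primary arc
		 * value in the top half, then the secondary and primary
		 * indices in the low two bytes.
		 */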
	}

	return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;
1127 for (i
= 0; i
< count
; i
++) {
1128 opp
= dev_pm_opp_find_freq_ceil(dev
, &freq
);
1132 dev_pm_opp_put(opp
);
1133 freqs
[index
++] = freq
++;
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	a6xx_gmu_stop(a6xx_gpu);

	pm_runtime_disable(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;

err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}