// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */


#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/soc/qcom/llcc-qcom.h>
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}
static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}
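/*
 * How a submission is published to the CP: the ring's shadow write pointer
 * (ring->cur) is advanced under the preempt lock, a full memory barrier
 * makes the ring contents visible, and only then is REG_A6XX_CP_RB_WPTR
 * written. On targets without expanded APRIV, a CP_WHERE_AM_I packet is
 * emitted first so the CP reports its read pointer back into the
 * privileged shadow buffer instead of writing unprivileged memory.
 */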
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	uint32_t wptr;
	unsigned long flags;

	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
	}

	spin_lock_irqsave(&ring->preempt_lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
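/*
 * Sample a counter register pair into ring memory using CP_REG_TO_MEM.
 * CP_REG_TO_MEM_0_CNT(2) copies two consecutive dwords (the LO/HI halves
 * of a 64-bit counter) to the given iova, so the start/end snapshots taken
 * around each submit can later be diffed for the GPU usage stats.
 */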
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |
		CP_REG_TO_MEM_0_64B);
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}
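/*
 * Per-process pagetables: when the incoming submit belongs to a different
 * context than the one currently programmed, the CP itself is asked to
 * switch TTBR0 from the ring. CP_SMMU_TABLE_UPDATE does the actual SMMU
 * reprogramming, the new TTBR0/ASID pair is then mirrored into the
 * memstore for debugging, and finally a UCHE invalidate flushes anything
 * cached under the old pagetable.
 */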
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
	phys_addr_t ttbr;
	u32 asid;
	u64 memptr = rbmemptr(ring, ttbr0);

	if (ctx == a6xx_gpu->cur_ctx)
		return;

	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
		return;

	/* Execute the table update */
	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));

	OUT_RING(ring,
		CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
		CP_SMMU_TABLE_UPDATE_1_ASID(asid));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));

	/*
	 * Write the new TTBR0 to the memstore. This is good for debugging.
	 */
	OUT_PKT7(ring, CP_MEM_WRITE, 4);
	OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
	OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
	OUT_RING(ring, lower_32_bits(ttbr));
	OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));

	/*
	 * And finally, trigger a UCHE flush to be sure there isn't anything
	 * lingering in that part of the GPU
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, 0x31);

	a6xx_gpu->cur_ctx = ctx;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == submit->queue->ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));

	a6xx_flush(gpu, ring);
}
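/*
 * Hardware clock gating tables. Each adreno_reglist is a zero-terminated
 * array of (offset, value) pairs holding per-target tuning; a6xx_set_hwcg()
 * below walks the list attached to the target's adreno_info and writes
 * either the value (enable) or zero (disable) to each offset.
 */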
const struct adreno_reglist a630_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};
const struct adreno_reglist a640_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};
const struct adreno_reglist a650_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	const struct adreno_reglist *reg;
	unsigned int i;
	u32 val, clock_cntl_on;

	if (!adreno_gpu->info->hwcg)
		return;

	if (adreno_is_a630(adreno_gpu))
		clock_cntl_on = 0x8aa8aa02;
	else
		clock_cntl_on = 0x8aa8aa82;

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == clock_cntl_on)))
		return;

	/* Disable SP clock before programming HWCG registers */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);

	/* Enable SP clock */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
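/*
 * UBWC (bandwidth compression) configuration. The same lower_bit value is
 * mirrored into the RB, TPL1, SP and UCHE mode registers below so that all
 * blocks agree on the compressed tiling layout; the remaining fields
 * (amsbc, rgb565_predicator, uavflagprd_inv) are only raised on the
 * targets whose hardware supports them.
 */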
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 lower_bit = 0x2;
	u32 amsbc = 0;
	u32 rgb565_predicator = 0;
	u32 uavflagprd_inv = 0;

	/* a618 is using the hw default values */
	if (adreno_is_a618(adreno_gpu))
		return;

	if (adreno_is_a640(adreno_gpu))
		amsbc = 1;

	if (adreno_is_a650(adreno_gpu)) {
		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
		lower_bit = 3;
		amsbc = 1;
		rgb565_predicator = 1;
		uavflagprd_inv = 2;
	}

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
		uavflagprd_inv << 4 | lower_bit << 1);
	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002f);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* No workarounds enabled */
	OUT_RING(ring, 0x00000000);

	/* Pad rest of the cmds with 0's */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
/*
 * Check that the microcode version is new enough to include several key
 * security fixes. Return true if the ucode is safe.
 */
static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
		struct drm_gem_object *obj)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	u32 *buf = msm_gem_get_vaddr(obj);
	bool ret = false;

	if (IS_ERR(buf))
		return false;

	/*
	 * Targets up to a640 (a618, a630 and a640) need to check for a
	 * microcode version that is patched to support the whereami opcode or
	 * one that is new enough to include it by default.
	 */
	if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
		adreno_is_a640(adreno_gpu)) {
		/*
		 * If the lowest nibble is 0xa that is an indication that this
		 * microcode has been patched. The actual version is in dword
		 * [3] but we only care about the patchlevel which is the lowest
		 * nibble of dword [3]
		 *
		 * Otherwise check that the firmware is greater than or equal
		 * to 1.90 which was the first version that had this fix built
		 * in
		 */
		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
			(buf[0] & 0xfff) >= 0x190) {
			a6xx_gpu->has_whereami = true;
			ret = true;
			goto out;
		}

		DRM_DEV_ERROR(&gpu->pdev->dev,
			"a630 SQE ucode is too old. Have version %x need at least %x\n",
			buf[0] & 0xfff, 0x190);
	} else {
		/*
		 * a650 tier targets don't need whereami but still need to be
		 * equal to or newer than 0.95 for other security fixes
		 */
		if (adreno_is_a650(adreno_gpu)) {
			if ((buf[0] & 0xfff) >= 0x095) {
				ret = true;
				goto out;
			}

			DRM_DEV_ERROR(&gpu->pdev->dev,
				"a650 SQE ucode is too old. Have version %x need at least %x\n",
				buf[0] & 0xfff, 0x095);
		}

		/*
		 * When a660 is added those targets should return true here
		 * since those have all the critical security fixes built in
		 */
	}
out:
	msm_gem_put_vaddr(obj);
	return ret;
}
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
			drm_gem_object_put(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			return -EPERM;
		}
	}

	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
		REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);

	return 0;
}
static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;

	return ret;
}
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	  A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	  A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	  A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	  A6XX_RBBM_INT_0_MASK_CP_RB | \
	  A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	  A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	  A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
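/*
 * A6XX_INT_MASK selects which RBBM interrupts are unmasked at init time:
 * the CP error and event sources (AHB error, HW error, IB1/IB2/RB, the
 * CACHE_FLUSH_TS completion that drives fence retirement), the RBBM hang
 * detector and ATB overflows, and the UCHE trap/out-of-bounds faults. All
 * of them are dispatched from a6xx_irq() further down.
 */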
static int a6xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	/* Make sure the GMU keeps the GPU on while we set it up */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Turn on 64 bit addressing for all blocks */
	gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/* enable hardware clockgating */
	a6xx_set_hwcg(gpu, true);
) || adreno_is_a650(adreno_gpu
)) {
693 gpu_write(gpu
, REG_A6XX_GBIF_QSB_SIDE0
, 0x00071620);
694 gpu_write(gpu
, REG_A6XX_GBIF_QSB_SIDE1
, 0x00071620);
695 gpu_write(gpu
, REG_A6XX_GBIF_QSB_SIDE2
, 0x00071620);
696 gpu_write(gpu
, REG_A6XX_GBIF_QSB_SIDE3
, 0x00071620);
697 gpu_write(gpu
, REG_A6XX_GBIF_QSB_SIDE3
, 0x00071620);
698 gpu_write(gpu
, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL
, 0x3);
700 gpu_write(gpu
, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL
, 0x3);
703 if (adreno_is_a630(adreno_gpu
))
704 gpu_write(gpu
, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN
, 0x00000009);
	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	if (!adreno_is_a650(adreno_gpu)) {
		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
			REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);

		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
			REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
			0x00100000 + adreno_gpu->gmem - 1);
	}

	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
	if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
	else
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);

	/* Setting the mem pool size */
	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	if (adreno_is_a650(adreno_gpu))
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
	else if (adreno_is_a640(adreno_gpu))
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
	else
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
	/* Set the AHB default slave response to "ERROR" */
	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

	a6xx_set_ubwc_config(gpu);

	/* Enable fault detection */
	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0x1fffff);

	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
765 if (adreno_is_a650(adreno_gpu
)) {
766 gpu_write(gpu
, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0
, 0);
767 gpu_write(gpu
, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1
,
769 gpu_write(gpu
, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2
,
771 gpu_write(gpu
, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3
,
773 gpu_write(gpu
, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4
,
	/* Protect registers from the CP */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);

	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
		A6XX_PROTECT_RDONLY(0x600, 0x51));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
		A6XX_PROTECT_RDONLY(0xfc00, 0x3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
		A6XX_PROTECT_RDONLY(0x0, 0x4f9));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
		A6XX_PROTECT_RDONLY(0x501, 0xa));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
		A6XX_PROTECT_RDONLY(0x511, 0x44));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
		A6XX_PROTECT_RW(0xbe20, 0x11f3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
		A6XX_PROTECT_RDONLY(0x980, 0x4));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
816 gpu_write(gpu
, REG_A6XX_CP_APRIV_CNTL
,
817 (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
820 /* Enable interrupts */
821 gpu_write(gpu
, REG_A6XX_RBBM_INT_0_MASK
, A6XX_INT_MASK
);
823 ret
= adreno_hw_init(gpu
);
827 ret
= a6xx_ucode_init(gpu
);
831 /* Set the ringbuffer address */
832 gpu_write64(gpu
, REG_A6XX_CP_RB_BASE
, REG_A6XX_CP_RB_BASE_HI
,
835 /* Targets that support extended APRIV can use the RPTR shadow from
836 * hardware but all the other ones need to disable the feature. Targets
837 * that support the WHERE_AM_I opcode can use that instead
839 if (adreno_gpu
->base
.hw_apriv
)
840 gpu_write(gpu
, REG_A6XX_CP_RB_CNTL
, MSM_GPU_RB_CNTL_DEFAULT
);
842 gpu_write(gpu
, REG_A6XX_CP_RB_CNTL
,
843 MSM_GPU_RB_CNTL_DEFAULT
| AXXX_CP_RB_CNTL_NO_UPDATE
);
846 * Expanded APRIV and targets that support WHERE_AM_I both need a
847 * privileged buffer to store the RPTR shadow
850 if (adreno_gpu
->base
.hw_apriv
|| a6xx_gpu
->has_whereami
) {
851 if (!a6xx_gpu
->shadow_bo
) {
852 a6xx_gpu
->shadow
= msm_gem_kernel_new_locked(gpu
->dev
,
853 sizeof(u32
) * gpu
->nr_rings
,
854 MSM_BO_UNCACHED
| MSM_BO_MAP_PRIV
,
855 gpu
->aspace
, &a6xx_gpu
->shadow_bo
,
856 &a6xx_gpu
->shadow_iova
);
858 if (IS_ERR(a6xx_gpu
->shadow
))
859 return PTR_ERR(a6xx_gpu
->shadow
);
862 gpu_write64(gpu
, REG_A6XX_CP_RB_RPTR_ADDR_LO
,
863 REG_A6XX_CP_RB_RPTR_ADDR_HI
,
864 shadowptr(a6xx_gpu
, gpu
->rb
[0]));
867 /* Always come up on rb 0 */
868 a6xx_gpu
->cur_ring
= gpu
->rb
[0];
870 a6xx_gpu
->cur_ctx
= NULL
;
872 /* Enable the SQE_to start the CP engine */
873 gpu_write(gpu
, REG_A6XX_CP_SQE_CNTL
, 1);
875 ret
= a6xx_cp_init(gpu
);
	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a6xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb[0], 0x00000000);

		a6xx_flush(gpu, gpu->rb[0]);
		if (!a6xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	} else if (ret == -ENODEV) {
		/*
		 * This device does not use zap shader (but print a warning
		 * just in case someone got their dt wrong.. hopefully they
		 * have a debug UART to realize the error of their ways...
		 * if you mess this up you are about to crash horribly)
		 */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
		ret = 0;
	} else {
		return ret;
	}

out:
	/*
	 * Tell the GMU that we are done touching the GPU and it can start power
	 * management
	 */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	if (a6xx_gpu->gmu.legacy) {
		/* Take the GMU out of its special boot mode */
		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
	}

	return ret;
}
static void a6xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
			gpu_read(gpu, REG_A6XX_RBBM_STATUS));
	adreno_dump(gpu);
}
#define VBIF_RESET_ACK_TIMEOUT	100
#define VBIF_RESET_ACK_MASK	0x00f0
static void a6xx_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++)
		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

	if (hang_debug)
		a6xx_dump(gpu);

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * interrupt
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	msm_gpu_hw_init(gpu);
}
static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}
static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);

	if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
		val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A6XX_CP_INT_CP_UCODE_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP ucode error interrupt\n");

	if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A6XX_CP_HW_FAULT));

	if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 20) ? "READ" : "WRITE",
			(val & 0x3ffff), val);
	}

	if (status & A6XX_CP_INT_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");

	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");

	if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}
static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	/*
	 * Force the GPU to stay on until after we finish
	 * collecting information
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);

	DRM_DEV_ERROR(&gpu->pdev->dev,
		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->seqno : 0,
		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	kthread_queue_work(gpu->worker, &gpu->recover_work);
}
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);

	gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
		a6xx_fault_detect_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a6xx_cp_hw_err_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}
static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
{
	return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
}

static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
	return msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}

static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
{
	llcc_slice_deactivate(a6xx_gpu->llc_slice);
	llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
}
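/*
 * System cache (LLC) setup. The CNTL1 value packs the GPU slice ID into
 * five consecutive 5-bit fields (bits 0, 5, 10, 15 and 20, one per GPU
 * block) and, on targets without an MMU500, the pagetable-walker slice ID
 * into bits 29:25, which is what the shifts and the
 * FIELD_PREP(GENMASK(29, 25), ...) below construct.
 */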
static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	u32 cntl1_regval = 0;

	if (IS_ERR(a6xx_gpu->llc_mmio))
		return;

	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);

		gpu_scid &= 0x1f;
		cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
			       (gpu_scid << 15) | (gpu_scid << 20);
	}

	/*
	 * For targets with a MMU500, activate the slice but don't program the
	 * register. The XBL will take care of that.
	 */
	if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
		if (!a6xx_gpu->have_mmu500) {
			u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);

			gpuhtw_scid &= 0x1f;
			cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
		}
	}

	if (cntl1_regval) {
		/*
		 * Program the slice IDs for the various GPU blocks and GPU MMU
		 * pagetables
		 */
		if (a6xx_gpu->have_mmu500)
			gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
				cntl1_regval);
		else {
			a6xx_llc_write(a6xx_gpu,
				REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);

			/*
			 * Program cacheability overrides to not allocate cache
			 * lines on a write miss
			 */
			a6xx_llc_rmw(a6xx_gpu,
				REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
		}
	}
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
	llcc_slice_putd(a6xx_gpu->llc_slice);
	llcc_slice_putd(a6xx_gpu->htw_llc_slice);
}

static void a6xx_llc_slices_init(struct platform_device *pdev,
		struct a6xx_gpu *a6xx_gpu)
{
	struct device_node *phandle;

	/*
	 * There is a different programming path for targets with an mmu500
	 * attached, so detect if that is the case
	 */
	phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
	a6xx_gpu->have_mmu500 = (phandle &&
		of_device_is_compatible(phandle, "arm,mmu-500"));
	of_node_put(phandle);

	if (a6xx_gpu->have_mmu500)
		a6xx_gpu->llc_mmio = NULL;
	else
		a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");

	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);

	if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
		a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	gpu->needs_hw_init = true;

	trace_msm_gpu_resume(0);

	ret = a6xx_gmu_resume(a6xx_gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	a6xx_llc_activate(a6xx_gpu);

	return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int i, ret;

	trace_msm_gpu_suspend(0);

	a6xx_llc_deactivate(a6xx_gpu);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = a6xx_gmu_stop(a6xx_gpu);
	if (ret)
		return ret;

	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
		for (i = 0; i < gpu->nr_rings; i++)
			a6xx_gpu->shadow[i] = 0;

	return 0;
}
static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	static DEFINE_MUTEX(perfcounter_oob);

	mutex_lock(&perfcounter_oob);

	/* Force the GPU power on so we can read this register */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);

	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
		REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);

	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
	mutex_unlock(&perfcounter_oob);

	return 0;
}
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	return a6xx_gpu->cur_ring;
}
static void a6xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (a6xx_gpu->sqe_bo) {
		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
		drm_gem_object_put(a6xx_gpu->sqe_bo);
	}

	if (a6xx_gpu->shadow_bo) {
		msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
		drm_gem_object_put(a6xx_gpu->shadow_bo);
	}

	a6xx_llc_slices_destroy(a6xx_gpu);

	a6xx_gmu_remove(a6xx_gpu);

	adreno_gpu_cleanup(adreno_gpu);

	kfree(a6xx_gpu);
}
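/*
 * Busy-time accounting for devfreq. The GMU power counter accumulates
 * ticks of the always-on XO clock (19.2 MHz, hence the XOCLK register
 * names), so multiplying the cycle delta by 10 and dividing by 192
 * converts it to microseconds: cycles / 19.2e6 * 1e6 == cycles * 10 / 192.
 */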
static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	u64 busy_cycles, busy_time;

	/* Only read the gpu busy if the hardware is already active */
	if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
		return 0;

	busy_cycles = gmu_read64(&a6xx_gpu->gmu,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);

	busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
	do_div(busy_time, 192);

	gpu->devfreq.busy_cycles = busy_cycles;

	pm_runtime_put(a6xx_gpu->gmu.dev);

	if (WARN_ON(busy_time > ~0LU))
		return ~0LU;

	return (unsigned long)busy_time;
}
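/*
 * Private per-context GPU address spaces start at 0x100000000 with a size
 * of 0x1ffffffff, keeping the low 4 GiB of the VA space unmapped, so a
 * stray 32-bit address coming from a buggy or malicious context faults in
 * the IOMMU instead of silently hitting a valid mapping.
 */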
static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
	struct msm_mmu *mmu;

	mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	return msm_gem_address_space_create(mmu,
		"gpu", 0x100000000ULL, 0x1ffffffffULL);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
		return a6xx_gpu->shadow[ring->id];

	return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a6xx_hw_init,
		.pm_suspend = a6xx_pm_suspend,
		.pm_resume = a6xx_pm_resume,
		.recover = a6xx_recover,
		.submit = a6xx_submit,
		.active_ring = a6xx_active_ring,
		.irq = a6xx_irq,
		.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.show = a6xx_show,
#endif
		.gpu_busy = a6xx_gpu_busy,
		.gpu_get_freq = a6xx_gmu_get_freq,
		.gpu_set_freq = a6xx_gmu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.gpu_state_get = a6xx_gpu_state_get,
		.gpu_state_put = a6xx_gpu_state_put,
#endif
		.create_address_space = adreno_iommu_create_address_space,
		.create_private_address_space = a6xx_create_private_address_space,
		.get_rptr = a6xx_get_rptr,
	},
	.get_timestamp = a6xx_get_timestamp,
};
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct adreno_platform_config *config = pdev->dev.platform_data;
	const struct adreno_info *info;
	struct device_node *node;
	struct a6xx_gpu *a6xx_gpu;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
	if (!a6xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a6xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = NULL;

	/*
	 * We need to know the platform type before calling into adreno_gpu_init
	 * so that the hw_apriv flag can be correctly set. Snoop into the info
	 * and grab the revision number
	 */
	info = adreno_info(config->rev);

	if (info && info->revn == 650)
		adreno_gpu->base.hw_apriv = true;

	a6xx_llc_slices_init(pdev, a6xx_gpu);

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	/* Check if there is a GMU phandle and set it up */
	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

	/* FIXME: How do we gracefully handle this? */
	BUG_ON(!node);

	ret = a6xx_gmu_init(a6xx_gpu, node);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
				a6xx_fault_handler);

	return gpu;
}