// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command buffer helpers:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

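/*
 * Note: 'user_size' is the CPU-side write pointer into the buffer, in
 * bytes, and OUT() emits one 32-bit word at a time. For example, two
 * OUT() calls starting from user_size == 0 fill vaddr[0] and vaddr[1]
 * and leave user_size at 8.
 */
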
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

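/*
 * Example: CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, 1) emits two
 * words, a LOAD_STATE header carrying a count of one and the register
 * offset (the register address shifted down to a word index), followed
 * by the value; the FE then performs the register write on our behalf.
 * All commands occupy 64-bit slots, which is why every helper starts
 * by aligning user_size to 8.
 */
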
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

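/*
 * The literal 200 is the WAIT delay operand: the FE idles for that
 * many clock cycles before fetching the next command, which throttles
 * the ring's idle wait-link loop instead of letting it refetch at
 * full rate.
 */
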
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

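/*
 * 'prefetch' is the number of 64-bit words the FE fetches at the link
 * target, which is why the dword counts computed throughout this file
 * are in units of 8 bytes (user_size / 8).
 */
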
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

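/*
 * CMD_SEM and CMD_STALL are always used as a pair in this file: the
 * semaphore token travels from 'from' to 'to' (here FE to PE), and
 * the following STALL parks the FE until the PE has consumed the
 * token, i.e. until all PE work queued so far has completed.
 */
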
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa. Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

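/*
 * The flush/semaphore/stall sequence above makes sure the outgoing
 * pipe's caches are written back and the PE has gone idle before
 * PIPE_SELECT takes effect; switching pipes with PE work still in
 * flight would hand the shared hardware over mid-render.
 */
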
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf,
                        &gpu->mmu_context->cmdbuf_mapping) +
                        off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else. 'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}

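/*
 * The write order is what makes this safe: the argument (e.g. a LINK
 * target address) becomes visible before the command word is swapped
 * in, so the FE can never pair the new opcode with a stale operand.
 * The final barrier pushes the command write out before the caller
 * assumes the GPU can observe it.
 */
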
/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer,
                                     &gpu->mmu_context->cmdbuf_mapping) +
               buffer->user_size;
}

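/*
 * Example: with a 4 KiB ring and user_size at 4088, reserving two
 * 64-bit slots (16 bytes) would overrun the buffer, so the write
 * pointer wraps to 0 and the returned GPU address is the start of the
 * ring. Callers emit a LINK to the returned address, so the FE follows
 * the wrap without any special handling.
 */
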
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        return buffer->user_size / 8;
}

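/*
 * This leaves the FE spinning in a WAIT/LINK idle loop: the LINK
 * target expression is evaluated before CMD_LINK advances user_size,
 * so it points back at the WAIT just emitted. New work is injected
 * later by patching that WAIT via etnaviv_buffer_replace_wait(). The
 * value returned is the loop length in 64-bit words, used as the
 * prefetch when the FE is started.
 */
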
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

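/*
 * Unlike the ring proper, this one-shot setup buffer is terminated
 * with END: the FE executes it once to latch the MMUv2 configuration
 * for each available pipe and then halts, to be restarted with the
 * regular wait-link loop afterwards.
 */
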
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D)
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the WAIT of the last wait-link with an END command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

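/*
 * Either way the FE ends up parked at an END: the tail WAIT is turned
 * into a LINK to a freshly written flush+END sequence, or, when no
 * cache flush is needed, directly into an END. This is how the GPU is
 * quiesced, e.g. before suspend.
 */
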
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

        /*
         * We need at most 4 dwords in the return target:
         * 1 event + 1 end + 1 wait + 1 link.
         */
        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}

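/*
 * A sync point lets the kernel run CPU-side work (e.g. performance
 * counter sampling) with the GPU paused: the event fires, the END
 * halts the FE, and once the sync point handler has run, the FE is
 * restarted at the WAIT/LINK pair appended above.
 */
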
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        struct etnaviv_iommu_context *mmu_context, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;
        bool switch_mmu_context = gpu->mmu_context != mmu_context;
        unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
        bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;

        lockdep_assert_held(&gpu->lock);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                            &gpu->mmu_context->cmdbuf_mapping);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append a mmu flush load state and/or pipe switch
         * commands, followed by a new link to this buffer - the exact
         * number of additional dwords is worked out below.
         */
        if (need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (need_flush) {
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                /* PTA load command */
                if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
                        extra_dwords += 1;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
                /*
                 * Switch MMU context if necessary. Must be done after the
                 * link target has been calculated, as the jump forward in the
                 * kernel ring still uses the last active MMU context before
                 * the switch.
                 */
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;

                        etnaviv_iommu_context_get(mmu_context);
                        gpu->mmu_context = mmu_context;
                        etnaviv_iommu_context_put(old_context);
                }

                if (need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                            VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

                                if (switch_mmu_context &&
                                    gpu->sec_mode == ETNA_SEC_KERNEL) {
                                        unsigned short id =
                                                etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
                                        CMD_LOAD_STATE(buffer,
                                                       VIVS_MMUv2_PTA_CONFIG,
                                                       VIVS_MMUv2_PTA_CONFIG_INDEX(id));
                                }

                                if (gpu->sec_mode == ETNA_SEC_NONE)
                                        flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                               flush);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                          SYNC_RECIPIENT_PE);
                        }

                        gpu->flush_seq = new_flush_seq;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                                    &gpu->mmu_context->cmdbuf_mapping);
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

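        /*
         * From here on, link_target/link_dwords describe either the
         * user command buffer directly, or the maintenance commands
         * written above, which themselves end in a LINK to the user
         * buffer.
         */
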
        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer. return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_DEPTH |
                               VIVS_GL_FLUSH_CACHE_COLOR);
                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                               VIVS_TS_FLUSH_CACHE_FLUSH);
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        if (drm_debug_enabled(DRM_UT_DRIVER)) {
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target,
                        etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
                        cmdbuf->vaddr);

                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
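
/*
 * Resulting command stream after a submit, sketched (one possible
 * shape, with maintenance commands present):
 *
 *   old tail WAIT (patched to LINK)
 *     --> maintenance cmds in ring --LINK-->
 *     --> user cmdbuf --LINK-->
 *     --> flush/stall/event in ring --> new WAIT <--+
 *                                       new LINK ---+
 *
 * The FE thus runs the maintenance commands (if any) and the user
 * buffer, then returns to the ring, flushes caches, signals the
 * completion event and parks in a fresh wait-link loop until the
 * next submit patches that WAIT in turn.
 */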