drivers/gpu/drm/etnaviv/etnaviv_buffer.c
/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

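/*
 * Emit a single 32-bit word at the current write position of the command
 * buffer and advance the write pointer (user_size is kept in bytes).
 */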
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
                u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

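/*
 * Terminate the command stream; the front end stops fetching once it
 * executes an END command.
 */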
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

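/*
 * Emit a WAIT command.  The literal 200 occupies the delay field of the
 * WAIT header (presumably the number of clock cycles the FE idles before
 * fetching the next command).
 */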
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

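/*
 * Emit a LINK command: the FE branches to 'address' and prefetches
 * 'prefetch' 64-bit words from there.
 */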
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
                u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

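/*
 * Emit a STALL command: the FE stops at this point until the semaphore
 * token between 'from' and 'to' has been signalled (see CMD_SEM below).
 */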
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
                u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

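/*
 * Arm a semaphore token between two units ('from' and 'to'); paired with
 * CMD_STALL this makes the FE wait until the PE has caught up.
 */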
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

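/*
 * Emit the commands needed to switch between the 2D and 3D pipe: flush
 * the caches of the pipe we are leaving, wait for the flush to complete,
 * then select the new pipe.
 */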
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
                struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

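/* Debug helper: hex-dump 'len' 32-bit words of a command buffer starting at 'off'. */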
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
                struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                 ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                       ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
                unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
                struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}

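/*
 * Set up the ring buffer with an initial WAIT/LINK loop: the LINK points
 * back at the WAIT, so the FE spins there until the WAIT is later patched
 * by etnaviv_buffer_queue() or etnaviv_buffer_end().  Returns the initial
 * prefetch size in 64-bit words.
 */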
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                 buffer->user_size - 4);

        return buffer->user_size / 8;
}

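/*
 * Build a one-shot command stream that programs the MMUv2 master TLB
 * address and safe address on every pipe the GPU advertises, each
 * followed by an FE/PE semaphore stall, and terminate it with END.
 * Returns the stream size in 64-bit words.
 */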
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

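/*
 * Stop the ring buffer.  The WAIT/LINK pair emitted by the previous
 * submission occupies the last 16 bytes of the ring and is patched here:
 * if a pipe is active (2D or 3D), a cache flush sequence followed by END
 * is appended and the WAIT is turned into a LINK to it; otherwise the
 * WAIT is simply replaced by an END.
 */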
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D)
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

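        /*
         * Start off linking straight to the submitted command buffer;
         * LINK prefetch counts are in 64-bit (8-byte) units, hence the
         * division by 8.
         */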
        link_target = etnaviv_cmdbuf_get_va(cmdbuf);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append an MMU flush load state, followed by a new
         * link to this buffer - the exact number of extra dwords is
         * worked out below.
         */
        if (gpu->mmu->need_flush || gpu->switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (gpu->mmu->need_flush) {
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (gpu->switch_context)
                        extra_dwords += 4;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                               VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                               VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
                                               VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                          SYNC_RECIPIENT_PE);
                        }

                        gpu->mmu->need_flush = false;
                }

                if (gpu->switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
                        gpu->exec_state = cmdbuf->exec_state;
                        gpu->switch_context = false;
                }

                /* And the link to the submitted buffer */
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_DEPTH |
                               VIVS_GL_FLUSH_CACHE_COLOR);
                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                               VIVS_TS_FLUSH_CACHE_FLUSH);
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                 buffer->user_size - 4);

        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target, etnaviv_cmdbuf_get_va(cmdbuf),
                        cmdbuf->vaddr);

        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}