/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 */
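
/*
 * For reference, DMA packet headers are built with the DMA_PACKET()
 * macro from r600d.h, which packs the opcode and count into a single
 * dword. A sketch of the encoding (see r600d.h for the authoritative
 * definition):
 *
 *   DMA_PACKET(cmd, t, s, n)
 *     bits 31:28  cmd - packet opcode (WRITE, COPY, FENCE, TRAP, ...)
 *     bit  23     t   - tiling flag
 *     bit  22     s   - sub-op flag (e.g. semaphore signal vs. wait)
 *     bits 15:0   n   - count (dwords, or packet-specific payload)
 */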

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

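	/* rptr is a byte offset into the ring; mask and convert to dwords */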
	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}

/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	/* Reset dma */
	if (rdev->family >= CHIP_RV770)
		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
	else
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
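	/*
	 * log2 of the ring size in dwords goes in the size field starting
	 * at bit 1 of DMA_RB_CNTL; bit 0 is DMA_RB_ENABLE, which is set
	 * later, once the ring is fully programmed.
	 */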
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

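	/* the ring base address register is in units of 256 bytes */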
	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
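	/*
	 * Emit a single 4-dword WRITE packet: header (count = 1 dword),
	 * destination address split into low/high dwords, then the payload.
	 */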
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

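	/* the 's' field selects the operation: 1 = signal, 0 = wait */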
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

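	/* build the same 4-dword WRITE packet as the ring test, but in an IB */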
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
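		/*
		 * Predict where rptr will land after the IB packet below:
		 * account for this 4-dword write packet, the NOP padding
		 * up to offset 5 (mod 8), and the 3-dword IB packet.
		 */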
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset, uint64_t dst_offset,
		  unsigned num_gpu_pages,
		  struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
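	/*
	 * Each COPY packet moves at most 0xFFFE dwords and takes 4 dwords
	 * of ring space; reserve 8 extra dwords for the semaphore sync and
	 * fence/trap packets emitted around the copy loop.
	 */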
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}