/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

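/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Return the current rptr from the writeback slot that the
 * engine updates as it consumes packets (SI).
 */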
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

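/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Read the current wptr back from the DMA_RB_WPTR register (SI).
 */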
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

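/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr to the DMA_RB_WPTR register to tell the
 * engine that new packets are available (SI).
 */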
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

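/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 * @vm_id: the VM context ID the IB executes in
 * @ctx_switch: unused on SI
 *
 * Schedule an indirect buffer in the DMA ring (SI).
 */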
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_ib *ib,
				unsigned vm_id, bool ctx_switch)
{
	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

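/**
 * si_dma_ring_emit_hdp_flush - flush the HDP cache via the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an SRBM write to HDP_MEM_COHERENCY_FLUSH_CNTL to flush
 * the HDP cache (SI).
 */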
static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
	amdgpu_ring_write(ring, 1);
}

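/**
 * si_dma_ring_emit_hdp_invalidate - invalidate the HDP cache via the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an SRBM write to HDP_DEBUG0, which the driver uses to
 * invalidate the HDP cache (SI).
 */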
static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
	amdgpu_ring_write(ring, 1);
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

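/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the async dma engines (SI).
 */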
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
		ring->ready = false;
	}
}

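/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA ring buffers and enable them (SI).
 * Returns 0 for success, error for failure.
 */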
static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout for the fence wait, in jiffies
 *
 * Test a simple IB on the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM context ID to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8)
		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	else
		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* bits 0-7 are the VM contexts0-7 */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

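/**
 * si_dma_set_trap_irq_state - enable/disable a DMA trap interrupt source
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source structure
 * @type: which trap source (AMDGPU_SDMA_IRQ_TRAP0 or AMDGPU_SDMA_IRQ_TRAP1)
 * @state: interrupt state to set
 *
 * Toggle the TRAP_ENABLE bit in DMA_CNTL for the selected instance (SI).
 */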
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	amdgpu_fence_process(&adev->sdma.instance[0].ring);

	return 0;
}

static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	amdgpu_fence_process(&adev->sdma.instance[1].ring);

	return 0;
}

static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

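/**
 * si_dma_set_clockgating_state - enable/disable DMA clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * When gating is requested and SDMA MGCG is supported, clear the
 * MEM_POWER_OVERRIDE bit and program DMA_CLK_CTRL for gating;
 * otherwise set the override and force the clocks on (SI).
 */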
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

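/**
 * si_dma_set_powergating_state - program the DMA power gating state machine
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state (not inspected here)
 *
 * Kick the DMA_PGFSM write/config registers; note that the
 * requested state is currently ignored (SI).
 */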
static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + /* si_dma_ring_emit_hdp_flush */
		3 + /* si_dma_ring_emit_hdp_invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		12 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.emit_hdp_flush = si_dma_ring_emit_hdp_flush,
	.emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq_1,
};

static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
	.process = si_dma_process_illegal_inst_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &si_dma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte = si_dma_vm_copy_pte,
	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};