/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

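/* MMIO offsets of the register blocks for the two SI DMA engines */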
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

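/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr (SI). The value is read from the writeback
 * slot that the engine updates, so the CPU can poll it without an
 * MMIO read.
 */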
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

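/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (SI). DMA_RB_WPTR holds
 * a byte offset in bits 2..17, so mask it and shift it down to a
 * dword index.
 */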
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

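/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (SI), converting the dword
 * index back into the byte offset the register expects.
 */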
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

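/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve the VMID from
 * @ib: IB object to schedule
 * @flags: unused here
 *
 * Schedule an indirect buffer on the DMA ring (SI).
 */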
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring. The packet itself is 3 DW, so pad with NOPs until
	 * wptr % 8 == 5.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to emit
 * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT for a 64 bit seq)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

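/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the ring buffers and mark the rings unready (SI).
 */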
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
		ring->sched.ready = false;
	}
}

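/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Program the ring buffers, enable them, and run a ring test (SI).
 * Returns 0 for success, error for failure.
 */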
static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout in jiffies to wait for the IB fence
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID of the page tables to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

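/**
 * si_dma_ring_emit_wreg - emit a register write on the ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM write packet so the DMA engine performs the register
 * write itself, in order with the rest of the ring contents (SI).
 */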
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

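/**
 * si_dma_is_idle - check whether the DMA engines are idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check the DMA0/DMA1 busy bits in SRBM_STATUS2 (SI).
 */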
static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented\n");
	return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 reg_offset, sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		reg_offset = DMA0_REGISTER_OFFSET;
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		reg_offset = DMA1_REGISTER_OFFSET;
		break;
	default:
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		sdma_cntl = RREG32(DMA_CNTL + reg_offset);
		sdma_cntl &= ~TRAP_ENABLE;
		WREG32(DMA_CNTL + reg_offset, sdma_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		sdma_cntl = RREG32(DMA_CNTL + reg_offset);
		sdma_cntl |= TRAP_ENABLE;
		WREG32(DMA_CNTL + reg_offset, sdma_cntl);
		break;
	default:
		break;
	}
	return 0;
}

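/**
 * si_dma_process_trap_irq - handle a DMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: the irq source
 * @entry: decoded interrupt vector entry
 *
 * Signal fence completion on the ring that trapped: src_id 224 is
 * the DMA0 trap event and 244 the DMA1 trap event, matching the
 * amdgpu_irq_add_id() calls in si_dma_sw_init().
 */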
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

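/*
 * Medium-grain clock gating: when gating is enabled, clear
 * MEM_POWER_OVERRIDE so the DMA memories may be gated and program
 * DMA_CLK_CTRL accordingly; when disabled, force them on.
 */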
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

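/*
 * Program the DMA power gating FSM. The PGFSM register values
 * appear to be carried over from the original SI power gating
 * sequence; they are written as-is here.
 */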
static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_rqs[i] =
			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	}
	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};