/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/list_sort.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

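/**
 * radeon_cs_buckets_init() - initialize all priority buckets
 * @b:	bucket array to initialize
 *
 * Sets up an empty list head for every priority bucket.
 **/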
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

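/**
 * radeon_cs_parser_relocs() - parse the relocation chunk
 * @p:	parser structure holding parsing context.
 *
 * Looks up the GEM object behind every relocation entry, works out the
 * preferred/allowed placement domains and a priority for each buffer,
 * sorts the buffers into @p->validated by priority and finally validates
 * the whole list. Userptr BOs take the mmap_sem around validation.
 *
 * Returns 0 on success or a negative error code on failure.
 **/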
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
			GFP_KERNEL | __GFP_ZERO);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of a UVD job is the msg and that must be in
		 * VRAM; the second reloc is the DPB and for WMV that must be in
		 * VRAM as well. Also put everything into VRAM on AGP cards and
		 * older IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
						   PCI_CAP_ID_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].preferred_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].preferred_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].preferred_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].preferred_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		/* Objects shared as dma-bufs cannot be moved to VRAM */
		if (p->relocs[i].robj->prime_shared_count) {
			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
			if (!p->relocs[i].allowed_domains) {
				DRM_ERROR("BO associated with dma-buf cannot "
					  "be moved to VRAM\n");
				return -EINVAL;
			}
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.num_shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

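/**
 * radeon_cs_get_ring() - map the userspace ring id to a hardware ring index
 * @p:		parser structure holding parsing context.
 * @ring:	ring id from the RADEON_CHUNK_ID_FLAGS chunk
 * @priority:	requested submission priority
 *
 * Picks the hardware ring index for the submission, taking the chip family
 * and the requested priority into account.
 *
 * Returns 0 on success, -EINVAL for an unknown or unsupported ring.
 **/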
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

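/**
 * radeon_cs_sync_rings() - sync the IB against all validated buffers
 * @p:	parser structure holding parsing context.
 *
 * Adds the fences of every buffer on the validated list to the IB's sync
 * object, so the submission waits for prior users of those buffers.
 *
 * Returns 0 on success or a negative error code on failure.
 **/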
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.base.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.num_shared);
		if (r)
			return r;
	}
	return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
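/**
 * radeon_cs_parser_init() - copy the CS chunks in and set up the parser
 * @p:		parser structure holding parsing context.
 * @data:	pointer to the struct drm_radeon_cs ioctl argument
 *
 * Copies the chunk array from userspace, records the IB, relocation,
 * flags and const IB chunks, caches chunk data in the kernel where
 * needed, and selects the target ring. The flags chunk carries the
 * cs_flags, an optional ring id and an optional priority.
 *
 * Returns 0 on success or a negative error code on failure.
 **/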
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

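/* Sort callback for list_sort(): order buffers from smallest to largest. */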
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

407 | /** |
408 | * cs_parser_fini() - clean parser states | |
409 | * @parser: parser structure holding parsing context. | |
410 | * @error: error number | |
411 | * | |
412 | * If error is set than unvalidate buffer, otherwise just free memory | |
413 | * used by parsing context. | |
414 | **/ | |
ecff665f | 415 | static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff) |
771fe6b9 JG |
416 | { |
417 | unsigned i; | |
418 | ||
e43b5ec0 | 419 | if (!error) { |
4330441a MO |
420 | /* Sort the buffer list from the smallest to largest buffer, |
421 | * which affects the order of buffers in the LRU list. | |
422 | * This assures that the smallest buffers are added first | |
423 | * to the LRU list, so they are likely to be later evicted | |
424 | * first, instead of large buffers whose eviction is more | |
425 | * expensive. | |
426 | * | |
427 | * This slightly lowers the number of bytes moved by TTM | |
428 | * per frame under memory pressure. | |
429 | */ | |
430 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | |
431 | ||
ecff665f ML |
432 | ttm_eu_fence_buffer_objects(&parser->ticket, |
433 | &parser->validated, | |
f2c24b83 | 434 | &parser->ib.fence->base); |
ecff665f ML |
435 | } else if (backoff) { |
436 | ttm_eu_backoff_reservation(&parser->ticket, | |
437 | &parser->validated); | |
e43b5ec0 | 438 | } |
147666fb | 439 | |
fcbc451b PN |
440 | if (parser->relocs != NULL) { |
441 | for (i = 0; i < parser->nrelocs; i++) { | |
d33a8fc7 CK |
442 | struct radeon_bo *bo = parser->relocs[i].robj; |
443 | if (bo == NULL) | |
444 | continue; | |
445 | ||
ce77038f | 446 | drm_gem_object_put_unlocked(&bo->tbo.base); |
fcbc451b | 447 | } |
771fe6b9 | 448 | } |
48e113e5 | 449 | kfree(parser->track); |
2098105e MH |
450 | kvfree(parser->relocs); |
451 | kvfree(parser->vm_bos); | |
28a326c5 | 452 | for (i = 0; i < parser->nchunks; i++) |
2098105e | 453 | kvfree(parser->chunks[i].kdata); |
771fe6b9 JG |
454 | kfree(parser->chunks); |
455 | kfree(parser->chunks_array); | |
456 | radeon_ib_free(parser->rdev, &parser->ib); | |
f2e39221 | 457 | radeon_ib_free(parser->rdev, &parser->const_ib); |
771fe6b9 JG |
458 | } |
459 | ||
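/**
 * radeon_cs_ib_chunk() - parse and schedule a non-VM IB
 * @rdev:	radeon device the CS was submitted on
 * @parser:	parser structure holding parsing context.
 *
 * For submissions without a VM, runs the per-ring command stream checker,
 * syncs against the validated buffers and schedules the IB. Does nothing
 * when there is no IB chunk or when the CS uses a VM.
 *
 * Returns 0 on success or a negative error code on failure.
 **/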
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

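/**
 * radeon_bo_vm_update_pte() - update the page tables for all CS buffers
 * @p:	parser structure holding parsing context.
 * @vm:	VM the command stream executes in
 *
 * Updates the page directory, clears freed mappings, updates the mapping
 * of the temporary ring BO and of every relocated buffer, and fences the
 * IB against the resulting page table updates.
 *
 * Returns 0 on success or a negative error code on failure.
 **/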
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

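/**
 * radeon_cs_ib_vm_chunk() - parse and schedule a VM IB
 * @rdev:	radeon device the CS was submitted on
 * @parser:	parser structure holding parsing context.
 *
 * For VM submissions, runs the per-ring IB checker on the const IB and the
 * main IB, updates the VM page tables, syncs against the validated buffers
 * and schedules the IB(s). Does nothing when there is no IB chunk or when
 * the CS does not use a VM.
 *
 * Returns 0 on success or a negative error code on failure.
 **/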
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

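/**
 * radeon_cs_handle_lockup() - translate a deadlock into a GPU reset
 * @rdev:	radeon device the CS was submitted on
 * @r:		error code returned by the submission path
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so userspace can
 * resubmit; any other value is passed through unchanged.
 **/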
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

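/**
 * radeon_cs_ib_fill() - allocate the IB(s) and copy in the command stream
 * @rdev:	radeon device the CS was submitted on
 * @parser:	parser structure holding parsing context.
 *
 * Allocates the main IB (and the const IB for SI+ VM submissions), checks
 * the chunk sizes against RADEON_IB_VM_MAX_SIZE and copies the command
 * words from the cached chunk data or directly from userspace.
 *
 * Returns 0 on success or a negative error code on failure.
 **/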
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

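/**
 * radeon_cs_ioctl() - DRM_RADEON_CS ioctl handler
 * @dev:	DRM device
 * @data:	pointer to the struct drm_radeon_cs ioctl argument
 * @filp:	DRM file the CS was submitted on
 *
 * Top level entry point for a command submission: initializes the parser,
 * fills and validates the IB and its relocations, then hands the work to
 * the non-VM or VM submission path before cleaning up. GPU lockups are
 * turned into a reset and -EAGAIN via radeon_cs_handle_lockup().
 *
 * Returns 0 on success or a negative error code on failure.
 **/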
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the first dword of the packet in the IB
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting relocation entry
 * @nomm:	set when there is no memory manager (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3 and look up the
 * relocation entry it references; for the legacy path the GPU offset is
 * taken directly from the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}