/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

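/**
 * amdgpu_cs_get_ring - map a userspace IP type/instance/ring triple to a ring
 *
 * @adev: amdgpu device
 * @ip_type: HW IP block type (GFX, COMPUTE, DMA, UVD or VCE)
 * @ip_instance: instance of the IP block, currently only 0 is valid
 * @ring: ring index inside the IP block
 * @out_ring: the resulting ring
 *
 * Validates the triple against the rings the device actually exposes and
 * returns 0 on success or -EINVAL for an unknown type, instance or index.
 */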
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < adev->vce.num_rings) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only %d VCE rings are supported\n",
				  adev->vce.num_rings);
			return -EINVAL;
		}
		break;
	}

	if (!(*out_ring && (*out_ring)->adev)) {
		DRM_ERROR("Ring %d is not initialized on IP %d\n",
			  ring, ip_type);
		return -EINVAL;
	}

	return 0;
}

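/* Handle the user fence chunk: look up the GEM handle, reference the BO and
 * prepare it for the validation list. The BO must be exactly one page large,
 * must contain the fence offset and must not be a userptr BO.
 */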
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	/* The BO reference taken above keeps the object alive, so the GEM
	 * lookup reference can be dropped before the error checks to avoid
	 * leaking it on the error paths.
	 */
	drm_gem_object_unreference_unlocked(gobj);

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

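/**
 * amdgpu_cs_parser_init - copy in and check the submission chunks
 *
 * @p: parser structure holding the submission context
 * @data: the union drm_amdgpu_cs passed to the CS ioctl
 *
 * Takes a reference on the submission context, copies the chunk array and
 * each chunk's data from userspace, counts the IB chunks and allocates the
 * job. Frees everything copied so far on error.
 */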
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

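/* Convert bytes to microseconds, the inverse of us_to_bytes(). */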
static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	s64 time_us, increment_us;
	u64 max_bytes;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
	used_vram = atomic64_read(&adev->vram_usage);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This returns 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	spin_unlock(&adev->mm_stats.lock);
	return max_bytes;
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

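/* Validate a single BO: use the preferred domains while the submission still
 * has move budget left, otherwise fall back to the allowed domains, and
 * account the bytes TTM actually moved. Retries once in the allowed domains
 * if validation fails with -ENOMEM.
 */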
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u64 initial_bytes_moved;
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold)
		domain = bo->prefered_domains;
	else
		domain = bo->allowed_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
		initial_bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	int r;

	if (!p->evictable)
		return false;

	for (; &p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
			initial_bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

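/* Validation callback: validate the BO and, on -ENOMEM, evict BOs from the
 * current working set until validation succeeds or nothing is left to evict.
 * The shadow BO, if present, is validated as well.
 */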
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

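/* Validate all BOs on a list. Rejects userptr BOs belonging to a foreign
 * process and copies freshly acquired user pages into the TTM object before
 * binding it.
 */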
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			drm_free_large(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

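/* Collect and reserve all buffers of the submission: the BO list, the VM
 * page directory and the optional user fence BO. Userptr BOs whose pages
 * were invalidated get their pages re-acquired a bounded number of times,
 * then everything is validated against the per-submission move budget.
 */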
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
			}

			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = drm_calloc_large(ttm->num_pages,
							 sizeof(struct page*));
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);

	fpriv->vm.last_eviction_counter =
		atomic64_read(&p->adev->num_evictions);

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			drm_free_large(e->user_pages);
		}
	}

	return r;
}

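/* Make the job wait for all fences pending on the reservation objects of
 * the validated BOs.
 */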
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether the reserved buffers need to be backed off again
 *
 * If error is set, unvalidate the buffers, otherwise just free the memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

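/* Update the VM for this submission: the page directory first, then the
 * freed and per-BO mappings, adding every resulting fence to the job's sync
 * object.
 */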
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;
		bo_va = vm->csa_bo_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}
	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

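/* Run the IBs through the ring's CS parser for UVD/VCE VM emulation, update
 * the page tables for real VM submissions and finally sync to all fences
 * the job has to wait for.
 */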
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int i, r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs) {
		for (i = 0; i < p->job->num_ibs; i++) {
			r = amdgpu_ring_parse_cs(ring, p, i);
			if (r)
				return r;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);

		r = amdgpu_bo_vm_update_pte(p, vm);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

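/* Fill the job's IB array from the IB chunks. All IBs of one submission must
 * target the same ring; for rings with a CS parser the IB contents are
 * copied into kernel memory so they can be checked and patched.
 */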
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}

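/* Resolve the dependency chunks: look up the fence behind each dependency
 * and add it to the job's sync object.
 */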
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	int i, j, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct dma_fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;
			} else if (fence) {
				r = amdgpu_sync_fence(adev, &p->job->sync,
						      fence);
				dma_fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

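/* Hand the prepared job over to the GPU scheduler: initialize the scheduler
 * job, publish the finished fence under a new handle in the context and
 * push the job to the context's entity for the target ring.
 */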
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	int r;

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
	job->uf_sequence = cs->out.handle;
	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}

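/* The DRM_AMDGPU_CS ioctl: parse the chunks, reserve and validate all
 * buffers, fill the IBs, resolve the dependencies and submit the job.
 */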
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;
	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
			       user->ring, &ring);
	if (r)
		return ERR_PTR(r);

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* drop the reference taken by amdgpu_cs_get_fence() */
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;
	/* set return value 0 to indicate success */
	r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = (void __user *)(unsigned long)(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;

	if (!parser->bo_list)
		return NULL;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj;

		lobj = &parser->bo_list->array[i];
		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}

1377 | |
1378 | /** | |
1379 | * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM | |
1380 | * | |
1381 | * @parser: command submission parser context | |
1382 | * | |
1383 | * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM. | |
1384 | */ | |
1385 | int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser) | |
1386 | { | |
1387 | unsigned i; | |
1388 | int r; | |
1389 | ||
1390 | if (!parser->bo_list) | |
1391 | return 0; | |
1392 | ||
1393 | for (i = 0; i < parser->bo_list->num_entries; i++) { | |
1394 | struct amdgpu_bo *bo = parser->bo_list->array[i].robj; | |
1395 | ||
bb990bb0 | 1396 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); |
c855e250 CK |
1397 | if (unlikely(r)) |
1398 | return r; | |
03f48dd5 CK |
1399 | |
1400 | if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) | |
1401 | continue; | |
1402 | ||
1403 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | |
1404 | amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains); | |
1405 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | |
1406 | if (unlikely(r)) | |
1407 | return r; | |
c855e250 CK |
1408 | } |
1409 | ||
1410 | return 0; | |
1411 | } |