/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

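/*
 * GEM object constructor hook.  All real setup happens in
 * nouveau_gem_new() below, so this is a no-op.
 */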
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

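/*
 * GEM object destructor: detaches the GEM object from its nouveau_bo,
 * releases any outstanding CPU-write grab and pin reference, then drops
 * the reference on the backing TTM buffer object.
 */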
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        bo = &nvbo->bo;
        nvbo->gem = NULL;

        if (unlikely(nvbo->cpu_filp))
                ttm_bo_synccpu_write_release(bo);

        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        ttm_bo_unref(&bo);
}

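/*
 * Allocates a nouveau_bo through TTM and wraps it in a GEM object so
 * userspace can name it with a handle.  On failure the buffer reference
 * is dropped before returning.
 */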
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t flags, uint32_t tile_mode,
                uint32_t tile_flags, bool no_vm, bool mappable,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        int ret;

        ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
                             tile_flags, no_vm, mappable, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

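/*
 * Fills in a drm_nouveau_gem_info reply from the buffer's current TTM
 * state: placement domain, size, GPU offset, mmap handle and tiling.
 */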
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->offset = nvbo->bo.offset;
        rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

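/*
 * Whitelist of tile_flags values accepted from userspace; anything else
 * is rejected before buffer creation.
 */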
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
        switch (tile_flags) {
        case 0x0000:
        case 0x1800:
        case 0x2800:
        case 0x4800:
        case 0x7000:
        case 0x7400:
        case 0x7a00:
        case 0xe000:
                break;
        default:
                NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
                return false;
        }

        return true;
}

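/*
 * DRM_NOUVEAU_GEM_NEW ioctl: translates the requested domains into TTM
 * placement flags, validates the tiling request, allocates the buffer
 * and returns a handle (plus the filled-in info block) to userspace.
 */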
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        struct nouveau_channel *chan = NULL;
        uint32_t flags = 0;
        int ret = 0;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
                dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

        if (req->channel_hint) {
                NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
                                                     file_priv, chan);
        }

        if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
                return -EINVAL;

        ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
                              req->info.tile_mode, req->info.tile_flags, false,
                              (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
                              &nvbo);
        if (ret)
                return ret;

        ret = nouveau_gem_info(nvbo->gem, &req->info);
        if (ret)
                goto out;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
        drm_gem_object_handle_unreference_unlocked(nvbo->gem);

        if (ret)
                drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}

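/*
 * Picks a TTM placement for a buffer from the read/write/valid domain
 * masks supplied with a pushbuf.  Writes force a single domain; reads
 * prefer wherever the buffer currently resides to avoid a migration.
 */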
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint64_t flags;

        if (!valid_domains || (!read_domains && !write_domains))
                return -EINVAL;

        if (write_domains) {
                if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
                        flags = TTM_PL_FLAG_VRAM;
                else
                if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
                    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
                        flags = TTM_PL_FLAG_TT;
                else
                        return -EINVAL;
        } else {
                if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    bo->mem.mem_type == TTM_PL_VRAM)
                        flags = TTM_PL_FLAG_VRAM;
                else
                if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
                    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
                    bo->mem.mem_type == TTM_PL_TT)
                        flags = TTM_PL_FLAG_TT;
                else
                if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
                        flags = TTM_PL_FLAG_VRAM;
                else
                        flags = TTM_PL_FLAG_TT;
        }

        nouveau_bo_placement_set(nvbo, flags);
        return 0;
}

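/*
 * Per-submission validation state.  Buffers are binned by which domains
 * they are allowed to live in, so each list can be validated with a
 * matching placement.
 */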
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

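/*
 * Unwinds one validation list: optionally attaches the submission fence
 * as each buffer's new sync object, drops any kmap set up for reloc
 * patching, then unreserves and unreferences the buffer.
 */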
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);
                if (likely(fence)) {
                        struct nouveau_fence *prev_fence;

                        spin_lock(&nvbo->bo.lock);
                        prev_fence = nvbo->bo.sync_obj;
                        nvbo->bo.sync_obj = nouveau_fence_ref(fence);
                        spin_unlock(&nvbo->bo.lock);
                        nouveau_fence_unref((void *)&prev_fence);
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

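/*
 * Reserves every buffer named by the submission.  A global validation
 * sequence number lets ttm_bo_reserve() detect deadlock against a
 * concurrent submission (-EAGAIN), in which case all reservations are
 * dropped and the whole list is retried from scratch.
 */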
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(dev, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(dev, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (ret == -EAGAIN)
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
                        drm_gem_object_unreference(gem);
                        if (ret) {
                                NV_ERROR(dev, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
                        validate_fini(op, NULL);

                        if (nvbo->cpu_filp == file_priv) {
                                NV_ERROR(dev, "bo %p mapped by process trying "
                                              "to validate it!\n", nvbo);
                                return -EINVAL;
                        }

                        ret = ttm_bo_wait_cpu(&nvbo->bo, false);
                        if (ret) {
                                NV_ERROR(dev, "fail wait_cpu\n");
                                return ret;
                        }
                        goto retry;
                }
        }

        return 0;
}

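/*
 * Validates each reserved buffer into its chosen placement, first
 * waiting out fences that belong to other channels.  If a buffer moved,
 * its new presumed offset/domain is copied back to userspace and
 * counted, so the caller knows relocations must be applied.
 */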
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                        (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
                struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

                if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
                        spin_lock(&nvbo->bo.lock);
                        ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                        spin_unlock(&nvbo->bo.lock);
                        if (unlikely(ret)) {
                                NV_ERROR(dev, "fail wait other chan\n");
                                return ret;
                        }
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail set_domain\n");
                        return ret;
                }

                nvbo->channel = chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail ttm_validate\n");
                        return ret;
                }

                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                        continue;

                if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;
                b->presumed.valid = 0;
                relocs++;

                if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                     &b->presumed, sizeof(b->presumed)))
                        return -EFAULT;
        }

        return relocs;
}

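/*
 * Top-level validation: initialises the three placement lists, reserves
 * the buffers, then validates the VRAM-only, GART-only and either-way
 * lists in turn, accumulating the relocation count for the caller.
 */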
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct drm_device *dev = chan->dev;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                NV_ERROR(dev, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                NV_ERROR(dev, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

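/*
 * Copies a userspace array into a freshly kmalloc'd buffer; the caller
 * kfree()s the result.  The unchecked nmemb * size product is only safe
 * because all callers bound the element counts against the
 * NOUVEAU_GEM_MAX_* limits first.
 */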
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

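/*
 * Rewrites relocation entries inside the pushbuf for every buffer whose
 * presumed offset turned out to be stale: computes the low or high half
 * of the new offset (optionally OR'ing in domain-specific bits), waits
 * for the containing buffer to idle, then patches the word in place
 * through a kernel mapping.
 */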
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(dev, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(dev, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.lock);
                if (ret) {
                        NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

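/*
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl: copies in the push/buffer/reloc arrays,
 * validates and relocates the buffer list, then submits the push
 * segments -- through the indirect-buffer ring where the channel
 * supports it, as two-word ring entries on NV_20 and newer, or by
 * patching jump commands (0x20000000) into the buffers on older chips.
 * A new fence is emitted so validate_fini() can mark every buffer busy.
 */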
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

        req->vram_available = dev_priv->fb_aper_free;
        req->gart_available = dev_priv->gart_info.aper_free;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return -EINVAL;
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return -EINVAL;
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return -EINVAL;
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return PTR_ERR(push);

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                return PTR_ERR(bo);
        }

        mutex_lock(&dev->struct_mutex);

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                NV_ERROR(dev, "validate: %d\n", ret);
                goto out;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(dev, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
                if (ret) {
                        NV_INFO(dev, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (dev_priv->card_type >= NV_20) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(dev, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
                        uint32_t cmd;

                        cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 0x20000000);
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret) {
                NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref((void **)&fence);
        mutex_unlock(&dev->struct_mutex);
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (dev_priv->card_type >= NV_20) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return ret;
}

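/*
 * Translates NOUVEAU_GEM_DOMAIN_* bits into the corresponding TTM
 * placement flags; the nvbo argument is unused.
 */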
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

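/*
 * DRM_NOUVEAU_GEM_CPU_PREP ioctl: waits for the GPU to finish with a
 * buffer before CPU access.  NOBLOCK only polls the fence; otherwise a
 * synccpu write grab is taken and remembered in cpu_filp so CPU_FINI
 * (or object teardown) can release it.
 */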
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return ret;
        nvbo = nouveau_gem_object(gem);

        if (nvbo->cpu_filp) {
                if (nvbo->cpu_filp == file_priv)
                        goto out;

                ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
                if (ret)
                        goto out;
        }

        if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
                spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
                spin_unlock(&nvbo->bo.lock);
        } else {
                ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
                if (ret == 0)
                        nvbo->cpu_filp = file_priv;
        }

out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

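/*
 * DRM_NOUVEAU_GEM_CPU_FINI ioctl: releases the synccpu write grab taken
 * by CPU_PREP, but only for the file that owns it.
 */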
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        int ret = -EINVAL;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return ret;
        nvbo = nouveau_gem_object(gem);

        if (nvbo->cpu_filp != file_priv)
                goto out;
        nvbo->cpu_filp = NULL;

        ttm_bo_synccpu_write_release(&nvbo->bo);
        ret = 0;

out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

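/*
 * DRM_NOUVEAU_GEM_INFO ioctl: looks up a handle and reports the
 * buffer's current placement, size, offset and tiling state.
 */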
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -EINVAL;

        ret = nouveau_gem_info(gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}