/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

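/*
 * Debug hook: when this evaluates non-zero, the pushbuf ioctl waits on
 * its fence and dumps the submitted dwords on failure.  Defined to 0 so
 * the sync path compiles away in normal builds.
 */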
#define nouveau_gem_pushbuf_sync(chan) 0

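/*
 * GEM object initialisation.  All real setup happens in nouveau_gem_new(),
 * so this callback has nothing left to do.
 */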
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

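/*
 * GEM object teardown: drop any outstanding CPU grab, force the pin
 * refcount down, and release the underlying TTM buffer object.
 */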
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->cpu_filp))
		ttm_bo_synccpu_write_release(bo);

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);
}

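/*
 * Allocate a nouveau_bo and wrap it in a GEM object.  On failure the
 * buffer reference is dropped and *pnvbo is cleared.
 */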
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

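/*
 * Fill a drm_nouveau_gem_info struct from a GEM object's backing bo:
 * placement domain, size, GPU offset, mmap handle and tiling state.
 */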
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

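/*
 * Only a small whitelist of tiling/compression flag values is accepted
 * from userspace; anything else is rejected with an error.
 */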
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
	switch (tile_flags) {
	case 0x0000:
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7000:
	case 0x7400:
	case 0x7a00:
	case 0xe000:
		break;
	default:
		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
		return false;
	}

	return true;
}

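/*
 * DRM_IOCTL_NOUVEAU_GEM_NEW: create a buffer object for userspace.
 * Translates the requested NOUVEAU_GEM_DOMAIN_* mask into TTM placement
 * flags, allocates the bo and returns a handle plus its info block.
 */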
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	/* drop our references; both unreference calls need struct_mutex */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(nvbo->gem);
	if (ret)
		drm_gem_object_unreference(nvbo->gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

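/*
 * Work out a TTM placement for a bo from the read/write domain masks a
 * pushbuf supplied.  Writes force a single domain; reads prefer wherever
 * the bo currently lives, if that domain is acceptable.
 */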
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint64_t flags;

	if (!valid_domains || (!read_domains && !write_domains))
		return -EINVAL;

	if (write_domains) {
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
			flags = TTM_PL_FLAG_TT;
		else
			return -EINVAL;
	} else {
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    bo->mem.mem_type == TTM_PL_VRAM)
			flags = TTM_PL_FLAG_VRAM;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    bo->mem.mem_type == TTM_PL_TT)
			flags = TTM_PL_FLAG_TT;
		else
		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
			flags = TTM_PL_FLAG_VRAM;
		else
			flags = TTM_PL_FLAG_TT;
	}

	nouveau_bo_placement_set(nvbo, flags);
	return 0;
}

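/*
 * Per-submission validation state: the fence that will signal completion
 * and the reserved buffers, bucketed by which domains they may live in.
 */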
struct validate_op {
	struct nouveau_fence *fence;
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

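/*
 * Unwind one validation list: attach the new fence (if any) as each bo's
 * sync object, then unreserve the bo and drop the lookup reference.
 */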
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);
		if (likely(fence)) {
			struct nouveau_fence *prev_fence;

			spin_lock(&nvbo->bo.lock);
			prev_fence = nvbo->bo.sync_obj;
			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
			spin_unlock(&nvbo->bo.lock);
			nouveau_fence_unref((void *)&prev_fence);
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference(nvbo->gem);
	}
}

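/*
 * Finish a validation pass.  On failure the fence is not attached to any
 * buffer, so waiters never see a fence for work that was never emitted.
 */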
static void
validate_fini(struct validate_op *op, bool success)
{
	struct nouveau_fence *fence = op->fence;

	if (unlikely(!success))
		op->fence = NULL;

	validate_fini_list(&op->vram_list, op->fence);
	validate_fini_list(&op->gart_list, op->fence);
	validate_fini_list(&op->both_list, op->fence);
	nouveau_fence_unref((void *)&fence);
}

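/*
 * Look up and reserve every buffer on the pushbuf's validation list,
 * sorting each one onto the vram/gart/both list it may be placed in.
 * Reservation conflicts back off, wait, and retry the whole list to
 * avoid deadlocking against other clients.
 */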
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, false);
			return -EINVAL;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference(gem);
			validate_fini(op, false);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
		if (ret) {
			validate_fini(op, false);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
			drm_gem_object_unreference(gem);
			if (ret)
				return ret;
			goto retry;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			validate_fini(op, false);
			return -EINVAL;
		}

		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
			validate_fini(op, false);

			if (nvbo->cpu_filp == file_priv) {
				NV_ERROR(dev, "bo %p mapped by process trying "
					      "to validate it!\n", nvbo);
				return -EINVAL;
			}

			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
			if (ret == -ERESTART)
				ret = -EAGAIN;
			if (ret)
				return ret;
			goto retry;
		}
	}

	return 0;
}

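/*
 * Move every bo on one list to its chosen placement, waiting for fences
 * owned by other channels first.  If a bo ends up somewhere other than
 * where userspace presumed, the updated entry is copied back so
 * relocations can be applied; returns the number of such entries.
 */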
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
			spin_lock(&nvbo->bo.lock);
			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.lock);
			if (unlikely(ret))
				return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret))
			return ret;

		nvbo->channel = chan;
		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
				      false, false);
		nvbo->channel = NULL;
		if (unlikely(ret))
			return ret;

		if (nvbo->bo.offset == b->presumed_offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed_offset = nvbo->bo.offset;
		b->presumed_ok = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
			return -EFAULT;
	}

	return relocs;
}

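/*
 * Full validation pass for a pushbuf submission: create the completion
 * fence, reserve all buffers, then validate each placement bucket.  The
 * total relocation count is returned through *apply_relocs.
 */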
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	ret = nouveau_fence_new(chan, &op->fence, false);
	if (ret)
		return ret;

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret))
		return ret;

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, false);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, false);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		validate_fini(op, false);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

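/*
 * Copy a userspace array into a freshly kmalloc'd buffer.  Callers must
 * bound nmemb (the ioctls check against NOUVEAU_GEM_MAX_*), since the
 * nmemb * size multiplication is not overflow-checked here.
 */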
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

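/*
 * Patch presumed-offset relocations into a pushbuf.  Each reloc picks
 * the low or high 32 bits of the bo's final offset (plus a constant),
 * optionally ORs in a domain-dependent value, and is written to the
 * target dword either directly or via iowrite32 for mapped VRAM.
 */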
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
				struct drm_nouveau_gem_pushbuf_bo *bo,
				int nr_relocs, uint64_t ptr_relocs,
				int nr_dwords, int first_dword,
				uint32_t *pushbuf, bool is_iomem)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_device *dev = chan->dev;
	int ret = 0, i;

	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		uint32_t data;

		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
		    r->reloc_index >= first_dword + nr_dwords) {
			NV_ERROR(dev, "Bad relocation %d\n", i);
			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index,
				 nr_dwords);
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed_ok)
			continue;

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed_offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed_offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		if (is_iomem)
			iowrite32_native(data, (void __force __iomem *)
					       &pushbuf[r->reloc_index]);
		else
			pushbuf[r->reloc_index] = data;
	}

	kfree(reloc);
	return ret;
}

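/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: copy a userspace-built command stream
 * into the kernel, validate its buffer list, apply relocations, then
 * emit the dwords directly into the channel's ring followed by a fence.
 */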
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct validate_op op;
	uint32_t *pushbuf = NULL;
	int ret = 0, do_reloc = 0, i;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	if (req->nr_dwords >= chan->dma.max ||
	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
			 chan->dma.max - 1);
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
	if (IS_ERR(pushbuf))
		return PTR_ERR(pushbuf);

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(pushbuf);
		return PTR_ERR(bo);
	}

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret)
		goto out;

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
						      bo, req->nr_relocs,
						      req->relocs,
						      req->nr_dwords, 0,
						      pushbuf, false);
		if (ret)
			goto out;
	}

	/* Emit push buffer to the hw */
	ret = RING_SPACE(chan, req->nr_dwords);
	if (ret)
		goto out;

	OUT_RINGp(chan, pushbuf, req->nr_dwords);

	ret = nouveau_fence_emit(op.fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (nouveau_gem_pushbuf_sync(chan)) {
		ret = nouveau_fence_wait(op.fence, NULL, false, false);
		if (ret) {
			for (i = 0; i < req->nr_dwords; i++)
				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
			NV_ERROR(dev, "^^ above push buffer failed\n");
		}
	}

out:
	validate_fini(&op, ret == 0);
	mutex_unlock(&dev->struct_mutex);
	kfree(pushbuf);
	kfree(bo);
	return ret;
}

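/*
 * NV20 and later channels can CALL into a user pushbuf and RETURN from
 * it; older chips must JUMP there and have the pushbuf JUMP back to the
 * main ring.
 */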
#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)

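/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF_CALL: execute a command stream in place
 * from a userspace bo instead of copying it into the ring.  The bo is
 * validated, relocations (including the return-jump suffix on pre-NV20)
 * are patched in, and a CALL or JUMP to it is emitted.
 */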
int
nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf_call *req = data;
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct drm_gem_object *gem;
	struct nouveau_bo *pbbo;
	struct validate_op op;
	int i, ret = 0, do_reloc = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);

	if (unlikely(req->handle == 0))
		goto out_next;

	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
			 NOUVEAU_GEM_MAX_BUFFERS);
		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
			 NOUVEAU_GEM_MAX_RELOCS);
		return -EINVAL;
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	mutex_lock(&dev->struct_mutex);

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		NV_ERROR(dev, "validate: %d\n", ret);
		goto out;
	}

	/* Validate DMA push buffer */
	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem) {
		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
		ret = -EINVAL;
		goto out;
	}
	pbbo = nouveau_gem_object(gem);

	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
			     chan->fence.sequence);
	if (ret) {
		NV_ERROR(dev, "resv pb: %d\n", ret);
		drm_gem_object_unreference(gem);
		goto out;
	}

	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
	if (ret) {
		NV_ERROR(dev, "validate pb: %d\n", ret);
		ttm_bo_unreserve(&pbbo->bo);
		drm_gem_object_unreference(gem);
		goto out;
	}

	list_add_tail(&pbbo->entry, &op.both_list);

	/* If the presumed return address doesn't match, we need to map the
	 * push buffer and fix it up.
	 */
	if (!PUSHBUF_CAL) {
		uint32_t retaddy;

		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
			if (ret) {
				NV_ERROR(dev, "jmp_space: %d\n", ret);
				goto out;
			}
		}

		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
		retaddy |= 0x20000000;
		if (retaddy != req->suffix0) {
			req->suffix0 = retaddy;
			do_reloc = 1;
		}
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		void *pbvirt;
		bool is_iomem;

		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
				  &pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "kmap pb: %d\n", ret);
			goto out;
		}

		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
						      req->nr_relocs,
						      req->relocs,
						      req->nr_dwords,
						      req->offset / 4,
						      pbvirt, is_iomem);

		if (!PUSHBUF_CAL) {
			nouveau_bo_wr32(pbbo,
					req->offset / 4 + req->nr_dwords - 2,
					req->suffix0);
		}

		ttm_bo_kunmap(&pbbo->kmap);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (PUSHBUF_CAL) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				 req->offset) | 2);
		OUT_RING(chan, 0);
	} else {
		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}
		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
				 req->offset) | 0x20000000);
		OUT_RING(chan, 0);

		/* Space the jumps apart with NOPs. */
		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
			OUT_RING(chan, 0);
	}

	ret = nouveau_fence_emit(op.fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, ret == 0);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);

out_next:
	if (PUSHBUF_CAL) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			       (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return ret;
}

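/*
 * Variant of PUSHBUF_CALL that additionally reports how much VRAM and
 * GART aperture space remains before handing off to the common path.
 */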
int
nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf_call *req = data;

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;

	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
}

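/*
 * Translate a NOUVEAU_GEM_DOMAIN_* mask into the equivalent TTM
 * placement flags for pinning.
 */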
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

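/*
 * DRM_IOCTL_NOUVEAU_GEM_PIN: pin a buffer at a fixed GPU address for
 * privileged userspace (e.g. a DDX driving the display itself).
 * Rejected entirely when kernel modesetting is active.
 */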
int
nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pin *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
		return -EINVAL;
	}

	if (!DRM_SUSER(DRM_CURPROC))
		return -EPERM;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;
	nvbo = nouveau_gem_object(gem);

	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
	if (ret)
		goto out;

	req->offset = nvbo->bo.offset;
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		req->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

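/*
 * DRM_IOCTL_NOUVEAU_GEM_UNPIN: drop one pin reference taken by the pin
 * ioctl above.  Like pin, only available without kernel modesetting.
 */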
int
nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_gem_pin *req = data;
	struct drm_gem_object *gem;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;

	ret = nouveau_bo_unpin(nouveau_gem_object(gem));

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

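/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: make a bo safe for CPU access.  The
 * NOBLOCK flavour just waits for the GPU to finish with the buffer;
 * otherwise a synccpu write grab is taken and recorded in cpu_filp so
 * cpu_fini (or final teardown) can release it.
 */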
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp) {
		if (nvbo->cpu_filp == file_priv)
			goto out;

		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
		if (ret == -ERESTART)
			ret = -EAGAIN;
		if (ret)
			goto out;
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == -ERESTART)
			ret = -EAGAIN;
		else
		if (ret == 0)
			nvbo->cpu_filp = file_priv;
	}

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

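/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: release the synccpu grab taken by
 * cpu_prep.  Only the file that took the grab may release it; note the
 * request reuses the cpu_prep argument struct.
 */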
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	int ret = -EINVAL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return ret;
	nvbo = nouveau_gem_object(gem);

	if (nvbo->cpu_filp != file_priv)
		goto out;
	nvbo->cpu_filp = NULL;

	ttm_bo_synccpu_write_release(&nvbo->bo);
	ret = 0;

out:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

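/*
 * DRM_IOCTL_NOUVEAU_GEM_INFO: report placement, size, offset, mmap
 * handle and tiling state for an existing GEM handle.
 */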
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -EINVAL;

	ret = nouveau_gem_info(gem, req);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}