/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export side, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away, in the driver's .release function.
 *
 * On the import side, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a reference to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl:
 * it calls dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is later destroyed,
 * we remove the attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
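
/*
 * For reference, a minimal sketch (not part of this file) of the userspace
 * side of this flow, using the raw PRIME ioctls. Real code would typically
 * go through libdrm's drmPrimeHandleToFD()/drmPrimeFDToHandle() wrappers
 * and check every return value; error handling is elided here:
 *
 *      struct drm_prime_handle args = {
 *              .handle = gem_handle,   // handle on the exporting device
 *              .flags = DRM_CLOEXEC,
 *      };
 *
 *      ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *      // args.fd is now a dma-buf fd, which can be passed to another
 *      // process (e.g. over a unix socket)
 *
 *      args.fd = dmabuf_fd;            // on the importing device
 *      ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *      // args.handle is now a GEM handle on the importing device
 */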

/* Per-file tracking of the association between a dma-buf and a GEM handle */
struct drm_prime_member {
        struct list_head entry;
        struct dma_buf *dma_buf;
        uint32_t handle;
};

/* Cached sg-table mapping state for an attachment to a GEM-backed dma-buf */
struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->handle == handle)
                        return member->dma_buf;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct drm_prime_member *member;

        list_for_each_entry(member, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                }
        }
        return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct drm_prime_member *member, *safe;

        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;

        /* drop the reference the export fd holds */
        drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
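
/*
 * A driver that supplies its own &dma_buf_ops (for example to add cache
 * maintenance hooks) must still plug drm_gem_dmabuf_release() in as the
 * release callback, since it drops the GEM object reference taken at export
 * time. A minimal sketch; everything except the helper itself is
 * hypothetical:
 *
 *      static const struct dma_buf_ops foo_dmabuf_ops = {
 *              .map_dma_buf    = foo_gem_map_dma_buf,
 *              .unmap_dma_buf  = foo_gem_unmap_dma_buf,
 *              .release        = drm_gem_dmabuf_release,
 *              ...
 *      };
 */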

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

/*
 * The generic helpers do not implement page-level kernel mappings: the kmap
 * stubs below just return NULL, so drivers that need kmap support must
 * provide their own &dma_buf_ops.
 */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
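
/*
 * A minimal sketch of wiring the PRIME helpers into a driver; the foo_*
 * functions stand in for hypothetical driver implementations of the
 * callbacks listed above:
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features        = DRIVER_GEM | DRIVER_PRIME,
 *              .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export       = drm_gem_prime_export,
 *              .gem_prime_import       = drm_gem_prime_import,
 *              .gem_prime_pin          = foo_gem_prime_pin,
 *              .gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
 *              .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *              .gem_prime_vmap         = foo_gem_prime_vmap,
 *              .gem_prime_vunmap       = foo_gem_prime_vunmap,
 *              .gem_prime_mmap         = foo_gem_prime_mmap,
 *              ...
 *      };
 */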

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export callback for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &drm_gem_prime_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        if (dev->driver->gem_prime_res_obj)
                exp_info.resv = dev->driver->gem_prime_res_obj(obj);

        return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);
        /* Grab a new ref since the caller's ref is now used by the dma-buf */
        drm_gem_object_reference(obj);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }

out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't fail to remove this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;

out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_unreference_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import callback for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM object
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                                          dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dma_buf, *handle);
        if (ret)
                goto fail;

        mutex_unlock(&file_priv->prime.lock);

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* if the driver attached, we are relying on the free-object path
         * to detach, which seems fine
         */
        drm_gem_handle_delete(file_priv, *handle);
out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;
        uint32_t flags;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~DRM_CLOEXEC)
                return -EINVAL;

        /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
        flags = args->flags & DRM_CLOEXEC;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                                               args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                                               args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
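
/*
 * A minimal sketch of how a driver's gem_prime_get_sg_table hook might use
 * this helper, assuming a hypothetical foo driver that keeps a pinned page
 * array in its GEM object:
 *
 *      static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *      {
 *              struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *              // pages were pinned earlier, e.g. by the gem_prime_pin hook
 *              return drm_prime_pages_to_sg(foo_obj->pages,
 *                                           obj->size >> PAGE_SHIFT);
 *      }
 */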

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
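
/*
 * A minimal sketch of the import direction, assuming a hypothetical foo
 * driver whose gem_prime_import_sg_table hook flattens the imported sg
 * table into its per-object page array (foo_gem_alloc is hypothetical,
 * and real code would free the object on error):
 *
 *      static struct drm_gem_object *
 *      foo_gem_prime_import_sg_table(struct drm_device *dev,
 *                                    struct dma_buf_attachment *attach,
 *                                    struct sg_table *sgt)
 *      {
 *              int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *              struct foo_gem_object *foo_obj = foo_gem_alloc(dev, npages);
 *
 *              if (drm_prime_sg_to_page_addr_arrays(sgt, foo_obj->pages,
 *                                                   NULL, npages))
 *                      return ERR_PTR(-EINVAL);
 *
 *              return &foo_obj->base;
 *      }
 */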

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
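
/*
 * A minimal sketch of calling this from a driver's free-object path,
 * assuming the hypothetical foo driver from the examples above:
 *
 *      static void foo_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *              if (obj->import_attach)
 *                      drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 *              drm_gem_object_release(obj);
 *              kfree(foo_obj);
 *      }
 */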

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        INIT_LIST_HEAD(&prime_fpriv->head);
        mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!list_empty(&prime_fpriv->head));
}