// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */
6 #include <linux/dma-buf.h>
7 #include <linux/export.h>
8 #include <linux/mutex.h>
9 #include <linux/shmem_fs.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
13 #include <drm/drm_device.h>
14 #include <drm/drm_drv.h>
15 #include <drm/drm_gem_shmem_helper.h>
16 #include <drm/drm_prime.h>
17 #include <drm/drm_print.h>
/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
26 static const struct drm_gem_object_funcs drm_gem_shmem_funcs
= {
27 .free
= drm_gem_shmem_free_object
,
28 .print_info
= drm_gem_shmem_print_info
,
29 .pin
= drm_gem_shmem_pin
,
30 .unpin
= drm_gem_shmem_unpin
,
31 .get_sg_table
= drm_gem_shmem_get_sg_table
,
32 .vmap
= drm_gem_shmem_vmap
,
33 .vunmap
= drm_gem_shmem_vunmap
,
34 .vm_ops
= &drm_gem_shmem_vm_ops
,
38 * drm_gem_shmem_create - Allocate an object with the given size
40 * @size: Size of the object to allocate
42 * This function creates a shmem GEM object.
45 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
46 * error code on failure.
48 struct drm_gem_shmem_object
*drm_gem_shmem_create(struct drm_device
*dev
, size_t size
)
50 struct drm_gem_shmem_object
*shmem
;
51 struct drm_gem_object
*obj
;
54 size
= PAGE_ALIGN(size
);
56 if (dev
->driver
->gem_create_object
)
57 obj
= dev
->driver
->gem_create_object(dev
, size
);
59 obj
= kzalloc(sizeof(*shmem
), GFP_KERNEL
);
61 return ERR_PTR(-ENOMEM
);
64 obj
->funcs
= &drm_gem_shmem_funcs
;
66 ret
= drm_gem_object_init(dev
, obj
, size
);
70 ret
= drm_gem_create_mmap_offset(obj
);
74 shmem
= to_drm_gem_shmem_obj(obj
);
75 mutex_init(&shmem
->pages_lock
);
76 mutex_init(&shmem
->vmap_lock
);
79 * Our buffers are kept pinned, so allocating them
80 * from the MOVABLE zone is a really bad idea, and
81 * conflicts with CMA. See comments above new_inode()
82 * why this is required _and_ expected if you're
83 * going to pin these pages.
85 mapping_set_gfp_mask(obj
->filp
->f_mapping
, GFP_HIGHUSER
|
86 __GFP_RETRY_MAYFAIL
| __GFP_NOWARN
);
91 drm_gem_object_release(obj
);
97 EXPORT_SYMBOL_GPL(drm_gem_shmem_create
);
100 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
101 * @obj: GEM object to free
103 * This function cleans up the GEM object state and frees the memory used to
104 * store the object itself.
106 void drm_gem_shmem_free_object(struct drm_gem_object
*obj
)
108 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
110 WARN_ON(shmem
->vmap_use_count
);
112 if (obj
->import_attach
) {
113 shmem
->pages_use_count
--;
114 drm_prime_gem_destroy(obj
, shmem
->sgt
);
115 kvfree(shmem
->pages
);
118 dma_unmap_sg(obj
->dev
->dev
, shmem
->sgt
->sgl
,
119 shmem
->sgt
->nents
, DMA_BIDIRECTIONAL
);
121 drm_gem_shmem_put_pages(shmem
);
122 sg_free_table(shmem
->sgt
);
127 WARN_ON(shmem
->pages_use_count
);
129 drm_gem_object_release(obj
);
130 mutex_destroy(&shmem
->pages_lock
);
131 mutex_destroy(&shmem
->vmap_lock
);
134 EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object
);
136 static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object
*shmem
)
138 struct drm_gem_object
*obj
= &shmem
->base
;
141 if (shmem
->pages_use_count
++ > 0)
144 pages
= drm_gem_get_pages(obj
);
146 DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages
));
147 shmem
->pages_use_count
= 0;
148 return PTR_ERR(pages
);
151 shmem
->pages
= pages
;
157 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
158 * @shmem: shmem GEM object
160 * This function makes sure that backing pages exists for the shmem GEM object
161 * and increases the use count.
164 * 0 on success or a negative error code on failure.
166 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object
*shmem
)
170 ret
= mutex_lock_interruptible(&shmem
->pages_lock
);
173 ret
= drm_gem_shmem_get_pages_locked(shmem
);
174 mutex_unlock(&shmem
->pages_lock
);
178 EXPORT_SYMBOL(drm_gem_shmem_get_pages
);
180 static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object
*shmem
)
182 struct drm_gem_object
*obj
= &shmem
->base
;
184 if (WARN_ON_ONCE(!shmem
->pages_use_count
))
187 if (--shmem
->pages_use_count
> 0)
190 drm_gem_put_pages(obj
, shmem
->pages
,
191 shmem
->pages_mark_dirty_on_put
,
192 shmem
->pages_mark_accessed_on_put
);
197 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
198 * @shmem: shmem GEM object
200 * This function decreases the use count and puts the backing pages when use drops to zero.
202 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object
*shmem
)
204 mutex_lock(&shmem
->pages_lock
);
205 drm_gem_shmem_put_pages_locked(shmem
);
206 mutex_unlock(&shmem
->pages_lock
);
208 EXPORT_SYMBOL(drm_gem_shmem_put_pages
);
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	/* The page use count doubles as the pin count. */
	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
243 static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object
*shmem
)
245 struct drm_gem_object
*obj
= &shmem
->base
;
248 if (shmem
->vmap_use_count
++ > 0)
251 ret
= drm_gem_shmem_get_pages(shmem
);
255 if (obj
->import_attach
)
256 shmem
->vaddr
= dma_buf_vmap(obj
->import_attach
->dmabuf
);
258 shmem
->vaddr
= vmap(shmem
->pages
, obj
->size
>> PAGE_SHIFT
,
259 VM_MAP
, pgprot_writecombine(PAGE_KERNEL
));
262 DRM_DEBUG_KMS("Failed to vmap pages\n");
270 drm_gem_shmem_put_pages(shmem
);
272 shmem
->vmap_use_count
= 0;
278 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
279 * @shmem: shmem GEM object
281 * This function makes sure that a virtual address exists for the buffer backing
282 * the shmem GEM object.
285 * 0 on success or a negative error code on failure.
287 void *drm_gem_shmem_vmap(struct drm_gem_object
*obj
)
289 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
293 ret
= mutex_lock_interruptible(&shmem
->vmap_lock
);
296 vaddr
= drm_gem_shmem_vmap_locked(shmem
);
297 mutex_unlock(&shmem
->vmap_lock
);
301 EXPORT_SYMBOL(drm_gem_shmem_vmap
);
303 static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object
*shmem
)
305 struct drm_gem_object
*obj
= &shmem
->base
;
307 if (WARN_ON_ONCE(!shmem
->vmap_use_count
))
310 if (--shmem
->vmap_use_count
> 0)
313 if (obj
->import_attach
)
314 dma_buf_vunmap(obj
->import_attach
->dmabuf
, shmem
->vaddr
);
316 vunmap(shmem
->vaddr
);
319 drm_gem_shmem_put_pages(shmem
);
323 * drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object
324 * @shmem: shmem GEM object
326 * This function removes the virtual address when use count drops to zero.
328 void drm_gem_shmem_vunmap(struct drm_gem_object
*obj
, void *vaddr
)
330 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
332 mutex_lock(&shmem
->vmap_lock
);
333 drm_gem_shmem_vunmap_locked(shmem
);
334 mutex_unlock(&shmem
->vmap_lock
);
336 EXPORT_SYMBOL(drm_gem_shmem_vunmap
);
338 struct drm_gem_shmem_object
*
339 drm_gem_shmem_create_with_handle(struct drm_file
*file_priv
,
340 struct drm_device
*dev
, size_t size
,
343 struct drm_gem_shmem_object
*shmem
;
346 shmem
= drm_gem_shmem_create(dev
, size
);
351 * Allocate an id of idr table where the obj is registered
352 * and handle has the id what user can see.
354 ret
= drm_gem_handle_create(file_priv
, &shmem
->base
, handle
);
355 /* drop reference from allocate - handle holds it now. */
356 drm_gem_object_put_unlocked(&shmem
->base
);
362 EXPORT_SYMBOL(drm_gem_shmem_create_with_handle
);
365 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
366 * @file: DRM file structure to create the dumb buffer for
370 * This function computes the pitch of the dumb buffer and rounds it up to an
371 * integer number of bytes per pixel. Drivers for hardware that doesn't have
372 * any additional restrictions on the pitch can directly use this function as
373 * their &drm_driver.dumb_create callback.
375 * For hardware with additional restrictions, drivers can adjust the fields
376 * set up by userspace before calling into this function.
379 * 0 on success or a negative error code on failure.
381 int drm_gem_shmem_dumb_create(struct drm_file
*file
, struct drm_device
*dev
,
382 struct drm_mode_create_dumb
*args
)
384 u32 min_pitch
= DIV_ROUND_UP(args
->width
* args
->bpp
, 8);
385 struct drm_gem_shmem_object
*shmem
;
387 if (!args
->pitch
|| !args
->size
) {
388 args
->pitch
= min_pitch
;
389 args
->size
= args
->pitch
* args
->height
;
391 /* ensure sane minimum values */
392 if (args
->pitch
< min_pitch
)
393 args
->pitch
= min_pitch
;
394 if (args
->size
< args
->pitch
* args
->height
)
395 args
->size
= args
->pitch
* args
->height
;
398 shmem
= drm_gem_shmem_create_with_handle(file
, dev
, args
->size
, &args
->handle
);
400 return PTR_ERR_OR_ZERO(shmem
);
402 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create
);
404 static vm_fault_t
drm_gem_shmem_fault(struct vm_fault
*vmf
)
406 struct vm_area_struct
*vma
= vmf
->vma
;
407 struct drm_gem_object
*obj
= vma
->vm_private_data
;
408 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
409 loff_t num_pages
= obj
->size
>> PAGE_SHIFT
;
412 if (vmf
->pgoff
>= num_pages
|| WARN_ON_ONCE(!shmem
->pages
))
413 return VM_FAULT_SIGBUS
;
415 page
= shmem
->pages
[vmf
->pgoff
];
417 return vmf_insert_page(vma
, vmf
->address
, page
);
420 static void drm_gem_shmem_vm_open(struct vm_area_struct
*vma
)
422 struct drm_gem_object
*obj
= vma
->vm_private_data
;
423 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
426 ret
= drm_gem_shmem_get_pages(shmem
);
427 WARN_ON_ONCE(ret
!= 0);
429 drm_gem_vm_open(vma
);
432 static void drm_gem_shmem_vm_close(struct vm_area_struct
*vma
)
434 struct drm_gem_object
*obj
= vma
->vm_private_data
;
435 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
437 drm_gem_shmem_put_pages(shmem
);
438 drm_gem_vm_close(vma
);
441 const struct vm_operations_struct drm_gem_shmem_vm_ops
= {
442 .fault
= drm_gem_shmem_fault
,
443 .open
= drm_gem_shmem_vm_open
,
444 .close
= drm_gem_shmem_vm_close
,
446 EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops
);
449 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
451 * @vma: VMA for the area to be mapped
453 * This function implements an augmented version of the GEM DRM file mmap
454 * operation for shmem objects. Drivers which employ the shmem helpers should
455 * use this function as their &file_operations.mmap handler in the DRM device file's
456 * file_operations structure.
458 * Instead of directly referencing this function, drivers should use the
459 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
462 * 0 on success or a negative error code on failure.
464 int drm_gem_shmem_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
466 struct drm_gem_shmem_object
*shmem
;
469 ret
= drm_gem_mmap(filp
, vma
);
473 shmem
= to_drm_gem_shmem_obj(vma
->vm_private_data
);
475 ret
= drm_gem_shmem_get_pages(shmem
);
477 drm_gem_vm_close(vma
);
481 /* VM_PFNMAP was set by drm_gem_mmap() */
482 vma
->vm_flags
&= ~VM_PFNMAP
;
483 vma
->vm_flags
|= VM_MIXEDMAP
;
485 /* Remove the fake offset */
486 vma
->vm_pgoff
-= drm_vma_node_start(&shmem
->base
.vma_node
);
490 EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap
);
493 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
495 * @indent: Tab indentation level
498 void drm_gem_shmem_print_info(struct drm_printer
*p
, unsigned int indent
,
499 const struct drm_gem_object
*obj
)
501 const struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
503 drm_printf_indent(p
, indent
, "pages_use_count=%u\n", shmem
->pages_use_count
);
504 drm_printf_indent(p
, indent
, "vmap_use_count=%u\n", shmem
->vmap_use_count
);
505 drm_printf_indent(p
, indent
, "vaddr=%p\n", shmem
->vaddr
);
507 EXPORT_SYMBOL(drm_gem_shmem_print_info
);
510 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
511 * pages for a shmem GEM object
514 * This function exports a scatter/gather table suitable for PRIME usage by
515 * calling the standard DMA mapping API.
518 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
520 struct sg_table
*drm_gem_shmem_get_sg_table(struct drm_gem_object
*obj
)
522 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
524 return drm_prime_pages_to_sg(shmem
->pages
, obj
->size
>> PAGE_SHIFT
);
526 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table
);
529 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
530 * scatter/gather table for a shmem GEM object.
533 * This function returns a scatter/gather table suitable for driver usage. If
534 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
538 * A pointer to the scatter/gather table of pinned pages or errno on failure.
540 struct sg_table
*drm_gem_shmem_get_pages_sgt(struct drm_gem_object
*obj
)
543 struct drm_gem_shmem_object
*shmem
= to_drm_gem_shmem_obj(obj
);
544 struct sg_table
*sgt
;
549 WARN_ON(obj
->import_attach
);
551 ret
= drm_gem_shmem_get_pages(shmem
);
555 sgt
= drm_gem_shmem_get_sg_table(&shmem
->base
);
560 /* Map the pages for use by the h/w. */
561 dma_map_sg(obj
->dev
->dev
, sgt
->sgl
, sgt
->nents
, DMA_BIDIRECTIONAL
);
568 drm_gem_shmem_put_pages(shmem
);
571 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt
);
574 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
575 * another driver's scatter/gather table of pinned pages
576 * @dev: Device to import into
577 * @attach: DMA-BUF attachment
578 * @sgt: Scatter/gather table of pinned pages
580 * This function imports a scatter/gather table exported via DMA-BUF by
581 * another driver. Drivers that use the shmem helpers should set this as their
582 * &drm_driver.gem_prime_import_sg_table callback.
585 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
586 * error code on failure.
588 struct drm_gem_object
*
589 drm_gem_shmem_prime_import_sg_table(struct drm_device
*dev
,
590 struct dma_buf_attachment
*attach
,
591 struct sg_table
*sgt
)
593 size_t size
= PAGE_ALIGN(attach
->dmabuf
->size
);
594 size_t npages
= size
>> PAGE_SHIFT
;
595 struct drm_gem_shmem_object
*shmem
;
598 shmem
= drm_gem_shmem_create(dev
, size
);
600 return ERR_CAST(shmem
);
602 shmem
->pages
= kvmalloc_array(npages
, sizeof(struct page
*), GFP_KERNEL
);
608 ret
= drm_prime_sg_to_page_addr_arrays(sgt
, shmem
->pages
, NULL
, npages
);
613 shmem
->pages_use_count
= 1; /* Permanently pinned from our point of view */
615 DRM_DEBUG_PRIME("size = %zu\n", size
);
620 kvfree(shmem
->pages
);
622 drm_gem_object_put_unlocked(&shmem
->base
);
626 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table
);