/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

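/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): on a 64-bit
 * kernel the fake offsets start at page 0x100000, i.e. byte offset 4 GiB,
 * just above anything a 32-bit byte offset could express, and span roughly
 * 16M pages (~64 GiB) of offset space for the vma offset manager to hand out.
 */
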
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

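/*
 * Example (hypothetical driver code): a driver that embeds struct
 * drm_gem_object in its own buffer type would typically wrap
 * drm_gem_object_init() like this. "struct foo_bo" and foo_bo_create() are
 * made-up names used purely for illustration here and in the sketches
 * further down.
 */
struct foo_bo {
        struct drm_gem_object base;
        struct page **pages;    /* filled by the pinning sketch below */
};

static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
        struct foo_bo *bo;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        /* drm_gem_object_init() expects a page-aligned size. */
        ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
        if (ret) {
                kfree(bo);
                return ERR_PTR(ret);
        }

        return bo;
}
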
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

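/*
 * Example (hypothetical driver code): drm_gem_private_object_init() is the
 * variant to use when the driver supplies its own backing storage, e.g. for
 * a buffer imported via dma-buf. This reuses the made-up struct foo_bo from
 * the sketch above; the actual import plumbing is omitted.
 */
static struct foo_bo *foo_bo_create_private(struct drm_device *dev,
                                            size_t size)
{
        struct foo_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        /* No shmem file is set up; the caller provides the memory. */
        drm_gem_private_object_init(dev, &bo->base, PAGE_ALIGN(size));

        return bo;
}
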
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code. It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later. Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

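/*
 * Example (hypothetical driver code): typical pattern in a driver's
 * "create buffer" ioctl. drm_gem_handle_create() takes its own handle
 * reference, so the creator drops its local reference afterwards and the
 * handle keeps the object alive. foo_bo_create() is the made-up helper
 * sketched further up.
 */
static int foo_bo_create_with_handle(struct drm_file *file_priv,
                                     struct drm_device *dev,
                                     size_t size, u32 *handlep)
{
        struct foo_bo *bo;
        int ret;

        bo = foo_bo_create(dev, size);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file_priv, &bo->base, handlep);
        /* Drop the allocation reference; the handle now owns the object. */
        drm_gem_object_put_unlocked(&bo->base);

        return ret;
}
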
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                put_page(pages[i]);

        kvfree(pages);
        return ERR_PTR(PTR_ERR(p));
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                put_page(pages[i]);
        }

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

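/*
 * Example (hypothetical driver code): drm_gem_get_pages() and
 * drm_gem_put_pages() are typically used as a pin/unpin pair around device
 * access to a shmem-backed object. "struct foo_bo" and its "pages" field are
 * the made-up names from the sketches above.
 */
static int foo_bo_pin_pages(struct foo_bo *bo)
{
        struct page **pages;

        pages = drm_gem_get_pages(&bo->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        bo->pages = pages;      /* kept around for the matching unpin */
        return 0;
}

static void foo_bo_unpin_pages(struct foo_bo *bo)
{
        /* Mark pages dirty/accessed since the device may have written them. */
        drm_gem_put_pages(&bo->base, bo->pages, true, true);
        bo->pages = NULL;
}
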
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj)
                drm_gem_object_get(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

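/*
 * Example (hypothetical driver code): the usual lookup/put pattern in a
 * driver ioctl. drm_gem_object_lookup() returns a new reference that must be
 * dropped once the ioctl is done with the object. The args struct and ioctl
 * name are made up for illustration only.
 */
struct foo_query_args {
        __u32 handle;
        __u32 pad;
        __u64 size;
};

static int foo_query_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct foo_query_args *args = data;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        args->size = obj->size;         /* e.g. report the object size */

        drm_gem_object_put_unlocked(obj);
        return 0;
}
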
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object_unlocked) {
                kref_put(&obj->refcount, drm_gem_object_free);
        } else {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

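/*
 * Example (hypothetical driver code): drm_gem_mmap_obj() is the building
 * block for mapping an object that was not looked up via its fake offset,
 * e.g. from a driver's dma-buf/PRIME mmap path. The helper name below is
 * illustrative only; check &drm_driver for the exact hook your kernel
 * version expects and any vm_pgoff adjustment it requires.
 */
static int foo_gem_prime_mmap(struct drm_gem_object *obj,
                              struct vm_area_struct *vma)
{
        /* The exporter has already vetted access, so map the whole object. */
        return drm_gem_mmap_obj(obj, obj->size, vma);
}
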
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);