/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
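
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): on a 64-bit
 * build DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000
 * pages, i.e. fake offsets begin at the 4 GiB boundary in byte terms,
 * above any pgoff a 32-bit-sized file could legitimately produce, and the
 * manager is handed 16 times that range (roughly 64 GiB worth of page
 * offsets) to allocate from.
 */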

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
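
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * embedding struct drm_gem_object in its own buffer type would typically
 * call this from its allocation path. The "foo_" names are hypothetical.
 *
 *      struct foo_bo {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *      {
 *              struct foo_bo *bo;
 *              int ret;
 *
 *              bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *              if (ret) {
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *              return bo;
 *      }
 */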

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
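
/*
 * Example (illustrative sketch): the typical user of the private variant is
 * a dma-buf import path, where the backing storage belongs to the exporter
 * and no shmem file must be created:
 *
 *      struct drm_gem_object *obj;
 *
 *      obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *      if (!obj)
 *              return ERR_PTR(-ENOMEM);
 *
 *      drm_gem_private_object_init(dev, obj, dma_buf->size);
 *      ... attach and map the dma-buf, stash the sg table ...
 */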

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code. It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later. Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
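
/*
 * Example (illustrative sketch, "foo_" hypothetical): a minimal
 * &drm_driver.dumb_create implementation pairs drm_gem_object_init() with
 * drm_gem_handle_create(), then drops its local reference; the handle keeps
 * the object alive from here on.
 *
 *      static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              struct drm_gem_object *obj;
 *              int ret;
 *
 *              args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *              args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *              obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *              if (!obj)
 *                      return -ENOMEM;
 *
 *              ret = drm_gem_object_init(dev, obj, args->size);
 *              if (ret) {
 *                      kfree(obj);
 *                      return ret;
 *              }
 *
 *              ret = drm_gem_handle_create(file, obj, &args->handle);
 *              drm_gem_object_put_unlocked(obj);
 *              return ret;
 *      }
 */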

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given GEM
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                put_page(pages[i]);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
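
/*
 * Example (illustrative): drm_gem_get_pages() and drm_gem_put_pages() are
 * used as a pair around whatever device mapping the driver builds; a pin
 * helper might look roughly like this, error handling elided.
 *
 *      struct page **pages;
 *
 *      pages = drm_gem_get_pages(obj);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *
 *      ... build an sg table from the pages and map it for DMA ...
 *
 *      drm_gem_put_pages(obj, pages, true, true);
 */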

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                put_page(pages[i]);
        }

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file-private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj)
                drm_gem_object_get(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
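
/*
 * Example (illustrative): the canonical lookup pattern in a driver ioctl,
 * where foo_do_something() stands in for driver-specific work, pairs every
 * successful lookup with a drm_gem_object_put_unlocked():
 *
 *      obj = drm_gem_object_lookup(file_priv, args->handle);
 *      if (!obj)
 *              return -ENOENT;
 *
 *      ret = foo_do_something(obj);
 *
 *      drm_gem_object_put_unlocked(obj);
 *      return ret;
 */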

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);
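
/*
 * Example (illustrative sketch, "foo_" hypothetical): a driver that needs
 * no extra locking frees its buffer through
 * &drm_driver.gem_free_object_unlocked, calling drm_gem_object_release()
 * before freeing its own wrapper:
 *
 *      static void foo_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *              drm_gem_object_release(obj);
 *              kfree(bo);
 *      }
 */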

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object_unlocked) {
                kref_put(&obj->refcount, drm_gem_object_free);
        } else {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
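
/*
 * Example (illustrative sketch, "foo_" hypothetical): a driver's
 * &drm_driver.gem_prime_mmap hook can simply forward to drm_gem_mmap_obj(),
 * since the dma-buf fd itself already conveys the access rights:
 *
 *      static int foo_gem_prime_mmap(struct drm_gem_object *obj,
 *                                    struct vm_area_struct *vma)
 *      {
 *              return drm_gem_mmap_obj(obj, obj->size, vma);
 *      }
 */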

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
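
/*
 * Example (illustrative sketch, "foo_" names hypothetical): wiring this up
 * in a driver means routing the file's mmap to drm_gem_mmap() and providing
 * gem_vm_ops that use the open/close helpers above plus a driver fault
 * handler:
 *
 *      static const struct vm_operations_struct foo_gem_vm_ops = {
 *              .fault = foo_gem_fault,
 *              .open = drm_gem_vm_open,
 *              .close = drm_gem_vm_close,
 *      };
 *
 *      static const struct file_operations foo_fops = {
 *              .owner = THIS_MODULE,
 *              .open = drm_open,
 *              .release = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .mmap = drm_gem_mmap,
 *              .poll = drm_poll,
 *              .read = drm_read,
 *      };
 *
 * with &drm_driver.gem_vm_ops pointing at foo_gem_vm_ops.
 */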